Mirror of https://github.com/titanscouting/tra-superscript.git, synced 2024-11-10 06:54:45 +00:00
Merge pull request #12 from titanscouting/superscript-v1
Merge current changes to build-superscript
Former-commit-id: 6b4de40c49
This commit is contained in: commit f0ef4fea5d
.github/workflows/build-cli.yml (vendored): 8 changes
@@ -1,11 +1,11 @@
 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
 
-name: Superscript Unit Tests
+name: Build Superscript Linux
 
 on:
   release:
-    types: [published, edited]
+    types: [published, created, edited]
 
 jobs:
   generate:
@@ -14,4 +14,6 @@ jobs:
 
     steps:
     - name: Checkout master
-    uses: actions/checkout@master
+      uses: actions/checkout@master
+    - name: Echo test
+      run: echo "test"
.gitignore (vendored): 4 changes
@@ -9,6 +9,10 @@
 **/tra_analysis/
 **/temp/*
 
+**/*.pid
+
+**/profile.*
 
+**/errorlog.txt
 /dist/superscript.*
 /dist/superscript
@@ -1,4 +1,4 @@
-set pathtospec="../src/superscript.spec"
+set pathtospec="../src/cli/superscript.spec"
 set pathtodist="../dist/"
 set pathtowork="temp/"
 
@@ -1,4 +1,4 @@
-pathtospec="../src/superscript.spec"
+pathtospec="../src/cli/superscript.spec"
 pathtodist="../dist/"
 pathtowork="temp/"
 
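Both hunks above retarget the build scripts at the relocated spec file under src/cli/. For reference, a minimal Python sketch of the equivalent PyInstaller invocation, not part of this commit; the spec and path values come from the scripts above, and PyInstaller is assumed installed:

    # sketch: what the build scripts drive, via PyInstaller's Python entry point
    import PyInstaller.__main__

    PyInstaller.__main__.run([
        "../src/cli/superscript.spec",  # pathtospec, as updated above
        "--distpath", "../dist/",       # pathtodist
        "--workpath", "temp/",          # pathtowork
    ])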
src/cli/data.py: 188 lines (new file)
@@ -0,0 +1,188 @@
import requests
import pandas as pd

def pull_new_tba_matches(apikey, competition, cutoff):
	api_key = apikey
	x = requests.get("https://www.thebluealliance.com/api/v3/event/" + competition + "/matches/simple", headers={"X-TBA-Auth-Key": api_key}, verify=False)
	out = []
	for i in x.json():
		if i["actual_time"] != None and i["actual_time"] - cutoff >= 0 and i["comp_level"] == "qm":
			out.append({"match": i['match_number'], "blue": list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red": list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
	return out

def get_team_match_data(client, competition, team_num):
	db = client.data_scouting
	mdata = db.matchdata
	out = {}
	for i in mdata.find({"competition": competition, "team_scouted": team_num}):
		out[i['match']] = i['data']
	return pd.DataFrame(out)

def get_team_pit_data(client, competition, team_num):
	db = client.data_scouting
	mdata = db.pitdata
	out = {}
	return mdata.find_one({"competition": competition, "team_scouted": team_num})["data"]

def get_team_metrics_data(client, competition, team_num):
	db = client.data_processing
	mdata = db.team_metrics
	return mdata.find_one({"competition": competition, "team": team_num})

def get_match_data_formatted(client, competition):
	db = client.data_scouting
	mdata = db.teamlist
	x = mdata.find_one({"competition": competition})
	out = {}
	for i in x:
		try:
			out[int(i)] = unkeyify_2l(get_team_match_data(client, competition, int(i)).transpose().to_dict())
		except:
			pass
	return out

def get_metrics_data_formatted(client, competition):
	db = client.data_scouting
	mdata = db.teamlist
	x = mdata.find_one({"competition": competition})
	out = {}
	for i in x:
		try:
			out[int(i)] = get_team_metrics_data(client, competition, int(i))
		except:
			pass
	return out

def get_pit_data_formatted(client, competition):
	db = client.data_scouting
	mdata = db.teamlist
	x = mdata.find_one({"competition": competition})
	out = {}
	for i in x:
		try:
			out[int(i)] = get_team_pit_data(client, competition, int(i))
		except:
			pass
	return out

def get_pit_variable_data(client, competition):
	db = client.data_processing
	mdata = db.team_pit
	out = {}
	return mdata.find()

def get_pit_variable_formatted(client, competition):
	temp = get_pit_variable_data(client, competition)
	out = {}
	for i in temp:
		out[i["variable"]] = i["data"]
	return out

def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
	db = client[dbname]
	mdata = db[colname]
	mdata.replace_one({"competition": competition, "team": team_num}, {"_id": competition + str(team_num) + "am", "competition": competition, "team": team_num, "data": data}, True)

def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
	db = client[dbname]
	mdata = db[colname]
	mdata.replace_one({"competition": competition, "team": team_num}, {"_id": competition + str(team_num) + "am", "competition": competition, "team": team_num, "metrics": data}, True)

def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
	db = client[dbname]
	mdata = db[colname]
	mdata.replace_one({"competition": competition, "variable": variable}, {"competition": competition, "variable": variable, "data": data}, True)

def get_analysis_flags(client, flag):
	db = client.data_processing
	mdata = db.flags
	return mdata.find_one({flag: {"$exists": True}})

def set_analysis_flags(client, flag, data):
	db = client.data_processing
	mdata = db.flags
	return mdata.replace_one({flag: {"$exists": True}}, data, True)

def unkeyify_2l(layered_dict):
	out = {}
	for i in layered_dict.keys():
		add = []
		sortkey = []
		for j in layered_dict[i].keys():
			add.append([j, layered_dict[i][j]])
		add.sort(key = lambda x: x[0])
		out[i] = list(map(lambda x: x[1], add))
	return out

def get_previous_time(apikey):

	previous_time = get_analysis_flags(apikey, "latest_update")

	if previous_time == None:

		set_analysis_flags(apikey, "latest_update", 0)
		previous_time = 0

	else:

		previous_time = previous_time["latest_update"]

	return previous_time

def set_current_time(apikey, current_time):

	set_analysis_flags(apikey, "latest_update", {"latest_update": current_time})

def load_match(apikey, competition):

	return get_match_data_formatted(apikey, competition)

def load_metric(apikey, competition, match, group_name, metrics):

	group = {}

	for team in match[group_name]:

		db_data = get_team_metrics_data(apikey, competition, team)

		if db_data == None:

			elo = {"score": metrics["elo"]["score"]}
			gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
			ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}

			group[team] = {"elo": elo, "gl2": gl2, "ts": ts}

		else:

			metrics = db_data["metrics"]

			elo = metrics["elo"]
			gl2 = metrics["gl2"]
			ts = metrics["ts"]

			group[team] = {"elo": elo, "gl2": gl2, "ts": ts}

	return group

def load_pit(apikey, competition):

	return get_pit_data_formatted(apikey, competition)

def push_match(apikey, competition, results):

	for team in results:

		push_team_tests_data(apikey, competition, team, results[team])

def push_metric(apikey, competition, metric):

	for team in metric:

		push_team_metrics_data(apikey, competition, team, metric[team])

def push_pit(apikey, competition, pit):

	for variable in pit:

		push_team_pit_data(apikey, competition, variable, pit[variable])
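For reference, a minimal sketch of the read path these helpers form, not part of the commit; the connection string, TBA key, and competition code are placeholders:

    import pymongo
    from data import pull_new_tba_matches, load_match, get_previous_time

    client = pymongo.MongoClient("mongodb://localhost:27017")          # placeholder URI
    new_matches = pull_new_tba_matches("YOUR-TBA-KEY", "2020ilch", 0)  # placeholder key; cutoff 0 keeps every played qm match
    match_data = load_match(client, "2020ilch")                        # {team: {variable: [values in match order]}}
    previous_time = get_previous_time(client)                          # seeds the latest_update flag on first run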
src/cli/interface.py: 44 lines (new file)
@@ -0,0 +1,44 @@
import sys
import time
from os import system, name
import platform

empty_delim = " "
hard_divided_delim = "|"
soft_divided_delim = "|"
l_brack = "["
r_brack = "]"

ERR = "[ERR]"
INF = "[INF]"

stdout = sys.stdout
stderr = sys.stderr

def log(target, level, message, code = 0):

	message = time.ctime() + empty_delim + str(level) + l_brack + f"{code:+05}" + r_brack + empty_delim + soft_divided_delim + empty_delim + message
	print(message, file = target)

def clear():
	if name == "nt":
		system("cls")
	else:
		system("clear")

def splash(version):

	def hrule():
		print("#" + 38 * "-" + "#")
	def box(s):
		temp = "|"
		temp += s
		temp += (40 - len(s) - 2) * " "
		temp += "|"
		print(temp)

	hrule()
	box(" superscript version: " + version)
	box(" os: " + platform.system())
	box(" python: " + platform.python_version())
	hrule()
src/cli/processing.py: 194 lines (new file)
@@ -0,0 +1,194 @@
import numpy as np

from tra_analysis import Analysis as an
from data import pull_new_tba_matches, push_metric, load_metric

import signal

def simplestats(data_test):

	signal.signal(signal.SIGINT, signal.SIG_IGN)

	data = np.array(data_test[0])
	data = data[np.isfinite(data)]
	ranges = list(range(len(data)))

	test = data_test[1]

	if test == "basic_stats":
		return an.basic_stats(data)

	if test == "historical_analysis":
		return an.histo_analysis([ranges, data])

	if test == "regression_linear":
		return an.regression(ranges, data, ['lin'])

	if test == "regression_logarithmic":
		return an.regression(ranges, data, ['log'])

	if test == "regression_exponential":
		return an.regression(ranges, data, ['exp'])

	if test == "regression_polynomial":
		return an.regression(ranges, data, ['ply'])

	if test == "regression_sigmoidal":
		return an.regression(ranges, data, ['sig'])

def matchloop(client, competition, data, tests, exec_threads):

	short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}

	class AutoVivification(dict):
		def __getitem__(self, item):
			try:
				return dict.__getitem__(self, item)
			except KeyError:
				value = self[item] = type(self)()
				return value

	return_vector = {}

	team_filtered = []
	variable_filtered = []
	variable_data = []
	test_filtered = []
	result_filtered = []
	return_vector = AutoVivification()

	for team in data:

		for variable in data[team]:

			if variable in tests:

				for test in tests[variable]:

					team_filtered.append(team)
					variable_filtered.append(variable)
					variable_data.append((data[team][variable], test))
					test_filtered.append(test)

	result_filtered = exec_threads.map(simplestats, variable_data)
	i = 0

	result_filtered = list(result_filtered)

	for result in result_filtered:

		filtered = test_filtered[i]

		try:
			short = short_mapping[filtered]
			return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result[short]
		except KeyError: # not in mapping
			return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
		i += 1

	return return_vector

def metricloop(tbakey, client, competition, timestamp, metrics): # listener based metrics update

	elo_N = metrics["elo"]["N"]
	elo_K = metrics["elo"]["K"]

	matches = pull_new_tba_matches(tbakey, competition, timestamp)

	red = {}
	blu = {}

	for match in matches:

		red = load_metric(client, competition, match, "red", metrics)
		blu = load_metric(client, competition, match, "blue", metrics)

		elo_red_total = 0
		elo_blu_total = 0

		gl2_red_score_total = 0
		gl2_blu_score_total = 0

		gl2_red_rd_total = 0
		gl2_blu_rd_total = 0

		gl2_red_vol_total = 0
		gl2_blu_vol_total = 0

		for team in red:

			elo_red_total += red[team]["elo"]["score"]

			gl2_red_score_total += red[team]["gl2"]["score"]
			gl2_red_rd_total += red[team]["gl2"]["rd"]
			gl2_red_vol_total += red[team]["gl2"]["vol"]

		for team in blu:

			elo_blu_total += blu[team]["elo"]["score"]

			gl2_blu_score_total += blu[team]["gl2"]["score"]
			gl2_blu_rd_total += blu[team]["gl2"]["rd"]
			gl2_blu_vol_total += blu[team]["gl2"]["vol"]

		red_elo = {"score": elo_red_total / len(red)}
		blu_elo = {"score": elo_blu_total / len(blu)}

		red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
		blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}

		if match["winner"] == "red":

			observations = {"red": 1, "blu": 0}

		elif match["winner"] == "blue":

			observations = {"red": 0, "blu": 1}

		else:

			observations = {"red": 0.5, "blu": 0.5}

		red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
		blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]

		new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
		new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])

		red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
		blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}

		for team in red:

			red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta

			red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
			red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
			red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]

		for team in blu:

			blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta

			blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
			blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
			blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]

		temp_vector = {}
		temp_vector.update(red)
		temp_vector.update(blu)

		push_metric(client, competition, temp_vector)

def pitloop(client, competition, pit, tests):

	return_vector = {}
	for team in pit:
		for variable in pit[team]:
			if variable in tests:
				if variable not in return_vector:
					return_vector[variable] = []
				return_vector[variable].append(pit[team][variable])

	return return_vector
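For reference, a minimal sketch of driving matchloop with a process pool, mirroring how superscript.py wires it up below; the team number, variable, and values are made-up sample data, and None stands in for the unused client parameter:

    from multiprocessing import Pool
    from processing import matchloop

    if __name__ == "__main__":
        data = {"2022": {"balls-collected": [5.0, 7.0, 6.0, 8.0]}}
        tests = {"balls-collected": ["basic_stats", "regression_linear"]}
        with Pool(processes = 2) as exec_threads:
            results = matchloop(None, "2020ilch", data, tests, exec_threads)
        print(results["2022"]["balls-collected"]["regression_linear"])  # the "lin" result, via short_mapping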
src/cli/superscript.py: 536 lines (new file)
@@ -0,0 +1,536 @@
# Titan Robotics Team 2022: Superscript Script
# Written by Arthur Lu, Jacob Levine, and Dev Singh
# Notes:
# setup:

__version__ = "1.0.0"

# changelog should be viewed using print(analysis.__changelog__)
__changelog__ = """changelog:
	1.0.0:
		- superscript now runs as a PEP 3143 compliant, well-behaved daemon on Linux systems
		- the Linux superscript daemon has integrated websocket output to monitor progress/status remotely
		- the Linux daemon now sends stderr to errorlog.txt
		- added verbose option to Linux superscript to allow for interactive output
		- moved pymongo import to superscript.py
		- added profile option to Linux superscript to profile the runtime of the script
	0.9.3:
		- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
		- passed a singular instance of the PyMongo client as a stand-in for the apikey parameter in all data.py functions
	0.9.2:
		- removed unnecessary imports from data
		- minor changes to interface
	0.9.1:
		- fixed bugs in configuration item loading exception handling
	0.9.0:
		- moved printing and logging related functions to interface.py (changelog will stay in this file)
		- changed function return values for load_config and save_config to standard C values (0 for success, 1 for error)
		- added local variables for config location
		- moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
		- moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
	0.8.6:
		- added proper main function
	0.8.5:
		- added more graceful KeyboardInterrupt exiting
		- redirected stderr to errorlog.txt
	0.8.4:
		- added better error message for missing config.json
		- added automatic config.json creation
		- added splash text with version and system info
	0.8.3:
		- updated matchloop with new regression format (requires tra_analysis 3.x)
	0.8.2:
		- readded while true to main function
		- added more thread config options
	0.8.1:
		- optimized matchloop further by bypassing GIL
	0.8.0:
		- added multithreading to matchloop
		- tweaked user log
	0.7.0:
		- finished implementing main function
	0.6.2:
		- integrated get_team_rankings.py as get_team_metrics() function
		- integrated visualize_pit.py as graph_pit_histogram() function
	0.6.1:
		- bug fixes with analysis.Metric() calls
		- modified metric functions to use config.json defined default values
	0.6.0:
		- removed main function
		- changed load_config function
		- added save_config function
		- added load_match function
		- renamed simpleloop to matchloop
		- moved simplestats function inside matchloop
		- renamed load_metrics to load_metric
		- renamed metricsloop to metricloop
		- split push to database functions among push_match, push_metric, push_pit
		- moved
	0.5.2:
		- made changes due to refactoring of analysis
	0.5.1:
		- text fixes
		- removed matplotlib requirement
	0.5.0:
		- improved user interface
	0.4.2:
		- removed unnecessary code
	0.4.1:
		- fixed bug where X range for regression was determined before sanitization
		- better sanitized data
	0.4.0:
		- fixed spelling issue in __changelog__
		- addressed nan bug in regression
		- fixed errors on line 335 with metrics calling incorrect key "glicko2"
		- fixed errors in metrics computing
	0.3.0:
		- added analysis to pit data
	0.2.1:
		- minor stability patches
		- implemented db syncing for timestamps
		- fixed bugs
	0.2.0:
		- finalized testing and small fixes
	0.1.4:
		- finished metrics implementation; trueskill is bugged
	0.1.3:
		- working
	0.1.2:
		- started implementation of metrics
	0.1.1:
		- cleaned up imports
	0.1.0:
		- tested working, can push to database
	0.0.9:
		- tested working
		- prints out stats for the time being, will push to database later
	0.0.8:
		- added data import
		- removed tba import
		- finished main method
	0.0.7:
		- added load_config
		- optimized simpleloop for readability
		- added __all__ entries
		- added simplestats engine
		- pending testing
	0.0.6:
		- fixes
	0.0.5:
		- imported pickle
		- created custom database object
	0.0.4:
		- fixed simpleloop to actually return a vector
	0.0.3:
		- added metricsloop which is unfinished
	0.0.2:
		- added simpleloop which is untested until data is provided
	0.0.1:
		- created script
		- added analysis, numba, numpy imports
"""
__author__ = (
	"Arthur Lu <learthurgo@gmail.com>",
	"Jacob Levine <jlevine@imsa.edu>",
)

__all__ = [
	"load_config",
	"save_config",
]

# imports:

import asyncio
import json
import math
from multiprocessing import Pool, freeze_support
import os
import pymongo
import sys
import threading
import time
import warnings
import websockets

from interface import splash, log, ERR, INF, stdout, stderr
from data import get_previous_time, set_current_time, load_match, push_match, load_pit, push_pit
from processing import matchloop, metricloop, pitloop

config_path = "config.json"
sample_json = """{
	"max-threads": 0.5,
	"team": "",
	"competition": "2020ilch",
	"key":{
		"database":"",
		"tba":""
	},
	"statistics":{
		"match":{
			"balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-started":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"]

		},
		"metric":{
			"elo":{
				"score":1500,
				"N":400,
				"K":24
			},
			"gl2":{
				"score":1500,
				"rd":250,
				"vol":0.06
			},
			"ts":{
				"mu":25,
				"sigma":8.33
			}
		},
		"pit":{
			"wheel-mechanism":true,
			"low-balls":true,
			"high-balls":true,
			"wheel-success":true,
			"strategic-focus":true,
			"climb-mechanism":true,
			"attitude":true
		}
	}
}"""
def main(send, verbose = False, profile = False):

	warnings.filterwarnings("ignore")
	sys.stderr = open("errorlog.txt", "w")
	loop_exit_code = 0
	loop_stored_exception = None

	if verbose:
		splash(__version__)

	while True:

		try:

			loop_start = time.time()

			current_time = time.time()
			send(stdout, INF, "current time: " + str(current_time))

			send(stdout, INF, "loading config at <" + config_path + ">", code = 0)

			config = {}
			if load_config(config_path, config) == 1:
				send(stderr, ERR, "could not find config at <" + config_path + ">, generating blank config and exiting", code = 100)
				sys.exit(1)

			send(stdout, INF, "found and opened config at <" + config_path + ">", code = 0)

			error_flag = False

			try:
				competition = config["competition"]
			except:
				send(stderr, ERR, "could not find competition field in config", code = 101)
				error_flag = True
			try:
				match_tests = config["statistics"]["match"]
			except:
				send(stderr, ERR, "could not find match_tests field in config", code = 102)
				error_flag = True
			try:
				metrics_tests = config["statistics"]["metric"]
			except:
				send(stderr, ERR, "could not find metrics_tests field in config", code = 103)
				error_flag = True
			try:
				pit_tests = config["statistics"]["pit"]
			except:
				send(stderr, ERR, "could not find pit_tests field in config", code = 104)
				error_flag = True

			if error_flag:
				sys.exit(1)
			error_flag = False

			if competition == None or competition == "":
				send(stderr, ERR, "competition field in config must not be empty", code = 105)
				error_flag = True
			if match_tests == None:
				send(stderr, ERR, "match_tests field in config must not be empty", code = 106)
				error_flag = True
			if metrics_tests == None:
				send(stderr, ERR, "metrics_tests field in config must not be empty", code = 107)
				error_flag = True
			if pit_tests == None:
				send(stderr, ERR, "pit_tests field in config must not be empty", code = 108)
				error_flag = True

			if error_flag:
				sys.exit(1)

			send(stdout, INF, "found and loaded competition, match_tests, metrics_tests, pit_tests from config")

			sys_max_threads = os.cpu_count()
			try:
				cfg_max_threads = config["max-threads"]
			except:
				send(stderr, ERR, "max-threads field in config must not be empty, refer to documentation for configuration options", code = 109)
				sys.exit(1)

			if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0:
				alloc_processes = sys_max_threads + cfg_max_threads
			elif cfg_max_threads > 0 and cfg_max_threads < 1:
				alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
			elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
				alloc_processes = cfg_max_threads
			elif cfg_max_threads == 0:
				alloc_processes = sys_max_threads
			else:
				send(stderr, ERR, "max-threads must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads) + ", but got " + str(cfg_max_threads), code = 110)
				sys.exit(1)

			send(stdout, INF, "found and loaded max-threads from config")
			send(stdout, INF, "attempting to start " + str(alloc_processes) + " threads")
			try:
				exec_threads = Pool(processes = alloc_processes)
			except Exception as e:
				send(stderr, ERR, "unable to start threads", code = 200)
				send(stderr, INF, str(e))
				sys.exit(1)
			send(stdout, INF, "successfully initialized " + str(alloc_processes) + " threads")

			exit_flag = False

			try:
				apikey = config["key"]["database"]
			except:
				send(stderr, ERR, "database key field in config must be present", code = 111)
				exit_flag = True
			try:
				tbakey = config["key"]["tba"]
			except:
				send(stderr, ERR, "tba key field in config must be present", code = 112)
				exit_flag = True

			if apikey == None or apikey == "":
				send(stderr, ERR, "database key field in config must not be empty, please populate the database key")
				exit_flag = True
			if tbakey == None or tbakey == "":
				send(stderr, ERR, "tba key field in config must not be empty, please populate the tba key")
				exit_flag = True

			if exit_flag:
				sys.exit(1)

			send(stdout, INF, "found and loaded database and tba keys")

			client = pymongo.MongoClient(apikey)

			previous_time = get_previous_time(client)
			send(stdout, INF, "analysis backtimed to: " + str(previous_time))

			start = time.time()
			send(stdout, INF, "loading match data")
			match_data = load_match(client, competition)
			send(stdout, INF, "finished loading match data in " + str(time.time() - start) + " seconds")

			start = time.time()
			send(stdout, INF, "performing analysis on match data")
			results = matchloop(client, competition, match_data, match_tests, exec_threads)
			send(stdout, INF, "finished match analysis in " + str(time.time() - start) + " seconds")

			start = time.time()
			send(stdout, INF, "uploading match results to database")
			push_match(client, competition, results)
			send(stdout, INF, "finished uploading match results in " + str(time.time() - start) + " seconds")

			start = time.time()
			send(stdout, INF, "performing analysis on team metrics")
			results = metricloop(tbakey, client, competition, current_time, metrics_tests)
			send(stdout, INF, "finished metric analysis and pushed to database in " + str(time.time() - start) + " seconds")

			start = time.time()
			send(stdout, INF, "loading pit data")
			pit_data = load_pit(client, competition)
			send(stdout, INF, "finished loading pit data in " + str(time.time() - start) + " seconds")

			start = time.time()
			send(stdout, INF, "performing analysis on pit data")
			results = pitloop(client, competition, pit_data, pit_tests)
			send(stdout, INF, "finished pit analysis in " + str(time.time() - start) + " seconds")

			start = time.time()
			send(stdout, INF, "uploading pit results to database")
			push_pit(client, competition, results)
			send(stdout, INF, "finished uploading pit results in " + str(time.time() - start) + " seconds")

			client.close()

			set_current_time(client, current_time)
			send(stdout, INF, "finished all tests in " + str(time.time() - loop_start) + " seconds, looping")

		except KeyboardInterrupt:
			send(stdout, INF, "detected KeyboardInterrupt, killing threads")
			if "exec_threads" in locals():
				exec_threads.terminate()
				exec_threads.join()
				exec_threads.close()
			send(stdout, INF, "terminated threads, exiting")
			loop_stored_exception = sys.exc_info()
			loop_exit_code = 0
			break
		except Exception as e:
			send(stderr, ERR, "encountered an exception while running")
			print(e, file = stderr)
			loop_exit_code = 1
			break

	if profile:
		return

	sys.exit(loop_exit_code)
def load_config(path, config_vector):
	try:
		f = open(path, "r")
		config_vector.update(json.load(f))
		f.close()
		return 0
	except:
		f = open(path, "w")
		f.write(sample_json)
		f.close()
		return 1

def save_config(path, config_vector):
	try:
		f = open(path, "w")
		json.dump(config_vector, f)
		f.close()
		return 0
	except:
		return 1
def start(pid_path, verbose = False, profile = False):

	if profile:

		def send(target, level, message, code = 0):
			pass

		import cProfile, pstats, io
		profile = cProfile.Profile()
		profile.enable()
		main(send, profile = True)
		profile.disable()
		f = open("profile.txt", 'w+')
		ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
		ps.print_stats()

	elif verbose:

		main(log, verbose = verbose)

	else:

		f = open('errorlog.txt', 'w+')
		with daemon.DaemonContext(
			working_directory = os.getcwd(),
			pidfile = pidfile.TimeoutPIDLockFile(pid_path),
			stderr = f
		):

			async def handler(client, path):
				clients.append(client)
				while True:
					try:
						pong_waiter = await client.ping()
						await pong_waiter
						time.sleep(3)
					except Exception as e:
						clients.remove(client)
						break

			async def send_one(client, data):
				await client.send(data)

			def send(target, level, message, code = 0):
				message_clients = clients.copy()
				for client in message_clients:
					try:
						asyncio.run(send_one(client, message))
					except:
						pass

			clients = []
			start_server = websockets.serve(handler, "0.0.0.0", 5678)

			asyncio.get_event_loop().run_until_complete(start_server)
			threading.Thread(target = asyncio.get_event_loop().run_forever).start()

			main(send)

def stop(pid_path):
	try:
		pf = open(pid_path, 'r')
		pid = int(pf.read().strip())
		pf.close()
	except IOError:
		sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n")
		return

	try:
		while True:
			os.kill(pid, SIGTERM)
			time.sleep(0.01)
	except OSError as err:
		err = str(err)
		if err.find("No such process") > 0:
			if os.path.exists(pid_path):
				os.remove(pid_path)
		else:
			print(str(err))
			sys.exit(1)

def restart(pid_path):
	stop(pid_path)
	start(pid_path)

if __name__ == "__main__":

	if sys.platform.startswith("win"):
		freeze_support()
		start(None, verbose = True)

	else:
		import daemon
		from daemon import pidfile
		from signal import SIGTERM
		pid_path = "tra-daemon.pid"
		if len(sys.argv) == 2:
			if 'start' == sys.argv[1]:
				start(pid_path)
			elif 'stop' == sys.argv[1]:
				stop(pid_path)
			elif 'restart' == sys.argv[1]:
				restart(pid_path)
			elif 'verbose' == sys.argv[1]:
				start(None, verbose = True)
			elif 'profile' == sys.argv[1]:
				start(None, profile = True)
			else:
				print("usage: %s start|stop|restart|verbose|profile" % sys.argv[0])
				sys.exit(2)
			sys.exit(0)
		else:
			print("usage: %s start|stop|restart|verbose|profile" % sys.argv[0])
			sys.exit(2)
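The daemon broadcasts its status lines over the websocket server started above. A minimal monitoring client, not part of the commit, assuming the daemon is running and port 5678 is reachable on localhost:

    import asyncio
    import websockets

    async def monitor():
        async with websockets.connect("ws://localhost:5678") as ws:  # host assumed local
            async for message in ws:
                print(message)

    asyncio.run(monitor())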
@@ -2,7 +2,6 @@
 
 block_cipher = None
 
-
 a = Analysis(['superscript.py'],
              pathex=['/workspaces/tra-data-analysis/src'],
              binaries=[],
@@ -11,6 +10,8 @@ a = Analysis(['superscript.py'],
              "dnspython",
              "sklearn.utils._weight_vector",
              "requests",
+             "websockets.legacy",
+             "websockets.legacy.server",
              ],
              hookspath=[],
              runtime_hooks=[],
@@ -5,8 +5,6 @@ tra-analysis
 dnspython
 pyinstaller
 requests
 pymongo
-
-
 numpy
 scipy
@@ -15,4 +13,7 @@ six
 pyparsing
 pandas
 
 kivy==2.0.0rc2
+
+websockets
+python-daemon