11 Commits
gui...working

Author SHA1 Message Date
Arthur Lu
4836f48a34 pyinstaller onedir workaround for requests issue,
moved spec file to build folder
Former-commit-id: 4427eba5c993222cd2ebfdfc76a4685e25e18b0c
2022-02-24 02:56:08 +00:00
Arthur Lu
9a1a45f1c9 removed cert verification for requests library
Former-commit-id: 4914b98a21478a10be7d2f737dd6d5669b4c5681
2022-02-20 23:38:18 +00:00
Arthur Lu
d7ed695ad1 fixed build script,
added binutils to docker container for build requirements,
added required scipy modules to superscript.spec
Former-commit-id: 60a6fc1106dc90514bd1270138ab9166efa29290
2022-02-20 23:16:02 +00:00
Arthur Lu
21d92e65b2 add binary paths to devcontainer.json
Former-commit-id: a323f3161d6ea326b0215d5ed5ebbadae0f38597
2022-02-20 22:53:11 +00:00
Arthur Lu
0cace3cec3 improved exit code reporting
Former-commit-id: f36f45bc6a2773b8a821d0e046ce1b47fc4ced0f
2022-02-19 23:29:58 +00:00
Arthur Lu
80b63269df readded zmq messaging
Former-commit-id: e38653c4dc4ffe6aada19cac2251c3ec28bfbc2b
2022-02-19 22:58:58 +00:00
Arthur Lu
56447603e1 implemented logger,
dropped zmq because of constant inconsistency,
daemon process now outputs to file,
deleted client.py
Former-commit-id: d3251e5f6404d0dce42da49e5d577651e718e168
2022-02-19 08:52:49 +00:00
Arthur Lu
2130182212 fixed pull APIError calls (removed endpoint arg)
Former-commit-id: 5090eefdbd5b13bfda285d5480e79896dd3610a5
2022-02-19 06:50:34 +00:00
Arthur Lu
b1eff19ea4 removed __all__ property from superscript
Former-commit-id: 67c4684b5943467cc07b8cc638dc438ad0ee3e54
2022-02-19 06:46:39 +00:00
Arthur Lu
b43836899d moved config functions into Configuration class,
simplified exception class by removing error codes,
removed exec_threads from module parameters
Former-commit-id: 545ef765653970e1cdebac692eccd227effb2508
2022-02-19 06:19:13 +00:00
Arthur Lu
524a0a211d removed gui (last commit tagged "gui"),
removed print statement in pit module
Former-commit-id: 4978aee142eaf9431913b44eabfc0dfb79c7b600
2022-02-09 05:36:19 +00:00
24 changed files with 663 additions and 1444 deletions


@@ -1,6 +1,6 @@
 FROM python:slim
 WORKDIR /
 RUN apt-get -y update; apt-get -y upgrade
-RUN apt-get -y install git
+RUN apt-get -y install git binutils
 COPY requirements.txt .
 RUN pip install -r requirements.txt


@@ -5,11 +5,11 @@
 	},
 	"settings": {
 		"terminal.integrated.shell.linux": "/bin/bash",
-		"python.pythonPath": "",
+		"python.pythonPath": "/usr/local/bin/python",
 		"python.linting.enabled": true,
 		"python.linting.pylintEnabled": true,
-		"python.linting.pylintPath": "",
-		"python.testing.pytestPath": "",
+		"python.linting.pylintPath": "/usr/local/bin/pylint",
+		"python.testing.pytestPath": "/usr/local/bin/pytest",
 		"editor.tabSize": 4,
 		"editor.insertSpaces": false
 	},

.gitignore vendored (3 changed lines)

@@ -15,5 +15,4 @@
 **/*.log
 **/errorlog.txt
-/dist/superscript.*
-/dist/superscript
+/dist/*


@@ -1,4 +1,4 @@
-set pathtospec="../src/cli/superscript.spec"
+set pathtospec="../src/superscript.spec"
 set pathtodist="../dist/"
 set pathtowork="temp/"


@@ -1,4 +1,4 @@
-pathtospec="../src/cli/superscript.spec"
+pathtospec="superscript.spec"
 pathtodist="../dist/"
 pathtowork="temp/"

build/superscript.spec (new file, 50 lines)

@@ -0,0 +1,50 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(
['../src/superscript.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=['dnspython', 'sklearn.utils._weight_vector', 'sklearn.utils._typedefs', 'sklearn.neighbors._partition_nodes', 'requests'],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=['matplotlib'],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False
)
pyz = PYZ(
a.pure,
a.zipped_data,
cipher=block_cipher
)
exe = EXE(
pyz,
a.scripts,
[],
exclude_binaries=True,
name='superscript',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None
)
coll = COLLECT(
exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='superscript'
)
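The top commit ties this new onedir spec to a "requests issue". A frequent cause with PyInstaller and requests is TLS verification failing because certifi's cacert.pem is not found in the frozen build; the following is a minimal sketch of that class of workaround, not the repository's exact fix, and all names in it are illustrative:

# Hypothetical sketch: point requests at a CA bundle shipped with the frozen
# app instead of disabling verification with verify=False.
import os
import sys

def configure_ca_bundle():
	# PyInstaller sets sys.frozen; bundled data lives under sys._MEIPASS
	# (onefile) or next to the executable (onedir).
	if getattr(sys, "frozen", False):
		base = getattr(sys, "_MEIPASS", os.path.dirname(sys.executable))
		bundle = os.path.join(base, "certifi", "cacert.pem")  # certifi's CA file
		if os.path.exists(bundle):
			# requests honors this environment variable when verifying TLS
			os.environ["REQUESTS_CA_BUNDLE"] = bundle

configure_ca_bundle()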


@@ -1,244 +0,0 @@
import math
import json
from multiprocessing import Pool
import os
from cerberus import Validator
from exceptions import ConfigurationError
from data import set_database_config, get_database_config
from interface import stderr, stdout, INF, ERR
config_path = "config.json"
sample_json = """
{
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":false
},
"variable":{
"max-threads":0.5,
"team":"",
"event-delay":false,
"loop-delay":0,
"reportable":true,
"teams":[
],
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
}
},
"pit":{
"tests":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}
}
}
"""
def parse_config_persistent(send, config):
v = Validator(load_validation_schema(), allow_unknown = True)
isValidated = v.validate(config)
if not isValidated:
raise ConfigurationError(v.errors, 101)
apikey = config["persistent"]["key"]["database"]
tbakey = config["persistent"]["key"]["tba"]
preference = config["persistent"]["config-preference"]
sync = config["persistent"]["synchronize-config"]
return apikey, tbakey, preference, sync
def parse_config_variable(send, config):
sys_max_threads = os.cpu_count()
try:
cfg_max_threads = config["variable"]["max-threads"]
except:
raise ConfigurationError("variable/max-threads field is invalid or missing, refer to documentation for configuration options", 109)
if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
alloc_processes = sys_max_threads + cfg_max_threads
elif cfg_max_threads > 0 and cfg_max_threads < 1:
alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
alloc_processes = cfg_max_threads
elif cfg_max_threads == 0:
alloc_processes = sys_max_threads
else:
raise ConfigurationError("variable/max-threads must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads) + ", but got " + cfg_max_threads, 110)
try:
exec_threads = Pool(processes = alloc_processes)
except Exception as e:
send(stderr, INF, e)
raise ConfigurationError("unable to start threads", 200)
send(stdout, INF, "successfully initialized " + str(alloc_processes) + " threads")
try:
modules = config["variable"]["modules"]
except:
raise ConfigurationError("variable/modules field is invalid or missing", 102)
if modules == None:
raise ConfigurationError("variable/modules field is empty", 106)
send(stdout, INF, "found and loaded competition, match, metrics, pit from config")
return exec_threads, modules
def resolve_config_conflicts(send, client, config, preference, sync):
if sync:
if preference == "local" or preference == "client":
send(stdout, INF, "config-preference set to local/client, loading local config information")
remote_config = get_database_config(client)
if remote_config != config["variable"]:
set_database_config(client, config["variable"])
send(stdout, INF, "database config was different and was updated")
return config
elif preference == "remote" or preference == "database":
send(stdout, INF, "config-preference set to remote/database, loading remote config information")
remote_config= get_database_config(client)
if remote_config != config["variable"]:
config["variable"] = remote_config
if save_config(config_path, config):
raise ConfigurationError("local config was different but could not be updated", 121)
send(stdout, INF, "local config was different and was updated")
return config
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)
else:
if preference == "local" or preference == "client":
send(stdout, INF, "config-preference set to local/client, loading local config information")
return config
elif preference == "remote" or preference == "database":
send(stdout, INF, "config-preference set to remote/database, loading database config information")
config["variable"] = get_database_config(client)
return config
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)
def load_config(path, config_vector):
try:
f = open(path, "r")
config_vector.update(json.load(f))
f.close()
return 0
except:
f = open(path, "w")
f.write(sample_json)
f.close()
return 1
def load_validation_schema():
try:
with open("validation-schema.json", "r") as f:
return json.load(f)
except:
raise FileNotFoundError("Validation schema not found at validation-schema.json")
def save_config(path, config_vector):
f = open(path, "w+")
json.dump(config_vector, f, ensure_ascii=False, indent=4)
f.close()
return 0
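For reference, the max-threads arithmetic in parse_config_variable above maps the configured value to a process count as follows (a worked example, assuming os.cpu_count() == 8):

# max-threads = -2   ->  8 + (-2)       = 6 processes (negative: leave cores free)
# max-threads = 0.5  ->  floor(0.5 * 8) = 4 processes (fraction of all cores)
# max-threads = 3    ->  3 processes     (absolute count, at most 8)
# max-threads = 0    ->  8 processes     (use every core)
# max-threads = 12   ->  ConfigurationError 110 (outside the -8..8 range)
# note: as written, the branches also send max-threads == 1 to the error case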


@@ -1,11 +0,0 @@
class APIError(Exception):
code = None
def __init__(self, str, endpoint):
super().__init__(str)
self.endpoint = endpoint
class ConfigurationError (Exception):
code = None
def __init__(self, str, code):
super().__init__(str)
self.code = code


@@ -1,44 +0,0 @@
import sys
import time
from os import system, name
import platform
empty_delim = " "
hard_divided_delim = "|"
soft_divided_delim = "|"
l_brack = "["
r_brack = "]"
ERR = "[ERR]"
INF = "[INF]"
stdout = sys.stdout
stderr = sys.stderr
def log(target, level, message, code = 0):
message = time.ctime() + empty_delim + str(level) + l_brack + f"{code:+05}" + r_brack + empty_delim + soft_divided_delim + empty_delim + message
print(message, file = target)
def clear():
if name == "nt":
system("cls")
else:
system("clear")
def splash(version):
def hrule():
print("#"+38*"-"+"#")
def box(s):
temp = "|"
temp += s
temp += (40-len(s)-2)*" "
temp += "|"
print(temp)
hrule()
box(" superscript version: " + version)
box(" os: " + platform.system())
box(" python: " + platform.python_version())
hrule()


@@ -1,39 +0,0 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['superscript.py'],
pathex=['/workspaces/tra-data-analysis/src'],
binaries=[],
datas=[],
hiddenimports=[
"dnspython",
"sklearn.utils._weight_vector",
"requests",
],
hookspath=[],
runtime_hooks=[],
excludes=[
"matplotlib",
"pandas"
],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[('W ignore', None, 'OPTION')],
name='superscript',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )


@@ -1,27 +0,0 @@
{
"persistent": {
"type": "dict",
"require_all": true,
"schema": {
"key": {
"type": "dict",
"require_all":true,
"schema": {
"database": {"type":"string"},
"tba": {"type": "string"},
"tra": {
"type": "dict",
"require_all": true,
"schema": {
"CLIENT_ID": {"type": "string"},
"CLIENT_SECRET": {"type": "string"},
"url": {"type": "string"}
}
}
}
},
"config-preference": {"type": "string", "required": true},
"synchronize-config": {"type": "boolean", "required": true}
}
}
}

src/config.py (new file, 251 lines)

@@ -0,0 +1,251 @@
import json
from exceptions import ConfigurationError
from cerberus import Validator
from data import set_database_config, get_database_config
class Configuration:
path = None
config = {}
_sample_config = {
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":False
},
"variable":{
"event-delay":False,
"loop-delay":0,
"competition": "2020ilch",
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
}
},
"pit":{
"tests":{
"wheel-mechanism":True,
"low-balls":True,
"high-balls":True,
"wheel-success":True,
"strategic-focus":True,
"climb-mechanism":True,
"attitude":True
}
}
}
}
}
_validation_schema = {
"persistent": {
"type": "dict",
"required": True,
"require_all": True,
"schema": {
"key": {
"type": "dict",
"require_all":True,
"schema": {
"database": {"type":"string"},
"tba": {"type": "string"},
"tra": {
"type": "dict",
"require_all": True,
"schema": {
"CLIENT_ID": {"type": "string"},
"CLIENT_SECRET": {"type": "string"},
"url": {"type": "string"}
}
}
}
},
"config-preference": {"type": "string", "required": True},
"synchronize-config": {"type": "boolean", "required": True}
}
}
}
def __init__(self, path):
self.path = path
self.load_config()
self.validate_config()
def load_config(self):
try:
f = open(self.path, "r")
self.config.update(json.load(f))
f.close()
except:
self.config = self._sample_config
self.save_config()
f.close()
raise ConfigurationError("could not find config file at <" + self.path + ">, created new sample config file at that path")
def save_config(self):
f = open(self.path, "w+")
json.dump(self.config, f, ensure_ascii=False, indent=4)
f.close()
def validate_config(self):
v = Validator(self._validation_schema, allow_unknown = True)
isValidated = v.validate(self.config)
if not isValidated:
raise ConfigurationError("config validation error: " + v.errors)
def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE
if name == "persistent":
return self.config["persistent"]
elif name == "key":
return self.config["persistent"]["key"]
elif name == "database":
# soon to be deprecated
return self.config["persistent"]["key"]["database"]
elif name == "tba":
return self.config["persistent"]["key"]["tba"]
elif name == "tra":
return self.config["persistent"]["key"]["tra"]
elif name == "priority":
return self.config["persistent"]["config-preference"]
elif name == "sync":
return self.config["persistent"]["synchronize-config"]
elif name == "variable":
return self.config["variable"]
elif name == "event_delay":
return self.config["variable"]["event-delay"]
elif name == "loop_delay":
return self.config["variable"]["loop-delay"]
elif name == "competition":
return self.config["variable"]["competition"]
elif name == "modules":
return self.config["variable"]["modules"]
else:
return None
def __getitem__(self, key):
return self.config[key]
def resolve_config_conflicts(self, logger, client): # needs improvement with new localization scheme
sync = self.sync
priority = self.priority
if sync:
if priority == "local" or priority == "client":
logger.info("config-preference set to local/client, loading local config information")
remote_config = get_database_config(client)
if remote_config != self.config["variable"]:
set_database_config(client, self.config["variable"])
logger.info("database config was different and was updated")
# no change to config
elif priority == "remote" or priority == "database":
logger.info("config-preference set to remote/database, loading remote config information")
remote_config = get_database_config(client)
if remote_config != self.config["variable"]:
self.config["variable"] = remote_config
self.save_config()
# change variable to match remote
logger.info("local config was different and was updated")
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")
else:
if priority == "local" or priority == "client":
logger.info("config-preference set to local/client, loading local config information")
# no change to config
elif priority == "remote" or priority == "database":
logger.info("config-preference set to remote/database, loading database config information")
self.config["variable"] = get_database_config(client)
# change variable to match remote without updating local version
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")


@@ -4,7 +4,7 @@ import pandas as pd
 def pull_new_tba_matches(apikey, competition, cutoff):
 	api_key= apikey
-	x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key}, verify=False)
+	x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
 	out = []
 	for i in x.json():
 		if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":

src/dep.py (new file, 141 lines)

@@ -0,0 +1,141 @@
# contains deprecated functions, not to be used unless nessasary!
import json
sample_json = """
{
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":false
},
"variable":{
"max-threads":0.5,
"team":"",
"event-delay":false,
"loop-delay":0,
"reportable":true,
"teams":[
],
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
}
},
"pit":{
"tests":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}
}
}
"""
def load_config(path, config_vector):
try:
f = open(path, "r")
config_vector.update(json.load(f))
f.close()
return 0
except:
f = open(path, "w")
f.write(sample_json)
f.close()
return 1
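The return value of this retained load_config signals whether a sample config had to be generated; a short usage sketch:

config = {}
if load_config("config.json", config):  # returns 1 when a new sample was written
	print("no config found; wrote a sample config.json, fill it in and rerun")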

src/exceptions.py (new file, 7 lines)

@@ -0,0 +1,7 @@
class APIError(Exception):
def __init__(self, str):
super().__init__(str)
class ConfigurationError (Exception):
def __init__(self, str):
super().__init__(str)
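Per the "simplified exception class" and "fixed pull APIError calls" commits, both exceptions now carry only a message; a sketch of the resulting call pattern (the response dict here is hypothetical):

from exceptions import APIError

response = {"success": False, "error": "bad key"}  # hypothetical failed API response
if not response["success"]:
	raise APIError(str(response))  # no endpoint argument or numeric code anymore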


@@ -1,129 +0,0 @@
import requests
import pymongo
import pandas as pd
import time
def pull_new_tba_matches(apikey, competition, cutoff):
api_key= apikey
x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth_Key":api_key})
out = []
for i in x.json():
if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
return out
def get_team_match_data(apikey, competition, team_num):
client = pymongo.MongoClient(apikey)
db = client.data_scouting
mdata = db.matchdata
out = {}
for i in mdata.find({"competition" : competition, "team_scouted": team_num}):
out[i['match']] = i['data']
return pd.DataFrame(out)
def get_team_pit_data(apikey, competition, team_num):
client = pymongo.MongoClient(apikey)
db = client.data_scouting
mdata = db.pitdata
out = {}
return mdata.find_one({"competition" : competition, "team_scouted": team_num})["data"]
def get_team_metrics_data(apikey, competition, team_num):
client = pymongo.MongoClient(apikey)
db = client.data_processing
mdata = db.team_metrics
return mdata.find_one({"competition" : competition, "team": team_num})
def get_match_data_formatted(apikey, competition):
client = pymongo.MongoClient(apikey)
db = client.data_scouting
mdata = db.teamlist
x=mdata.find_one({"competition":competition})
out = {}
for i in x:
try:
out[int(i)] = unkeyify_2l(get_team_match_data(apikey, competition, int(i)).transpose().to_dict())
except:
pass
return out
def get_metrics_data_formatted(apikey, competition):
client = pymongo.MongoClient(apikey)
db = client.data_scouting
mdata = db.teamlist
x=mdata.find_one({"competition":competition})
out = {}
for i in x:
try:
out[int(i)] = d.get_team_metrics_data(apikey, competition, int(i))
except:
pass
return out
def get_pit_data_formatted(apikey, competition):
client = pymongo.MongoClient(apikey)
db = client.data_scouting
mdata = db.teamlist
x=mdata.find_one({"competition":competition})
out = {}
for i in x:
try:
out[int(i)] = get_team_pit_data(apikey, competition, int(i))
except:
pass
return out
def get_pit_variable_data(apikey, competition):
client = pymongo.MongoClient(apikey)
db = client.data_processing
mdata = db.team_pit
out = {}
return mdata.find()
def get_pit_variable_formatted(apikey, competition):
temp = get_pit_variable_data(apikey, competition)
out = {}
for i in temp:
out[i["variable"]] = i["data"]
return out
def push_team_tests_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
client = pymongo.MongoClient(apikey)
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
def push_team_metrics_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
client = pymongo.MongoClient(apikey)
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
def push_team_pit_data(apikey, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
client = pymongo.MongoClient(apikey)
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
def get_analysis_flags(apikey, flag):
client = pymongo.MongoClient(apikey)
db = client.data_processing
mdata = db.flags
return mdata.find_one({flag:{"$exists":True}})
def set_analysis_flags(apikey, flag, data):
client = pymongo.MongoClient(apikey)
db = client.data_processing
mdata = db.flags
return mdata.replace_one({flag:{"$exists":True}}, data, True)
def unkeyify_2l(layered_dict):
out = {}
for i in layered_dict.keys():
add = []
sortkey = []
for j in layered_dict[i].keys():
add.append([j,layered_dict[i][j]])
add.sort(key = lambda x: x[0])
out[i] = list(map(lambda x: x[1], add))
return out


@@ -1,151 +0,0 @@
<Launch>:
orientation: "vertical"
NavigationLayout:
ScreenManager:
id: screen_manager
HomeScreen:
name: "Home"
BoxLayout:
orientation: "vertical"
MDToolbar:
title: screen_manager.current
elevation: 10
left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
GridLayout:
cols: 1
padding: 15, 15
spacing: 20, 20
MDTextFieldRect:
hint_text: "Console Log"
# size_hint: .8, None
# align: 'center'
# Widget:
SettingsScreen:
name: "Settings"
BoxLayout:
orientation: 'vertical'
MDToolbar:
title: screen_manager.current
elevation: 10
left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
Widget:
InfoScreen:
name: "Info"
BoxLayout:
orientation: 'vertical'
MDToolbar:
title: screen_manager.current
elevation: 10
left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
# GridLayout:
# cols: 2
# padding: 15, 15
# spacing: 20, 20
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "DB Key:"
halign: 'center'
MDTextField:
hint_text: "placeholder"
pos_hint: {"center_y": .5}
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "TBA Key:"
halign: 'center'
MDTextField:
hint_text: "placeholder"
pos_hint: {"center_y": .5}
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "CPU Use:"
halign: 'center'
MDLabel:
text: "placeholder"
halign: 'center'
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "Network:"
halign: 'center'
MDLabel:
text: "placeholder"
halign: 'center'
Widget:
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "Progress"
halign: 'center'
MDProgressBar:
id: progress
value: 50
StatsScreen:
name: "Stats"
MDCheckbox:
size_hint: None, None
size: "48dp", "48dp"
pos_hint: {'center_x': .5, 'center_y': .5}
on_active: Screen.test()
#Navigation Drawer -------------------------
MDNavigationDrawer:
id: nav_drawer
BoxLayout:
orientation: "vertical"
padding: "8dp"
spacing: "8dp"
MDLabel:
text: "Titan Scouting"
font_style: "Button"
size_hint_y: None
height: self.texture_size[1]
MDLabel:
text: "Data Analysis"
font_style: "Caption"
size_hint_y: None
height: self.texture_size[1]
ScrollView:
MDList:
OneLineAvatarListItem:
text: "Home"
on_press:
# nav_drawer.set_state("close")
# screen_manager.transition.direction = "left"
screen_manager.current = "Home"
IconLeftWidget:
icon: "home"
OneLineAvatarListItem:
text: "Settings"
on_press:
# nav_drawer.set_state("close")
# screen_manager.transition.direction = "right"
# screen_manager.fade
screen_manager.current = "Settings"
IconLeftWidget:
icon: "cog"
OneLineAvatarListItem:
text: "Info"
on_press:
# nav_drawer.set_state("close")
# screen_manager.transition.direction = "right"
# screen_manager.fade
screen_manager.current = "Info"
IconLeftWidget:
icon: "cog"
OneLineAvatarListItem:
text: "Stats"
on_press:
# nav_drawer.set_state("close")
# screen_manager.transition.direction = "right"
# screen_manager.fade
screen_manager.current = "Stats"
IconLeftWidget:
icon: "cog"


@@ -1,58 +0,0 @@
from kivy.lang import Builder
from kivymd.uix.screen import Screen
from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
from kivymd.uix.list import OneLineIconListItem, IconLeftWidget
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.base import runTouchApp
from kivymd.uix.menu import MDDropdownMenu, MDMenuItem
from kivymd.app import MDApp
# import superscript as ss
# from tra_analysis import analysis as an
import data as d
from collections import defaultdict
import json
import math
import numpy as np
import os
from os import system, name
from pathlib import Path
from multiprocessing import Pool
import matplotlib.pyplot as plt
from concurrent.futures import ThreadPoolExecutor
import time
import warnings
# global exec_threads
# Screens
class HomeScreen(Screen):
pass
class SettingsScreen(Screen):
pass
class InfoScreen(Screen):
pass
class StatsScreen(Screen):
pass
class MyApp(MDApp):
def build(self):
self.theme_cls.primary_palette = "Red"
return Builder.load_file("design.kv")
def test():
print("test")
if __name__ == "__main__":
MyApp().run()


@@ -1,627 +0,0 @@
# Titan Robotics Team 2022: Superscript Script
# Written by Arthur Lu, Jacob Levine, and Dev Singh
# Notes:
# setup:
__version__ = "0.8.6"
# changelog should be viewed using print(analysis.__changelog__)
__changelog__ = """changelog:
0.8.6:
- added proper main function
0.8.5:
- added more gradeful KeyboardInterrupt exiting
- redirected stderr to errorlog.txt
0.8.4:
- added better error message for missing config.json
- added automatic config.json creation
- added splash text with version and system info
0.8.3:
- updated matchloop with new regression format (requires tra_analysis 3.x)
0.8.2:
- readded while true to main function
- added more thread config options
0.8.1:
- optimized matchloop further by bypassing GIL
0.8.0:
- added multithreading to matchloop
- tweaked user log
0.7.0:
- finished implementing main function
0.6.2:
- integrated get_team_rankings.py as get_team_metrics() function
- integrated visualize_pit.py as graph_pit_histogram() function
0.6.1:
- bug fixes with analysis.Metric() calls
- modified metric functions to use config.json defined default values
0.6.0:
- removed main function
- changed load_config function
- added save_config function
- added load_match function
- renamed simpleloop to matchloop
- moved simplestats function inside matchloop
- renamed load_metrics to load_metric
- renamed metricsloop to metricloop
- split push to database functions amon push_match, push_metric, push_pit
- moved
0.5.2:
- made changes due to refactoring of analysis
0.5.1:
- text fixes
- removed matplotlib requirement
0.5.0:
- improved user interface
0.4.2:
- removed unessasary code
0.4.1:
- fixed bug where X range for regression was determined before sanitization
- better sanitized data
0.4.0:
- fixed spelling issue in __changelog__
- addressed nan bug in regression
- fixed errors on line 335 with metrics calling incorrect key "glicko2"
- fixed errors in metrics computing
0.3.0:
- added analysis to pit data
0.2.1:
- minor stability patches
- implemented db syncing for timestamps
- fixed bugs
0.2.0:
- finalized testing and small fixes
0.1.4:
- finished metrics implement, trueskill is bugged
0.1.3:
- working
0.1.2:
- started implement of metrics
0.1.1:
- cleaned up imports
0.1.0:
- tested working, can push to database
0.0.9:
- tested working
- prints out stats for the time being, will push to database later
0.0.8:
- added data import
- removed tba import
- finished main method
0.0.7:
- added load_config
- optimized simpleloop for readibility
- added __all__ entries
- added simplestats engine
- pending testing
0.0.6:
- fixes
0.0.5:
- imported pickle
- created custom database object
0.0.4:
- fixed simpleloop to actually return a vector
0.0.3:
- added metricsloop which is unfinished
0.0.2:
- added simpleloop which is untested until data is provided
0.0.1:
- created script
- added analysis, numba, numpy imports
"""
__author__ = (
"Arthur Lu <learthurgo@gmail.com>",
"Jacob Levine <jlevine@imsa.edu>",
)
__all__ = [
"load_config",
"save_config",
"get_previous_time",
"load_match",
"matchloop",
"load_metric",
"metricloop",
"load_pit",
"pitloop",
"push_match",
"push_metric",
"push_pit",
]
# imports:
from tra_analysis import analysis as an
import data as d
from collections import defaultdict
import json
import math
import numpy as np
import os
from os import system, name
from pathlib import Path
from multiprocessing import Pool
import platform
import sys
import time
import warnings
global exec_threads
def main():
global exec_threads
sys.stderr = open("errorlog.txt", "w")
warnings.filterwarnings("ignore")
splash()
while (True):
try:
current_time = time.time()
print("[OK] time: " + str(current_time))
config = load_config("config.json")
competition = config["competition"]
match_tests = config["statistics"]["match"]
pit_tests = config["statistics"]["pit"]
metrics_tests = config["statistics"]["metric"]
print("[OK] configs loaded")
print("[OK] starting threads")
cfg_max_threads = config["max-threads"]
sys_max_threads = os.cpu_count()
if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
alloc_processes = sys_max_threads + cfg_max_threads
elif cfg_max_threads > 0 and cfg_max_threads < 1:
alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
alloc_processes = cfg_max_threads
elif cfg_max_threads == 0:
alloc_processes = sys_max_threads
else:
print("[ERROR] Invalid number of processes, must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads))
exit()
exec_threads = Pool(processes = alloc_processes)
print("[OK] " + str(alloc_processes) + " threads started")
apikey = config["key"]["database"]
tbakey = config["key"]["tba"]
print("[OK] loaded keys")
previous_time = get_previous_time(apikey)
print("[OK] analysis backtimed to: " + str(previous_time))
print("[OK] loading data")
start = time.time()
match_data = load_match(apikey, competition)
pit_data = load_pit(apikey, competition)
print("[OK] loaded data in " + str(time.time() - start) + " seconds")
print("[OK] running match stats")
start = time.time()
matchloop(apikey, competition, match_data, match_tests)
print("[OK] finished match stats in " + str(time.time() - start) + " seconds")
print("[OK] running team metrics")
start = time.time()
metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
print("[OK] finished team metrics in " + str(time.time() - start) + " seconds")
print("[OK] running pit analysis")
start = time.time()
pitloop(apikey, competition, pit_data, pit_tests)
print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
set_current_time(apikey, current_time)
print("[OK] finished all tests, looping")
print_hrule()
except KeyboardInterrupt:
print("\n[OK] caught KeyboardInterrupt, killing processes")
exec_threads.terminate()
print("[OK] processes killed, exiting")
exit()
else:
pass
#clear()
def clear():
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
def print_hrule():
print("#"+38*"-"+"#")
def print_box(s):
temp = "|"
temp += s
temp += (40-len(s)-2)*" "
temp += "|"
print(temp)
def splash():
print_hrule()
print_box(" superscript version: " + __version__)
print_box(" os: " + platform.system())
print_box(" python: " + platform.python_version())
print_hrule()
def load_config(file):
config_vector = {}
try:
f = open(file)
except:
print("[ERROR] could not locate config.json, generating blank config.json and exiting")
f = open(file, "w")
f.write(sample_json)
exit()
config_vector = json.load(f)
return config_vector
def save_config(file, config_vector):
with open(file) as f:
json.dump(config_vector, f)
def get_previous_time(apikey):
previous_time = d.get_analysis_flags(apikey, "latest_update")
if previous_time == None:
d.set_analysis_flags(apikey, "latest_update", 0)
previous_time = 0
else:
previous_time = previous_time["latest_update"]
return previous_time
def set_current_time(apikey, current_time):
d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
def load_match(apikey, competition):
return d.get_match_data_formatted(apikey, competition)
def simplestats(data_test):
data = np.array(data_test[0])
data = data[np.isfinite(data)]
ranges = list(range(len(data)))
test = data_test[1]
if test == "basic_stats":
return an.basic_stats(data)
if test == "historical_analysis":
return an.histo_analysis([ranges, data])
if test == "regression_linear":
return an.regression(ranges, data, ['lin'])
if test == "regression_logarithmic":
return an.regression(ranges, data, ['log'])
if test == "regression_exponential":
return an.regression(ranges, data, ['exp'])
if test == "regression_polynomial":
return an.regression(ranges, data, ['ply'])
if test == "regression_sigmoidal":
return an.regression(ranges, data, ['sig'])
def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][Variable][Match]
global exec_threads
short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
class AutoVivification(dict):
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
return_vector = {}
team_filtered = []
variable_filtered = []
variable_data = []
test_filtered = []
result_filtered = []
return_vector = AutoVivification()
for team in data:
for variable in data[team]:
if variable in tests:
for test in tests[variable]:
team_filtered.append(team)
variable_filtered.append(variable)
variable_data.append((data[team][variable], test))
test_filtered.append(test)
result_filtered = exec_threads.map(simplestats, variable_data)
i = 0
result_filtered = list(result_filtered)
for result in result_filtered:
filtered = test_filtered[i]
try:
short = short_mapping[filtered]
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result[short]
except KeyError: # not in mapping
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
i += 1
push_match(apikey, competition, return_vector)
def load_metric(apikey, competition, match, group_name, metrics):
group = {}
for team in match[group_name]:
db_data = d.get_team_metrics_data(apikey, competition, team)
if d.get_team_metrics_data(apikey, competition, team) == None:
elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
else:
metrics = db_data["metrics"]
elo = metrics["elo"]
gl2 = metrics["gl2"]
ts = metrics["ts"]
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
return group
def metricloop(tbakey, apikey, competition, timestamp, metrics): # listener based metrics update
elo_N = metrics["elo"]["N"]
elo_K = metrics["elo"]["K"]
matches = d.pull_new_tba_matches(tbakey, competition, timestamp)
red = {}
blu = {}
for match in matches:
red = load_metric(apikey, competition, match, "red", metrics)
blu = load_metric(apikey, competition, match, "blue", metrics)
elo_red_total = 0
elo_blu_total = 0
gl2_red_score_total = 0
gl2_blu_score_total = 0
gl2_red_rd_total = 0
gl2_blu_rd_total = 0
gl2_red_vol_total = 0
gl2_blu_vol_total = 0
for team in red:
elo_red_total += red[team]["elo"]["score"]
gl2_red_score_total += red[team]["gl2"]["score"]
gl2_red_rd_total += red[team]["gl2"]["rd"]
gl2_red_vol_total += red[team]["gl2"]["vol"]
for team in blu:
elo_blu_total += blu[team]["elo"]["score"]
gl2_blu_score_total += blu[team]["gl2"]["score"]
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
red_elo = {"score": elo_red_total / len(red)}
blu_elo = {"score": elo_blu_total / len(blu)}
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
if match["winner"] == "red":
observations = {"red": 1, "blu": 0}
elif match["winner"] == "blue":
observations = {"red": 0, "blu": 1}
else:
observations = {"red": 0.5, "blu": 0.5}
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
for team in red:
red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
for team in blu:
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
temp_vector = {}
temp_vector.update(red)
temp_vector.update(blu)
push_metric(apikey, competition, temp_vector)
def load_pit(apikey, competition):
return d.get_pit_data_formatted(apikey, competition)
def pitloop(apikey, competition, pit, tests):
return_vector = {}
for team in pit:
for variable in pit[team]:
if variable in tests:
if not variable in return_vector:
return_vector[variable] = []
return_vector[variable].append(pit[team][variable])
push_pit(apikey, competition, return_vector)
def push_match(apikey, competition, results):
for team in results:
d.push_team_tests_data(apikey, competition, team, results[team])
def push_metric(apikey, competition, metric):
for team in metric:
d.push_team_metrics_data(apikey, competition, team, metric[team])
def push_pit(apikey, competition, pit):
for variable in pit:
d.push_team_pit_data(apikey, competition, variable, pit[variable])
def get_team_metrics(apikey, tbakey, competition):
metrics = d.get_metrics_data_formatted(apikey, competition)
elo = {}
gl2 = {}
for team in metrics:
elo[team] = metrics[team]["metrics"]["elo"]["score"]
gl2[team] = metrics[team]["metrics"]["gl2"]["score"]
elo = {k: v for k, v in sorted(elo.items(), key=lambda item: item[1])}
gl2 = {k: v for k, v in sorted(gl2.items(), key=lambda item: item[1])}
elo_ranked = []
for team in elo:
elo_ranked.append({"team": str(team), "elo": str(elo[team])})
gl2_ranked = []
for team in gl2:
gl2_ranked.append({"team": str(team), "gl2": str(gl2[team])})
return {"elo-ranks": elo_ranked, "glicko2-ranks": gl2_ranked}
sample_json = """{
"max-threads": 0.5,
"team": "",
"competition": "2020ilch",
"key":{
"database":"",
"tba":""
},
"statistics":{
"match":{
"balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"]
},
"metric":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
},
"pit":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}"""
if __name__ == "__main__":
if sys.platform.startswith('win'):
multiprocessing.freeze_support()
main()

src/interface.py (new file, 91 lines)

@@ -0,0 +1,91 @@
from logging import Logger as L
import datetime
import platform
import json
class Logger(L):
file = None
levels = {
0: "",
10:"[DEBUG] ",
20:"[INFO] ",
30:"[WARNING] ",
40:"[ERROR] ",
50:"[CRITICAL]",
}
targets = []
def __init__(self, verbose, profile, debug, file = None):
super().__init__("tra_logger")
self.file = file
if file != None:
self.targets.append(self._send_file)
if profile:
self.targets.append(self._send_null)
elif verbose:
self.targets.append(self._send_scli)
elif debug:
self.targets.append(self._send_scli)
else:
self.targets.append(self._send_null)
def _send_null(self, msg):
pass
def _send_scli(self, msg):
print(msg)
def _send_file(self, msg):
f = open(self.file, 'a')
f.write(msg + "\n")
f.close()
def get_time_formatted(self):
return datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S %Z")
def log(self, level, msg):
for t in self.targets:
t(self.get_time_formatted() + "| " + self.levels[level] + ": " + msg)
def debug(self, msg):
self.log(10, msg)
def info(self, msg):
self.log(20, msg)
def warning(self, msg):
self.log(30, msg)
def error(self, msg):
self.log(40, msg)
def critical(self, msg):
self.log(50, msg)
def splash(self, version):
def hrule():
self.log(0, "#"+38*"-"+"#")
def box(s):
temp = "|"
temp += s
temp += (40-len(s)-2)*" "
temp += "|"
self.log(0, temp)
hrule()
box(" superscript version: " + version)
box(" os: " + platform.system())
box(" python: " + platform.python_version())
hrule()
def save_module_to_file(self, module, data, results):
f = open(module + ".log", "w")
json.dump({"data": data, "results":results}, f, ensure_ascii=False, indent=4)
f.close()
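A minimal usage sketch of this Logger (the file name and version string are illustrative):

logger = Logger(verbose=True, profile=False, debug=False, file="daemon.log")
logger.splash("1.0.0")                             # boxed version/os/python banner
logger.info("daemon process now outputs to file")  # timestamped, "[INFO]" prefix
logger.error("something went wrong")               # timestamped, "[ERROR]" prefix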


@@ -22,7 +22,7 @@ class Module(metaclass = abc.ABCMeta):
 	def validate_config(self, *args, **kwargs):
 		raise NotImplementedError
 	@abc.abstractmethod
-	def run(self, exec_threads, *args, **kwargs):
+	def run(self, *args, **kwargs):
 		raise NotImplementedError
 class Match (Module):
@@ -46,9 +46,9 @@ class Match (Module):
 	def validate_config(self):
 		return True, ""
-	def run(self, exec_threads):
+	def run(self):
 		self._load_data()
-		self._process_data(exec_threads)
+		self._process_data()
 		self._push_results()
 	def _load_data(self):
@@ -85,7 +85,7 @@ class Match (Module):
if test == "regression_sigmoidal": if test == "regression_sigmoidal":
return an.regression(ranges, data, ['sig']) return an.regression(ranges, data, ['sig'])
def _process_data(self, exec_threads): def _process_data(self):
tests = self.config["tests"] tests = self.config["tests"]
data = self.data data = self.data
@@ -103,7 +103,6 @@ class Match (Module):
 						input_vector.append((team, variable, test, data[team][variable]))
 		self.data = input_vector
-		#self.results = list(exec_threads.map(self._simplestats, self.data))
 		self.results = []
 		for test_var_data in self.data:
 			self.results.append(self._simplestats(test_var_data))
@@ -164,15 +163,15 @@ class Metric (Module):
 	def validate_config(self):
 		return True, ""
-	def run(self, exec_threads):
+	def run(self):
 		self._load_data()
-		self._process_data(exec_threads)
+		self._process_data()
 		self._push_results()
 	def _load_data(self):
 		self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.timestamp)
-	def _process_data(self, exec_threads):
+	def _process_data(self):
 		elo_N = self.config["tests"]["elo"]["N"]
 		elo_K = self.config["tests"]["elo"]["K"]
@@ -289,17 +288,16 @@ class Pit (Module):
 	def validate_config(self):
 		return True, ""
-	def run(self, exec_threads):
+	def run(self):
 		self._load_data()
-		self._process_data(exec_threads)
+		self._process_data()
 		self._push_results()
 	def _load_data(self):
 		self.data = d.load_pit(self.apikey, self.competition)
-	def _process_data(self, exec_threads):
+	def _process_data(self):
 		tests = self.config["tests"]
-		print(tests)
 		return_vector = {}
 		for team in self.data:
 			for variable in self.data[team]:
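After this change the module contract is entirely thread-free; below is a minimal conforming module, sketched from the abstract base and run() pipeline shown above, with stand-in data rather than the repository's loaders:

import abc

class Module(metaclass=abc.ABCMeta):
	@abc.abstractmethod
	def validate_config(self, *args, **kwargs):
		raise NotImplementedError
	@abc.abstractmethod
	def run(self, *args, **kwargs):
		raise NotImplementedError

class Example(Module):
	def validate_config(self):
		return True, ""
	def run(self):                    # formerly run(self, exec_threads)
		self._load_data()
		self._process_data()          # formerly _process_data(self, exec_threads)
		self._push_results()
	def _load_data(self):
		self.data = [1, 2, 3]         # stand-in for a database pull
	def _process_data(self):
		self.results = [x * 2 for x in self.data]  # processed serially now
	def _push_results(self):
		pass                          # stand-in for a database push

m = Example()
valid, _ = m.validate_config()
if valid:
	m.run()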


@@ -1,7 +1,6 @@
 import requests
-import json
 from exceptions import APIError
-from config import load_config
+from dep import load_config
 url = "https://titanscouting.epochml.org"
 config_tra = {}
@@ -19,7 +18,7 @@ def get_team_competition():
 	if json['success']:
 		return json['competition']
 	else:
-		raise APIError(json, endpoint)
+		raise APIError(json)
 def get_team():
 	endpoint = '/api/fetchTeamCompetition'
@@ -32,7 +31,7 @@ def get_team():
 	if json['success']:
 		return json['team']
 	else:
-		raise APIError(json, endpoint)
+		raise APIError(json)
 def get_team_match_data(competition, team_num):
 	endpoint = '/api/fetchAllTeamMatchData'
@@ -47,7 +46,7 @@ def get_team_match_data(competition, team_num):
 	if json['success']:
 		return json['data'][team_num]
 	else:
-		raise APIError(json, endpoint)
+		raise APIError(json)
 def get_teams_at_competition(competition):
 	endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
@@ -61,4 +60,4 @@ def get_teams_at_competition(competition):
 	if json['success']:
 		return list(json['data'].keys())
 	else:
-		raise APIError(json, endpoint)
+		raise APIError(json)


@@ -23,6 +23,9 @@ __changelog__ = """changelog:
 	- config-preference option selects between prioritizing local config and prioritizing database config
 	- synchronize-config option selects whether to update the non prioritized config with the prioritized one
 	- divided config options between persistent ones (keys), and variable ones (everything else)
+	- generalized behavior of various core components by collecting loose functions in several dependencies into classes
+	- module.py contains classes, each one represents a single data analysis routine
+	- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
 0.9.3:
 	- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
 	- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
@@ -144,47 +147,29 @@ __author__ = (
"Jacob Levine <jlevine@imsa.edu>", "Jacob Levine <jlevine@imsa.edu>",
) )
__all__ = [
"load_config",
"save_config",
]
# imports: # imports:
import json import os, sys, time
from multiprocessing import freeze_support import pymongo # soon to be deprecated
import os
import pymongo
import sys
import time
import traceback import traceback
import warnings import warnings
import zmq from config import Configuration, ConfigurationError
import pull
from config import parse_config_persistent, parse_config_variable, resolve_config_conflicts, load_config, save_config, ConfigurationError
from data import get_previous_time, set_current_time, check_new_database_matches from data import get_previous_time, set_current_time, check_new_database_matches
from interface import splash, log, ERR, INF, stdout, stderr from interface import Logger
from module import Match, Metric, Pit from module import Match, Metric, Pit
import zmq
config_path = "config.json" config_path = "config.json"
def main(send, verbose = False, profile = False, debug = False): def main(logger, verbose, profile, debug, socket_send = None):
def close_all(): def close_all():
if "exec_threads" in locals():
exec_threads.terminate()
exec_threads.join()
exec_threads.close()
if "client" in locals(): if "client" in locals():
client.close() client.close()
if "f" in locals():
f.close()
warnings.filterwarnings("ignore") warnings.filterwarnings("ignore")
exit_code = 0
if verbose: logger.splash(__version__)
splash(__version__)
modules = {"match": Match, "metric": Metric, "pit": Pit} modules = {"match": Match, "metric": Metric, "pit": Pit}
@@ -194,32 +179,33 @@ def main(send, verbose = False, profile = False, debug = False):
loop_start = time.time() loop_start = time.time()
send(stdout, INF, "current time: " + str(loop_start)) logger.info("current time: " + str(loop_start))
socket_send("current time: " + str(loop_start))
config = {} config = Configuration(config_path)
if load_config(config_path, config):
raise ConfigurationError("could not find config at <" + config_path + ">, generating blank config and exiting", 110)
send(stdout, INF, "found and loaded config at <" + config_path + ">") logger.info("found and loaded config at <" + config_path + ">")
socket_send("found and loaded config at <" + config_path + ">")
apikey, tbakey, preference, sync = parse_config_persistent(send, config) apikey, tbakey = config.database, config.tba
send(stdout, INF, "found and loaded database and tba keys") logger.info("found and loaded database and tba keys")
socket_send("found and loaded database and tba keys")
client = pymongo.MongoClient(apikey) client = pymongo.MongoClient(apikey)
send(stdout, INF, "established connection to database") logger.info("established connection to database")
socket_send("established connection to database")
previous_time = get_previous_time(client) previous_time = get_previous_time(client)
send(stdout, INF, "analysis backtimed to: " + str(previous_time))
config = resolve_config_conflicts(send, client, config, preference, sync) logger.info("analysis backtimed to: " + str(previous_time))
socket_send("analysis backtimed to: " + str(previous_time))
config.resolve_config_conflicts(logger, client)
config_modules, competition = config.modules, config.competition
exec_threads, config_modules = parse_config_variable(send, config)
if 'competition' in config['variable']:
competition = config['variable']['competition']
else:
competition = pull.get_team_competition()
for m in config_modules: for m in config_modules:
if m in modules: if m in modules:
start = time.time() start = time.time()
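The loop that begins here runs each configured module through an identical contract, visible in the next hunk: validate_config() to decide whether to proceed, run() to do the work, then the data and results attributes for debug logging. A hypothetical skeleton of that contract, not the actual module.py classes:

	# Hypothetical skeleton of a module.py analysis class, inferred from
	# how the main loop uses it. Constructor arguments are an assumption.
	class ExampleModule:

		def __init__(self, config, apikey, tbakey, timestamp, competition):
			self.config = config
			self.data = None
			self.results = None
			# remaining setup is assumed

		def validate_config(self):
			# return a falsy value when required config keys are missing,
			# so the main loop can skip this module
			return self.config is not None

		def run(self):
			# load data, process it, and push results; populates
			# self.data and self.results for the debug logger
			pass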
@@ -227,71 +213,81 @@ def main(send, verbose = False, profile = False, debug = False):
valid = current_module.validate_config() valid = current_module.validate_config()
if not valid: if not valid:
continue continue
current_module.run(exec_threads) current_module.run()
send(stdout, INF, m + " module finished in " + str(time.time() - start) + " seconds") logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
if debug: if debug:
f = open(m + ".log", "w+") logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
json.dump({"data": current_module.data, "results":current_module.results}, f, ensure_ascii=False, indent=4)
f.close()
set_current_time(client, loop_start) set_current_time(client, loop_start)
close_all() close_all()
send(stdout, INF, "closed threads and database client") logger.info("closed threads and database client")
send(stdout, INF, "finished all tasks in " + str(time.time() - loop_start) + " seconds, looping") logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
socket_send("closed threads and database client")
socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
if profile: if profile:
exit_code = 0 return 0
break
if debug:
return 0
event_delay = config["variable"]["event-delay"] event_delay = config["variable"]["event-delay"]
if event_delay: if event_delay:
send(stdout, INF, "loop delayed until database returns new matches") logger.info("loop delayed until database returns new matches")
socket_send("loop delayed until database returns new matches")
new_match = False new_match = False
while not new_match: while not new_match:
time.sleep(1) time.sleep(1)
new_match = check_new_database_matches(client, competition) new_match = check_new_database_matches(client, competition)
send(stdout, INF, "database returned new matches") logger.info("database returned new matches")
socket_send("database returned new matches")
else: else:
loop_delay = float(config["variable"]["loop-delay"]) loop_delay = float(config["variable"]["loop-delay"])
remaining_time = loop_delay - (time.time() - loop_start) remaining_time = loop_delay - (time.time() - loop_start)
if remaining_time > 0: if remaining_time > 0:
send(stdout, INF, "loop delayed by " + str(remaining_time) + " seconds") logger.info("loop delayed by " + str(remaining_time) + " seconds")
socket_send("loop delayed by " + str(remaining_time) + " seconds")
time.sleep(remaining_time) time.sleep(remaining_time)
except KeyboardInterrupt: except KeyboardInterrupt:
send(stdout, INF, "detected KeyboardInterrupt, killing threads")
close_all() close_all()
send(stdout, INF, "terminated threads, exiting") logger.info("detected KeyboardInterrupt, exiting")
break socket_send("detected KeyboardInterrupt, exiting")
return 0
except ConfigurationError as e: except ConfigurationError as e:
send(stderr, ERR, "encountered a configuration error: " + str(e), code = e.code) str_e = "".join(traceback.format_exception(e))
traceback.print_exc(file = stderr) logger.error("encountered a configuration error: " + str(e))
exit_code = 1 logger.error(str_e)
socket_send("encountered a configuration error: " + str(e))
socket_send(str_e)
close_all() close_all()
break return 1
except Exception as e: except Exception as e:
send(stderr, ERR, "encountered an exception while running", code = 1) str_e = "".join(traceback.format_exception(e))
traceback.print_exc(file = stderr) logger.error("encountered an exception while running")
exit_code = 1 logger.error(str_e)
socket_send("encountered an exception while running")
socket_send(str_e)
close_all() close_all()
break return 1
return exit_code
def start(pid_path, verbose = False, profile = False, debug = False): def start(pid_path, verbose, profile, debug):
if profile: if profile:
def send(target, level, message, code = 0): def send(msg):
pass pass
logger = Logger(verbose, profile, debug)
import cProfile, pstats, io import cProfile, pstats, io
profile = cProfile.Profile() profile = cProfile.Profile()
profile.enable() profile.enable()
exit_code = main(send, profile = True) exit_code = main(logger, verbose, profile, debug, socket_send = send)
profile.disable() profile.disable()
f = open("profile.txt", 'w+') f = open("profile.txt", 'w+')
ps = pstats.Stats(profile, stream = f).sort_stats('cumtime') ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
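The profiling branch above is the standard cProfile/pstats idiom; for reference, the complete pattern (including the print_stats call that falls outside this hunk) looks like the following, here wrapped around a stand-in workload rather than main():

	# Self-contained example of the cProfile/pstats idiom used in start():
	# profile a callable and dump cumulative-time-sorted stats to a file.
	import cProfile, pstats

	def workload():
		return sum(i * i for i in range(100000))

	profile = cProfile.Profile()
	profile.enable()
	workload()
	profile.disable()

	with open("profile.txt", "w+") as f:
		ps = pstats.Stats(profile, stream = f).sort_stats("cumtime")
		ps.print_stats()  # writes the sorted profile to profile.txt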
@@ -300,35 +296,53 @@ def start(pid_path, verbose = False, profile = False, debug = False):
elif verbose: elif verbose:
exit_code = main(log, verbose = verbose) def send(msg):
pass
logger = Logger(verbose, profile, debug)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
sys.exit(exit_code) sys.exit(exit_code)
elif debug: elif debug:
exit_code = main(log, verbose = True, profile = True, debug = debug) def send(msg):
pass
logger = Logger(verbose, profile, debug)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
sys.exit(exit_code) sys.exit(exit_code)
else: else:
f = open('errorlog.log', 'w+') logfile = "logfile.log"
f = open(logfile, 'w+')
f.close()
e = open('errorlog.log', 'w+')
with daemon.DaemonContext( with daemon.DaemonContext(
working_directory = os.getcwd(), working_directory = os.getcwd(),
pidfile = pidfile.TimeoutPIDLockFile(pid_path), pidfile = pidfile.TimeoutPIDLockFile(pid_path),
stderr = f stderr = e
): ):
context = zmq.Context() context = zmq.Context()
socket = context.socket(zmq.PUB) socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5678") socket.bind("tcp://*:5678")
socket.send(b'status') socket.send(b'status')
def send(target, level, message, code = 0): def send(msg):
socket.send(bytes("status: " + message, 'utf-8')) socket.send(bytes("status: " + msg, "utf-8"))
logger = Logger(verbose, profile, debug, file = logfile)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
exit_code = main(send)
socket.close() socket.close()
f.close() f.close()
sys.exit(exit_code) sys.exit(exit_code)
def stop(pid_path): def stop(pid_path):
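The daemon branch above binds a zmq PUB socket on port 5678 and prefixes every message with "status: ", so any monitoring process can subscribe to the feed. A minimal subscriber, shown here as a hypothetical example rather than code from this repository:

	# Minimal pyzmq subscriber for the daemon's status feed.
	import zmq

	context = zmq.Context()
	socket = context.socket(zmq.SUB)
	socket.connect("tcp://localhost:5678")
	# PUB/SUB filtering is prefix-based; "status" matches the messages
	# published by the daemon's send() helper
	socket.setsockopt_string(zmq.SUBSCRIBE, "status")

	while True:
		print(socket.recv_string())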
@@ -350,17 +364,16 @@ def stop(pid_path):
if os.path.exists(pid_path): if os.path.exists(pid_path):
os.remove(pid_path) os.remove(pid_path)
else: else:
traceback.print_exc(file = stderr) traceback.print_exc(file = sys.stderr)
sys.exit(1) sys.exit(1)
def restart(pid_path): def restart(pid_path):
stop(pid_path) stop(pid_path)
start(pid_path) start(pid_path, False, False, False)
if __name__ == "__main__": if __name__ == "__main__":
if sys.platform.startswith("win"): if sys.platform.startswith("win"):
freeze_support()
start(None, verbose = True) start(None, verbose = True)
else: else:
@@ -370,17 +383,17 @@ if __name__ == "__main__":
pid_path = "tra-daemon.pid" pid_path = "tra-daemon.pid"
if len(sys.argv) == 2: if len(sys.argv) == 2:
if 'start' == sys.argv[1]: if 'start' == sys.argv[1]:
start(pid_path) start(pid_path, False, False, False)
elif 'stop' == sys.argv[1]: elif 'stop' == sys.argv[1]:
stop(pid_path) stop(pid_path)
elif 'restart' == sys.argv[1]: elif 'restart' == sys.argv[1]:
restart(pid_path) restart(pid_path)
elif 'verbose' == sys.argv[1]: elif 'verbose' == sys.argv[1]:
start(None, verbose = True) start(None, True, False, False)
elif 'profile' == sys.argv[1]: elif 'profile' == sys.argv[1]:
start(None, profile=True) start(None, False, True, False)
elif 'debug' == sys.argv[1]: elif 'debug' == sys.argv[1]:
start(None, debug = True) start(None, False, False, True)
else: else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0]) print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2) sys.exit(2)
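stop() is only partially visible in the hunk above. The conventional shape of a PID-file based stop, given here as a hedged sketch of the general pattern and not necessarily the elided code, reads the recorded PID, signals the daemon, and then removes the PID file:

	# Hedged sketch of a typical daemon stop() built on the same PID
	# file; the conventional pattern, not the repository's actual code.
	import os, signal, sys, traceback

	def stop(pid_path):
		try:
			with open(pid_path, "r") as f:
				pid = int(f.read().strip())
			os.kill(pid, signal.SIGTERM)  # ask the daemon to exit cleanly
			if os.path.exists(pid_path):
				os.remove(pid_path)
		except Exception:
			traceback.print_exc(file = sys.stderr)
			sys.exit(1)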