48 Commits

Author SHA1 Message Date
Arthur Lu
93091b6bd2 appeased pylint in config.py attr lookup 2022-03-31 02:18:30 +00:00
Arthur Lu
0024a94f4e added file logging with default,
added basic progress bars for each module
2022-03-30 04:53:40 +00:00
Arthur Lu
5885224231 removed match printing,
CLI args use argparse

Signed-off-by: Arthur Lu <learthurgo@gmail.com>
2022-03-29 23:09:37 +00:00
Arthur Lu
64ea7c227c removed commented code
Signed-off-by: Arthur Lu <learthurgo@gmail.com>
2022-03-29 22:23:24 +00:00
Arthur Lu
ddf6faeecf fixed metrics processing ordering,
added metrics logging
2022-03-29 21:15:24 +00:00
Arthur Lu
b4766d1b3e fixed Module template __init__ definition
Signed-off-by: Arthur Lu <learthurgo@gmail.com>
2022-03-29 16:49:38 +00:00
Arthur Lu
e04245952a merged data and pull functions into Client class,
removed pull.py dep.py,
modified existing code to work with new Client class
2022-03-29 05:48:39 +00:00
Arthur Lu
2ebaddb92c updated usage 2022-03-29 04:44:59 +00:00
Arthur Lu
8b09e155dc updated changelog 2022-03-29 04:42:26 +00:00
Arthur Lu
5ca474d158 finished removing socket functionality 2022-03-29 04:39:52 +00:00
Arthur Lu
e7a8a259fc finished removing daemon functionality 2022-03-29 04:35:01 +00:00
Arthur Lu
5553e3dddf fixed CLI options,
implemented better config attr search member,
fixed imports
2022-03-29 04:28:09 +00:00
Arthur Lu
0212e6b2ca pylint now uses tab indent 2022-03-29 04:15:47 +00:00
Arthur Lu
14f8901803 removed unnecessary imports 2022-03-28 23:22:42 +00:00
Arthur Lu
a5f9e55cf4 fixed build scripts 2022-03-28 23:15:13 +00:00
Arthur Lu
34f0b3f10c removed: daemonization,
socket messaging

added: CLI option to specify config file

Not working, requires data.py changes in competition branch
2022-03-28 22:42:04 +00:00
Arthur Lu
6b070c7b08 fixed merge changes 2022-03-15 05:31:51 +00:00
Arthur Lu
9279311664 Merge branch 'master' into superscript-v1 2022-03-15 05:27:11 +00:00
Dev Singh
de4d3d4967 Update README.md 2021-10-21 14:21:20 -05:00
Arthur Lu
d56411253c fixed badge url 2021-08-26 18:20:11 -07:00
Arthur Lu
c415225afe Update release badge 2021-08-26 18:11:25 -07:00
Arthur Lu
d684813ee0 Merge pull request #10 from titanscouting/automate-build
Automate build
2021-06-09 14:58:21 -07:00
Arthur Lu
26079f3180 fixed pathing for build-CLI.*
added temp directory to gitignore
2021-04-27 07:26:14 +00:00
Arthur Lu
99e722c400 removed ThreadPoolExecutor import 2021-04-25 06:05:33 +00:00
Arthur Lu
f5a0e0fe8c added sample build-cli workflow 2021-04-25 03:51:01 +00:00
Arthur Lu
28e423942f added .gitattributes 2021-04-15 19:41:10 +00:00
Arthur Lu
8977f8c277 added compiled binaries with no file endings
to gitignore
2021-04-13 04:05:46 +00:00
Arthur Lu
2b0f718aa5 removed compiled binaries
added compiled binaries in /dist/ to gitignore
2021-04-13 04:03:07 +00:00
Arthur Lu
30469a3211 removed matplotlib import
removed plotting pit analysis
fixed warning supression for win exe
superscript v 0.8.6
2021-04-12 15:13:54 -07:00
Arthur Lu
391d4e1996 created batch script for windows compilation 2021-04-12 14:39:00 -07:00
Arthur Lu
224f64e8b7 better fix for devcontainer.json 2021-04-12 06:30:21 +00:00
Arthur Lu
aa7d7ca927 quick patch for devcontainer.json 2021-04-12 06:27:50 +00:00
Arthur Lu
d10c16d483 superscript v 0.8.5 2021-04-10 06:08:18 +00:00
Arthur Lu
f211d00f2d superscript v 0.8.4 2021-04-09 23:45:16 +00:00
Arthur Lu
69c707689b superscript v 0.8.3 2021-04-03 20:47:45 +00:00
Arthur Lu
d2f9c802b3 built and verified threading fixes 2021-04-02 22:04:06 +00:00
Arthur Lu
99e28f5e83 fixed .gitignore
added build-CLI script
fixed threading in superscript
2021-04-02 21:58:35 +00:00
Arthur Lu
18dbc174bd deleted config.json
changed superscript config lookup to relative path
added additional requirements to requirements.txt
added build spec file for superscript
2021-04-02 21:35:05 +00:00
Arthur Lu
79689d69c8 fixed spelling in default config,
added config to git ignore
2021-04-02 01:28:25 +00:00
Dev Singh
80c3f1224b Merge pull request #3 from titanscouting/superscript-main
Merge initial changes
2021-04-01 13:40:29 -05:00
Dev Singh
960a1b3165 fix ut and file structure 2021-04-01 13:38:53 -05:00
Arthur Lu
89fcd366d3 Merge branch 'master' into superscript-main 2021-04-01 11:34:44 -07:00
Dev Singh
79cde44108 Create SECURITY.md 2021-04-01 13:11:38 -05:00
Dev Singh
2b896db9a9 Create MAINTAINERS 2021-04-01 13:11:22 -05:00
Dev Singh
483897c011 Merge pull request #1 from titanscouting/add-license-1
Create LICENSE
2021-04-01 13:11:03 -05:00
Dev Singh
9287d98fe2 Create LICENSE 2021-04-01 13:10:50 -05:00
Dev Singh
991751a340 Create CONTRIBUTING.md 2021-04-01 13:10:14 -05:00
Dev Singh
9d2476b5eb Create README.md 2021-04-01 13:09:18 -05:00
15 changed files with 367 additions and 608 deletions

View File

@@ -3,4 +3,4 @@ WORKDIR /
RUN apt-get -y update; apt-get -y upgrade RUN apt-get -y update; apt-get -y upgrade
RUN apt-get -y install git binutils RUN apt-get -y install git binutils
COPY requirements.txt . COPY requirements.txt .
RUN pip install -r requirements.txt RUN pip install -r requirements.txt

View File

@@ -9,9 +9,10 @@
"python.linting.enabled": true, "python.linting.enabled": true,
"python.linting.pylintEnabled": true, "python.linting.pylintEnabled": true,
"python.linting.pylintPath": "/usr/local/bin/pylint", "python.linting.pylintPath": "/usr/local/bin/pylint",
"python.linting.pylintArgs": ["--indent-string", "\t"],
"python.testing.pytestPath": "/usr/local/bin/pytest", "python.testing.pytestPath": "/usr/local/bin/pytest",
"editor.tabSize": 4, "editor.tabSize": 4,
"editor.insertSpaces": false "editor.insertSpaces": false
}, },
"extensions": [ "extensions": [
"mhutchie.git-graph", "mhutchie.git-graph",

View File

@@ -6,8 +6,6 @@ pylint
pymongo pymongo
pyparsing pyparsing
pytest pytest
python-daemon
pyzmq
requests requests
scikit-learn scikit-learn
scipy scipy

View File

@@ -32,4 +32,4 @@ jobs:
repo_token: ${{ secrets.GITHUB_TOKEN }} repo_token: ${{ secrets.GITHUB_TOKEN }}
file: superscript file: superscript
asset_name: superscript asset_name: superscript
tag: ${{ github.ref }} tag: ${{ github.ref }}

2
.gitignore vendored
View File

@@ -15,4 +15,4 @@
**/*.log **/*.log
**/errorlog.txt **/errorlog.txt
/dist/* /dist/*

View File

@@ -43,4 +43,4 @@ don't worry, you may have just not configured the application correctly, but wou
# Build Statuses # Build Statuses
Coming soon! Coming soon!

View File

@@ -1,4 +1,4 @@
set pathtospec="../src/superscript.spec" set pathtospec="superscript.spec"
set pathtodist="../dist/" set pathtodist="../dist/"
set pathtowork="temp/" set pathtowork="temp/"

View File

@@ -11,40 +11,25 @@ a = Analysis(
hookspath=[], hookspath=[],
hooksconfig={}, hooksconfig={},
runtime_hooks=[], runtime_hooks=[],
excludes=['matplotlib'], excludes=[],
win_no_prefer_redirects=False, win_no_prefer_redirects=False,
win_private_assemblies=False, win_private_assemblies=False,
cipher=block_cipher, cipher=block_cipher,
noarchive=False noarchive=False
) )
pyz = PYZ( pyz = PYZ(a.pure, a.zipped_data,
a.pure, cipher=block_cipher)
a.zipped_data, exe = EXE(pyz,
cipher=block_cipher a.scripts,
) a.binaries,
exe = EXE( a.zipfiles,
pyz, a.datas,
a.scripts, [('W ignore', None, 'OPTION')],
[], name='superscript',
exclude_binaries=True, debug=False,
name='superscript', bootloader_ignore_signals=False,
debug=False, strip=False,
bootloader_ignore_signals=False, upx=True,
strip=False, upx_exclude=[],
upx=True, runtime_tmpdir=None,
console=True, console=True )
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None
)
coll = COLLECT(
exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='superscript'
)

View File

@@ -2,8 +2,6 @@ import json
from exceptions import ConfigurationError from exceptions import ConfigurationError
from cerberus import Validator from cerberus import Validator
from data import set_database_config, get_database_config
class Configuration: class Configuration:
path = None path = None
@@ -185,33 +183,24 @@ class Configuration:
if not isValidated: if not isValidated:
raise ConfigurationError("config validation error: " + v.errors) raise ConfigurationError("config validation error: " + v.errors)
def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE def __getattr__(self, name): # better hashed lookup method for common multikey-value paths, TYPE UNSAFE
if name == "persistent": attr_lookup = {
return self.config["persistent"] "persistent": self.config["persistent"],
elif name == "key": "key": self.config["persistent"]["key"],
return self.config["persistent"]["key"] "database": self.config["persistent"]["key"]["database"],
elif name == "database": "tba": self.config["persistent"]["key"]["tba"],
# soon to be deprecated "tra": self.config["persistent"]["key"]["tra"],
return self.config["persistent"]["key"]["database"] "priority": self.config["persistent"]["config-preference"],
elif name == "tba": "sync": self.config["persistent"]["synchronize-config"],
return self.config["persistent"]["key"]["tba"] "variable": self.config["variable"],
elif name == "tra": "event_delay": self.config["variable"]["event-delay"],
return self.config["persistent"]["key"]["tra"] "loop_delay": self.config["variable"]["loop-delay"],
elif name == "priority": "competition": self.config["variable"]["competition"],
return self.config["persistent"]["config-preference"] "modules": self.config["variable"]["modules"]
elif name == "sync": }
return self.config["persistent"]["synchronize-config"] try:
elif name == "variable": return attr_lookup[name]
return self.config["variable"] except KeyError:
elif name == "event_delay":
return self.config["variable"]["event-delay"]
elif name == "loop_delay":
return self.config["variable"]["loop-delay"]
elif name == "competition":
return self.config["variable"]["competition"]
elif name == "modules":
return self.config["variable"]["modules"]
else:
return None return None
def __getitem__(self, key): def __getitem__(self, key):
@@ -224,14 +213,14 @@ class Configuration:
if sync: if sync:
if priority == "local" or priority == "client": if priority == "local" or priority == "client":
logger.info("config-preference set to local/client, loading local config information") logger.info("config-preference set to local/client, loading local config information")
remote_config = get_database_config(client) remote_config = client.get_database_config()
if remote_config != self.config["variable"]: if remote_config != self.config["variable"]:
set_database_config(client, self.config["variable"]) client.set_database_config(self.config["variable"])
logger.info("database config was different and was updated") logger.info("database config was different and was updated")
# no change to config # no change to config
elif priority == "remote" or priority == "database": elif priority == "remote" or priority == "database":
logger.info("config-preference set to remote/database, loading remote config information") logger.info("config-preference set to remote/database, loading remote config information")
remote_config = get_database_config(client) remote_config = client.get_database_config()
if remote_config != self.config["variable"]: if remote_config != self.config["variable"]:
self.config["variable"] = remote_config self.config["variable"] = remote_config
self.save_config() self.save_config()
@@ -245,7 +234,7 @@ class Configuration:
# no change to config # no change to config
elif priority == "remote" or priority == "database": elif priority == "remote" or priority == "database":
logger.info("config-preference set to remote/database, loading database config information") logger.info("config-preference set to remote/database, loading database config information")
self.config["variable"] = get_database_config(client) self.config["variable"] = client.get_database_config()
# change variable to match remote without updating local version # change variable to match remote without updating local version
else: else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"") raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")

View File

@@ -1,199 +1,298 @@
import requests import requests
import pull
import pandas as pd import pandas as pd
import pymongo
from exceptions import APIError
def pull_new_tba_matches(apikey, competition, cutoff): class Client:
api_key= apikey
x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
out = []
for i in x.json():
if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
return out
def get_team_match_data(client, competition, team_num): def __init__(self, config):
db = client.data_scouting self.competition = config.competition
mdata = db.matchdata self.tbakey = config.tba
out = {} self.mongoclient = pymongo.MongoClient(config.database)
for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}): self.trakey = config.tra
out[i['match']] = i['data']
return pd.DataFrame(out)
def get_team_pit_data(client, competition, team_num): def close(self):
db = client.data_scouting self.mongoclient.close()
mdata = db.pitdata
out = {}
return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
def get_team_metrics_data(client, competition, team_num): def pull_new_tba_matches(self, cutoff):
db = client.data_processing competition = self.competition
mdata = db.team_metrics api_key= self.tbakey
return mdata.find_one({"competition" : competition, "team": team_num}) x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
json = x.json()
out = []
for i in json:
if i["actual_time"] != None and i["comp_level"] == "qm":
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
out.sort(key=lambda x: x['match'])
return out
def get_match_data_formatted(client, competition): def get_team_match_data(self, team_num):
teams_at_comp = pull.get_teams_at_competition(competition) client = self.mongoclient
out = {} competition = self.competition
for team in teams_at_comp: db = client.data_scouting
try: mdata = db.matchdata
out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict()) out = {}
except: for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
pass out[i['match']] = i['data']
return out return pd.DataFrame(out)
def get_metrics_data_formatted(client, competition): def get_team_metrics_data(self, team_num):
teams_at_comp = pull.get_teams_at_competition(competition) client = self.mongoclient
out = {} competition = self.competition
for team in teams_at_comp: db = client.data_processing
try: mdata = db.team_metrics
out[int(team)] = get_team_metrics_data(client, competition, int(team)) return mdata.find_one({"competition" : competition, "team": team_num})
except:
pass
return out
def get_pit_data_formatted(client, competition): def get_team_pit_data(self, team_num):
x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition) client = self.mongoclient
x = x.json() competition = self.competition
x = x['data'] db = client.data_scouting
x = x.keys() mdata = db.pitdata
out = {} return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
for i in x:
try:
out[int(i)] = get_team_pit_data(client, competition, int(i))
except:
pass
return out
def get_pit_variable_data(client, competition): def unkeyify_2l(self, layered_dict):
db = client.data_processing out = {}
mdata = db.team_pit for i in layered_dict.keys():
out = {} add = []
return mdata.find() sortkey = []
for j in layered_dict[i].keys():
add.append([j,layered_dict[i][j]])
add.sort(key = lambda x: x[0])
out[i] = list(map(lambda x: x[1], add))
return out
def get_pit_variable_formatted(client, competition): def get_match_data_formatted(self):
temp = get_pit_variable_data(client, competition) teams_at_comp = self.get_teams_at_competition()
out = {} out = {}
for i in temp: for team in teams_at_comp:
out[i["variable"]] = i["data"] try:
return out out[int(team)] = self.unkeyify_2l(self.get_team_match_data(team).transpose().to_dict())
except:
pass
return out
def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"): def get_metrics_data_formatted(self):
db = client[dbname] competition = self.competition
mdata = db[colname] teams_at_comp = self.get_teams_at_competition()
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True) out = {}
for team in teams_at_comp:
try:
out[int(team)] = self.get_team_metrics_data(int(team))
except:
pass
return out
def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"): def get_pit_data_formatted(self):
db = client[dbname] client = self.mongoclient
mdata = db[colname] competition = self.competition
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True) x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
x = x.json()
x = x['data']
x = x.keys()
out = {}
for i in x:
try:
out[int(i)] = self.get_team_pit_data(int(i))
except:
pass
return out
def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"): def get_pit_variable_data(self):
db = client[dbname] client = self.mongoclient
mdata = db[colname] db = client.data_processing
mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True) mdata = db.team_pit
return mdata.find()
def get_analysis_flags(client, flag): def get_pit_variable_formatted(self):
db = client.data_processing temp = self.get_pit_variable_data()
mdata = db.flags out = {}
return mdata.find_one({flag:{"$exists":True}}) for i in temp:
out[i["variable"]] = i["data"]
return out
def set_analysis_flags(client, flag, data): def push_team_tests_data(self, team_num, data, dbname = "data_processing", colname = "team_tests"):
db = client.data_processing client = self.mongoclient
mdata = db.flags competition = self.competition
return mdata.replace_one({flag:{"$exists":True}}, data, True) db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
def unkeyify_2l(layered_dict): def push_team_metrics_data(self, team_num, data, dbname = "data_processing", colname = "team_metrics"):
out = {} client = self.mongoclient
for i in layered_dict.keys(): competition = self.competition
add = [] db = client[dbname]
sortkey = [] mdata = db[colname]
for j in layered_dict[i].keys(): mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
add.append([j,layered_dict[i][j]])
add.sort(key = lambda x: x[0])
out[i] = list(map(lambda x: x[1], add))
return out
def get_previous_time(client): def push_team_pit_data(self, variable, data, dbname = "data_processing", colname = "team_pit"):
client = self.mongoclient
competition = self.competition
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
previous_time = get_analysis_flags(client, "latest_update") def get_analysis_flags(self, flag):
client = self.mongoclient
db = client.data_processing
mdata = db.flags
return mdata.find_one({flag:{"$exists":True}})
if previous_time == None: def set_analysis_flags(self, flag, data):
client = self.mongoclient
db = client.data_processing
mdata = db.flags
return mdata.replace_one({flag:{"$exists":True}}, data, True)
set_analysis_flags(client, "latest_update", 0) def get_previous_time(self):
previous_time = 0
else: previous_time = self.get_analysis_flags("latest_update")
previous_time = previous_time["latest_update"] if previous_time == None:
return previous_time self.set_analysis_flags("latest_update", 0)
previous_time = 0
def set_current_time(client, current_time):
set_analysis_flags(client, "latest_update", {"latest_update":current_time})
def get_database_config(client):
remote_config = get_analysis_flags(client, "config")
return remote_config["config"] if remote_config != None else None
def set_database_config(client, config):
set_analysis_flags(client, "config", {"config": config})
def load_match(client, competition):
return get_match_data_formatted(client, competition)
def load_metric(client, competition, match, group_name, metrics):
group = {}
for team in match[group_name]:
db_data = get_team_metrics_data(client, competition, team)
if db_data == None:
elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
else: else:
metrics = db_data["metrics"] previous_time = previous_time["latest_update"]
elo = metrics["elo"] return previous_time
gl2 = metrics["gl2"]
ts = metrics["ts"]
group[team] = {"elo": elo, "gl2": gl2, "ts": ts} def set_current_time(self, current_time):
return group self.set_analysis_flags("latest_update", {"latest_update":current_time})
def load_pit(client, competition): def get_database_config(self):
return get_pit_data_formatted(client, competition) remote_config = self.get_analysis_flags("config")
return remote_config["config"] if remote_config != None else None
def push_match(client, competition, results): def set_database_config(self, config):
for team in results: self.set_analysis_flags("config", {"config": config})
push_team_tests_data(client, competition, team, results[team]) def load_match(self):
def push_metric(client, competition, metric): return self.get_match_data_formatted()
for team in metric: def load_metric(self, match, group_name, metrics):
push_team_metrics_data(client, competition, team, metric[team]) group = {}
def push_pit(client, competition, pit): for team in match[group_name]:
for variable in pit: db_data = self.get_team_metrics_data(team)
push_team_pit_data(client, competition, variable, pit[variable])
def check_new_database_matches(client, competition): if db_data == None:
return True elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
else:
metrics = db_data["metrics"]
elo = metrics["elo"]
gl2 = metrics["gl2"]
ts = metrics["ts"]
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
return group
def load_pit(self):
return self.get_pit_data_formatted()
def push_match(self, results):
for team in results:
self.push_team_tests_data(team, results[team])
def push_metric(self, metric):
for team in metric:
self.push_team_metrics_data(team, metric[team])
def push_pit(self, pit):
for variable in pit:
self.push_team_pit_data(variable, pit[variable])
def check_new_database_matches(self):
return True
#----- API implementations below -----#
def get_team_competition(self):
trakey = self.trakey
url = self.trakey['url']
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['competition']
else:
raise APIError(json)
def get_team(self):
trakey = self.trakey
url = self.trakey['url']
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['team']
else:
raise APIError(json)
""" doesn't seem to be functional:
def get_team_match_data(self, team_num):
trakey = self.trakey
url = self.trakey['url']
competition = self.competition
endpoint = '/api/fetchAllTeamMatchData'
params = {
"competition": competition,
"teamScouted": team_num,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['data'][team_num]
else:
raise APIError(json)"""
def get_teams_at_competition(self):
trakey = self.trakey
url = self.trakey['url']
competition = self.competition
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
params = {
"competition": competition,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return list(json['data'].keys())
else:
raise APIError(json)

View File

@@ -1,141 +0,0 @@
# contains deprecated functions, not to be used unless nessasary!
import json
sample_json = """
{
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":false
},
"variable":{
"max-threads":0.5,
"team":"",
"event-delay":false,
"loop-delay":0,
"reportable":true,
"teams":[
],
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
}
},
"pit":{
"tests":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}
}
}
"""
def load_config(path, config_vector):
try:
f = open(path, "r")
config_vector.update(json.load(f))
f.close()
return 0
except:
f = open(path, "w")
f.write(sample_json)
f.close()
return 1

View File

@@ -23,7 +23,7 @@ class Logger(L):
self.file = file self.file = file
if file != None: if file is not None:
self.targets.append(self._send_file) self.targets.append(self._send_file)
if profile: if profile:

View File

@@ -1,8 +1,8 @@
import abc import abc
import data as d
import signal import signal
import numpy as np import numpy as np
from tra_analysis import Analysis as an from tra_analysis import Analysis as an
from tqdm import tqdm
class Module(metaclass = abc.ABCMeta): class Module(metaclass = abc.ABCMeta):
@@ -16,7 +16,7 @@ class Module(metaclass = abc.ABCMeta):
callable(subclass.run) callable(subclass.run)
) )
@abc.abstractmethod @abc.abstractmethod
def __init__(self, config, apikey, tbakey, timestamp, competition, *args, **kwargs): def __init__(self, *args, **kwargs):
raise NotImplementedError raise NotImplementedError
@abc.abstractmethod @abc.abstractmethod
def validate_config(self, *args, **kwargs): def validate_config(self, *args, **kwargs):
@@ -28,20 +28,16 @@ class Module(metaclass = abc.ABCMeta):
class Match (Module): class Match (Module):
config = None config = None
apikey = None
tbakey = None
timestamp = None timestamp = None
competition = None client = None
data = None data = None
results = None results = None
def __init__(self, config, apikey, tbakey, timestamp, competition): def __init__(self, config, timestamp, client):
self.config = config self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp self.timestamp = timestamp
self.competition = competition self.client = client
def validate_config(self): def validate_config(self):
return True, "" return True, ""
@@ -52,7 +48,7 @@ class Match (Module):
self._push_results() self._push_results()
def _load_data(self): def _load_data(self):
self.data = d.load_match(self.apikey, self.competition) self.data = self.client.load_match()
def _simplestats(self, data_test): def _simplestats(self, data_test):
@@ -92,7 +88,7 @@ class Match (Module):
input_vector = [] input_vector = []
for team in data: for team in tqdm(data, desc = "Match Module ", unit = " team"):
for variable in data[team]: for variable in data[team]:
@@ -140,25 +136,21 @@ class Match (Module):
self.results = return_vector self.results = return_vector
d.push_match(self.apikey, self.competition, self.results) self.client.push_match(self.results)
class Metric (Module): class Metric (Module):
config = None config = None
apikey = None
tbakey = None
timestamp = None timestamp = None
competition = None client = None
data = None data = None
results = None results = None
def __init__(self, config, apikey, tbakey, timestamp, competition): def __init__(self, config, timestamp, client):
self.config = config self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp self.timestamp = timestamp
self.competition = competition self.client = client
def validate_config(self): def validate_config(self):
return True, "" return True, ""
@@ -169,10 +161,12 @@ class Metric (Module):
self._push_results() self._push_results()
def _load_data(self): def _load_data(self):
self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.timestamp) self.data = self.client.pull_new_tba_matches(self.timestamp)
def _process_data(self): def _process_data(self):
self.results = {}
elo_N = self.config["tests"]["elo"]["N"] elo_N = self.config["tests"]["elo"]["N"]
elo_K = self.config["tests"]["elo"]["K"] elo_K = self.config["tests"]["elo"]["K"]
@@ -181,10 +175,10 @@ class Metric (Module):
red = {} red = {}
blu = {} blu = {}
for match in matches: for match in tqdm(matches, desc = "Metric Module ", unit = " match"):
red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"]) red = self.client.load_metric(match, "red", self.config["tests"])
blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"]) blu = self.client.load_metric(match, "blue", self.config["tests"])
elo_red_total = 0 elo_red_total = 0
elo_blu_total = 0 elo_blu_total = 0
@@ -262,7 +256,9 @@ class Metric (Module):
temp_vector.update(red) temp_vector.update(red)
temp_vector.update(blu) temp_vector.update(blu)
d.push_metric(self.apikey, self.competition, temp_vector) self.results[match['match']] = temp_vector
self.client.push_metric(temp_vector)
def _push_results(self): def _push_results(self):
pass pass
@@ -270,20 +266,16 @@ class Metric (Module):
class Pit (Module): class Pit (Module):
config = None config = None
apikey = None
tbakey = None
timestamp = None timestamp = None
competition = None client = None
data = None data = None
results = None results = None
def __init__(self, config, apikey, tbakey, timestamp, competition): def __init__(self, config, timestamp, client):
self.config = config self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp self.timestamp = timestamp
self.competition = competition self.client = client
def validate_config(self): def validate_config(self):
return True, "" return True, ""
@@ -294,12 +286,12 @@ class Pit (Module):
self._push_results() self._push_results()
def _load_data(self): def _load_data(self):
self.data = d.load_pit(self.apikey, self.competition) self.data = self.client.load_pit()
def _process_data(self): def _process_data(self):
tests = self.config["tests"] tests = self.config["tests"]
return_vector = {} return_vector = {}
for team in self.data: for team in tqdm(self.data, desc = "Pit Module ", unit = " team"):
for variable in self.data[team]: for variable in self.data[team]:
if variable in tests: if variable in tests:
if not variable in return_vector: if not variable in return_vector:
@@ -309,7 +301,7 @@ class Pit (Module):
self.results = return_vector self.results = return_vector
def _push_results(self): def _push_results(self):
d.push_pit(self.apikey, self.competition, self.results) self.client.push_pit(self.results)
class Rating (Module): class Rating (Module):
pass pass

View File

@@ -1,63 +0,0 @@
import requests
from exceptions import APIError
from dep import load_config
# Base URL of the TRA (Titan Robotics Analytics) scouting API.
url = "https://titanscouting.epochml.org"
# Module-level config store, populated in place by load_config below.
config_tra = {}
load_config("config.json", config_tra)
# TRA API credentials (CLIENT_ID / CLIENT_SECRET) used by every request helper in this module.
trakey = config_tra['persistent']['key']['tra']
def get_team_competition():
	"""Return the current competition code for the authenticated team.

	Queries the TRA `/api/fetchTeamCompetition` endpoint using the
	module-level `url` and `trakey` credentials.

	Returns:
		The competition identifier from the API response.

	Raises:
		APIError: if the API response reports success == False.
	"""
	endpoint = '/api/fetchTeamCompetition'
	params = {
		"CLIENT_ID": trakey['CLIENT_ID'],
		"CLIENT_SECRET": trakey['CLIENT_SECRET']
	}
	response = requests.request("GET", url + endpoint, params=params)
	# renamed local from `json` to avoid shadowing the stdlib `json` module
	payload = response.json()
	if payload['success']:
		return payload['competition']
	else:
		raise APIError(payload)
def get_team():
	"""Return the team number of the authenticated team.

	NOTE(review): this hits '/api/fetchTeamCompetition' — the same endpoint
	as get_team_competition() — but reads the 'team' field; presumably the
	endpoint should be team-specific. Confirm against the TRA API before
	changing, so the current (working) behavior is preserved here.

	Returns:
		The team identifier from the API response.

	Raises:
		APIError: if the API response reports success == False.
	"""
	endpoint = '/api/fetchTeamCompetition'
	params = {
		"CLIENT_ID": trakey['CLIENT_ID'],
		"CLIENT_SECRET": trakey['CLIENT_SECRET']
	}
	response = requests.request("GET", url + endpoint, params=params)
	# renamed local from `json` to avoid shadowing the stdlib `json` module
	payload = response.json()
	if payload['success']:
		return payload['team']
	else:
		raise APIError(payload)
def get_team_match_data(competition, team_num):
	"""Return all match data scouted for one team at a competition.

	Args:
		competition: competition identifier to query.
		team_num: team number whose scouted match data is requested; also
			used as the key into the API's 'data' mapping.

	Returns:
		The match-data payload for `team_num` from the API response.

	Raises:
		APIError: if the API response reports success == False.
	"""
	endpoint = '/api/fetchAllTeamMatchData'
	params = {
		"competition": competition,
		"teamScouted": team_num,
		"CLIENT_ID": trakey['CLIENT_ID'],
		"CLIENT_SECRET": trakey['CLIENT_SECRET']
	}
	response = requests.request("GET", url + endpoint, params=params)
	# renamed local from `json` to avoid shadowing the stdlib `json` module
	payload = response.json()
	if payload['success']:
		return payload['data'][team_num]
	else:
		raise APIError(payload)
def get_teams_at_competition(competition):
	"""Return the list of team identifiers present at a competition.

	Args:
		competition: competition identifier to query.

	Returns:
		list: the keys of the API's 'data' mapping (one per team).

	Raises:
		APIError: if the API response reports success == False.
	"""
	endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
	params = {
		"competition": competition,
		"CLIENT_ID": trakey['CLIENT_ID'],
		"CLIENT_SECRET": trakey['CLIENT_SECRET']
	}
	response = requests.request("GET", url + endpoint, params=params)
	# renamed local from `json` to avoid shadowing the stdlib `json` module
	payload = response.json()
	if payload['success']:
		return list(payload['data'].keys())
	else:
		raise APIError(payload)

View File

@@ -9,8 +9,7 @@ __version__ = "1.0.0"
__changelog__ = """changelog: __changelog__ = """changelog:
1.0.0: 1.0.0:
- superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems - superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems
- linux superscript daemon has integrated websocket output to monitor progress/status remotely - removed daemon and socket functionality, user can implement using external software
- linux daemon now sends stderr to errorlog.log
- added verbose option to linux superscript to allow for interactive output - added verbose option to linux superscript to allow for interactive output
- moved pymongo import to superscript.py - moved pymongo import to superscript.py
- added profile option to linux superscript to profile runtime of script - added profile option to linux superscript to profile runtime of script
@@ -149,19 +148,13 @@ __author__ = (
# imports: # imports:
import os, sys, time import argparse, sys, time, traceback, warnings
import pymongo # soon to be deprecated
import traceback
import warnings
from config import Configuration, ConfigurationError from config import Configuration, ConfigurationError
from data import get_previous_time, set_current_time, check_new_database_matches from data import Client
from interface import Logger from interface import Logger
from module import Match, Metric, Pit from module import Match, Metric, Pit
import zmq
config_path = "config.json" def main(logger, verbose, profile, debug, config_path):
def main(logger, verbose, profile, debug, socket_send = None):
def close_all(): def close_all():
if "client" in locals(): if "client" in locals():
@@ -180,52 +173,42 @@ def main(logger, verbose, profile, debug, socket_send = None):
loop_start = time.time() loop_start = time.time()
logger.info("current time: " + str(loop_start)) logger.info("current time: " + str(loop_start))
socket_send("current time: " + str(loop_start))
config = Configuration(config_path) config = Configuration(config_path)
logger.info("found and loaded config at <" + config_path + ">") logger.info("found and loaded config at <" + config_path + ">")
socket_send("found and loaded config at <" + config_path + ">")
apikey, tbakey = config.database, config.tba client = Client(config)
logger.info("found and loaded database and tba keys")
socket_send("found and loaded database and tba keys")
client = pymongo.MongoClient(apikey)
logger.info("established connection to database") logger.info("established connection to database")
socket_send("established connection to database")
previous_time = get_previous_time(client) previous_time = client.get_previous_time()
logger.info("analysis backtimed to: " + str(previous_time)) logger.info("analysis backtimed to: " + str(previous_time))
socket_send("analysis backtimed to: " + str(previous_time))
config.resolve_config_conflicts(logger, client) config.resolve_config_conflicts(logger, client)
config_modules, competition = config.modules, config.competition config_modules, competition = config.modules, config.competition
client.competition = competition
for m in config_modules: for m in config_modules:
if m in modules: if m in modules:
start = time.time() start = time.time()
current_module = modules[m](config_modules[m], client, tbakey, previous_time, competition) current_module = modules[m](config_modules[m], previous_time, client)
valid = current_module.validate_config() valid = current_module.validate_config()
if not valid: if not valid:
continue continue
current_module.run() current_module.run()
logger.info(m + " module finished in " + str(time.time() - start) + " seconds") logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
if debug: if debug:
logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
set_current_time(client, loop_start) client.set_current_time(loop_start)
close_all() close_all()
logger.info("closed threads and database client") logger.info("closed threads and database client")
logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping") logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
socket_send("closed threads and database client")
socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
if profile: if profile:
return 0 return 0
@@ -236,33 +219,27 @@ def main(logger, verbose, profile, debug, socket_send = None):
event_delay = config["variable"]["event-delay"] event_delay = config["variable"]["event-delay"]
if event_delay: if event_delay:
logger.info("loop delayed until database returns new matches") logger.info("loop delayed until database returns new matches")
socket_send("loop delayed until database returns new matches")
new_match = False new_match = False
while not new_match: while not new_match:
time.sleep(1) time.sleep(1)
new_match = check_new_database_matches(client, competition) new_match = client.check_new_database_matches()
logger.info("database returned new matches") logger.info("database returned new matches")
socket_send("database returned new matches")
else: else:
loop_delay = float(config["variable"]["loop-delay"]) loop_delay = float(config["variable"]["loop-delay"])
remaining_time = loop_delay - (time.time() - loop_start) remaining_time = loop_delay - (time.time() - loop_start)
if remaining_time > 0: if remaining_time > 0:
logger.info("loop delayed by " + str(remaining_time) + " seconds") logger.info("loop delayed by " + str(remaining_time) + " seconds")
socket_send("loop delayed by " + str(remaining_time) + " seconds")
time.sleep(remaining_time) time.sleep(remaining_time)
except KeyboardInterrupt: except KeyboardInterrupt:
close_all() close_all()
logger.info("detected KeyboardInterrupt, exiting") logger.info("detected KeyboardInterrupt, exiting")
socket_send("detected KeyboardInterrupt, exiting")
return 0 return 0
except ConfigurationError as e: except ConfigurationError as e:
str_e = "".join(traceback.format_exception(e)) str_e = "".join(traceback.format_exception(e))
logger.error("encountered a configuration error: " + str(e)) logger.error("encountered a configuration error: " + str(e))
logger.error(str_e) logger.error(str_e)
socket_send("encountered a configuration error: " + str(e))
socket_send(str_e)
close_all() close_all()
return 1 return 1
@@ -270,134 +247,56 @@ def main(logger, verbose, profile, debug, socket_send = None):
str_e = "".join(traceback.format_exception(e)) str_e = "".join(traceback.format_exception(e))
logger.error("encountered an exception while running") logger.error("encountered an exception while running")
logger.error(str_e) logger.error(str_e)
socket_send("encountered an exception while running")
socket_send(str_e)
close_all() close_all()
return 1 return 1
def start(pid_path, verbose, profile, debug): def start(verbose, profile, debug, config_path, log_path):
logger = Logger(verbose, profile, debug, file = log_path)
if profile: if profile:
def send(msg):
pass
logger = Logger(verbose, profile, debug)
import cProfile, pstats, io import cProfile, pstats, io
profile = cProfile.Profile() profile = cProfile.Profile()
profile.enable() profile.enable()
exit_code = main(logger, verbose, profile, debug, socket_send = send) exit_code = main(logger, verbose, profile, debug, config_path)
profile.disable() profile.disable()
f = open("profile.txt", 'w+') f = open("profile.txt", "w+")
ps = pstats.Stats(profile, stream = f).sort_stats('cumtime') ps = pstats.Stats(profile, stream = f).sort_stats("cumtime")
ps.print_stats() ps.print_stats()
sys.exit(exit_code) sys.exit(exit_code)
elif verbose: elif verbose:
def send(msg): exit_code = main(logger, verbose, profile, debug, config_path)
pass
logger = Logger(verbose, profile, debug)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
sys.exit(exit_code) sys.exit(exit_code)
elif debug: elif debug:
def send(msg): exit_code = main(logger, verbose, profile, debug, config_path)
pass
logger = Logger(verbose, profile, debug)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
sys.exit(exit_code) sys.exit(exit_code)
else: else:
logfile = "logfile.log" pass # must be vebose, debug or profile
f = open(logfile, 'w+')
f.close()
e = open('errorlog.log', 'w+')
with daemon.DaemonContext(
working_directory = os.getcwd(),
pidfile = pidfile.TimeoutPIDLockFile(pid_path),
stderr = e
):
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5678")
socket.send(b'status')
def send(msg):
socket.send(bytes("status: " + msg, "utf-8"))
logger = Logger(verbose, profile, debug, file = logfile)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
socket.close()
f.close()
sys.exit(exit_code)
def stop(pid_path):
try:
pf = open(pid_path, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n")
return
try:
while True:
os.kill(pid, SIGTERM)
time.sleep(0.01)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(pid_path):
os.remove(pid_path)
else:
traceback.print_exc(file = sys.stderr)
sys.exit(1)
def restart(pid_path):
stop(pid_path)
start(pid_path, False, False, False)
if __name__ == "__main__": if __name__ == "__main__":
if sys.platform.startswith("win"): parser = argparse.ArgumentParser(description = "TRA data processing application.")
start(None, verbose = True) parser.add_argument("mode", metavar = "MODE", type = str, nargs = 1, choices = ["verbose", "profile", "debug"], help = "verbose, debug, profile")
parser.add_argument("--config", dest = "config", default = "config.json", type = str, help = "path to config file")
parser.add_argument("--logfile", dest = "logfile", default = "logfile.log", type = str, help = "path to log file")
else: args = parser.parse_args()
import daemon
from daemon import pidfile mode = args.mode[0]
from signal import SIGTERM config_path = args.config
pid_path = "tra-daemon.pid" log_path = args.logfile
if len(sys.argv) == 2: if mode == "verbose":
if 'start' == sys.argv[1]: start(True, False, False, config_path = config_path, log_path = log_path)
start(pid_path, False, False, False) elif mode == "profile":
elif 'stop' == sys.argv[1]: start(False, True, False, config_path = config_path, log_path = log_path)
stop(pid_path) elif mode == "debug":
elif 'restart' == sys.argv[1]: start(False, False, True, config_path = config_path, log_path = log_path)
restart(pid_path)
elif 'verbose' == sys.argv[1]: exit(0)
start(None, True, False, False)
elif 'profile' == sys.argv[1]:
start(None, False, True, False)
elif 'debug' == sys.argv[1]:
start(None, False, False, True)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)
sys.exit(0)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)