deprecated nonfunctional scripts in data-analysis

Signed-off-by: Arthur Lu <learthurgo@gmail.com>
Arthur Lu 2020-09-20 00:47:33 +00:00
parent bc3889c4e0
commit e13f2a239b
7 changed files with 0 additions and 835 deletions

@@ -1 +0,0 @@
2020ilch

@@ -1,14 +0,0 @@
balls-blocked,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
balls-collected,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
balls-lower-teleop,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
balls-lower-auto,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
balls-started,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
balls-upper-teleop,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
balls-upper-auto,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
wheel-mechanism
low-balls
high-balls
wheel-success
strategic-focus
climb-mechanism
attitude
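Each row above pairs a scouted variable with the statistical tests to run on it; the trailing rows with no tests (wheel-mechanism through attitude) are categorical pit variables. A minimal sketch of how the load_config function later in this diff turns these rows into a lookup table (parsing simplified here to plain string splitting; the real code goes through an.load_csv):

    rows = [line.split(",") for line in open("config/stats.config").read().splitlines()]
    config = {row[0]: row[1:] for row in rows}
    # config["balls-blocked"] == ["basic_stats", "historical_analysis", "regression_linear", ...]
    # config["wheel-mechanism"] == []  (no tests listed, so no numeric tests run on it)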

@@ -1,378 +0,0 @@
# Titan Robotics Team 2022: Superscript Script
# Written by Arthur Lu & Jacob Levine
# Notes:
# setup:
__version__ = "0.0.5.002"
# changelog should be viewed using print(analysis.__changelog__)
__changelog__ = """changelog:
    0.0.5.002:
        - made changes due to refactoring of analysis
    0.0.5.001:
        - text fixes
        - removed matplotlib requirement
    0.0.5.000:
        - improved user interface
    0.0.4.002:
        - removed unnecessary code
    0.0.4.001:
        - fixed bug where X range for regression was determined before sanitization
        - better sanitized data
    0.0.4.000:
        - fixed spelling issue in __changelog__
        - addressed nan bug in regression
        - fixed errors on line 335 with metrics calling incorrect key "glicko2"
        - fixed errors in metrics computing
    0.0.3.000:
        - added analysis to pit data
    0.0.2.001:
        - minor stability patches
        - implemented db syncing for timestamps
        - fixed bugs
    0.0.2.000:
        - finalized testing and small fixes
    0.0.1.004:
        - finished metrics implementation, trueskill is bugged
    0.0.1.003:
        - working
    0.0.1.002:
        - started implementation of metrics
    0.0.1.001:
        - cleaned up imports
    0.0.1.000:
        - tested working, can push to database
    0.0.0.009:
        - tested working
        - prints out stats for the time being, will push to database later
    0.0.0.008:
        - added data import
        - removed tba import
        - finished main method
    0.0.0.007:
        - added load_config
        - optimized simpleloop for readability
        - added __all__ entries
        - added simplestats engine
        - pending testing
    0.0.0.006:
        - fixes
    0.0.0.005:
        - imported pickle
        - created custom database object
    0.0.0.004:
        - fixed simpleloop to actually return a vector
    0.0.0.003:
        - added metricsloop which is unfinished
    0.0.0.002:
        - added simpleloop which is untested until data is provided
    0.0.0.001:
        - created script
        - added analysis, numba, numpy imports
"""
__author__ = (
    "Arthur Lu <learthurgo@gmail.com>",
    "Jacob Levine <jlevine@imsa.edu>",
)

__all__ = [
    "main",
    "load_config",
    "simpleloop",
    "simplestats",
    "metricsloop"
]
# imports:
from tra_analysis import analysis as an
import data as d
import numpy as np
from os import system, name
from pathlib import Path
import time
import warnings
def main():
    warnings.filterwarnings("ignore")
    while(True):

        current_time = time.time()
        print("[OK] time: " + str(current_time))

        start = time.time()
        config = load_config(Path("config/stats.config"))
        competition = an.load_csv(Path("config/competition.config"))[0][0]
        print("[OK] configs loaded")

        apikey = an.load_csv(Path("config/keys.config"))[0][0]
        tbakey = an.load_csv(Path("config/keys.config"))[1][0]
        print("[OK] loaded keys")

        previous_time = d.get_analysis_flags(apikey, "latest_update")
        if(previous_time == None):
            d.set_analysis_flags(apikey, "latest_update", 0)
            previous_time = 0
        else:
            previous_time = previous_time["latest_update"]
        print("[OK] analysis backtimed to: " + str(previous_time))

        print("[OK] loading data")
        start = time.time()
        data = d.get_match_data_formatted(apikey, competition)
        pit_data = d.get_pit_data_formatted(apikey, competition)
        print("[OK] loaded data in " + str(time.time() - start) + " seconds")

        print("[OK] running tests")
        start = time.time()
        results = simpleloop(data, config)
        print("[OK] finished tests in " + str(time.time() - start) + " seconds")

        print("[OK] running metrics")
        start = time.time()
        metricsloop(tbakey, apikey, competition, previous_time)
        print("[OK] finished metrics in " + str(time.time() - start) + " seconds")

        print("[OK] running pit analysis")
        start = time.time()
        pit = pitloop(pit_data, config)
        print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")

        d.set_analysis_flags(apikey, "latest_update", {"latest_update": current_time})

        print("[OK] pushing to database")
        start = time.time()
        push_to_database(apikey, competition, results, pit)
        print("[OK] pushed to database in " + str(time.time() - start) + " seconds")

        clear()
def clear():
    # for windows
    if name == 'nt':
        _ = system('cls')
    # for mac and linux (os.name is 'posix')
    else:
        _ = system('clear')
def load_config(file):
    config_vector = {}
    file = an.load_csv(file)
    for line in file:
        config_vector[line[0]] = line[1:]
    return config_vector
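# Example of the resulting shape, assuming the stats.config rows shown earlier in
# this diff: load_config(Path("config/stats.config"))["balls-blocked"] would be
# ["basic_stats", "historical_analysis", "regression_linear", ...], and variables
# listed without tests map to empty lists.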
def simpleloop(data, tests): # expects 3D array with [Team][Variable][Match]
    return_vector = {}
    for team in data:
        variable_vector = {}
        for variable in data[team]:
            test_vector = {}
            variable_data = data[team][variable]
            if(variable in tests):
                for test in tests[variable]:
                    test_vector[test] = simplestats(variable_data, test)
            else:
                pass
            variable_vector[variable] = test_vector
        return_vector[team] = variable_vector
    return return_vector
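# Illustrative return shape (team number and contents hypothetical):
# {"2022": {"balls-blocked": {"basic_stats": {...}, "regression_linear": {...}, ...}}}
# i.e. one dict per team, one nested dict per variable, one entry per requested test.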
def simplestats(data, test):
    data = np.array(data)
    data = data[np.isfinite(data)]
    ranges = list(range(len(data)))
    if(test == "basic_stats"):
        return an.basic_stats(data)
    if(test == "historical_analysis"):
        return an.histo_analysis([ranges, data])
    if(test == "regression_linear"):
        return an.regression(ranges, data, ['lin'])
    if(test == "regression_logarithmic"):
        return an.regression(ranges, data, ['log'])
    if(test == "regression_exponential"):
        return an.regression(ranges, data, ['exp'])
    if(test == "regression_polynomial"):
        return an.regression(ranges, data, ['ply'])
    if(test == "regression_sigmoidal"):
        return an.regression(ranges, data, ['sig'])
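# Example with hypothetical data: simplestats([1.0, 2.0, float("nan"), 4.0], "basic_stats")
# drops the NaN via np.isfinite and delegates to an.basic_stats on [1.0, 2.0, 4.0];
# the ranges list [0, 1, 2] serves as the x-axis for the regression tests.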
def push_to_database(apikey, competition, results, pit):
    for team in results:
        d.push_team_tests_data(apikey, competition, team, results[team])
    for variable in pit:
        d.push_team_pit_data(apikey, competition, variable, pit[variable])
def metricsloop(tbakey, apikey, competition, timestamp): # listener based metrics update

    elo_N = 400
    elo_K = 24

    matches = d.pull_new_tba_matches(tbakey, competition, timestamp)

    red = {}
    blu = {}

    for match in matches:

        red = load_metrics(apikey, competition, match, "red")
        blu = load_metrics(apikey, competition, match, "blue")

        elo_red_total = 0
        elo_blu_total = 0

        gl2_red_score_total = 0
        gl2_blu_score_total = 0
        gl2_red_rd_total = 0
        gl2_blu_rd_total = 0
        gl2_red_vol_total = 0
        gl2_blu_vol_total = 0

        for team in red:
            elo_red_total += red[team]["elo"]["score"]
            gl2_red_score_total += red[team]["gl2"]["score"]
            gl2_red_rd_total += red[team]["gl2"]["rd"]
            gl2_red_vol_total += red[team]["gl2"]["vol"]

        for team in blu:
            elo_blu_total += blu[team]["elo"]["score"]
            gl2_blu_score_total += blu[team]["gl2"]["score"]
            gl2_blu_rd_total += blu[team]["gl2"]["rd"]
            gl2_blu_vol_total += blu[team]["gl2"]["vol"]

        red_elo = {"score": elo_red_total / len(red)}
        blu_elo = {"score": elo_blu_total / len(blu)}

        red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
        blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}

        if(match["winner"] == "red"):
            observations = {"red": 1, "blu": 0}
        elif(match["winner"] == "blue"):
            observations = {"red": 0, "blu": 1}
        else:
            observations = {"red": 0.5, "blu": 0.5}

        red_elo_delta = an.Metrics.elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
        blu_elo_delta = an.Metrics.elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]

        new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metrics.glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
        new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metrics.glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])

        red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
        blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
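        # the alliance-level deltas computed above are applied uniformly below: every
        # team on an alliance receives the same Elo/Glicko-2 adjustment, because the
        # ratings were averaged across the alliance before being updated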
        for team in red:
            red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
            red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
            red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
            red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]

        for team in blu:
            blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
            blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
            blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
            blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]

        temp_vector = {}
        temp_vector.update(red)
        temp_vector.update(blu)

        for team in temp_vector:
            d.push_team_metrics_data(apikey, competition, team, temp_vector[team])
def load_metrics(apikey, competition, match, group_name):
    group = {}
    for team in match[group_name]:
        db_data = d.get_team_metrics_data(apikey, competition, team)
        if db_data == None:
            elo = {"score": 1500}
            gl2 = {"score": 1500, "rd": 250, "vol": 0.06}
            ts = {"mu": 25, "sigma": 25/3}
            #d.push_team_metrics_data(apikey, competition, team, {"elo":elo, "gl2":gl2, "trueskill":ts})
            group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
        else:
            metrics = db_data["metrics"]
            elo = metrics["elo"]
            gl2 = metrics["gl2"]
            ts = metrics["ts"]
            group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
    return group
def pitloop(pit, tests):
    return_vector = {}
    for team in pit:
        for variable in pit[team]:
            if(variable in tests):
                if(not variable in return_vector):
                    return_vector[variable] = []
                return_vector[variable].append(pit[team][variable])
    return return_vector
main()
"""
Metrics Defaults:
elo starting score = 1500
elo N = 400
elo K = 24
gl2 starting score = 1500
gl2 starting rd = 350
gl2 starting vol = 0.06
"""

@@ -1,188 +0,0 @@
import json
import superscript as su
import threading
__author__ = (
    "Arthur Lu <learthurgo@gmail.com>",
)
class Tasker():

    match_ = False
    metric_ = False
    pit_ = False

    match_enable = True
    metric_enable = True
    pit_enable = True

    config = {}

    def __init__(self):
        self.config = su.load_config("config.json")

    def match(self):
        self.match_ = True

        apikey = self.config["key"]["database"]
        competition = self.config["competition"]
        tests = self.config["statistics"]["match"]

        data = su.load_match(apikey, competition)
        su.matchloop(apikey, competition, data, tests)

        self.match_ = False

        if self.match_enable == True and self.match_ == False:
            task = threading.Thread(name = "match", target = self.match)
            task.start()
    def metric(self):
        self.metric_ = True

        apikey = self.config["key"]["database"]
        tbakey = self.config["key"]["tba"]
        competition = self.config["competition"]
        metric = self.config["statistics"]["metric"]

        timestamp = su.get_previous_time(apikey)
        su.metricloop(tbakey, apikey, competition, timestamp, metric)

        self.metric_ = False

        if self.metric_enable == True and self.metric_ == False:
            task = threading.Thread(name = "metric", target = self.metric)
            task.start()
    def pit(self):
        self.pit_ = True

        apikey = self.config["key"]["database"]
        competition = self.config["competition"]
        tests = self.config["statistics"]["pit"]

        data = su.load_pit(apikey, competition)
        su.pitloop(apikey, competition, data, tests)

        self.pit_ = False

        if self.pit_enable == True and self.pit_ == False:
            task = threading.Thread(name = "pit", target = self.pit)
            task.start()
    def start_match(self):
        task = threading.Thread(name = "match", target = self.match)
        task.start()

    def start_metric(self):
        task = threading.Thread(name = "metric", target = self.metric)
        task.start()

    def start_pit(self):
        task = threading.Thread(name = "pit", target = self.pit)
        task.start()
    def stop_match(self):
        self.match_enable = False

    def stop_metric(self):
        self.metric_enable = False

    def stop_pit(self):
        self.pit_enable = False

    def get_match(self):
        return self.match_

    def get_metric(self):
        return self.metric_

    def get_pit(self):
        return self.pit_

    def get_match_enable(self):
        return self.match_enable

    def get_metric_enable(self):
        return self.metric_enable

    def get_pit_enable(self):
        return self.pit_enable
"""
def main():
init()
start_match()
start_metric()
start_pit()
exit = False
while(not exit):
i = input("> ")
cmds = i.split(" ")
cmds = [x for x in cmds if x != ""]
l = len(cmds)
if(l == 0):
pass
else:
if(cmds[0] == "exit"):
if(l == 1):
exit = True
else:
print("exit command expected no arguments but encountered " + str(l - 1))
if(cmds[0] == "status"):
if(l == 1):
print("status command expected 1 argument but encountered none\ntype status help for usage")
elif(l > 2):
print("status command expected 1 argument but encountered " + str(l - 1))
elif(cmds[1] == "threads"):
threads = threading.enumerate()
threads = [x.getName() for x in threads]
print("running threads:")
for thread in threads:
print(" " + thread)
elif(cmds[1] == "flags"):
print("current flags:")
print(" match running: " + match_)
print(" metric running: " + metric_)
print(" pit running: " + pit_)
print(" match enable: " + match_enable)
print(" metric enable: " + metric_enable)
print(" pit enable: " + pit_enable)
elif(cmds[1] == "config"):
print("current config:")
print(json.dumps(config))
elif(cmds[1] == "all"):
threads = threading.enumerate()
threads = [x.getName() for x in threads]
print("running threads:")
for thread in threads:
print(" " + thread)
print("current flags:")
print(" match running: " + match_)
print(" metric running: " + metric_)
print(" pit running: " + pit_)
print(" match enable: " + match_enable)
print(" metric enable: " + metric_enable)
print(" pit enable: " + pit_enable)
elif(cmds[1] == "help"):
print("usage: status [arg]\nDisplays the status of the tra data analysis threads.\nArguments:\n threads - prints the stuatus ofcurrently running threads\n flags - prints the status of control and indicator flags\n config - prints the current configuration information\n all - prints all statuses\n <name_of_thread> - prints the status of a specific thread")
else:
threads = threading.enumerate()
threads = [x.getName() for x in threads]
if(cmds[1] in threads):
print(cmds[1] + " is running")
if(__name__ == "__main__"):
main()
"""

@@ -1,55 +0,0 @@
import threading
from multiprocessing import Process, Queue
import time
from os import system
class testcls():

    i = 0
    j = 0
    t1_en = True
    t2_en = True

    def main(self):
        t1 = Process(name = "task1", target = self.task1)
        t2 = Process(name = "task2", target = self.task2)
        t1.start()
        t2.start()
        #print(self.i)
        #print(self.j)

    def task1(self):
        self.i += 1
        time.sleep(1)
        if(self.i < 10):
            t1 = Process(name = "task1", target = self.task1)
            t1.start()

    def task2(self):
        self.j -= 1
        time.sleep(1)
        if(self.j > -10):
            t2 = Process(name = "task2", target = self.task2)
            t2.start()
"""
if __name__ == "__main__":
tmain = threading.Thread(name = "main", target = main)
tmain.start()
t = 0
while(True):
system("clear")
for thread in threading.enumerate():
if thread.getName() != "MainThread":
print(thread.getName())
print(str(len(threading.enumerate())))
print(i)
print(j)
time.sleep(0.1)
t += 1
if(t == 100):
t1_en = False
t2_en = False
"""

@@ -1,33 +0,0 @@
import argparse
from tasks import Tasker
import test
import threading
from multiprocessing import Process, Queue
t = Tasker()
task_map = {"match":None, "metric":None, "pit":None, "test":None}
status_map = {"match":None, "metric":None, "pit":None}
status_map.update(task_map)
parser = argparse.ArgumentParser(prog = "TRA")
subparsers = parser.add_subparsers(title = "command", metavar = "C", help = "//commandhelp//")
parser_start = subparsers.add_parser("start", help = "//starthelp//")
parser_start.add_argument("targets", metavar = "T", nargs = "*", choices = task_map.keys())
parser_start.set_defaults(which = "start")
parser_stop = subparsers.add_parser("stop", help = "//stophelp//")
parser_stop.add_argument("targets", metavar = "T", nargs = "*", choices = task_map.keys())
parser_stop.set_defaults(which = "stop")
parser_status = subparsers.add_parser("status", help = "//statushelp//")
parser_status.add_argument("targets", metavar = "T", nargs = "*", choices = status_map.keys())
parser_status.set_defaults(which = "status")
args = parser.parse_args()
if(args.which == "start" and "test" in args.targets):
    a = test.testcls()
    tmain = Process(name = "main", target = a.main)
    tmain.start()
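Hypothetical invocations of the CLI above; only the "start test" path is actually wired to a handler in this version, the other subcommands parse but are not consumed:

    python main.py start test         # spawns the test.testcls() demo in a child process
    python main.py start match pit    # parses into which="start", targets=["match", "pit"]
    python main.py status match       # parses via the status subparser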

@@ -1,166 +0,0 @@
import json
import superscript as su
import threading
__author__ = (
    "Arthur Lu <learthurgo@gmail.com>",
)
match_ = False
metric_ = False
pit_ = False
match_enable = True
metric_enable = True
pit_enable = True
config = {}
def init():
    global config
    config = su.load_config("config.json")
def match():
    global match_
    match_ = True

    apikey = config["key"]["database"]
    competition = config["competition"]
    tests = config["statistics"]["match"]

    data = su.load_match(apikey, competition)
    su.matchloop(apikey, competition, data, tests)

    match_ = False

    if match_enable == True and match_ == False:
        task = threading.Thread(name = "match", target = match)
        task.start()
def metric():
    global metric_
    metric_ = True

    apikey = config["key"]["database"]
    tbakey = config["key"]["tba"]
    competition = config["competition"]
    metric_config = config["statistics"]["metric"]

    timestamp = su.get_previous_time(apikey)
    su.metricloop(tbakey, apikey, competition, timestamp, metric_config)

    metric_ = False

    if metric_enable == True and metric_ == False:
        task = threading.Thread(name = "metric", target = metric)
        task.start()
def pit():
    global pit_
    pit_ = True

    apikey = config["key"]["database"]
    competition = config["competition"]
    tests = config["statistics"]["pit"]

    data = su.load_pit(apikey, competition)
    su.pitloop(apikey, competition, data, tests)

    pit_ = False

    if pit_enable == True and pit_ == False:
        task = threading.Thread(name = "pit", target = pit)
        task.start()
def start_match():
    task = threading.Thread(name = "match", target = match)
    task.start()

def start_metric():
    task = threading.Thread(name = "metric", target = metric)
    task.start()

def start_pit():
    task = threading.Thread(name = "pit", target = pit)
    task.start()
def main():

    init()
    start_match()
    start_metric()
    start_pit()

    exit = False
    while(not exit):

        i = input("> ")
        cmds = i.split(" ")
        cmds = [x for x in cmds if x != ""]
        l = len(cmds)

        if(l == 0):
            pass
        else:
            if(cmds[0] == "exit"):
                if(l == 1):
                    exit = True
                else:
                    print("exit command expected no arguments but encountered " + str(l - 1))
            if(cmds[0] == "status"):
                if(l == 1):
                    print("status command expected 1 argument but encountered none\ntype status help for usage")
                elif(l > 2):
                    print("status command expected 1 argument but encountered " + str(l - 1))
                elif(cmds[1] == "threads"):
                    threads = threading.enumerate()
                    threads = [x.getName() for x in threads]
                    print("running threads:")
                    for thread in threads:
                        print("    " + thread)
                elif(cmds[1] == "flags"):
                    print("current flags:")
                    print("    match running: " + str(match_))
                    print("    metric running: " + str(metric_))
                    print("    pit running: " + str(pit_))
                    print("    match enable: " + str(match_enable))
                    print("    metric enable: " + str(metric_enable))
                    print("    pit enable: " + str(pit_enable))
                elif(cmds[1] == "config"):
                    print("current config:")
                    print(json.dumps(config))
                elif(cmds[1] == "all"):
                    threads = threading.enumerate()
                    threads = [x.getName() for x in threads]
                    print("running threads:")
                    for thread in threads:
                        print("    " + thread)
                    print("current flags:")
                    print("    match running: " + str(match_))
                    print("    metric running: " + str(metric_))
                    print("    pit running: " + str(pit_))
                    print("    match enable: " + str(match_enable))
                    print("    metric enable: " + str(metric_enable))
                    print("    pit enable: " + str(pit_enable))
                elif(cmds[1] == "help"):
                    print("usage: status [arg]\nDisplays the status of the tra data analysis threads.\nArguments:\n    threads - prints the status of currently running threads\n    flags - prints the status of control and indicator flags\n    config - prints the current configuration information\n    all - prints all statuses\n    <name_of_thread> - prints the status of a specific thread")
                else:
                    threads = threading.enumerate()
                    threads = [x.getName() for x in threads]
                    if(cmds[1] in threads):
                        print(cmds[1] + " is running")

if(__name__ == "__main__"):
    main()
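A hypothetical session with the command loop above, assuming the three worker threads started cleanly:

    > status threads
    running threads:
        MainThread
        match
        metric
        pit
    > exit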