diff --git a/data analysis/data/scores.csv b/data analysis/data/scores.csv
index 0accf104..792e5ee3 100644
--- a/data analysis/data/scores.csv
+++ b/data analysis/data/scores.csv
@@ -1,6 +1,40 @@
-35,63,62,18,56,65,42,77,82,63,76,50,47,69,57,65,85,70,38,67,89,74,71,67,70,71,95,77,45
-46,24,12,23,66,48,43,35,32,42,78,56,70,53,56,43,16,85,46,76,86,77,71,77,76,70,73
-29,63,35,37,26,51,35,58,30,61,45
-26,62,45,37,42,29,59,44,40
-46,45,71,85,54,57,52,66,47,77,68,75,45,51
-26,15,31,54,37,48,19,69,56
+49,56,63
+48,58,43,71,44,47,38,61,54,68,46,72,67,61,71,55
+46,71,40
+20,18,45,55,25,46,55,31,26,49,51,56,74
+42,61,52,43,67,32,55,44,41,49,47,19,74
+40,25,43,25,56,68,59,46,72,61,45,45,47,67
+65,43,74
+41,35,35,30,56,39,48,49,71,48,72,44,49,54
+26,47,56,49,12,30,44,52,27,49,61,47,32,55,44,54
+29,63,35,37,26,51,35,58,30,61,45,39,56,55
+35,31,27,49,30,57,35,34,65,56,67
+46,43
+45,62,48
+17,16,25,37,28,33,33,41,42,66,71,33
+19,81,49
+49,47
+24,16,12,20,42,37,42,49,19,43,57
+46,45,71,85,54,57,52,66,47,77,68,75,45,51,66,71,67
+40,23,45,58,37,42,42,43,30,36,40,66,54,55
+45,58,52,49,53,55,60,64,66,43,49,58,82,67,78,71,71,63
+63,57,57,39,51,45,69,64,57,74,74,57,70,64,62,62,81,57
+46,48,55,42,24,40,99,39,36,40,39,66,48
+60,49,54
+24,48,20,54,28,41,35,27,28,32,60,33,33
+43,62,53,38,63,45,71,64,60,70,74,57,70,64,62,60,81
+46,49
+20,15,15,37,63,39,41,39,15,45,33
+31,27,63,12,12,30,45,47,30,51,56,40
+65,62,63
+48,30,27,37,36,53,52,62,50,32,44,57,71,69,73,60,64,46,33,40
+39,62,33
+23,43,43,35,33,29,30,83,38,33,96,79,96,77,81,0,46,47,54
+38,39,20,61,42,68,48,65,48,48,72,44,62,43,54
+25,62,53,54,61,63,42,59,14,53,63,65,72,58,50,46,56
+27,19,35,44,40,43,45,38,51,48,44,51,42
+44,43
+40,31,45,42,27,37,43,46,47,50,55,44,42,57
+66,49,49
+66,43,49
+62,42,48
diff --git a/data analysis/superscript.py b/data analysis/superscript.py
index c4da6f05..4bef34ac 100644
--- a/data analysis/superscript.py
+++ b/data analysis/superscript.py
@@ -141,6 +141,12 @@ def titanservice():
         line = scores[i]
 
+        if len(line) < 4:
+
+            nishant.append('no_data')
+
+            continue
+
         #print(line)
 
         #print(line)
 
@@ -260,6 +266,10 @@ def pulldata():
 
                 quant_keys.append(key)
 
                 #print(full[i][j].get(key).get('teamDBRef')[5:] in list_teams)
+
+                print(full[i][j].get(key).get('teamDBRef'))
+
+                print(list_teams)
 
                 if full[i][j].get(key).get('teamDBRef')[5:] in list_teams:
@@ -330,10 +340,10 @@ def service():
         fucked = False
 
         for i in range(0, 5):
-            #try:
+            try:
                 titanservice()
                 break
-            #except:
+            except:
                 if (i != 4):
                     print("[WARNING] failed, trying " + str(5 - i - 1) + " more times")
                 else:
diff --git a/data analysis/superscript_nishant_only.py b/data analysis/superscript_nishant_only.py
new file mode 100644
index 00000000..aba40d79
--- /dev/null
+++ b/data analysis/superscript_nishant_only.py
@@ -0,0 +1,280 @@
+#Titan Robotics Team 2022: Super Script
+#Written by Arthur Lu & Jacob Levine
+#Notes:
+#setup:
+
+__version__ = "1.0.6.001"
+
+__changelog__ = """changelog:
+1.0.6.001:
+    - fixed multiple bugs
+    - works now
+1.0.6.000:
+    - added pulldata function
+    - service now pulls in, computes data, and outputs data as planned
+1.0.5.003:
+    - hotfix: actually pushes data correctly now
+1.0.5.002:
+    - more information given
+    - performance improvements
+1.0.5.001:
+    - grammar
+1.0.5.000:
+    - service now iterates forever
+    - ready for production other than pulling json data
+1.0.4.001:
+    - grammar fixes
+1.0.4.000:
+    - actually pushes to firebase
+1.0.3.001:
+    - processes data more efficiently
+1.0.3.000:
+    - actually processes data
+1.0.2.000:
+    - added data reading from folder
+    - nearly crashed computer reading from 20 GiB of data
+1.0.1.000:
+    - added data reading from file
+    - added superstructure to code
+1.0.0.000:
+    - added import statements (revolutionary)
+"""
+
+__author__ = (
+    "Arthur Lu , "
+    "Jacob Levine ,"
+    )
+
+import firebase_admin
+from firebase_admin import credentials
+from firebase_admin import firestore
+import analysis
+#import titanlearn
+import visualization
+import os
+import sys
+import warnings
+import glob
+import numpy as np
+import time
+import tbarequest as tba
+import csv
+
+def titanservice():
+
+    print("[OK] loading data")
+
+    start = time.time()
+
+    source_dir = 'data'
+    file_list = glob.glob(source_dir + '/*.csv') #supposedly sorts by alphabetical order, skips reading teams.csv because of redundancy
+    data = []
+    files = [fn for fn in glob.glob('data/*.csv')
+             if not (os.path.basename(fn).startswith('scores') or os.path.basename(fn).startswith('teams') or os.path.basename(fn).startswith('match') or os.path.basename(fn).startswith('notes') or os.path.basename(fn).startswith('observationType') or os.path.basename(fn).startswith('teamDBRef'))] #scores will be handled separately
+
+    for i in files:
+        data.append(analysis.load_csv(i))
+
+    #print(files)
+
+    stats = []
+    measure_stats = []
+    teams = analysis.load_csv("data/teams.csv")
+    scores = analysis.load_csv("data/scores.csv")
+
+    end = time.time()
+
+    print("[OK] loaded data in " + str(end - start) + " seconds")
+
+    #assumes that team number is in the first column, and that the order of teams is the same across all files
+    #unhelpful comment
+    #for measure in data: #unpacks 3d array into 2ds
+
+        #measure_stats = []
+
+        #for i in range(len(measure)): #unpacks into specific teams
+
+            #ofbest_curve = [None]
+            #r2best_curve = [None]
+
+            #line = measure[i]
+
+            #print(line)
+
+            #x = list(range(len(line)))
+            #eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1)
+
+            #beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "min_overfit")
+
+            #print(eqs, rmss, r2s, overfit)
+
+            #ofbest_curve.append(beqs)
+            #ofbest_curve.append(brmss)
+            #ofbest_curve.append(br2s)
+            #ofbest_curve.append(boverfit)
+            #ofbest_curve.pop(0)
+
+            #print(ofbest_curve)
+
+            #beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "max_r2s")
+
+            #r2best_curve.append(beqs)
+            #r2best_curve.append(brmss)
+            #r2best_curve.append(br2s)
+            #r2best_curve.append(boverfit)
+            #r2best_curve.pop(0)
+
+            #print(r2best_curve)
+
+
+            #measure_stats.append(teams[i] + list(analysis.basic_stats(line, 0, 0)) + list(analysis.histo_analysis(line, 1, -3, 3)))
+
+        #stats.append(list(measure_stats))
+    nishant = []
+
+    for i in range(len(scores)):
+
+        #print(scores)
+
+        ofbest_curve = [None]
+        r2best_curve = [None]
+
+        line = scores[i]
+
+        if len(line) < 4:
+
+            nishant.append('no_data')
+
+            continue
+
+        #print(line)
+
+        #print(line)
+
+        x = list(range(len(line)))
+        eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1)
+
+        beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "min_overfit")
+
+        #print(eqs, rmss, r2s, overfit)
+
+        ofbest_curve.append(beqs)
+        ofbest_curve.append(brmss)
+        ofbest_curve.append(br2s)
+        ofbest_curve.append(boverfit)
+        ofbest_curve.pop(0)
+
+        #print(ofbest_curve)
+
+        beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "max_r2s")
+
+        r2best_curve.append(beqs)
+        r2best_curve.append(brmss)
+        r2best_curve.append(br2s)
+        r2best_curve.append(boverfit)
+        r2best_curve.pop(0)
+
+        #print(r2best_curve)
+
+        z = len(scores[0]) + 1
+        nis_num = []
+
+        nis_num.append(eval(str(ofbest_curve[0])))
+        nis_num.append(eval(str(r2best_curve[0])))
+
+        nis_num.append((eval(ofbest_curve[0]) + eval(r2best_curve[0])) / 2)
+
+        nishant.append(teams[i] + nis_num)
+
+    json_out = {}
+    score_out = {}
+
+    for i in range(len(teams)):
+        score_out[str(teams[i][0])] = (nishant[i])
+
+    location = db.collection(u'stats').document(u'stats-noNN')
+    #for i in range(len(teams)):
+        #general_general_stats = location.collection(teams[i][0])
+
+        #for j in range(len(files)):
+        #    json_out[str(teams[i][0])] = (stats[j][i])
+        #    name = os.path.basename(files[j])
+        #    general_general_stats.document(name).set({'stats':json_out.get(teams[i][0])})
+
+    for i in range(len(teams)):
+        nnum = location.collection(teams[i][0]).document(u'nishant_number').set({'nishant':score_out.get(teams[i][0])})
+
+def pulldata():
+    teams = analysis.load_csv('data/teams.csv')
+    scores = []
+    for i in range(len(teams)):
+        team_scores = []
+        #print(teams[i][0])
+        request_data_object = tba.req_team_matches(teams[i][0], 2019, "UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5")
+        json_data = request_data_object.json()
+
+        for match in range(len(json_data) - 1, -1, -1):
+            if json_data[match].get('winning_alliance') == "":
+                #print(json_data[match])
+                json_data.remove(json_data[match])
+
+        json_data = sorted(json_data, key=lambda k: k.get('actual_time', 0), reverse=False)
+        for j in range(len(json_data)):
+            if "frc" + teams[i][0] in json_data[j].get('alliances').get('blue').get('team_keys'):
+                team_scores.append(json_data[j].get('alliances').get('blue').get('score'))
+            elif "frc" + teams[i][0] in json_data[j].get('alliances').get('red').get('team_keys'):
+                team_scores.append(json_data[j].get('alliances').get('red').get('score'))
+        scores.append(team_scores)
+
+    with open("data/scores.csv", "w+", newline = '') as file:
+        writer = csv.writer(file, delimiter = ',')
+        writer.writerows(scores)
+
+def service():
+
+    while True:
+
+        pulldata()
+
+        start = time.time()
+
+        print("[OK] starting calculations")
+
+        fucked = False
+
+        for i in range(0, 5):
+            try:
+                titanservice()
+                break
+            except:
+                if (i != 4):
+                    print("[WARNING] failed, trying " + str(5 - i - 1) + " more times")
+                else:
+                    print("[ERROR] failed to compute data, skipping")
+                    fucked = True
+
+        end = time.time()
+        if (fucked == True):
+
+            break
+
+        else:
+
+            print("[OK] finished calculations")
+
+        print("[OK] waiting: " + str(300 - (end - start)) + " seconds" + "\n")
+
+        time.sleep(300 - (end - start)) #executes once every 5 minutes
+
+warnings.simplefilter("ignore")
+#Use a service account
+try:
+    cred = credentials.Certificate('keys/firebasekey.json')
+except:
+    cred = credentials.Certificate('keys/keytemp.json')
+firebase_admin.initialize_app(cred)
+
+db = firestore.client()
+
+service() #finally we write something that isn't a function definition
+#titanservice()