2019-04-03 18:34:31 +00:00
# Titan Robotics Team 2022: Super Script
# Written by Arthur Lu & Jacob Levine
# Notes:
# setup:
2019-03-22 16:54:40 +00:00
__version__ = " 1.0.6.001 "
__changelog__ = """ changelog:
1.0 .6 .001 :
- fixed multiple bugs
- works now
1.0 .6 .000 :
- added pulldata function
- service now pulls in , computes data , and outputs data as planned
1.0 .5 .003 :
- hotfix : actually pushes data correctly now
1.0 .5 .002 :
- more information given
- performance improvements
1.0 .5 .001 :
- grammar
1.0 .5 .000 :
- service now iterates forever
- ready for production other than pulling json data
1.0 .4 .001 :
- grammar fixes
1.0 .4 .000 :
- actually pushes to firebase
1.0 .3 .001 :
- processes data more efficiently
1.0 .3 .000 :
- actually processes data
1.0 .2 .000 :
- added data reading from folder
- nearly crashed computer reading from 20 GiB of data
1.0 .1 .000 :
- added data reading from file
- added superstructure to code
1.0 .0 .000 :
- added import statements ( revolutionary )
2019-04-03 18:34:31 +00:00
"""
2019-03-22 16:54:40 +00:00
__author__ = (
" Arthur Lu <arthurlu@ttic.edu>, "
" Jacob Levine <jlevine@ttic.edu>, "
2019-04-03 18:34:31 +00:00
)
# stdlib
import csv
import glob
import os
import sys
import time
import warnings

# third-party
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import numpy as np

# project-local
import analysis
# import titanlearn
import tbarequest as tba
import visualization
def titanservice():
    """Compute a "nishant number" per team from score data and push it to Firestore.

    Reads per-measure CSVs plus data/teams.csv and data/scores.csv (the
    latter written by pulldata()), fits regressions to each team's score
    history, selects the best fits by minimum overfit and by maximum r^2,
    and writes one 'nishant_number' document per team under the
    stats/stats-noNN Firestore document. Uses the module-level Firestore
    client ``db``.
    """
    print("[OK] loading data")
    start = time.time()

    # Collect per-measure CSVs from data/; the bookkeeping files are either
    # handled separately (scores, teams) or irrelevant to stats (match,
    # notes, observationType, teamDBRef).
    skip_prefixes = ("scores", "teams", "match", "notes",
                     "observationType", "teamDBRef")
    files = [fn for fn in glob.glob("data/*.csv")
             if not os.path.basename(fn).startswith(skip_prefixes)]

    # Loaded but currently unused downstream: the per-measure stats pipeline
    # is disabled (stats/measure_stats stay empty) — kept for parity with
    # the original behavior.
    data = [analysis.load_csv(fn) for fn in files]

    stats = []
    measure_stats = []
    teams = analysis.load_csv("data/teams.csv")
    scores = analysis.load_csv("data/scores.csv")
    end = time.time()
    print("[OK] loaded data in " + str(end - start) + " seconds")

    # Assumes team number is in the first column and the row order of
    # scores matches the row order of teams — TODO confirm with data files.
    nishant = []

    for i in range(len(scores)):
        ofbest_curve = [None]
        r2best_curve = [None]
        line = scores[i]

        # Too few matches to fit a meaningful regression.
        if len(line) < 4:
            nishant.append('no_data')
            continue

        x = list(range(len(line)))
        eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1)

        # Best fit by minimum overfit.
        beqs, brmss, br2s, boverfit = analysis.select_best_regression(
            eqs, rmss, r2s, overfit, "min_overfit")
        ofbest_curve.append(beqs)
        ofbest_curve.append(brmss)
        ofbest_curve.append(br2s)
        ofbest_curve.append(boverfit)
        ofbest_curve.pop(0)

        # Best fit by maximum r^2.
        beqs, brmss, br2s, boverfit = analysis.select_best_regression(
            eqs, rmss, r2s, overfit, "max_r2s")
        r2best_curve.append(beqs)
        r2best_curve.append(brmss)
        r2best_curve.append(br2s)
        r2best_curve.append(boverfit)
        r2best_curve.pop(0)

        nis_num = []

        # HACK: the selected "equations" come back as strings; eval() turns
        # them into numbers. eval on data read from disk is unsafe — flag
        # for replacement with a numeric return from analysis.
        nis_num.append(eval(str(ofbest_curve[0])))
        nis_num.append(eval(str(r2best_curve[0])))
        # Mean of the two selections. (str() added here for consistency
        # with the two appends above.)
        nis_num.append((eval(str(ofbest_curve[0])) + eval(str(r2best_curve[0]))) / 2)

        nishant.append(teams[i] + nis_num)

    # Index results by team number (stringified for a stable dict key).
    score_out = {}
    for i in range(len(teams)):
        score_out[str(teams[i][0])] = nishant[i]

    location = db.collection(u'stats').document(u'stats-noNN')

    for i in range(len(teams)):
        # str() on lookup matches the str() used when the key was written.
        location.collection(teams[i][0]).document(
            u'nishant_number').set({'nishant': score_out.get(str(teams[i][0]))})
def pulldata():
    """Fetch every team's 2019 match scores from The Blue Alliance and write
    them to data/scores.csv, one row per team, matches in chronological order.

    Unplayed matches (empty 'winning_alliance') are dropped before sorting.
    """
    teams = analysis.load_csv('data/teams.csv')
    scores = []
    for i in range(len(teams)):
        team_scores = []
        # NOTE(review): TBA API key is hardcoded — should move to config/env.
        request_data_object = tba.req_team_matches(
            teams[i][0], 2019, "UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5")
        json_data = request_data_object.json()

        # Drop unplayed matches. Iterate indices in reverse and delete by
        # index: del json_data[match] removes exactly this entry, unlike
        # list.remove() which deletes the first *equal* element and can
        # drop the wrong match when entries compare equal.
        for match in range(len(json_data) - 1, -1, -1):
            if json_data[match].get('winning_alliance') == "":
                del json_data[match]

        # Chronological order by actual start time (missing time sorts first).
        json_data = sorted(json_data, key=lambda k: k.get('actual_time', 0))

        # Record this team's alliance score for each match it played in.
        for j in range(len(json_data)):
            alliances = json_data[j].get('alliances')
            if "frc" + teams[i][0] in alliances.get('blue').get('team_keys'):
                team_scores.append(alliances.get('blue').get('score'))
            elif "frc" + teams[i][0] in alliances.get('red').get('team_keys'):
                team_scores.append(alliances.get('red').get('score'))

        scores.append(team_scores)

    # newline='' per the csv module docs to avoid blank rows on Windows.
    with open("data/scores.csv", "w+", newline='') as file:
        writer = csv.writer(file, delimiter=',')
        writer.writerows(scores)
def service():
    """Run the pull/compute cycle forever, once every 5 minutes.

    Each cycle pulls fresh score data, then retries titanservice() up to 5
    times. If all 5 attempts fail, the loop (and therefore the service)
    exits; otherwise it sleeps out the remainder of the 5-minute window.
    """
    while True:
        pulldata()
        start = time.time()
        print("[OK] starting calculations")
        failed = False

        for i in range(0, 5):
            try:
                titanservice()
                break
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate and the service can be stopped.
            except Exception:
                if i != 4:
                    print("[WARNING] failed, trying " +
                          str(5 - i - 1) + " more times")
                else:
                    print("[ERROR] failed to compute data, skipping")
                    failed = True
        end = time.time()
        if failed:
            break

        print("[OK] finished calculations")
        print("[OK] waiting: " + str(300 - (end - start)) + " seconds" + "\n")
        # Clamp to zero: if a cycle took longer than 5 minutes, a negative
        # argument would make time.sleep raise ValueError.
        time.sleep(max(0, 300 - (end - start)))  # executes once every 5 minutes
warnings.simplefilter("ignore")

# Use a service account: prefer the production key, fall back to the temp key.
try:
    cred = credentials.Certificate('keys/firebasekey.json')
# Narrowed from a bare except: a missing/unreadable production key file
# triggers the fallback, but Ctrl-C during startup still propagates.
except Exception:
    cred = credentials.Certificate('keys/keytemp.json')
firebase_admin.initialize_app(cred)
db = firestore.client()  # module-level client used by titanservice()

service()  # finally we write something that isn't a function definition
# titanservice()