From e4a179925a858534207e494180cfd3f2521e1be9 Mon Sep 17 00:00:00 2001 From: ltcptgeneral <35508619+ltcptgeneral@users.noreply.github.com> Date: Thu, 21 Mar 2019 17:50:52 -0500 Subject: [PATCH] Revert "beautified" This reverts commit 0d8780b3c1747c89184d0ab407410991ef5e68e4. --- .../Untitled-checkpoint.ipynb | 88 ------ .../superscript-checkpoint.py | 265 ------------------ .../tbarequest-checkpoint.py | 104 ------- .../.ipynb_checkpoints/test-checkpoint.py | 97 ------- .../titanlearn-checkpoint.py | 206 -------------- .../visualization-checkpoint.py | 130 --------- data analysis/Untitled.ipynb | 88 ------ .../__pycache__/tbarequest.cpython-36.pyc | Bin 2771 -> 0 bytes .../__pycache__/visualization.cpython-36.pyc | Bin 3911 -> 0 bytes data analysis/superscript.py | 259 ++++++++--------- 10 files changed, 120 insertions(+), 1117 deletions(-) delete mode 100644 data analysis/.ipynb_checkpoints/Untitled-checkpoint.ipynb delete mode 100644 data analysis/.ipynb_checkpoints/superscript-checkpoint.py delete mode 100644 data analysis/.ipynb_checkpoints/tbarequest-checkpoint.py delete mode 100644 data analysis/.ipynb_checkpoints/test-checkpoint.py delete mode 100644 data analysis/.ipynb_checkpoints/titanlearn-checkpoint.py delete mode 100644 data analysis/.ipynb_checkpoints/visualization-checkpoint.py delete mode 100644 data analysis/Untitled.ipynb delete mode 100644 data analysis/__pycache__/tbarequest.cpython-36.pyc delete mode 100644 data analysis/__pycache__/visualization.cpython-36.pyc diff --git a/data analysis/.ipynb_checkpoints/Untitled-checkpoint.ipynb b/data analysis/.ipynb_checkpoints/Untitled-checkpoint.ipynb deleted file mode 100644 index 1d6d7372..00000000 --- a/data analysis/.ipynb_checkpoints/Untitled-checkpoint.ipynb +++ /dev/null @@ -1,88 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import tbarequest" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2022', 'frc2358', 'frc7417']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2040', 'frc2481', 'frc81']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm70', 'match_number': 70, 'post_result_time': None, 'predicted_time': 1553357160, 'score_breakdown': None, 'set_number': 1, 'time': 1553357160, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc6237', 'frc4156', 'frc4646']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc4241', 'frc1781', 'frc2022']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm61', 'match_number': 61, 'post_result_time': None, 'predicted_time': 1553352300, 'score_breakdown': None, 'set_number': 1, 'time': 1553352300, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc1736', 'frc2022', 'frc5126']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc5690', 'frc1329', 'frc4241']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm57', 'match_number': 57, 'post_result_time': None, 'predicted_time': 1553350140, 
'score_breakdown': None, 'set_number': 1, 'time': 1553350140, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc6237', 'frc2358', 'frc2039']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2022', 'frc7848', 'frc4212']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm53', 'match_number': 53, 'post_result_time': None, 'predicted_time': 1553292960, 'score_breakdown': None, 'set_number': 1, 'time': 1553292960, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc7465', 'frc4213', 'frc2704']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc1329', 'frc6055', 'frc2022']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm41', 'match_number': 41, 'post_result_time': None, 'predicted_time': 1553287200, 'score_breakdown': None, 'set_number': 1, 'time': 1553287200, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2022', 'frc4156', 'frc5442']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc5822', 'frc4143', 'frc7848']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm4', 'match_number': 4, 'post_result_time': None, 'predicted_time': 1553264820, 'score_breakdown': None, 'set_number': 1, 'time': 1553264820, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc1094', 'frc4314', 'frc6651']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc6237', 'frc2081', 'frc2022']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm38', 'match_number': 38, 'post_result_time': None, 'predicted_time': 1553285760, 'score_breakdown': None, 'set_number': 1, 'time': 1553285760, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2013', 'frc2081', 'frc6055']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc81', 'frc2022', 'frc3695']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm31', 'match_number': 31, 'post_result_time': None, 'predicted_time': 1553282400, 'score_breakdown': None, 'set_number': 1, 'time': 1553282400, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc4655', 'frc5822', 'frc2022']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc4256', 'frc323', 'frc2709']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm22', 'match_number': 22, 'post_result_time': None, 'predicted_time': 1553278080, 'score_breakdown': None, 'set_number': 1, 'time': 1553278080, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc1756', 'frc1329', 'frc1288']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc4096', 'frc2022', 'frc323']}}, 'comp_level': 'qm', 'event_key': 
'2019ilpe', 'key': '2019ilpe_qm14', 'match_number': 14, 'post_result_time': None, 'predicted_time': 1553269860, 'score_breakdown': None, 'set_number': 1, 'time': 1553269860, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2039', 'frc2022', 'frc1288']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2013', 'frc4096', 'frc1781']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm10', 'match_number': 10, 'post_result_time': None, 'predicted_time': 1553267940, 'score_breakdown': None, 'set_number': 1, 'time': 1553267940, 'videos': [], 'winning_alliance': ''}\n" - ] - } - ], - "source": [ - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tbarequest.req_team_matches('frc16', '2019','UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/data analysis/.ipynb_checkpoints/superscript-checkpoint.py b/data analysis/.ipynb_checkpoints/superscript-checkpoint.py deleted file mode 100644 index e29e199e..00000000 --- a/data analysis/.ipynb_checkpoints/superscript-checkpoint.py +++ /dev/null @@ -1,265 +0,0 @@ -#Titan Robotics Team 2022: Super Script -#Written by Arthur Lu & Jacob Levine -#Notes: -#setup: - -__version__ = "1.0.6.000" - -__changelog__ = """changelog: -1.0.6.000: - - added pulldata function - - service now pulls in, computes data, and outputs data as planned -1.0.5.003: - - hotfix: actually pushes data correctly now -1.0.5.002: - - more information given - - performance improvements -1.0.5.001: - - grammar -1.0.5.000: - - service now iterates forever - - ready for production other than pulling json data -1.0.4.001: - - grammar fixes -1.0.4.000: - - actually pushes to firebase -1.0.3.001: - - processes data more efficiently -1.0.3.000: - - actually processes data -1.0.2.000: - - added data reading from folder - - nearly crashed computer reading from 20 GiB of data -1.0.1.000: - - added data reading from file - - added superstructure to code -1.0.0.000: - - added import statements (revolutionary) -""" - -__author__ = ( - "Arthur Lu , " - "Jacob Levine ," - ) - -import firebase_admin -from firebase_admin import credentials -from firebase_admin import firestore -import analysis -#import titanlearn -import visualization -import os -import sys -import warnings -import glob -import numpy as np -import time -import tbarequest as tba -import csv - -def titanservice(): - - print("[OK] loading data") - - start = time.time() - - source_dir = 'data' - file_list = glob.glob(source_dir + '/*.csv') #supposedly sorts by alphabetical order, skips reading teams.csv because of redundancy - data = [] - files = [fn for fn in glob.glob('data/*.csv') - if not 
(os.path.basename(fn).startswith('teams'))] #scores will be handled sperately - - for i in files: - data.append(analysis.load_csv(i)) - - stats = [] - measure_stats = [] - teams = analysis.load_csv("data/teams.csv") - scores = analysis.load_csv("data/scores.csv") - - end = time.time() - - print("[OK] loaded data in " + str(end - start) + " seconds") - - #assumes that team number is in the first column, and that the order of teams is the same across all files - #unhelpful comment - for measure in data: #unpacks 3d array into 2ds - - measure_stats = [] - - for i in range(len(measure)): #unpacks into specific teams - - ofbest_curve = [None] - r2best_curve = [None] - - line = measure[i] - - #print(line) - - x = list(range(len(line))) - eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1) - - beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "min_overfit") - - #print(eqs, rmss, r2s, overfit) - - ofbest_curve.append(beqs) - ofbest_curve.append(brmss) - ofbest_curve.append(br2s) - ofbest_curve.append(boverfit) - ofbest_curve.pop(0) - - #print(ofbest_curve) - - beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "max_r2s") - - r2best_curve.append(beqs) - r2best_curve.append(brmss) - r2best_curve.append(br2s) - r2best_curve.append(boverfit) - r2best_curve.pop(0) - - #print(r2best_curve) - - - measure_stats.append(teams[i] + list(analysis.basic_stats(line, 0, 0)) + list(analysis.histo_analysis(line, 1, -3, 3)) + ofbest_curve + r2best_curve) - - stats.append(list(measure_stats)) - nishant = [] - - for i in range(len(scores)): - - ofbest_curve = [None] - r2best_curve = [None] - - line = measure[i] - - #print(line) - - x = list(range(len(line))) - eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1) - - beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "min_overfit") - - #print(eqs, rmss, r2s, overfit) - - ofbest_curve.append(beqs) - ofbest_curve.append(brmss) - ofbest_curve.append(br2s) - ofbest_curve.append(boverfit) - ofbest_curve.pop(0) - - #print(ofbest_curve) - - beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "max_r2s") - - r2best_curve.append(beqs) - r2best_curve.append(brmss) - r2best_curve.append(br2s) - r2best_curve.append(boverfit) - r2best_curve.pop(0) - - #print(r2best_curve) - - z = len(scores[0]) + 1 - nis_num = [] - - nis_num.append(eval(str(ofbest_curve[0]))) - nis_num.append(eval(str(r2best_curve[0]))) - - nis_num.append((eval(ofbest_curve[0]) + eval(r2best_curve[0])) / 2) - - nishant.append(teams[i] + nis_num) - - json_out = {} - score_out = {} - - for i in range(len(teams)): - score_out[str(teams[i][0])] = (nishant[i]) - - location = db.collection(u'stats').document(u'stats-noNN') - for i in range(len(teams)): - general_general_stats = location.collection(teams[i][0]) - - for j in range(len(files)): - json_out[str(teams[i][0])] = (stats[j][i]) - name = os.path.basename(files[j]) - general_general_stats.document(name).set({'stats':json_out.get(teams[i][0])}) - - for i in range(len(teams)): - nnum = location.collection(teams[i][0]).document(u'nishant_number').set({'nishant':score_out.get(teams[i][0])}) - -def pulldata(): - teams = analysis.load_csv('data/teams.csv') - scores = [] - for i in range(len(teams)): - team_scores = [] - #print(teams[i][0]) - request_data_object = tba.req_team_matches(teams[i][0], 2019, "UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5") - json_data = 
request_data_object.json() - for match in range(len(json_data) - 1, -1, -1): - if json_data[match].get('winning_alliance') == "": - print(json_data[match]) - json_data.remove(json_data[match]) - - - json_data = sorted(json_data, key=lambda k: k.get('actual_time', 0), reverse=False) - for j in range(len(json_data)): - if "frc" + teams[i][0] in json_data[j].get('alliances').get('blue').get('team_keys'): - team_scores.append(json_data[j].get('alliances').get('blue').get('score')) - elif "frc" + teams[i][0] in json_data[j].get('alliances').get('red').get('team_keys'): - team_scores.append(json_data[j].get('alliances').get('red').get('score')) - scores.append(team_scores) - - with open("data/scores.csv", "w+", newline = '') as file: - writer = csv.writer(file, delimiter = ',') - writer.writerows(scores) - -def service(): - - while True: - - pulldata() - - start = time.time() - - print("[OK] starting calculations") - - fucked = False - - for i in range(0, 5): - try: - titanservice() - break - except: - if (i != 4): - print("[WARNING] failed, trying " + str(5 - i - 1) + " more times") - else: - print("[ERROR] failed to compute data, skipping") - fucked = True - - end = time.time() - if (fucked == True): - - break - - else: - - print("[OK] finished calculations") - - print("[OK] waiting: " + str(300 - (end - start)) + " seconds" + "\n") - - time.sleep(300 - (end - start)) #executes once every 5 minutes - -warnings.simplefilter("ignore") -#Use a service account -try: - cred = credentials.Certificate('keys/firebasekey.json') -except: - cred = credentials.Certificate('keys/keytemp.json') -firebase_admin.initialize_app(cred) - -db = firestore.client() - -service() #finally we write something that isn't a function definition -#titanservice() diff --git a/data analysis/.ipynb_checkpoints/tbarequest-checkpoint.py b/data analysis/.ipynb_checkpoints/tbarequest-checkpoint.py deleted file mode 100644 index 15d02925..00000000 --- a/data analysis/.ipynb_checkpoints/tbarequest-checkpoint.py +++ /dev/null @@ -1,104 +0,0 @@ -#Titan Robotics Team 2022: TBA Requests Module -#Written by Arthur Lu & Jacob Levine -#Notes: -# this should be imported as a python module using 'import tbarequest' -# this should be included in the local directory or environment variable -# this module has not been optimized for multhreaded computing -#Number of easter eggs: none yet -#setup: - -__version__ = "1.0.0.001" - -#changelog should be viewed using print(tbarequest.__changelog__) -__changelog__ = """changelog: -1.0.1.000: - - fixed a simple error -1.0.0.xxx: - -added common requests and JSON processing""" -__author__ = ( - "Arthur Lu , " - "Jacob Levine ," - ) -__all__ = [ - 'process_json_ret', - 'req_all_events', - 'req_event_matches', - 'req_event_insights', - 'req_event_elim_alli' - 'req_team_events', - 'req_team_matches' - ] -#imports -import requests - -#as this code is public, i'm not putting 2022's API key in here. 
just add it as a var in your script and go -#requests a list of events that a team went to -def req_team_events(team,year,apikey): - headers={'X-TBA-Auth-Key':apikey} - r=requests.get('https://www.thebluealliance.com/api/v3/team/frc'+str(team)+'/events/'+str(year),headers=headers) - return r - -#gets every match that a team played in -def req_team_matches(team,year,apikey): - headers={'X-TBA-Auth-Key':apikey} - r=requests.get('https://www.thebluealliance.com/api/v3/team/frc'+str(team)+'/matches/'+str(year), headers=headers) - return r - -#gets all events in a certain year -def req_all_events(year, apikey): - headers={'X-TBA-Auth-Key':apikey} - r=requests.get('https://www.thebluealliance.com/api/v3/events/'+str(year), headers=headers) - return r - -#gets all matches for an event -def req_event_matches(event_key,apikey): - headers={'X-TBA-Auth-Key':apikey} - r=requests.get('https://www.thebluealliance.com/api/v3/event/'+str(event_key)+'/matches', headers=headers) - return r - -#gets elimination alliances from a event -def req_event_elim_alli(event_key, apikey): - headers={'X-TBA-Auth-Key':apikey} - r=requests.get('https://www.thebluealliance.com/api/v3/event/'+str(event_key)+'/alliances', headers=headers) - return r - -#requests oprs and dprs -def req_event_opr(event_key, apikey): - headers={'X-TBA-Auth-Key':apikey} - r=requests.get('https://www.thebluealliance.com/api/v3//event/'+str(event_key)+'/oprs', headers=headers) - return r - - - -#gets TBA's insights from an event -def req_event_insights(event_key, apikey): - headers={'X-TBA-Auth-Key':apikey} - r=requests.get('https://www.thebluealliance.com/api/v3/event/'+str(event_key)+'/insights', headers=headers) - return r - -#processes the json return. right now, it's slow and not great. will throw an exception if it doesn't get a good status code -def process_json_ret(req): - if req.status_code == 200: - keys=[] - for i in req.json(): - for j in i.keys(): - read=False - for k in keys: - if k==j: - read=True - break - if not read: - keys.append(j) - out=[] - out.append(keys) - for i in req.json(): - buf=[] - for j in keys: - try: - buf.append(i[j]) - except: - buf.append("") - out.append(buf) - return out - else: - raise ValueError('Status code is: '+req.status_code+', not 200') diff --git a/data analysis/.ipynb_checkpoints/test-checkpoint.py b/data analysis/.ipynb_checkpoints/test-checkpoint.py deleted file mode 100644 index 79e7ee46..00000000 --- a/data analysis/.ipynb_checkpoints/test-checkpoint.py +++ /dev/null @@ -1,97 +0,0 @@ -import firebase_admin -from firebase_admin import credentials -from firebase_admin import firestore -import csv -import numpy as np - -# Use a service account -cred = credentials.Certificate('keys/keytemp.json') -#add your own key as this is public. 
email me for details -firebase_admin.initialize_app(cred) - -db = firestore.client() - -teams=db.collection('data').document('team-2022').collection("Central 2019").get() -full=[] -tms=[] -for team in teams: - - tms.append(team.id) - reports=db.collection('data').document('team-2022').collection("Central 2019").document(team.id).collection("matches").get() - - for report in reports: - data=[] - data.append(db.collection('data').document('team-2022').collection("Central 2019").document(team.id).collection("matches").document(report.id).get().to_dict()) - full.append(data) - -quant_keys = [] - -list_teams = ["2022", "16", "2451"] - -out = [] -var = {} - -for i in range(len(full)): - for j in range(len(full[i])): - for key in list(full[i][j].keys()): - - if "Quantitative" in key: - - quant_keys.append(key) - - if full[i][j].get(key).get('teamDBRef')[5:] in list_teams: - - var = {} - measured_vars = [] - - for k in range(len(list(full[i][j].get(key).keys()))): - - individual_keys = list(full[i][j].get(key).keys()) - - var[individual_keys[k]] = full[i][j].get(key).get(individual_keys[k]) - - out.append(var) - -sorted_out = [] - -for i in out: - - j_list = [] - - key_list = [] - - sorted_keys = sorted(i.keys()) - - for j in sorted_keys: - - key_list.append(i[j]) - - j_list.append(j) - - sorted_out.append(key_list) - -var_index = 0 -team_index = 0 - -big_out = [] - -for j in range(len(i)): - big_out.append([]) - for t in range(len(list_teams)): - big_out[j].append([]) - -for i in sorted_out: - - team_index = list_teams.index(sorted_out[sorted_out.index(i)][j_list.index('teamDBRef')][5:]) - - for j in range(len(i)): - - big_out[j][team_index].append(i[j]) - -for i in range(len(big_out)): - - with open('data/' + j_list[i] + '.csv', "w+", newline = '') as file: - - writer = csv.writer(file, delimiter = ',') - writer.writerows(big_out[i]) - diff --git a/data analysis/.ipynb_checkpoints/titanlearn-checkpoint.py b/data analysis/.ipynb_checkpoints/titanlearn-checkpoint.py deleted file mode 100644 index a3730003..00000000 --- a/data analysis/.ipynb_checkpoints/titanlearn-checkpoint.py +++ /dev/null @@ -1,206 +0,0 @@ -#Titan Robotics Team 2022: ML Module -#Written by Arthur Lu & Jacob Levine -#Notes: -# this should be imported as a python module using 'import titanlearn' -# this should be included in the local directory or environment variable -# this module has not been optimized for multhreaded computing -# this module learns from its mistakes far faster than 2022's captains -#setup: - -__version__ = "1.0.0.001" - -#changelog should be viewed using print(analysis.__changelog__) -__changelog__ = """changelog: -1.0.0.xxx: - -added generation of ANNS, basic SGD training""" -__author__ = ( - "Arthur Lu , " - "Jacob Levine ," - ) -__all__ = [ - 'linear_nn', - 'train_sgd_minibatch', - 'train_sgd_simple' - ] -#imports -import torch -import warnings -from collections import OrderedDict -from sklearn import metrics, datasets -import numpy as np -import matplotlib.pyplot as plt -import math -import time - -#enable CUDA if possible -device = torch.device("cpu") - -#linear_nn: creates a fully connected network given params -def linear_nn(in_dim, hidden_dim, out_dim, num_hidden, act_fn="tanh", end="none"): - if act_fn.lower()=="tanh": - k=OrderedDict([("in", torch.nn.Linear(in_dim,hidden_dim))]) - for i in range(num_hidden): - k.update({"lin"+str(i+1): torch.nn.Linear(hidden_dim,hidden_dim), "tanh"+str(i+1):torch.nn.Tanh()}) - - elif act_fn.lower()=="sigmoid": - k=OrderedDict([("in", 
torch.nn.Linear(in_dim,hidden_dim))]) - for i in range(num_hidden): - k.update({"lin"+str(i+1): torch.nn.Linear(hidden_dim,hidden_dim), "sig"+str(i+1):torch.nn.Sigmoid()}) - - elif act_fn.lower()=="relu": - k=OrderedDict([("in", torch.nn.Linear(in_dim,hidden_dim))]) - for i in range(num_hidden): - k.update({"lin"+str(i+1): torch.nn.Linear(hidden_dim,hidden_dim), "relu"+str(i+1):torch.nn.ReLU()}) - - elif act_fn.lower()=="leaky relu": - k=OrderedDict([("in", torch.nn.Linear(in_dim,hidden_dim))]) - for i in range(num_hidden): - k.update({"lin"+str(i+1): torch.nn.Linear(hidden_dim,hidden_dim), "lre"+str(i+1):torch.nn.LeakyReLU()}) - else: - warnings.warn("Did not specify a valid inner activation function. Returning nothing.") - return None - - if end.lower()=="softmax": - k.update({"out": torch.nn.Linear(hidden_dim,out_dim), "softmax": torch.nn.Softmax()}) - elif end.lower()=="none": - k.update({"out": torch.nn.Linear(hidden_dim,out_dim)}) - elif end.lower()=="sigmoid": - k.update({"out": torch.nn.Linear(hidden_dim,out_dim), "sigmoid": torch.nn.Sigmoid()}) - else: - warnings.warn("Did not specify a valid final activation function. Returning nothing.") - return None - - return torch.nn.Sequential(k) - -#train_sgd_simple: trains network using SGD -def train_sgd_simple(net, evalType, data, ground, dev=None, devg=None, iters=1000, learnrate=1e-4, testevery=1, graphsaveloc=None, modelsaveloc=None, loss="mse"): - model=net.to(device) - data=data.to(device) - ground=ground.to(device) - if dev != None: - dev=dev.to(device) - losses=[] - dev_losses=[] - if loss.lower()=="mse": - loss_fn = torch.nn.MSELoss() - elif loss.lower()=="cross entropy": - loss_fn = torch.nn.CrossEntropyLoss() - elif loss.lower()=="nll": - loss_fn = torch.nn.NLLLoss() - elif loss.lower()=="poisson nll": - loss_fn = torch.nn.PoissonNLLLoss() - else: - warnings.warn("Did not specify a valid loss function. 
Returning nothing.") - return None - optimizer=torch.optim.SGD(model.parameters(), lr=learnrate) - for i in range(iters): - if i%testevery==0: - with torch.no_grad(): - output = model(data) - if evalType == "ap": - ap = metrics.average_precision_score(ground.cpu().numpy(), output.cpu().numpy()) - if evalType == "regression": - ap = metrics.explained_variance_score(ground.cpu().numpy(), output.cpu().numpy()) - losses.append(ap) - print(str(i)+": "+str(ap)) - plt.plot(np.array(range(0,i+1,testevery)),np.array(losses), label="train AP") - if dev != None: - output = model(dev) - print(evalType) - if evalType == "ap": - - ap = metrics.average_precision_score(devg.numpy(), output.numpy()) - dev_losses.append(ap) - plt.plot(np.array(range(0,i+1,testevery)),np.array(losses), label="dev AP") - elif evalType == "regression": - ev = metrics.explained_variance_score(devg.numpy(), output.numpy()) - dev_losses.append(ev) - plt.plot(np.array(range(0,i+1,testevery)),np.array(losses), label="dev EV") - - - if graphsaveloc != None: - plt.savefig(graphsaveloc+".pdf") - with torch.enable_grad(): - optimizer.zero_grad() - output = model(data) - loss = loss_fn(output, ground) - print(loss.item()) - loss.backward() - optimizer.step() - if modelsaveloc != None: - torch.save(model, modelsaveloc) - plt.show() - return model - -#train_sgd_minibatch: same as above, but with minibatches -def train_sgd_minibatch(net, data, ground, dev=None, devg=None, epoch=100, batchsize=20, learnrate=1e-4, testevery=20, graphsaveloc=None, modelsaveloc=None, loss="mse"): - model=net.to(device) - data=data.to(device) - ground=ground.to(device) - if dev != None: - dev=dev.to(device) - losses=[] - dev_losses=[] - if loss.lower()=="mse": - loss_fn = torch.nn.MSELoss() - elif loss.lower()=="cross entropy": - loss_fn = torch.nn.CrossEntropyLoss() - elif loss.lower()=="nll": - loss_fn = torch.nn.NLLLoss() - elif loss.lower()=="poisson nll": - loss_fn = torch.nn.PoissonNLLLoss() - else: - warnings.warn("Did not specify a valid loss function. 
Returning nothing.") - return None - optimizer=torch.optim.LBFGS(model.parameters(), lr=learnrate) - itercount=0 - for i in range(epoch): - print("EPOCH "+str(i)+" OF "+str(epoch-1)) - batches=math.ceil(data.size()[0].item()/batchsize) - for j in range(batches): - batchdata=[] - batchground=[] - for k in range(j*batchsize, min((j+1)*batchsize, data.size()[0].item()),1): - batchdata.append(data[k]) - batchground.append(ground[k]) - batchdata=torch.stack(batchdata) - batchground=torch.stack(batchground) - if itercount%testevery==0: - with torch.no_grad(): - output = model(data) - ap = metrics.average_precision_score(ground.numpy(), output.numpy()) - losses.append(ap) - print(str(i)+": "+str(ap)) - plt.plot(np.array(range(0,i+1,testevery)),np.array(losses)) - if dev != None: - output = model(dev) - ap = metrics.average_precision_score(devg.numpy(), output.numpy()) - dev_losses.append(ap) - plt.plot(np.array(range(0,i+1,testevery)),np.array(losses), label="dev AP") - if graphsaveloc != None: - plt.savefig(graphsaveloc+".pdf") - with torch.enable_grad(): - optimizer.zero_grad() - output = model(batchdata) - loss = loss_fn(output, ground) - loss.backward() - optimizer.step() - itercount +=1 - if modelsaveloc != None: - torch.save(model, modelsaveloc) - plt.show() - return model - -def retyuoipufdyu(): - - data = torch.tensor(datasets.fetch_california_housing()['data']).to(torch.float) - ground = datasets.fetch_california_housing()['target'] - ground = torch.tensor(ground).to(torch.float) - model = linear_nn(8, 100, 1, 20, act_fn = "relu") - print(model) - return train_sgd_simple(model,"regression", data, ground, learnrate=1e-4, iters=1000) - -start = time.time() -retyuoipufdyu() -end = time.time() -print(end-start) diff --git a/data analysis/.ipynb_checkpoints/visualization-checkpoint.py b/data analysis/.ipynb_checkpoints/visualization-checkpoint.py deleted file mode 100644 index 21e86beb..00000000 --- a/data analysis/.ipynb_checkpoints/visualization-checkpoint.py +++ /dev/null @@ -1,130 +0,0 @@ -#Titan Robotics Team 2022: Visualization Module -#Written by Arthur Lu & Jacob Levine -#Notes: -# this should be imported as a python module using 'import visualization' -# this should be included in the local directory or environment variable -# this module has not been optimized for multhreaded computing -#Number of easter eggs: Jake is Jewish and does not observe easter. 
-#setup: - -__version__ = "1.0.0.001" - -#changelog should be viewed using print(analysis.__changelog__) -__changelog__ = """changelog: -1.0.0.xxx: - -added basic plotting, clustering, and regression comparisons""" -__author__ = ( - "Arthur Lu , " - "Jacob Levine ," - ) -__all__ = [ - 'affinity_prop', - 'bar_graph', - 'dbscan', - 'kmeans', - 'line_plot', - 'pca_comp', - 'regression_comp', - 'scatter_plot', - 'spectral', - 'vis_2d' - ] -#imports -import matplotlib.pyplot as plt -import numpy as np -from sklearn.decomposition import PCA, KernelPCA, IncrementalPCA -from sklearn.preprocessing import StandardScaler -from sklearn.cluster import AffinityPropagation, DBSCAN, KMeans, SpectralClustering - -#bar of x,y -def bar_graph(x,y): - x=np.asarray(x) - y=np.asarray(y) - plt.bar(x,y) - plt.show() - -#scatter of x,y -def scatter_plot(x,y): - x=np.asarray(x) - y=np.asarray(y) - plt.scatter(x,y) - plt.show() - -#line of x,y -def line_plot(x,y): - x=np.asarray(x) - y=np.asarray(y) - plt.scatter(x,y) - plt.show() - -#plot data + regression fit -def regression_comp(x,y,reg): - x=np.asarray(x) - y=np.asarray(y) - regx=np.arange(x.min(),x.max(),(x.max()-x.min())/1000) - regy=[] - for i in regx: - regy.append(eval(reg[0].replace("z",str(i)))) - regy=np.asarray(regy) - plt.scatter(x,y) - plt.plot(regx,regy,color="orange",linewidth=3) - plt.text(.85*max([x.max(),regx.max()]),.95*max([y.max(),regy.max()]), - u"R\u00b2="+str(round(reg[2],5)), - horizontalalignment='center', verticalalignment='center') - plt.text(.85*max([x.max(),regx.max()]),.85*max([y.max(),regy.max()]), - "MSE="+str(round(reg[1],5)), - horizontalalignment='center', verticalalignment='center') - plt.show() - -#PCA to compress down to 2d -def pca_comp(big_multidim): - pca=PCA(n_components=2) - td_norm=StandardScaler().fit_transform(big_multidim) - td_pca=pca.fit_transform(td_norm) - return td_pca - -#one-stop visualization of multidim datasets -def vis_2d(big_multidim): - td_pca=pca_comp(big_multidim) - plt.scatter(td_pca[:,0], td_pca[:,1]) - -def cluster_vis(data, cluster_assign): - pca=PCA(n_components=2) - td_norm=StandardScaler().fit_transform(data) - td_pca=pca.fit_transform(td_norm) - colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a', - '#f781bf', '#a65628', '#984ea3', - '#999999', '#e41a1c', '#dede00']), - int(max(clu) + 1)))) - colors = np.append(colors, ["#000000"]) - plt.figure(figsize=(8, 8)) - plt.scatter(td_norm[:, 0], td_norm[:, 1], s=10, color=colors[cluster_assign]) - plt.show() - -#affinity prop- slow, but ok if you don't have any idea how many you want -def affinity_prop(data, damping=.77, preference=-70): - td_norm=StandardScaler().fit_transform(data) - db = AffinityPropagation(damping=damping,preference=preference).fit(td) - y=db.predict(td_norm) - return y - -#DBSCAN- slightly faster but can label your dataset as all outliers -def dbscan(data, eps=.3): - td_norm=StandardScaler().fit_transform(data) - db = DBSCAN(eps=eps).fit(td) - y=db.labels_.astype(np.int) - return y - -#K-means clustering- the classic -def kmeans(data, num_clusters): - td_norm=StandardScaler().fit_transform(data) - db = KMeans(n_clusters=num_clusters).fit(td) - y=db.labels_.astype(np.int) - return y - -#Spectral Clustering- Seems to work really well -def spectral(data, num_clusters): - td_norm=StandardScaler().fit_transform(data) - db = SpectralClustering(n_clusters=num_clusters).fit(td) - y=db.labels_.astype(np.int) - return y diff --git a/data analysis/Untitled.ipynb b/data analysis/Untitled.ipynb deleted file mode 100644 index 
1d6d7372..00000000 --- a/data analysis/Untitled.ipynb +++ /dev/null @@ -1,88 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "import tbarequest" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2022', 'frc2358', 'frc7417']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2040', 'frc2481', 'frc81']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm70', 'match_number': 70, 'post_result_time': None, 'predicted_time': 1553357160, 'score_breakdown': None, 'set_number': 1, 'time': 1553357160, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc6237', 'frc4156', 'frc4646']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc4241', 'frc1781', 'frc2022']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm61', 'match_number': 61, 'post_result_time': None, 'predicted_time': 1553352300, 'score_breakdown': None, 'set_number': 1, 'time': 1553352300, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc1736', 'frc2022', 'frc5126']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc5690', 'frc1329', 'frc4241']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm57', 'match_number': 57, 'post_result_time': None, 'predicted_time': 1553350140, 'score_breakdown': None, 'set_number': 1, 'time': 1553350140, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc6237', 'frc2358', 'frc2039']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2022', 'frc7848', 'frc4212']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm53', 'match_number': 53, 'post_result_time': None, 'predicted_time': 1553292960, 'score_breakdown': None, 'set_number': 1, 'time': 1553292960, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc7465', 'frc4213', 'frc2704']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc1329', 'frc6055', 'frc2022']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm41', 'match_number': 41, 'post_result_time': None, 'predicted_time': 1553287200, 'score_breakdown': None, 'set_number': 1, 'time': 1553287200, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2022', 'frc4156', 'frc5442']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc5822', 'frc4143', 'frc7848']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm4', 'match_number': 4, 'post_result_time': None, 'predicted_time': 1553264820, 'score_breakdown': None, 'set_number': 1, 'time': 1553264820, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 
'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc1094', 'frc4314', 'frc6651']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc6237', 'frc2081', 'frc2022']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm38', 'match_number': 38, 'post_result_time': None, 'predicted_time': 1553285760, 'score_breakdown': None, 'set_number': 1, 'time': 1553285760, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2013', 'frc2081', 'frc6055']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc81', 'frc2022', 'frc3695']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm31', 'match_number': 31, 'post_result_time': None, 'predicted_time': 1553282400, 'score_breakdown': None, 'set_number': 1, 'time': 1553282400, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc4655', 'frc5822', 'frc2022']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc4256', 'frc323', 'frc2709']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm22', 'match_number': 22, 'post_result_time': None, 'predicted_time': 1553278080, 'score_breakdown': None, 'set_number': 1, 'time': 1553278080, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc1756', 'frc1329', 'frc1288']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc4096', 'frc2022', 'frc323']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm14', 'match_number': 14, 'post_result_time': None, 'predicted_time': 1553269860, 'score_breakdown': None, 'set_number': 1, 'time': 1553269860, 'videos': [], 'winning_alliance': ''}\n", - "{'actual_time': None, 'alliances': {'blue': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2039', 'frc2022', 'frc1288']}, 'red': {'dq_team_keys': [], 'score': -1, 'surrogate_team_keys': [], 'team_keys': ['frc2013', 'frc4096', 'frc1781']}}, 'comp_level': 'qm', 'event_key': '2019ilpe', 'key': '2019ilpe_qm10', 'match_number': 10, 'post_result_time': None, 'predicted_time': 1553267940, 'score_breakdown': None, 'set_number': 1, 'time': 1553267940, 'videos': [], 'winning_alliance': ''}\n" - ] - } - ], - "source": [ - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "tbarequest.req_team_matches('frc16', '2019','UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/data 
analysis/__pycache__/tbarequest.cpython-36.pyc b/data analysis/__pycache__/tbarequest.cpython-36.pyc deleted file mode 100644 index 1ddebe9089634e5278390f6dc3bb2e1b4b3bdac9..0000000000000000000000000000000000000000 GIT binary patch [base85 binary payload omitted]
diff --git a/data analysis/__pycache__/visualization.cpython-36.pyc b/data analysis/__pycache__/visualization.cpython-36.pyc deleted file mode 100644 index 93b7d1d12c691a4809b55d0c9f899dddbb412dcf..0000000000000000000000000000000000000000 GIT binary patch [base85 binary payload omitted]
diff --git a/data analysis/superscript.py b/data analysis/superscript.py --- a/data analysis/superscript.py +++ b/data analysis/superscript.py __author__ = ( "Arthur Lu , " "Jacob Levine ," -) + ) import firebase_admin from firebase_admin import credentials @@ -57,24 +57,22 @@ import time import tbarequest as tba import csv - def titanservice(): - + print("[OK] loading data") start = time.time() - + source_dir = 'data' - # supposedly sorts by alphabetical order, skips reading teams.csv because of redundancy - file_list =
glob.glob(source_dir + '/*.csv') + file_list = glob.glob(source_dir + '/*.csv') #supposedly sorts by alphabetical order, skips reading teams.csv because of redundancy data = [] - files = [fn for fn in glob.glob('data/*.csv') - if not (os.path.basename(fn).startswith('scores') or os.path.basename(fn).startswith('teams') or os.path.basename(fn).startswith('match') or os.path.basename(fn).startswith('notes') or os.path.basename(fn).startswith('observationType') or os.path.basename(fn).startswith('teamDBRef'))] # scores will be handled sperately + files = [fn for fn in glob.glob('data/*.csv') + if not (os.path.basename(fn).startswith('scores') or os.path.basename(fn).startswith('teams') or os.path.basename(fn).startswith('match') or os.path.basename(fn).startswith('notes') or os.path.basename(fn).startswith('observationType') or os.path.basename(fn).startswith('teamDBRef'))] #scores will be handled sperately for i in files: - data.append(analysis.load_csv(i)) + data.append(analysis.load_csv(i)) - # print(files) + #print(files) stats = [] measure_stats = [] @@ -85,168 +83,156 @@ def titanservice(): print("[OK] loaded data in " + str(end - start) + " seconds") - # assumes that team number is in the first column, and that the order of teams is the same across all files - # unhelpful comment - for measure in data: # unpacks 3d array into 2ds + #assumes that team number is in the first column, and that the order of teams is the same across all files + #unhelpful comment + for measure in data: #unpacks 3d array into 2ds measure_stats = [] - for i in range(len(measure)): # unpacks into specific teams + for i in range(len(measure)): #unpacks into specific teams - #ofbest_curve = [None] - #r2best_curve = [None] + #ofbest_curve = [None] + #r2best_curve = [None] - line = measure[i] + line = measure[i] - # print(line) + #print(line) - #x = list(range(len(line))) - #eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1) + #x = list(range(len(line))) + #eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1) - #beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "min_overfit") + #beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "min_overfit") - #print(eqs, rmss, r2s, overfit) + #print(eqs, rmss, r2s, overfit) + + #ofbest_curve.append(beqs) + #ofbest_curve.append(brmss) + #ofbest_curve.append(br2s) + #ofbest_curve.append(boverfit) + #ofbest_curve.pop(0) - # ofbest_curve.append(beqs) - # ofbest_curve.append(brmss) - # ofbest_curve.append(br2s) - # ofbest_curve.append(boverfit) - # ofbest_curve.pop(0) + #print(ofbest_curve) - # print(ofbest_curve) + #beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "max_r2s") - #beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "max_r2s") + #r2best_curve.append(beqs) + #r2best_curve.append(brmss) + #r2best_curve.append(br2s) + #r2best_curve.append(boverfit) + #r2best_curve.pop(0) - # r2best_curve.append(beqs) - # r2best_curve.append(brmss) - # r2best_curve.append(br2s) - # r2best_curve.append(boverfit) - # r2best_curve.pop(0) + #print(r2best_curve) - # print(r2best_curve) - - measure_stats.append(teams[i] + list(analysis.basic_stats( - line, 0, 0)) + list(analysis.histo_analysis(line, 1, -3, 3))) + + measure_stats.append(teams[i] + list(analysis.basic_stats(line, 0, 0)) + list(analysis.histo_analysis(line, 1, -3, 3))) stats.append(list(measure_stats)) nishant = [] - + for i in range(len(scores)): - # 
print(scores) + #print(scores) - ofbest_curve = [None] - r2best_curve = [None] + ofbest_curve = [None] + r2best_curve = [None] - line = scores[i] + line = scores[i] - # print(line) + #print(line) - # print(line) + #print(line) - x = list(range(len(line))) - eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1) + x = list(range(len(line))) + eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1) - beqs, brmss, br2s, boverfit = analysis.select_best_regression( - eqs, rmss, r2s, overfit, "min_overfit") + beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "min_overfit") - #print(eqs, rmss, r2s, overfit) + #print(eqs, rmss, r2s, overfit) + + ofbest_curve.append(beqs) + ofbest_curve.append(brmss) + ofbest_curve.append(br2s) + ofbest_curve.append(boverfit) + ofbest_curve.pop(0) - ofbest_curve.append(beqs) - ofbest_curve.append(brmss) - ofbest_curve.append(br2s) - ofbest_curve.append(boverfit) - ofbest_curve.pop(0) + #print(ofbest_curve) - # print(ofbest_curve) + beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "max_r2s") - beqs, brmss, br2s, boverfit = analysis.select_best_regression( - eqs, rmss, r2s, overfit, "max_r2s") + r2best_curve.append(beqs) + r2best_curve.append(brmss) + r2best_curve.append(br2s) + r2best_curve.append(boverfit) + r2best_curve.pop(0) - r2best_curve.append(beqs) - r2best_curve.append(brmss) - r2best_curve.append(br2s) - r2best_curve.append(boverfit) - r2best_curve.pop(0) + #print(r2best_curve) + + z = len(scores[0]) + 1 + nis_num = [] - # print(r2best_curve) + nis_num.append(eval(str(ofbest_curve[0]))) + nis_num.append(eval(str(r2best_curve[0]))) - z = len(scores[0]) + 1 - nis_num = [] - - nis_num.append(eval(str(ofbest_curve[0]))) - nis_num.append(eval(str(r2best_curve[0]))) - - nis_num.append((eval(ofbest_curve[0]) + eval(r2best_curve[0])) / 2) - - nishant.append(teams[i] + nis_num) + nis_num.append((eval(ofbest_curve[0]) + eval(r2best_curve[0])) / 2) + nishant.append(teams[i] + nis_num) + json_out = {} score_out = {} for i in range(len(teams)): - score_out[str(teams[i][0])] = (nishant[i]) + score_out[str(teams[i][0])] = (nishant[i]) location = db.collection(u'stats').document(u'stats-noNN') for i in range(len(teams)): general_general_stats = location.collection(teams[i][0]) - + for j in range(len(files)): json_out[str(teams[i][0])] = (stats[j][i]) name = os.path.basename(files[j]) - general_general_stats.document(name).set( - {'stats': json_out.get(teams[i][0])}) + general_general_stats.document(name).set({'stats':json_out.get(teams[i][0])}) for i in range(len(teams)): - nnum = location.collection(teams[i][0]).document( - u'nishant_number').set({'nishant': score_out.get(teams[i][0])}) - + nnum = location.collection(teams[i][0]).document(u'nishant_number').set({'nishant':score_out.get(teams[i][0])}) def pulldata(): teams = analysis.load_csv('data/teams.csv') scores = [] for i in range(len(teams)): team_scores = [] - # print(teams[i][0]) - request_data_object = tba.req_team_matches( - teams[i][0], 2019, "UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5") + #print(teams[i][0]) + request_data_object = tba.req_team_matches(teams[i][0], 2019, "UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5") json_data = request_data_object.json() for match in range(len(json_data) - 1, -1, -1): if json_data[match].get('winning_alliance') == "": - # print(json_data[match]) + #print(json_data[match]) json_data.remove(json_data[match]) - - json_data = 
sorted(json_data, key=lambda k: k.get( - 'actual_time', 0), reverse=False) + + json_data = sorted(json_data, key=lambda k: k.get('actual_time', 0), reverse=False) for j in range(len(json_data)): if "frc" + teams[i][0] in json_data[j].get('alliances').get('blue').get('team_keys'): - team_scores.append(json_data[j].get( - 'alliances').get('blue').get('score')) + team_scores.append(json_data[j].get('alliances').get('blue').get('score')) elif "frc" + teams[i][0] in json_data[j].get('alliances').get('red').get('team_keys'): - team_scores.append(json_data[j].get( - 'alliances').get('red').get('score')) + team_scores.append(json_data[j].get('alliances').get('red').get('score')) scores.append(team_scores) - with open("data/scores.csv", "w+", newline='') as file: - writer = csv.writer(file, delimiter=',') + with open("data/scores.csv", "w+", newline = '') as file: + writer = csv.writer(file, delimiter = ',') writer.writerows(scores) list_teams = teams - teams = db.collection('data').document( - 'team-2022').collection("Central 2019").get() - full = [] - tms = [] + teams=db.collection('data').document('team-2022').collection("Central 2019").get() + full=[] + tms=[] for team in teams: - + tms.append(team.id) - reports = db.collection('data').document( - 'team-2022').collection("Central 2019").document(team.id).collection("matches").get() + reports=db.collection('data').document('team-2022').collection("Central 2019").document(team.id).collection("matches").get() for report in reports: - data = [] - data.append(db.collection('data').document('team-2022').collection("Central 2019").document( - team.id).collection("matches").document(report.id).get().to_dict()) + data=[] + data.append(db.collection('data').document('team-2022').collection("Central 2019").document(team.id).collection("matches").document(report.id).get().to_dict()) full.append(data) quant_keys = [] @@ -257,22 +243,21 @@ def pulldata(): for i in range(len(full)): for j in range(len(full[i])): for key in list(full[i][j].keys()): - + if "Quantitative" in key: - + quant_keys.append(key) - + if full[i][j].get(key).get('teamDBRef')[5:] in list_teams: - + var = {} measured_vars = [] - + for k in range(len(list(full[i][j].get(key).keys()))): individual_keys = list(full[i][j].get(key).keys()) - - var[individual_keys[k]] = full[i][j].get( - key).get(individual_keys[k]) + + var[individual_keys[k]] = full[i][j].get(key).get(individual_keys[k]) out.append(var) @@ -306,8 +291,7 @@ def pulldata(): for i in sorted_out: - team_index = list_teams.index( - sorted_out[sorted_out.index(i)][j_list.index('teamDBRef')][5:]) + team_index = list_teams.index(sorted_out[sorted_out.index(i)][j_list.index('teamDBRef')][5:]) for j in range(len(i)): @@ -315,12 +299,11 @@ def pulldata(): for i in range(len(big_out)): - with open('data/' + j_list[i] + '.csv', "w+", newline='') as file: + with open('data/' + j_list[i] + '.csv', "w+", newline = '') as file: - writer = csv.writer(file, delimiter=',') + writer = csv.writer(file, delimiter = ',') writer.writerows(big_out[i]) - - + def service(): while True: @@ -332,18 +315,17 @@ def service(): print("[OK] starting calculations") fucked = False - + for i in range(0, 5): - # try: - titanservice() - break - # except: - if (i != 4): - print("[WARNING] failed, trying " + - str(5 - i - 1) + " more times") - else: - print("[ERROR] failed to compute data, skipping") - fucked = True + #try: + titanservice() + break + #except: + if (i != 4): + print("[WARNING] failed, trying " + str(5 - i - 1) + " more times") + else: + print("[ERROR] 
failed to compute data, skipping") + fucked = True end = time.time() if (fucked == True): @@ -351,16 +333,15 @@ def service(): break else: - + print("[OK] finished calculations") print("[OK] waiting: " + str(300 - (end - start)) + " seconds" + "\n") - time.sleep(300 - (end - start)) # executes once every 5 minutes - + time.sleep(300 - (end - start)) #executes once every 5 minutes warnings.simplefilter("ignore") -# Use a service account +#Use a service account try: cred = credentials.Certificate('keys/firebasekey.json') except: @@ -369,5 +350,5 @@ firebase_admin.initialize_app(cred) db = firestore.client() -service() # finally we write something that isn't a function definition -# titanservice() +service() #finally we write something that isn't a function definition +#titanservice()
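
Review notes on the code this revert restores. First, the score-pulling path: pulldata() drops matches whose winning_alliance is empty (unplayed matches, as in the notebook output above), sorts the remainder by actual_time, and takes the team's alliance score from whichever side lists the team key. A minimal self-contained sketch of that logic against the match-dict shape shown in the notebook output; the helper name and the sample score values are illustrative, not part of the patch:

def extract_team_scores(team_key, matches):
    # TBA reports an empty winning_alliance for unplayed matches; drop them.
    played = [m for m in matches if m.get('winning_alliance') != '']
    # Order chronologically; `or 0` also covers a missing/None actual_time.
    played.sort(key=lambda m: m.get('actual_time') or 0)
    scores = []
    for m in played:
        alliances = m.get('alliances', {})
        for color in ('blue', 'red'):
            if team_key in alliances.get(color, {}).get('team_keys', []):
                scores.append(alliances[color].get('score'))
                break
    return scores

if __name__ == '__main__':
    sample = [
        {'actual_time': 1553264900, 'winning_alliance': 'red',
         'alliances': {'blue': {'score': 37, 'team_keys': ['frc2022', 'frc4156', 'frc5442']},
                       'red': {'score': 45, 'team_keys': ['frc5822', 'frc4143', 'frc7848']}}},
        {'actual_time': None, 'winning_alliance': '',  # unplayed, dropped
         'alliances': {'blue': {'score': -1, 'team_keys': ['frc2022', 'frc2358', 'frc7417']},
                       'red': {'score': -1, 'team_keys': ['frc2040', 'frc2481', 'frc81']}}},
    ]
    print(extract_team_scores('frc2022', sample))  # -> [37]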
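
Second, the deleted tbarequest.process_json_ret flattens a list of JSON objects into a header row plus data rows by first collecting the union of keys. As written, its inner membership scan is quadratic, and on a non-200 response it concatenates the integer status code into the ValueError message, which itself raises TypeError. A sketch of the same flattening with those two issues addressed; the function name is mine:

def flatten_json_list(records):
    # Insertion-ordered union of all keys seen across the records
    # (dict keys keep insertion order in Python 3.7+).
    keys = {}
    for rec in records:
        for k in rec:
            keys.setdefault(k, None)
    header = list(keys)
    rows = [header]
    for rec in records:
        # Blank-fill keys a given record is missing, as the original does.
        rows.append([rec.get(k, "") for k in header])
    return rows

if __name__ == '__main__':
    data = [{'key': '2019ilpe_qm70', 'match_number': 70},
            {'key': '2019ilpe_qm61', 'comp_level': 'qm'}]
    for row in flatten_json_list(data):
        print(row)

The status-code check should read raise ValueError('Status code is: ' + str(req.status_code) + ', not 200') to avoid the secondary TypeError.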
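
Third, in the deleted visualization-checkpoint.py, the clustering helpers (affinity_prop, dbscan, kmeans, spectral) standardize the input into td_norm but then fit on an undefined name td, so each raises NameError as written; cluster_vis likewise references an undefined clu and uses islice/cycle without importing them. A corrected sketch of one of them, kmeans, keeping the same scale-then-cluster design:

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans

def kmeans(data, num_clusters):
    # Standardize features, then fit K-means and return integer labels.
    td_norm = StandardScaler().fit_transform(data)
    db = KMeans(n_clusters=num_clusters).fit(td_norm)  # original fit the undefined `td`
    return db.labels_.astype(int)  # np.int is deprecated in modern NumPy

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    pts = np.vstack([rng.normal(0, 1, (20, 2)), rng.normal(8, 1, (20, 2))])
    print(kmeans(pts, 2))

The same one-character fix (td -> td_norm) applies to the other three helpers.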
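
Finally, note that on both sides of the last superscript.py hunk the try:/except: around titanservice() is commented out, so the loop runs it once, hits the unconditional break, and the retry/warning branch below is unreachable (and any exception propagates). The deleted superscript-checkpoint.py above shows the intended live form. A sketch of that retry pattern with the handling actually in place; names are mine, and Exception is caught instead of the original bare except (which would also swallow KeyboardInterrupt):

def run_with_retries(task, attempts=5):
    # Attempt `task` up to `attempts` times, warning on each failure,
    # mirroring the [WARNING]/[ERROR] messages in service().
    for i in range(attempts):
        try:
            task()
            return True
        except Exception as e:
            remaining = attempts - i - 1
            if remaining:
                print("[WARNING] failed (" + str(e) + "), trying " + str(remaining) + " more times")
            else:
                print("[ERROR] failed to compute data, skipping")
    return False

if __name__ == '__main__':
    state = {'calls': 0}
    def flaky():
        state['calls'] += 1
        if state['calls'] < 3:
            raise RuntimeError("transient failure")
    print(run_with_retries(flaky))  # warns twice, then True

Separately, the restored time.sleep(300 - (end - start)) raises ValueError whenever a cycle takes longer than five minutes; time.sleep(max(0, 300 - (end - start))) is the defensive form.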