From bd79ddff6be2dbdf4106d5673f0e465010de5be3 Mon Sep 17 00:00:00 2001
From: jlevine18
Date: Thu, 21 Mar 2019 16:55:04 -0500
Subject: [PATCH] Update superscript.py

---
 data analysis/superscript.py | 131 +++++++++++++++++++++++++++++------
 1 file changed, 109 insertions(+), 22 deletions(-)

diff --git a/data analysis/superscript.py b/data analysis/superscript.py
index e29e199e..de6a9899 100644
--- a/data analysis/superscript.py
+++ b/data analysis/superscript.py
@@ -67,7 +67,7 @@ def titanservice():
     file_list = glob.glob(source_dir + '/*.csv') #supposedly sorts by alphabetical order, skips reading teams.csv because of redundancy
     data = []
     files = [fn for fn in glob.glob('data/*.csv')
-             if not (os.path.basename(fn).startswith('teams'))] #scores will be handled sperately
+             if not (os.path.basename(fn).startswith('teams') or os.path.basename(fn).startswith('match') or os.path.basename(fn).startswith('notes') or os.path.basename(fn).startswith('observationType') or os.path.basename(fn).startswith('teamDBRef'))] #scores will be handled separately

     for i in files:
         data.append(analysis.load_csv(i))
@@ -89,40 +89,44 @@ def titanservice():

         for i in range(len(measure)): #unpacks into specific teams

-            ofbest_curve = [None]
-            r2best_curve = [None]
+            print(i)
+            print(measure)
+            print(len(measure))
+
+            #ofbest_curve = [None]
+            #r2best_curve = [None]

             line = measure[i]

             #print(line)

-            x = list(range(len(line)))
-            eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1)
+            #x = list(range(len(line)))
+            #eqs, rmss, r2s, overfit = analysis.optimize_regression(x, line, 10, 1)

-            beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "min_overfit")
+            #beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "min_overfit")

             #print(eqs, rmss, r2s, overfit)

-            ofbest_curve.append(beqs)
-            ofbest_curve.append(brmss)
-            ofbest_curve.append(br2s)
-            ofbest_curve.append(boverfit)
-            ofbest_curve.pop(0)
+            #ofbest_curve.append(beqs)
+            #ofbest_curve.append(brmss)
+            #ofbest_curve.append(br2s)
+            #ofbest_curve.append(boverfit)
+            #ofbest_curve.pop(0)

             #print(ofbest_curve)

-            beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "max_r2s")
+            #beqs, brmss, br2s, boverfit = analysis.select_best_regression(eqs, rmss, r2s, overfit, "max_r2s")

-            r2best_curve.append(beqs)
-            r2best_curve.append(brmss)
-            r2best_curve.append(br2s)
-            r2best_curve.append(boverfit)
-            r2best_curve.pop(0)
+            #r2best_curve.append(beqs)
+            #r2best_curve.append(brmss)
+            #r2best_curve.append(br2s)
+            #r2best_curve.append(boverfit)
+            #r2best_curve.pop(0)

             #print(r2best_curve)

-            measure_stats.append(teams[i] + list(analysis.basic_stats(line, 0, 0)) + list(analysis.histo_analysis(line, 1, -3, 3)) + ofbest_curve + r2best_curve)
+            measure_stats.append(teams[i] + list(analysis.basic_stats(line, 0, 0)) + list(analysis.histo_analysis(line, 1, -3, 3)))

         stats.append(list(measure_stats))
         nishant = []

@@ -197,11 +201,11 @@ def pulldata():
             #print(teams[i][0])
             request_data_object = tba.req_team_matches(teams[i][0], 2019, "UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5")
             json_data = request_data_object.json()
+
             for match in range(len(json_data) - 1, -1, -1):
                 if json_data[match].get('winning_alliance') == "":
-                    print(json_data[match])
+                    #print(json_data[match])
                     json_data.remove(json_data[match])
-
             json_data = sorted(json_data, key=lambda k: k.get('actual_time', 0), reverse=False)

             for j in range(len(json_data)):
@@ -214,6 +218,89 @@ def pulldata():
     with open("data/scores.csv", "w+", newline = '') as file:
         writer = csv.writer(file, delimiter = ',')
         writer.writerows(scores)
+
+    list_teams = teams
+    teams=db.collection('data').document('team-2022').collection("Central 2019").get()
+    full=[]
+    tms=[]
+    for team in teams:
+
+        tms.append(team.id)
+        reports=db.collection('data').document('team-2022').collection("Central 2019").document(team.id).collection("matches").get()
+
+        for report in reports:
+            data=[]
+            data.append(db.collection('data').document('team-2022').collection("Central 2019").document(team.id).collection("matches").document(report.id).get().to_dict())
+            full.append(data)
+
+    quant_keys = []
+
+    out = []
+    var = {}
+
+    for i in range(len(full)):
+        for j in range(len(full[i])):
+            for key in list(full[i][j].keys()):
+
+                if "Quantitative" in key:
+
+                    quant_keys.append(key)
+
+                    if full[i][j].get(key).get('teamDBRef')[5:] in list_teams:
+
+                        var = {}
+                        measured_vars = []
+
+                        for k in range(len(list(full[i][j].get(key).keys()))):
+
+                            individual_keys = list(full[i][j].get(key).keys())
+
+                            var[individual_keys[k]] = full[i][j].get(key).get(individual_keys[k])
+
+                        out.append(var)
+
+    sorted_out = []
+
+    for i in out:
+
+        j_list = []
+
+        key_list = []
+
+        sorted_keys = sorted(i.keys())
+
+        for j in sorted_keys:
+
+            key_list.append(i[j])
+
+            j_list.append(j)
+
+        sorted_out.append(key_list)
+
+    var_index = 0
+    team_index = 0
+
+    big_out = []
+
+    for j in range(len(i)):
+        big_out.append([])
+        for t in range(len(list_teams)):
+            big_out[j].append([])
+
+    for i in sorted_out:
+
+        team_index = list_teams.index(sorted_out[sorted_out.index(i)][j_list.index('teamDBRef')][5:])
+
+        for j in range(len(i)):
+
+            big_out[j][team_index].append(i[j])
+
+    for i in range(len(big_out)):
+
+        with open('data/' + j_list[i] + '.csv', "w+", newline = '') as file:
+
+            writer = csv.writer(file, delimiter = ',')
+            writer.writerows(big_out[i])


 def service():
@@ -228,10 +315,10 @@ def service():
     fucked = False

     for i in range(0, 5):
-        try:
+        #try:
             titanservice()
             break
-        except:
+        #except:
             if (i != 4):
                 print("[WARNING] failed, trying " + str(5 - i - 1) + " more times")
             else: