added TBA requests module

jlevine18 2018-12-21 11:04:46 -06:00 committed by GitHub
parent ad8e3cc4e5
commit cf897c7a8f
2 changed files with 223 additions and 131 deletions


@@ -0,0 +1,94 @@
#Titan Robotics Team 2022: TBA Requests Module
#Written by Arthur Lu & Jacob Levine
#Notes:
# this should be imported as a python module
# this should be included in the local directory or environment variable
# this module has not been optimized for multithreaded computing
#Number of easter eggs: none yet
#setup:
__version__ = "1.0.0.001"
#changelog should be viewed using print(__changelog__)
__changelog__ = """changelog:
1.0.0.xxx:
    -added common requests and JSON processing"""
__author__ = (
    "Arthur Lu <arthurlu@ttic.edu>, "
    "Jacob Levine <jlevine@ttic.edu>,"
)
__all__ = [
    'process_json_ret',
    'req_all_events',
    'req_event_matches',
    'req_event_insights',
    'req_event_elim_alli',
    'req_team_events',
    'req_team_matches'
]
#imports
import requests
#as this code is public, 2022's API key is not included here; define it as a variable in your own script and pass it to each function
#requests a list of events that a team went to
def req_team_events(team,year,apikey):
    headers={'X-TBA-Auth-Key':apikey}
    r=requests.get('https://www.thebluealliance.com/api/v3/team/frc'+str(team)+'/events/'+str(year),headers=headers)
    return r
#gets every match that a team played in
def req_team_matches(team,year,apikey):
    headers={'X-TBA-Auth-Key':apikey}
    r=requests.get('https://www.thebluealliance.com/api/v3/team/frc'+str(team)+'/matches/'+str(year), headers=headers)
    return r
#gets all events in a certain year
def req_all_events(year, apikey):
    headers={'X-TBA-Auth-Key':apikey}
    r=requests.get('https://www.thebluealliance.com/api/v3/events/'+str(year), headers=headers)
    return r
#gets all matches for an event
def req_event_matches(event_key,apikey):
    headers={'X-TBA-Auth-Key':apikey}
    r=requests.get('https://www.thebluealliance.com/api/v3/event/'+str(event_key)+'/matches', headers=headers)
    return r
#gets elimination alliances from an event
def req_event_elim_alli(event_key, apikey):
    headers={'X-TBA-Auth-Key':apikey}
    r=requests.get('https://www.thebluealliance.com/api/v3/event/'+str(event_key)+'/alliances', headers=headers)
    return r
#gets TBA's insights from an event
def req_event_insights(event_key, apikey):
    headers={'X-TBA-Auth-Key':apikey}
    r=requests.get('https://www.thebluealliance.com/api/v3/event/'+str(event_key)+'/insights', headers=headers)
    return r
#processes the json return into a 2d table: a header row of keys, then one row per result. slow and not great right now. raises a ValueError if it doesn't get a 200 status code
def process_json_ret(req):
    if req.status_code == 200:
        #collect the union of keys across all returned objects
        keys=[]
        for i in req.json():
            for j in i.keys():
                if j not in keys:
                    keys.append(j)
        #header row first, then one row per object, "" where a key is missing
        out=[]
        out.append(keys)
        for i in req.json():
            buf=[]
            for j in keys:
                try:
                    buf.append(i[j])
                except KeyError:
                    buf.append("")
            out.append(buf)
        return out
    else:
        raise ValueError('Status code is: '+str(req.status_code)+', not 200')
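
A minimal usage sketch of this module; the commit does not show the file's name, so 'tbarequests' and 'my_tba_key' below are placeholder names:

    import tbarequests #placeholder module name
    my_tba_key = "YOUR-TBA-AUTH-KEY" #placeholder; keep real keys out of public code
    r = tbarequests.req_team_events(2022, 2018, my_tba_key)
    table = tbarequests.process_json_ret(r)
    print(table[0]) #header row: union of keys across all returned events
    print(table[1]) #first event's values, "" where a key was missing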


@@ -1,131 +1,129 @@
#Titan Robotics Team 2022: Visualization Module
#Written by Arthur Lu & Jacob Levine
#Notes:
# this should be imported as a python module using 'import visualization'
# this should be included in the local directory or environment variable
# this module has not been optimized for multithreaded computing
#Number of easter eggs: Jake is Jewish and does not observe easter.
#setup:
__version__ = "1.0.0.001"
#changelog should be viewed using print(visualization.__changelog__)
__changelog__ = """changelog:
1.0.0.xxx:
    -added basic plotting, clustering, and regression comparisons"""
__author__ = (
    "Arthur Lu <arthurlu@ttic.edu>, "
    "Jacob Levine <jlevine@ttic.edu>,"
)
__all__ = [
    'affinity_prop',
    'bar_graph',
    'dbscan',
    'kmeans',
    'line_plot',
    'pca_comp',
    'regression_comp',
    'scatter_plot',
    'spectral',
    'vis_2d'
]
#imports
import matplotlib.pyplot as plt
import numpy as np #np.asarray and array indexing below
from itertools import cycle, islice #color cycling in cluster_vis
from sklearn.decomposition import PCA, KernelPCA, IncrementalPCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import AffinityPropagation, DBSCAN, KMeans, SpectralClustering
#bar of x,y
def bar_graph(x,y):
    x=np.asarray(x)
    y=np.asarray(y)
    plt.bar(x,y)
    plt.show()
#scatter of x,y
def scatter_plot(x,y):
    x=np.asarray(x)
    y=np.asarray(y)
    plt.scatter(x,y)
    plt.show()
#line of x,y
def line_plot(x,y):
    x=np.asarray(x)
    y=np.asarray(y)
    plt.plot(x,y) #draws a connected line, matching the function's name
    plt.show()
#plot data + regression fit
def regression_comp(x,y,reg):
    x=np.asarray(x)
    y=np.asarray(y)
    regx=np.arange(x.min(),x.max(),(x.max()-x.min())/1000)
    regy=[]
    for i in regx:
        regy.append(eval(reg[0].replace("z",str(i)))) #reg[0] is an expression string in "z"; only pass trusted expressions to eval
    regy=np.asarray(regy)
    plt.scatter(x,y)
    plt.plot(regx,regy,color="orange",linewidth=3)
    plt.text(.85*max([x.max(),regx.max()]),.95*max([y.max(),regy.max()]),
             u"R\u00b2="+str(round(reg[2],5)),
             horizontalalignment='center', verticalalignment='center')
    plt.text(.85*max([x.max(),regx.max()]),.85*max([y.max(),regy.max()]),
             "MSE="+str(round(reg[1],5)),
             horizontalalignment='center', verticalalignment='center')
    plt.show()
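#usage sketch for regression_comp: reg is read as (expression string in "z", MSE, R^2),
#so a call with made-up values looks like:
#   regression_comp([1,2,3,4,5],[2.1,3.9,6.2,7.8,10.1],("2*z+0.05",0.012,0.999))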
#PCA to compress down to 2d
def pca_comp(big_multidim):
    pca=PCA(n_components=2)
    td_norm=StandardScaler().fit_transform(big_multidim)
    td_pca=pca.fit_transform(td_norm)
    return td_pca
#one-stop visualization of multidim datasets
def vis_2d(big_multidim):
    td_pca=pca_comp(big_multidim)
    plt.scatter(td_pca[:,0], td_pca[:,1])
    plt.show()
#plots cluster assignments on a 2d PCA projection, one color per cluster
def cluster_vis(data, cluster_assign):
    pca=PCA(n_components=2)
    td_norm=StandardScaler().fit_transform(data)
    td_pca=pca.fit_transform(td_norm)
    colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
                                         '#f781bf', '#a65628', '#984ea3',
                                         '#999999', '#e41a1c', '#dede00']),
                                  int(max(cluster_assign) + 1))))
    colors = np.append(colors, ["#000000"]) #black for outliers labeled -1
    plt.figure(figsize=(8, 8))
    plt.scatter(td_pca[:, 0], td_pca[:, 1], s=10, color=colors[cluster_assign])
    plt.show()
#affinity prop- slow, but ok if you don't have any idea how many clusters you want
def affinity_prop(data, damping=.77, preference=-70):
    td_norm=StandardScaler().fit_transform(data)
    db = AffinityPropagation(damping=damping,preference=preference).fit(td_norm)
    y=db.predict(td_norm)
    return y
#DBSCAN- slightly faster but can label your whole dataset as outliers
def dbscan(data, eps=.3):
    td_norm=StandardScaler().fit_transform(data)
    db = DBSCAN(eps=eps).fit(td_norm)
    y=db.labels_.astype(int) #-1 marks outliers
    return y
#K-means clustering- the classic
def kmeans(data, num_clusters):
    td_norm=StandardScaler().fit_transform(data)
    db = KMeans(n_clusters=num_clusters).fit(td_norm)
    y=db.labels_.astype(int)
    return y
#Spectral Clustering- Seems to work really well
def spectral(data, num_clusters):
    td_norm=StandardScaler().fit_transform(data)
    db = SpectralClustering(n_clusters=num_clusters).fit(td_norm)
    y=db.labels_.astype(int)
    return y
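
A minimal end-to-end sketch of the clustering helpers; the dataset is randomly generated here, and 'visualization' is the import name the module's own notes suggest:

    import numpy as np
    import visualization
    data = np.random.rand(100, 5) #made-up 100-sample, 5-feature dataset
    labels = visualization.kmeans(data, 3) #partition into 3 clusters
    visualization.cluster_vis(data, labels) #2d PCA scatter, one color per cluster
    print(visualization.spectral(data, 3)) #or compare against spectral clustering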