analysis.py v 1.1.12.002, superscript.py v 0.0.0.003
art 2020-02-18 20:29:22 -06:00
parent 260eea1fab
commit 75409cfe24
3 changed files with 45 additions and 20 deletions

analysis.py

@@ -7,10 +7,12 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
-__version__ = "1.1.12.001"
+__version__ = "1.1.12.002"
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    1.1.12.002:
+        - removed team first time trueskill instantiation in favor of integration in superscript.py
     1.1.12.001:
         - improved readability of regression outputs by stripping tensor data
         - used map with lambda to achieve the improved readability
@@ -394,35 +396,31 @@ def regression(ndevice, inputs, outputs, args, loss = torch.nn.MSELoss(), _itera
     return regressions
 
 @jit(nopython=True)
-def elo(starting_score, opposing_scores, observed, N, K):
-    expected = 1/(1+10**((np.array(opposing_scores) - starting_score)/N))
+def elo(starting_score, opposing_score, observed, N, K):
+    expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N))
     return starting_score + K*(np.sum(observed) - np.sum(expected))
 
 @jit(forceobj=True)
-def gliko2(starting_score, starting_rd, starting_vol, opposing_scores, opposing_rd, observations):
+def gliko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):
     player = Gliko2(rating = starting_score, rd = starting_rd, vol = starting_vol)
-    player.update_player([x for x in opposing_scores], [x for x in opposing_rd], observations)
+    player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations)
     return (player.rating, player.rd, player.vol)
 
 @jit(forceobj=True)
-def trueskill(teams_data, observations):#teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
+def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
     team_ratings = []
     for team in teams_data:
         team_temp = []
         for player in team:
-            if player != None:
-                player = Trueskill.Rating(player[0], player[1])
-                team_temp.append(player)
-            else:
-                player = Trueskill.Rating()
-                team_temp.append(player)
+            player = Trueskill.Rating(player[0], player[1])
+            team_temp.append(player)
         team_ratings.append(team_temp)
     return Trueskill.rate(teams_data, observations)
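For reference, a minimal, self-contained sketch (not part of this commit) of what the elo() helper above computes; the standalone elo_update function, the opponent ratings, and the constants are purely illustrative, with N set to the conventional Elo scale factor of 400 and K to 32:

import numpy as np

def elo_update(starting_score, opposing_score, observed, N, K):
    # same expected-score formula as analysis.elo in the hunk above, without the @jit decorator
    expected = 1 / (1 + 10 ** ((np.array(opposing_score) - starting_score) / N))
    return starting_score + K * (np.sum(observed) - np.sum(expected))

# hypothetical team rated 1500 plays a 1600-rated and a 1400-rated opponent
print(elo_update(1500, [1600, 1400], [1, 1], 400, 32))  # wins both   -> 1532.0
print(elo_update(1500, [1600, 1400], [1, 0], 400, 32))  # splits them -> 1500.0 (the expected scores sum to 1)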

superscript.py

@@ -3,10 +3,12 @@
 # Notes:
 # setup:
-__version__ = "0.0.0.002"
+__version__ = "0.0.0.003"
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    0.0.0.003:
+        - added metricsloop which is unfinished
     0.0.0.002:
         - added simpleloop which is untested until data is provided
     0.0.0.001:
@@ -27,6 +29,10 @@ __all__ = [
 from analysis import analysis as an
 from numba import jit
 import numpy as np
+try:
+    from analysis import trueskill as Trueskill
+except:
+    import trueskill as Trueskill
 
 def main():
@@ -54,28 +60,49 @@ def simpleloop(data, tests): # expects 3D array with [Team][Variable][Match]
            variable_vector.append(an.histo_analysis(variable))
-        if(test == "sr.lin" or test == "sregression.lin" or test == 2):
+        if(test == "r.lin" or test == "regression.lin" or test == 2):
            variable_vector.append(an.regression("cpu", range(0, len(variable) - 1), variable, ["lin"]))
-        if(test == "sr.log" or test == "sregression.log" or test == 3):
+        if(test == "r.log" or test == "regression.log" or test == 3):
            variable_vector.append(an.regression("cpu", range(0, len(variable) - 1), variable, ["log"]))
-        if(test == "sr.exp" or test == "sregression.exp" or test == 4):
+        if(test == "r.exp" or test == "regression.exp" or test == 4):
            variable_vector.append(an.regression("cpu", range(0, len(variable) - 1), variable, ["exp"]))
-        if(test == "sr.ply" or test == "sregression.ply" or test == 5):
+        if(test == "r.ply" or test == "regression.ply" or test == 5):
            variable_vector.append(an.regression("cpu", range(0, len(variable) - 1), variable, ["ply"]))
-        if(test == "sr.sig" or test == "sregression.sig" or test == 6):
+        if(test == "r.sig" or test == "regression.sig" or test == 6):
            variable_vector.append(an.regression("cpu", range(0, len(variable) - 1), variable, ["sig"]))
-def metricsloop(data):
-    pass
+def metricsloop(team_lookup, data, tests): # expects array with [Match] ([Teams], [Win/Loss])
+    scores = {}
+    elo_starting_score = 1500
+    N = 1500
+    K = 32
+    gl2_starting_score = 1500
+    gl2_starting_rd = 350
+    gl2_starting_vol = 0.06
+    for team in team_lookup:
+        elo = elo_starting_score
+        gl2 = {"score": gl2_starting_score, "rd": gl2_starting_rd, "vol": gl2_starting_vol}
+        ts = Trueskill.Rating()
+        scores[str(team)] = {"elo": elo, "gl2": gl2, "ts": ts}
+    for match in data:
+        groups = match[0]
+        observations = match[1]
 main()
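To make the data shapes described in the comments above concrete, here is a small hand-written sketch (not from the repository); every team number, score, and test key is hypothetical:

# simpleloop expects data[team][variable][match]: e.g. 2 teams x 2 tracked variables x 3 matches
simpleloop_data = [
    [[10, 12, 11], [3, 4, 2]],  # hypothetical team 0: two variables, three matches each
    [[ 8,  9, 14], [1, 0, 2]],  # hypothetical team 1
]
tests = ["r.lin", "r.log"]      # using the renamed test keys from the hunk above

# metricsloop expects data[match] == ([teams in the match], [win/loss observations])
metricsloop_data = [
    ([254, 971], [1, 0]),       # hypothetical match result
    ([971, 118], [0, 1]),
]
team_lookup = [254, 971, 118]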