Mirror of https://github.com/titanscouting/tra-analysis.git (synced 2024-11-09 22:44:44 +00:00)
Commit 3c868672c9 (parent 27a77c3edb): finished Analysis docstrings, removed typehinting to rework
@@ -380,7 +380,6 @@ import numpy as np
import scipy
import sklearn, sklearn.cluster
from tra_analysis.metrics import trueskill as Trueskill
-from tra_analysis.typedef import R, List, Dict

# import submodules

@@ -389,7 +388,7 @@ from .ClassificationMetric import ClassificationMetric
class error(ValueError):
    pass

-def load_csv(filepath: str) -> np.ndarray:
+def load_csv(filepath):
    """
    Loads csv file into 2D numpy array. Does not check csv file validity.
    parameters:
@@ -402,9 +401,9 @@ def load_csv(filepath: str) -> np.ndarray:
    csvfile.close()
    return file_array

-def basic_stats(data: List[R]) -> Dict[str, R]:
+def basic_stats(data):
    """
-   Calculates mean, median, standard deviation, variance, minimum, maximum of a simple set of elements
+   Calculates mean, median, standard deviation, variance, minimum, maximum of a simple set of elements.
    parameters:
        data: List representing set of unordered elements
    return:
@@ -421,9 +420,9 @@ def basic_stats(data: List[R]) -> Dict[str, R]:

    return {"mean": _mean, "median": _median, "standard-deviation": _stdev, "variance": _variance, "minimum": _min, "maximum": _max}

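A minimal usage sketch for basic_stats, assuming the package is installed and the Analysis module is importable under the alias below (the import path is not shown in this diff):

# Hypothetical usage of basic_stats; the import alias is an assumption.
from tra_analysis import Analysis as an

data = [1, 2, 3, 4, 5]
stats = an.basic_stats(data)
print(stats["mean"], stats["standard-deviation"])  # keys follow the return dict above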
-def z_score(point: R, mean: R, stdev: R) -> R:
+def z_score(point, mean, stdev):
    """
-   Calculates z score of a specific point given mean and standard deviation of data
+   Calculates z score of a specific point given mean and standard deviation of data.
    parameters:
        point: Real value corresponding to a single point of data
        mean: Real value corresponding to the mean of the dataset
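For reference, the z-score this docstring describes is the standard (point - mean) / stdev; a minimal standalone sketch:

# Minimal sketch of a z-score computation: how many standard deviations
# a point lies from the mean of its dataset.
def z_score_sketch(point, mean, stdev):
    return (point - mean) / stdev

print(z_score_sketch(7.0, 5.0, 2.0))  # 1.0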
@@ -437,7 +436,14 @@ def z_score(point: R, mean: R, stdev: R) -> R:

# expects 2d array, normalizes across all axes
def z_normalize(array, *args):

    """
    Applies sklearn.normalize(array, axis = args) on any arraylike parseable by numpy.
    parameters:
        array: array like structure of reals aka nested indexables
        *args: arguments relating to axis normalized against
    return:
        numpy array of normalized values from ArrayLike input
    """
    array = np.array(array)
    for arg in args:
        array = sklearn.preprocessing.normalize(array, axis = arg)
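A sketch of the underlying scikit-learn call (note that sklearn.preprocessing must be importable; the hunk above only shows import sklearn, sklearn.cluster):

# Sketch of the normalization z_normalize applies, using scikit-learn directly.
import numpy as np
import sklearn.preprocessing

arr = np.array([[3.0, 4.0], [6.0, 8.0]])
# axis=1 rescales each row to unit L2 norm, matching one pass of the loop above
print(sklearn.preprocessing.normalize(arr, axis=1))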
@@ -446,7 +452,13 @@ def z_normalize(array, *args):

# expects 2d array of [x,y]
def histo_analysis(hist_data):

    """
    Calculates the mean and standard deviation of derivatives of (x,y) points. Requires at least 2 points to compute.
    parameters:
        hist_data: list of real coordinate point data (x, y)
    return:
        Dictionary with (mean, deviation) as keys to corresponding values
    """
    if len(hist_data[0]) > 2:

        hist_data = np.array(hist_data)
@@ -462,7 +474,15 @@ def histo_analysis(hist_data):
    return None

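A standalone sketch of the statistic histo_analysis documents (finite-difference slopes of the (x, y) points, then their mean and standard deviation); this is an illustration, not the exact body, most of which lies outside this hunk:

# Sketch: mean and standard deviation of dy/dx slopes between consecutive points.
import numpy as np

points = np.array([[0.0, 0.0], [1.0, 2.0], [2.0, 3.0], [3.0, 7.0]])
x, y = points[:, 0], points[:, 1]
derivatives = np.diff(y) / np.diff(x)
print({"mean": np.mean(derivatives), "deviation": np.std(derivatives)})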
def regression(inputs, outputs, args): # inputs, outputs expects N-D array

    """
    Applies specified regression kernels onto input, output data pairs.
    parameters:
        inputs: List of Reals representing independent variable values of each point
        outputs: List of Reals representing dependent variable values of each point
        args: List of Strings from values (lin, log, exp, ply, sig)
    return:
        Dictionary with keys (lin, log, exp, ply, sig) mapping to the corresponding regression models
    """
    X = np.array(inputs)
    y = np.array(outputs)

@@ -566,13 +586,39 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
    return regressions

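A hypothetical call sketch for regression(), assuming the import alias below and the dictionary return shape documented in the docstring:

# Hypothetical usage of regression(); import alias and printed shape are assumptions.
from tra_analysis import Analysis as an

inputs = [1, 2, 3, 4, 5]
outputs = [2, 4, 6, 8, 10]
models = an.regression(inputs, outputs, ["lin", "log"])
print(models)  # one fitted model per requested kernel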
class Metric:

    """
    The metric class wraps the metrics models. Call without instantiation as Metric.<method>(...)
    """
    def elo(self, starting_score, opposing_score, observed, N, K):

        """
        Calculates an adjusted ELO score given a player's current score, opponent's score, and outcome of match.
        reference: https://en.wikipedia.org/wiki/Elo_rating_system
        parameters:
            starting_score: Real value representing player's ELO score before a match
            opposing_score: Real value representing opponent's score before the match
            observed: Array of Real values representing multiple sequential match outcomes against the same opponent. 1 for match win, 0.5 for tie, 0 for loss.
            N: Real value representing the normal or mean score expected (usually 1200)
            K: Real value representing a system constant, determines how quickly players will change scores (usually 24)
        return:
            Real value representing the player's new ELO score
        """
        return Elo.calculate(starting_score, opposing_score, observed, N, K)

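For orientation, a self-contained sketch of the standard Elo update from the Wikipedia reference above; Elo.calculate itself is not shown in this diff, so this illustrates the textbook formula rather than the library's exact implementation, and the scale divisor here is the conventional 400 rather than the library's N parameter:

# Standard Elo update sketch: logistic expected score, then one K-weighted
# correction per observed outcome (1 win, 0.5 tie, 0 loss), with the expected
# score held fixed for simplicity.
def elo_sketch(player, opponent, outcomes, K=24, scale=400):
    expected = 1 / (1 + 10 ** ((opponent - player) / scale))
    for outcome in outcomes:
        player += K * (outcome - expected)
    return player

print(elo_sketch(1200, 1300, [1, 0.5]))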
    def glicko2(self, starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):

        """
        Calculates an adjusted Glicko-2 score given a player's current score, multiple opponents' scores, and the outcomes of several matches.
        reference: http://www.glicko.net/glicko/glicko2.pdf
        parameters:
            starting_score: Real value representing the player's Glicko-2 score
            starting_rd: Real value representing the player's RD
            starting_vol: Real value representing the player's volatility
            opposing_score: List of Real values representing multiple opponents' Glicko-2 scores
            opposing_rd: List of Real values representing multiple opponents' RD
            opposing_vol: List of Real values representing multiple opponents' volatility
            observations: List of Real values representing the outcome of several matches, where each match's opponent corresponds with the opposing_score, opposing_rd, opposing_vol values of the same index. Outcomes can be a score, presuming greater score is better.
        return:
            Tuple of 3 Real values representing the player's new score, rd, and vol
        """
        player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol)

        player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations)

@@ -580,7 +626,15 @@ class Metric:
        return (player.rating, player.rd, player.vol)

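A hypothetical usage sketch of Metric.glicko2, following the parameter descriptions above; the argument values, import alias, and instantiated calling convention are assumptions:

# Hypothetical call: one player updated against three opponents.
from tra_analysis import Analysis as an

new_score, new_rd, new_vol = an.Metric().glicko2(
    starting_score=1500, starting_rd=350, starting_vol=0.06,
    opposing_score=[1400, 1550, 1700],
    opposing_rd=[30, 100, 300],
    observations=[1, 0, 0],  # one win, two losses
)
print(new_score, new_rd, new_vol)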
    def trueskill(self, teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]

        """
        Calculates the score changes for multiple teams playing in a single match according to the TrueSkill algorithm.
        reference: https://trueskill.org/
        parameters:
            teams_data: List of List of Tuples of 2 Real values representing multiple player ratings. List of teams, which is a List of players. Each player rating is a Tuple of 2 Real values (mu, sigma).
            observations: List of Real values representing the match outcome. Each value in the List is the score corresponding to the team at the same index in teams_data.
        return:
            List of List of Tuples of 2 Real values representing new player ratings. Same structure as teams_data.
        """
        team_ratings = []

        for team in teams_data:
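A hypothetical call sketch for Metric.trueskill with two teams of two players each, following the (mu, sigma) structure and score-based observations described above; the import alias and the exact observation semantics are assumptions based on the docstring:

# Hypothetical usage: two teams, observations interpreted as team scores.
from tra_analysis import Analysis as an

teams_data = [
    [(25.0, 8.333), (24.0, 8.0)],    # team 0
    [(26.0, 8.333), (25.5, 7.5)],    # team 1
]
observations = [1, 0]  # team 0 outscored team 1
new_ratings = an.Metric().trueskill(teams_data, observations)
print(new_ratings)  # same nesting as teams_data, per the docstring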
@@ -617,13 +671,30 @@ def npmax(data):
    return np.amax(data)

def pca(data, n_components = None, copy = True, whiten = False, svd_solver = "auto", tol = 0.0, iterated_power = "auto", random_state = None):

    """
    Performs a principal component analysis on the input data.
    reference: https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
    parameters:
        data: Arraylike of Reals representing the set of data to perform PCA on
        * : refer to reference for usage, parameters follow same usage
    return:
        Arraylike of Reals representing the set of data that has had PCA performed. The dimensionality of the Arraylike may be smaller or equal.
    """
    kernel = sklearn.decomposition.PCA(n_components = n_components, copy = copy, whiten = whiten, svd_solver = svd_solver, tol = tol, iterated_power = iterated_power, random_state = random_state)

    return kernel.fit_transform(data)

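A minimal sketch of the scikit-learn call pca() wraps, reducing 3-dimensional points to 2 components:

# Sketch of PCA dimensionality reduction via scikit-learn directly.
import numpy as np
import sklearn.decomposition

data = np.random.rand(10, 3)
reduced = sklearn.decomposition.PCA(n_components=2).fit_transform(data)
print(reduced.shape)  # (10, 2)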
def decisiontree(data, labels, test_size = 0.3, criterion = "gini", splitter = "default", max_depth = None): #expects *2d data and 1d labels

    """
    Generates a decision tree classifier fitted to the given data.
    reference: https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
    parameters:
        data: List of values representing each data point of multiple axes
        labels: List of values representing the labels corresponding to the same index in data
        * : refer to reference for usage, parameters follow same usage
    return:
        DecisionTreeClassifier model and corresponding classification accuracy metrics
    """
    data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
    model = sklearn.tree.DecisionTreeClassifier(criterion = criterion, splitter = splitter, max_depth = max_depth)
    model = model.fit(data_train,labels_train)

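A standalone sketch of the split-and-fit sequence shown above, using plain scikit-learn calls on a bundled dataset; note that sklearn's DecisionTreeClassifier only accepts splitter values "best" or "random", so the "default" default in the signature above would need to be mapped before the call:

# Sketch of the train/test split and tree fit that decisiontree() performs;
# dataset and hyperparameters here are illustrative.
import sklearn.model_selection
import sklearn.tree
from sklearn.datasets import load_iris

data, labels = load_iris(return_X_y=True)
data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(
    data, labels, test_size=0.3, random_state=1)
model = sklearn.tree.DecisionTreeClassifier(criterion="gini", splitter="best", max_depth=None)
model.fit(data_train, labels_train)
print(model.score(data_test, labels_test))  # classification accuracy on the held-out split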
@@ -74,5 +74,3 @@ from .RegressionMetric import RegressionMetric
from . import Sort
from . import StatisticalTest
from . import SVM
-
-from . import typedef
@@ -1,4 +0,0 @@
-from typing import TypeVar, List, Dict
-List = List
-Dict = Dict
-R = TypeVar('R', int, float)