From dad195a00f95559f905165357f5c32063da1cccc Mon Sep 17 00:00:00 2001 From: Arthur Lu Date: Tue, 9 Nov 2021 22:52:04 +0000 Subject: [PATCH 1/4] fixed/optimized imports, fixed headers Signed-off-by: Arthur Lu --- analysis-master/tra_analysis/Analysis.py | 24 ++++--------------- .../tra_analysis/ClassificationMetric.py | 5 ++-- .../tra_analysis/CorrelationTest.py | 5 ++-- analysis-master/tra_analysis/KNN.py | 5 ++-- analysis-master/tra_analysis/NaiveBayes.py | 7 +++--- analysis-master/tra_analysis/RandomForest.py | 7 +++--- .../tra_analysis/RegressionMetric.py | 5 ++-- analysis-master/tra_analysis/SVM.py | 5 ++-- analysis-master/tra_analysis/Sort.py | 2 +- .../tra_analysis/StatisticalTest.py | 9 +++---- analysis-master/tra_analysis/__init__.py | 3 +++ 11 files changed, 37 insertions(+), 40 deletions(-) diff --git a/analysis-master/tra_analysis/Analysis.py b/analysis-master/tra_analysis/Analysis.py index df81951c..e736ec62 100644 --- a/analysis-master/tra_analysis/Analysis.py +++ b/analysis-master/tra_analysis/Analysis.py @@ -7,10 +7,13 @@ # current benchmark of optimization: 1.33 times faster # setup: -__version__ = "3.0.4" +__version__ = "3.0.5" # changelog should be viewed using print(analysis.__changelog__) __changelog__ = """changelog: + 3.0.5: + - removed extra submodule imports + - fixed/optimized header 3.0.4: - removed -_obj imports 3.0.3: @@ -361,7 +364,6 @@ __all__ = [ 'histo_analysis', 'regression', 'Metric', - 'kmeans', 'pca', 'decisiontree', # all statistics functions left out due to integration in other functions @@ -374,21 +376,14 @@ __all__ = [ import csv from tra_analysis.metrics import elo as Elo from tra_analysis.metrics import glicko2 as Glicko2 -import math import numpy as np import scipy -from scipy import optimize, stats -import sklearn -from sklearn import preprocessing, pipeline, linear_model, metrics, cluster, decomposition, tree, neighbors, naive_bayes, svm, model_selection, ensemble +import sklearn, sklearn.cluster from tra_analysis.metrics import trueskill as Trueskill -import warnings # import submodules -from .Array import Array from .ClassificationMetric import ClassificationMetric -from .RegressionMetric import RegressionMetric -from . 
import SVM class error(ValueError): pass @@ -599,16 +594,7 @@ def npmin(data): def npmax(data): return np.amax(data) -""" need to decide what to do with this function -def kmeans(data, n_clusters=8, init="k-means++", n_init=10, max_iter=300, tol=0.0001, precompute_distances="auto", verbose=0, random_state=None, copy_x=True, n_jobs=None, algorithm="auto"): - kernel = sklearn.cluster.KMeans(n_clusters = n_clusters, init = init, n_init = n_init, max_iter = max_iter, tol = tol, precompute_distances = precompute_distances, verbose = verbose, random_state = random_state, copy_x = copy_x, n_jobs = n_jobs, algorithm = algorithm) - kernel.fit(data) - predictions = kernel.predict(data) - centers = kernel.cluster_centers_ - - return centers, predictions -""" def pca(data, n_components = None, copy = True, whiten = False, svd_solver = "auto", tol = 0.0, iterated_power = "auto", random_state = None): kernel = sklearn.decomposition.PCA(n_components = n_components, copy = copy, whiten = whiten, svd_solver = svd_solver, tol = tol, iterated_power = iterated_power, random_state = random_state) diff --git a/analysis-master/tra_analysis/ClassificationMetric.py b/analysis-master/tra_analysis/ClassificationMetric.py index a1afcf25..6dc2ff31 100644 --- a/analysis-master/tra_analysis/ClassificationMetric.py +++ b/analysis-master/tra_analysis/ClassificationMetric.py @@ -4,9 +4,11 @@ # this should be imported as a python module using 'from tra_analysis import ClassificationMetric' # setup: -__version__ = "1.0.1" +__version__ = "1.0.2" __changelog__ = """changelog: + 1.0.2: + - optimized imports 1.0.1: - fixed __all__ 1.0.0: @@ -22,7 +24,6 @@ __all__ = [ ] import sklearn -from sklearn import metrics class ClassificationMetric(): diff --git a/analysis-master/tra_analysis/CorrelationTest.py b/analysis-master/tra_analysis/CorrelationTest.py index bc35ce92..ddb27939 100644 --- a/analysis-master/tra_analysis/CorrelationTest.py +++ b/analysis-master/tra_analysis/CorrelationTest.py @@ -4,9 +4,11 @@ # this should be imported as a python module using 'from tra_analysis import CorrelationTest' # setup: -__version__ = "1.0.1" +__version__ = "1.0.2" __changelog__ = """changelog: + 1.0.2: + - optimized imports 1.0.1: - fixed __all__ 1.0.0: @@ -29,7 +31,6 @@ __all__ = [ ] import scipy -from scipy import stats def anova_oneway(*args): #expects arrays of samples diff --git a/analysis-master/tra_analysis/KNN.py b/analysis-master/tra_analysis/KNN.py index d594c72f..09d663f3 100644 --- a/analysis-master/tra_analysis/KNN.py +++ b/analysis-master/tra_analysis/KNN.py @@ -4,9 +4,11 @@ # this should be imported as a python module using 'from tra_analysis import KNN' # setup: -__version__ = "1.0.0" +__version__ = "1.0.1" __changelog__ = """changelog: + 1.0.1: + - optimized imports 1.0.0: - ported analysis.KNN() here - removed classness @@ -23,7 +25,6 @@ __all__ = [ ] import sklearn -from sklearn import model_selection, neighbors from . 
import ClassificationMetric, RegressionMetric def knn_classifier(data, labels, n_neighbors = 5, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling diff --git a/analysis-master/tra_analysis/NaiveBayes.py b/analysis-master/tra_analysis/NaiveBayes.py index f6b68f1e..007d3e6e 100644 --- a/analysis-master/tra_analysis/NaiveBayes.py +++ b/analysis-master/tra_analysis/NaiveBayes.py @@ -4,9 +4,11 @@ # this should be imported as a python module using 'from tra_analysis import NaiveBayes' # setup: -__version__ = "1.0.0" +__version__ = "1.0.1" __changelog__ = """changelog: + 1.0.1: + - optimized imports 1.0.0: - ported analysis.NaiveBayes() here - removed classness @@ -24,8 +26,7 @@ __all__ = [ ] import sklearn -from sklearn import model_selection, naive_bayes -from . import ClassificationMetric, RegressionMetric +from . import ClassificationMetric def gaussian(data, labels, test_size = 0.3, priors = None, var_smoothing = 1e-09): diff --git a/analysis-master/tra_analysis/RandomForest.py b/analysis-master/tra_analysis/RandomForest.py index 44bee009..02afe73c 100644 --- a/analysis-master/tra_analysis/RandomForest.py +++ b/analysis-master/tra_analysis/RandomForest.py @@ -4,9 +4,11 @@ # this should be imported as a python module using 'from tra_analysis import RandomForest' # setup: -__version__ = "1.0.1" +__version__ = "1.0.2" __changelog__ = """changelog: + 1.0.2: + - optimized imports 1.0.1: - fixed __all__ 1.0.0: @@ -23,8 +25,7 @@ __all__ = [ "random_forest_regressor", ] -import sklearn -from sklearn import ensemble, model_selection +import sklearn, sklearn.ensemble, sklearn.naive_bayes from . import ClassificationMetric, RegressionMetric def random_forest_classifier(data, labels, test_size, n_estimators, criterion="gini", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, class_weight=None): diff --git a/analysis-master/tra_analysis/RegressionMetric.py b/analysis-master/tra_analysis/RegressionMetric.py index 0b985493..e71689d4 100644 --- a/analysis-master/tra_analysis/RegressionMetric.py +++ b/analysis-master/tra_analysis/RegressionMetric.py @@ -4,9 +4,11 @@ # this should be imported as a python module using 'from tra_analysis import RegressionMetric' # setup: -__version__ = "1.0.0" +__version__ = "1.0.1" __changelog__ = """changelog: + 1.0.1: + - optimized imports 1.0.0: - ported analysis.RegressionMetric() here """ @@ -21,7 +23,6 @@ __all__ = [ import numpy as np import sklearn -from sklearn import metrics class RegressionMetric(): diff --git a/analysis-master/tra_analysis/SVM.py b/analysis-master/tra_analysis/SVM.py index acf189f3..02b86138 100644 --- a/analysis-master/tra_analysis/SVM.py +++ b/analysis-master/tra_analysis/SVM.py @@ -4,9 +4,11 @@ # this should be imported as a python module using 'from tra_analysis import SVM' # setup: -__version__ = "1.0.2" +__version__ = "1.0.3" __changelog__ = """changelog: + 1.0.3: + - optimized imports 1.0.2: - fixed __all__ 1.0.1: @@ -30,7 +32,6 @@ __all__ = [ ] import sklearn -from sklearn import svm from . 
import ClassificationMetric, RegressionMetric class CustomKernel: diff --git a/analysis-master/tra_analysis/Sort.py b/analysis-master/tra_analysis/Sort.py index 63dc3677..90ba6409 100644 --- a/analysis-master/tra_analysis/Sort.py +++ b/analysis-master/tra_analysis/Sort.py @@ -16,7 +16,7 @@ __changelog__ = """changelog: __author__ = ( "Arthur Lu ", - "James Pan " + "James Pan ", ) __all__ = [ diff --git a/analysis-master/tra_analysis/StatisticalTest.py b/analysis-master/tra_analysis/StatisticalTest.py index 44fa3a07..e07649e9 100644 --- a/analysis-master/tra_analysis/StatisticalTest.py +++ b/analysis-master/tra_analysis/StatisticalTest.py @@ -4,9 +4,11 @@ # this should be imported as a python module using 'from tra_analysis import StatisticalTest' # setup: -__version__ = "1.0.2" +__version__ = "1.0.3" __changelog__ = """changelog: + 1.0.3: + - optimized imports 1.0.2: - added tukey_multicomparison - fixed styling @@ -61,7 +63,6 @@ __all__ = [ import numpy as np import scipy -from scipy import stats, interpolate def ttest_onesample(a, popmean, axis = 0, nan_policy = 'propagate'): @@ -279,9 +280,9 @@ def get_tukeyQcrit(k, df, alpha=0.05): cv001 = c[:, 2::2] if alpha == 0.05: - intp = interpolate.interp1d(crows, cv005[:,k-2]) + intp = scipy.interpolate.interp1d(crows, cv005[:,k-2]) elif alpha == 0.01: - intp = interpolate.interp1d(crows, cv001[:,k-2]) + intp = scipy.interpolate.interp1d(crows, cv001[:,k-2]) else: raise ValueError('only implemented for alpha equal to 0.01 and 0.05') return intp(df) diff --git a/analysis-master/tra_analysis/__init__.py b/analysis-master/tra_analysis/__init__.py index 0f170bdd..d74c1c9d 100644 --- a/analysis-master/tra_analysis/__init__.py +++ b/analysis-master/tra_analysis/__init__.py @@ -16,6 +16,8 @@ __changelog__ = """changelog: - deprecated titanlearn.py - deprecated visualization.py - removed matplotlib from requirements + - removed extra submodule imports in Analysis + - added typehinting, docstrings for each function 3.0.0: - incremented version to release 3.0.0 3.0.0-rc2: @@ -45,6 +47,7 @@ __all__ = [ "Analysis", "Array", "ClassificationMetric", + "Clustering", "CorrelationTest", "Expression", "Fit", From 27a77c3edb3e6dabf7c40193b9c5cdadc9c78de3 Mon Sep 17 00:00:00 2001 From: Arthur Lu Date: Tue, 16 Nov 2021 20:17:46 +0000 Subject: [PATCH 2/4] added type hinting for a few functions, added typedef module to hold custom typings Signed-off-by: Arthur Lu --- analysis-master/tra_analysis/Analysis.py | 33 +++++++++++++++++++----- analysis-master/tra_analysis/__init__.py | 4 ++- analysis-master/tra_analysis/typedef.py | 4 +++ 3 files changed, 34 insertions(+), 7 deletions(-) create mode 100644 analysis-master/tra_analysis/typedef.py diff --git a/analysis-master/tra_analysis/Analysis.py b/analysis-master/tra_analysis/Analysis.py index e736ec62..b9f4443b 100644 --- a/analysis-master/tra_analysis/Analysis.py +++ b/analysis-master/tra_analysis/Analysis.py @@ -380,6 +380,7 @@ import numpy as np import scipy import sklearn, sklearn.cluster from tra_analysis.metrics import trueskill as Trueskill +from tra_analysis.typedef import R, List, Dict # import submodules @@ -388,15 +389,27 @@ from .ClassificationMetric import ClassificationMetric class error(ValueError): pass -def load_csv(filepath): +def load_csv(filepath: str) -> np.ndarray: + """ + Loads csv file into 2D numpy array. Does not check csv file validity. 
+ parameters: + filepath: String path to the csv file + return: + 2D numpy array of values stored in csv file + """ with open(filepath, newline='') as csvfile: file_array = np.array(list(csv.reader(csvfile))) csvfile.close() return file_array -# expects 1d array -def basic_stats(data): - +def basic_stats(data: List[R]) -> Dict[str, R]: + """ + Calculates mean, median, standard deviation, variance, minimum, maximum of a simple set of elements + parameters: + data: List representing set of unordered elements + return: + Dictionary with (mean, median, standard-deviation, variance, minimum, maximum) as keys and corresponding values + """ data_t = np.array(data).astype(float) _mean = mean(data_t) @@ -408,8 +421,16 @@ def basic_stats(data): return {"mean": _mean, "median": _median, "standard-deviation": _stdev, "variance": _variance, "minimum": _min, "maximum": _max} -# returns z score with inputs of point, mean and standard deviation of spread -def z_score(point, mean, stdev): +def z_score(point: R, mean: R, stdev: R) -> R: + """ + Calculates z score of a specific point given mean and standard deviation of data + parameters: + point: Real value corresponding to a single point of data + mean: Real value corresponding to the mean of the dataset + stdev: Real value corresponding to the standard deviation of the dataset + return: + Real value that is the point's z score + """ score = (point - mean) / stdev return score diff --git a/analysis-master/tra_analysis/__init__.py b/analysis-master/tra_analysis/__init__.py index d74c1c9d..2df41075 100644 --- a/analysis-master/tra_analysis/__init__.py +++ b/analysis-master/tra_analysis/__init__.py @@ -73,4 +73,6 @@ from . import RandomForest from .RegressionMetric import RegressionMetric from . import Sort from . import StatisticalTest -from . import SVM \ No newline at end of file +from . import SVM + +from . import typedef \ No newline at end of file diff --git a/analysis-master/tra_analysis/typedef.py b/analysis-master/tra_analysis/typedef.py new file mode 100644 index 00000000..875776a0 --- /dev/null +++ b/analysis-master/tra_analysis/typedef.py @@ -0,0 +1,4 @@ +from typing import TypeVar, List, Dict +List = List +Dict = Dict +R = TypeVar('R', int, float) \ No newline at end of file From 3c868672c9c1196540201006a1b543d598464a19 Mon Sep 17 00:00:00 2001 From: Arthur Lu Date: Thu, 18 Nov 2021 09:23:19 +0000 Subject: [PATCH 3/4] finished Analysis docstrings, removed typehinting to rework --- analysis-master/tra_analysis/Analysis.py | 101 +++++++++++++++++++---- analysis-master/tra_analysis/__init__.py | 4 +- analysis-master/tra_analysis/typedef.py | 4 - 3 files changed, 87 insertions(+), 22 deletions(-) delete mode 100644 analysis-master/tra_analysis/typedef.py diff --git a/analysis-master/tra_analysis/Analysis.py b/analysis-master/tra_analysis/Analysis.py index b9f4443b..204c58d2 100644 --- a/analysis-master/tra_analysis/Analysis.py +++ b/analysis-master/tra_analysis/Analysis.py @@ -380,7 +380,6 @@ import numpy as np import scipy import sklearn, sklearn.cluster from tra_analysis.metrics import trueskill as Trueskill -from tra_analysis.typedef import R, List, Dict # import submodules @@ -389,7 +388,7 @@ from .ClassificationMetric import ClassificationMetric class error(ValueError): pass -def load_csv(filepath: str) -> np.ndarray: +def load_csv(filepath): """ Loads csv file into 2D numpy array. Does not check csv file validity. 
parameters: + filepath: String path to the csv file return: 2D numpy array of values stored in csv file """ with open(filepath, newline='') as csvfile: file_array = np.array(list(csv.reader(csvfile))) csvfile.close() return file_array -def basic_stats(data: List[R]) -> Dict[str, R]: +def basic_stats(data): """ - Calculates mean, median, standard deviation, variance, minimum, maximum of a simple set of elements + Calculates mean, median, standard deviation, variance, minimum, maximum of a simple set of elements. parameters: data: List representing set of unordered elements return: Dictionary with (mean, median, standard-deviation, variance, minimum, maximum) as keys and corresponding values @@ -421,9 +420,9 @@ return {"mean": _mean, "median": _median, "standard-deviation": _stdev, "variance": _variance, "minimum": _min, "maximum": _max} -def z_score(point: R, mean: R, stdev: R) -> R: +def z_score(point, mean, stdev): """ - Calculates z score of a specific point given mean and standard deviation of data + Calculates z score of a specific point given mean and standard deviation of data. parameters: point: Real value corresponding to a single point of data mean: Real value corresponding to the mean of the dataset stdev: Real value corresponding to the standard deviation of the dataset return: Real value that is the point's z score @@ -437,7 +436,14 @@ # expects 2d array, normalizes across all axes def z_normalize(array, *args): - + """ + Applies sklearn.preprocessing.normalize(array, axis = args) on any arraylike parseable by numpy. + parameters: + array: array like structure of reals aka nested indexables + *args: arguments relating to axis normalized against + return: + numpy array of normalized values from ArrayLike input + """ array = np.array(array) for arg in args: array = sklearn.preprocessing.normalize(array, axis = arg) @@ -446,7 +452,13 @@ # expects 2d array of [x,y] def histo_analysis(hist_data): - + """ + Calculates the mean and standard deviation of derivatives of (x,y) points. Requires at least 2 points to compute. + parameters: + hist_data: list of real coordinate point data (x, y) + return: + Dictionary with (mean, deviation) as keys to corresponding values + """ if len(hist_data[0]) > 2: hist_data = np.array(hist_data) @@ -462,7 +474,15 @@ return None def regression(inputs, outputs, args): # inputs, outputs expects N-D array - + """ + Applies specified regression kernels onto input, output data pairs. + parameters: + inputs: List of Reals representing independent variable values of each point + outputs: List of Reals representing dependent variable values of each point + args: List of Strings from values (lin, log, exp, ply, sig) + return: + Dictionary with (lin, log, exp, ply, sig) as keys to corresponding regression models + """ X = np.array(inputs) y = np.array(outputs) @@ -566,13 +586,39 @@ return regressions class Metric: - + """ + The metric class wraps the metrics models. Call without instantiation as Metric.(...) + """ def elo(self, starting_score, opposing_score, observed, N, K): - + """ + Calculates an adjusted ELO score given a player's current score, opponent's score, and the outcome of a match. + reference: https://en.wikipedia.org/wiki/Elo_rating_system + parameters: + starting_score: Real value representing player's ELO score before a match + opposing_score: Real value representing opponent's score before the match + observed: Array of Real values representing multiple sequential match outcomes against the same opponent. 1 for match win, 0.5 for tie, 0 for loss. 
+ N: Real value representing the normal or mean score expected (usually 1200) + K: Real value representing a system constant that determines how quickly players will change scores (usually 24) + return: + Real value representing the player's new ELO score + """ return Elo.calculate(starting_score, opposing_score, observed, N, K) def glicko2(self, starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations): - + """ + Calculates an adjusted Glicko-2 score given a player's current score, multiple opponents' scores, and the outcomes of several matches. + reference: http://www.glicko.net/glicko/glicko2.pdf + parameters: + starting_score: Real value representing the player's Glicko-2 score + starting_rd: Real value representing the player's RD + starting_vol: Real value representing the player's volatility + opposing_score: List of Real values representing multiple opponents' Glicko-2 scores + opposing_rd: List of Real values representing multiple opponents' RD + opposing_vol: List of Real values representing multiple opponents' volatility + observations: List of Real values representing the outcome of several matches, where each match's opponent corresponds with the opposing_score, opposing_rd, opposing_vol values of the same index. Outcomes can be a score, presuming greater score is better. + return: + Tuple of 3 Real values representing the player's new score, rd, and vol + """ player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol) player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations) @@ -580,7 +626,15 @@ class Metric: return (player.rating, player.rd, player.vol) def trueskill(self, teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]] - + """ + Calculates the score changes for multiple teams playing in a single match according to the trueskill algorithm. + reference: https://trueskill.org/ + parameters: + teams_data: List of List of Tuples of 2 Real values representing multiple player ratings. List of teams, which is a List of players. Each player rating is a Tuple of 2 Real values (mu, sigma). + observations: List of Real values representing the match outcome. Each value in the List is the score corresponding to the team at the same index in teams_data. + return: + List of List of Tuples of 2 Real values representing new player ratings. Same structure as teams_data. + """ team_ratings = [] for team in teams_data: @@ -617,13 +671,30 @@ def npmax(data): return np.amax(data) def pca(data, n_components = None, copy = True, whiten = False, svd_solver = "auto", tol = 0.0, iterated_power = "auto", random_state = None): - + """ + Performs a principal component analysis on the input data. + reference: https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html + parameters: + data: Arraylike of Reals representing the set of data to perform PCA on + * : refer to reference for usage, parameters follow same usage + return: + Arraylike of Reals representing the set of data that has had PCA performed. The dimensionality of the Arraylike may be smaller or equal. 
+ """ kernel = sklearn.decomposition.PCA(n_components = n_components, copy = copy, whiten = whiten, svd_solver = svd_solver, tol = tol, iterated_power = iterated_power, random_state = random_state) return kernel.fit_transform(data) def decisiontree(data, labels, test_size = 0.3, criterion = "gini", splitter = "default", max_depth = None): #expects *2d data and 1d labels - + """ + Generates a decision tree classifier fitted to the given data. + reference: https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html + parameters: + data: List of values representing each data point of multiple axes + labels: List of values represeing the labels corresponding to the same index at data + * : refer to reference for usage, parameters follow same usage + return: + DecisionTreeClassifier model and corresponding classification accuracy metrics + """ data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1) model = sklearn.tree.DecisionTreeClassifier(criterion = criterion, splitter = splitter, max_depth = max_depth) model = model.fit(data_train,labels_train) diff --git a/analysis-master/tra_analysis/__init__.py b/analysis-master/tra_analysis/__init__.py index 2df41075..d74c1c9d 100644 --- a/analysis-master/tra_analysis/__init__.py +++ b/analysis-master/tra_analysis/__init__.py @@ -73,6 +73,4 @@ from . import RandomForest from .RegressionMetric import RegressionMetric from . import Sort from . import StatisticalTest -from . import SVM - -from . import typedef \ No newline at end of file +from . import SVM \ No newline at end of file diff --git a/analysis-master/tra_analysis/typedef.py b/analysis-master/tra_analysis/typedef.py deleted file mode 100644 index 875776a0..00000000 --- a/analysis-master/tra_analysis/typedef.py +++ /dev/null @@ -1,4 +0,0 @@ -from typing import TypeVar, List, Dict -List = List -Dict = Dict -R = TypeVar('R', int, float) \ No newline at end of file From 3dabe87ead0f9b1ab9fee12263b16efe7a56a55c Mon Sep 17 00:00:00 2001 From: Arthur Lu Date: Thu, 18 Nov 2021 09:24:52 +0000 Subject: [PATCH 4/4] removed unessasary comments, updated version and changelog --- analysis-master/tra_analysis/Analysis.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/analysis-master/tra_analysis/Analysis.py b/analysis-master/tra_analysis/Analysis.py index 204c58d2..2316a026 100644 --- a/analysis-master/tra_analysis/Analysis.py +++ b/analysis-master/tra_analysis/Analysis.py @@ -7,10 +7,12 @@ # current benchmark of optimization: 1.33 times faster # setup: -__version__ = "3.0.5" +__version__ = "3.0.6" # changelog should be viewed using print(analysis.__changelog__) __changelog__ = """changelog: + 3.0.6: + - added docstrings 3.0.5: - removed extra submodule imports - fixed/optimized header @@ -434,7 +436,6 @@ def z_score(point, mean, stdev): return score -# expects 2d array, normalizes across all axes def z_normalize(array, *args): """ Applies sklearn.normalize(array, axis = args) on any arraylike parseable by numpy. @@ -450,7 +451,6 @@ def z_normalize(array, *args): return array -# expects 2d array of [x,y] def histo_analysis(hist_data): """ Calculates the mean and standard deviation of derivatives of (x,y) points. Requires at least 2 points to compute.