From 0e7255ab99bf0e27d87f435f4783e66b10b86791 Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Tue, 15 Sep 2020 23:24:50 +0000
Subject: [PATCH 01/10] changed && to ; in devcontainer.json

Signed-off-by: Arthur Lu
---
 .devcontainer/devcontainer.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index bdf95e4b..53b442b5 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -24,5 +24,5 @@
 		"ms-python.python",
 		"waderyan.gitblame"
 	],
-	"postCreateCommand": "apt install vim -y && pip install -r data-analysis/requirements.txt && pip install -r analysis-master/requirements.txt && pip install tra-analysis"
+	"postCreateCommand": "apt install vim -y ; pip install -r data-analysis/requirements.txt ; pip install -r analysis-master/requirements.txt ; pip install tra-analysis"
 }
\ No newline at end of file

From 73a16b8397a1d3525ec697007967bdbfb838d139 Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Wed, 16 Sep 2020 21:24:50 +0000
Subject: [PATCH 02/10] added deprecated config files to gitignore

Signed-off-by: Arthur Lu
---
 .gitignore                           | 3 ++-
 data-analysis/config/database.config | 0
 2 files changed, 2 insertions(+), 1 deletion(-)
 delete mode 100644 data-analysis/config/database.config

diff --git a/.gitignore b/.gitignore
index da8e601a..2545560b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,4 +37,5 @@ analysis-master/tra_analysis/__pycache__
 analysis-master/tra_analysis/.ipynb_checkpoints
 .pytest_cache
 analysis-master/tra_analysis/metrics/__pycache__
-analysis-master/dist
\ No newline at end of file
+analysis-master/dist
+data-analysis/config/
\ No newline at end of file
diff --git a/data-analysis/config/database.config b/data-analysis/config/database.config
deleted file mode 100644
index e69de29b..00000000

From 3763cb041f9aca45dc52b7c5e5ba01fb21a45cda Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Thu, 17 Sep 2020 02:11:44 +0000
Subject: [PATCH 03/10] analysis.py v 2.2.2

Signed-off-by: Arthur Lu
---
 analysis-master/tra_analysis/analysis.py | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/analysis-master/tra_analysis/analysis.py b/analysis-master/tra_analysis/analysis.py
index 4e422c40..a963ae00 100644
--- a/analysis-master/tra_analysis/analysis.py
+++ b/analysis-master/tra_analysis/analysis.py
@@ -7,12 +7,15 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "2.2.1"
+__version__ = "2.2.2"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+	2.2.2:
+		- fixed 2.2.1 changelog entry
+		- changed regression to return dictionary
 	2.2.1:
-	- changed all references to parent package analysis to tra_analysis
+		- changed all references to parent package analysis to tra_analysis
 	2.2.0:
 		- added Sort class
 		- added several array sorting functions to Sort class including:
@@ -424,7 +427,7 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 
 	X = np.array(inputs)
 	y = np.array(outputs)
 
-	regressions = []
+	regressions = {}
 
 	if 'lin' in args: # formula: ax + b
 
@@ -437,7 +440,7 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 
 			popt, pcov = scipy.optimize.curve_fit(lin, X, y)
 			coeffs = popt.flatten().tolist()
-			regressions.append(str(coeffs[0]) + "*x+" + str(coeffs[1]))
+			regressions["lin"] = (str(coeffs[0]) + "*x+" + str(coeffs[1]))
 
 		except Exception as e:
 
@@ -454,7 +457,7 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 			popt, pcov = scipy.optimize.curve_fit(log, X, y)
 			coeffs = popt.flatten().tolist()
-			regressions.append(str(coeffs[0]) + "*log(" + str(coeffs[1]) + "*(x+" + str(coeffs[2]) + "))+" + str(coeffs[3]))
+			regressions["log"] = (str(coeffs[0]) + "*log(" + str(coeffs[1]) + "*(x+" + str(coeffs[2]) + "))+" + str(coeffs[3]))
 
 		except Exception as e:
 
@@ -471,7 +474,7 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 
 			popt, pcov = scipy.optimize.curve_fit(exp, X, y)
 			coeffs = popt.flatten().tolist()
-			regressions.append(str(coeffs[0]) + "*e^(" + str(coeffs[1]) + "*(x+" + str(coeffs[2]) + "))+" + str(coeffs[3]))
+			regressions["exp"] = (str(coeffs[0]) + "*e^(" + str(coeffs[1]) + "*(x+" + str(coeffs[2]) + "))+" + str(coeffs[3]))
 
 		except Exception as e:
 
@@ -482,7 +485,7 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 
 		inputs = np.array([inputs])
 		outputs = np.array([outputs])
 
-		plys = []
+		plys = {}
 
 		limit = len(outputs[0])
 
		for i in range(2, limit):
 
@@ -500,9 +503,9 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 			for param in params:
 				temp += "(" + str(param) + "*x^" + str(counter) + ")"
 				counter += 1
-			plys.append(temp)
+			plys["x^" + str(i)] = (temp)
 
-		regressions.append(plys)
+		regressions["ply"] = (plys)
 
 	if 'sig' in args: # formula: a tanh (b(x + c)) + d
 
@@ -515,7 +518,7 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 
 			popt, pcov = scipy.optimize.curve_fit(sig, X, y)
 			coeffs = popt.flatten().tolist()
-			regressions.append(str(coeffs[0]) + "*tanh(" + str(coeffs[1]) + "*(x+" + str(coeffs[2]) + "))+" + str(coeffs[3]))
+			regressions["sig"] = (str(coeffs[0]) + "*tanh(" + str(coeffs[1]) + "*(x+" + str(coeffs[2]) + "))+" + str(coeffs[3]))
 
 		except Exception as e:
 

From f1982eb93d5dbfddd92093d4ffb66e3cebfdcbdc Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Fri, 18 Sep 2020 21:55:59 +0000
Subject: [PATCH 04/10] analysis.py v 2.2.3

Signed-off-by: Arthur Lu
---
 analysis-master/tra_analysis/analysis.py | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/analysis-master/tra_analysis/analysis.py b/analysis-master/tra_analysis/analysis.py
index a963ae00..f71a8b8f 100644
--- a/analysis-master/tra_analysis/analysis.py
+++ b/analysis-master/tra_analysis/analysis.py
@@ -7,10 +7,14 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "2.2.2"
+__version__ = "2.2.3"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+	2.2.3:
+		- fixed spelling of RandomForest
+		- made n_neighbors required for KNN
+		- made n_classifiers required for SVM
 	2.2.2:
 		- fixed 2.2.1 changelog entry
 		- changed regression to return dictionary
@@ -645,7 +649,7 @@ def decisiontree(data, labels, test_size = 0.3, criterion = "gini", splitter = "
 
 class KNN:
 
-	def knn_classifier(self, data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
+	def knn_classifier(self, data, labels, n_neighbors, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
 
 		data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
-		model = sklearn.neighbors.KNeighborsClassifier()
+		model = sklearn.neighbors.KNeighborsClassifier(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
 		model.fit(data_train, labels_train)
 		predictions = model.predict(data_test)
 
 		return model,
ClassificationMetric(predictions, labels_test) - def knn_regressor(self, data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None): + def knn_regressor(self, data, outputs, n_neighbors, test_size = 0.3, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None): data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1) model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs) @@ -757,9 +761,9 @@ class SVM: return RegressionMetric(predictions, test_outputs) -class RandomForrest: +class RandomForest: - def random_forest_classifier(self, data, labels, test_size, n_estimators="warn", criterion="gini", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, class_weight=None): + def random_forest_classifier(self, data, labels, test_size, n_estimators, criterion="gini", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, class_weight=None): data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1) kernel = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators, criterion = criterion, max_depth = max_depth, min_samples_split = min_samples_split, min_samples_leaf = min_samples_leaf, min_weight_fraction_leaf = min_weight_fraction_leaf, max_leaf_nodes = max_leaf_nodes, min_impurity_decrease = min_impurity_decrease, bootstrap = bootstrap, oob_score = oob_score, n_jobs = n_jobs, random_state = random_state, verbose = verbose, warm_start = warm_start, class_weight = class_weight) @@ -768,7 +772,7 @@ class RandomForrest: return kernel, ClassificationMetric(predictions, labels_test) - def random_forest_regressor(self, data, outputs, test_size, n_estimators="warn", criterion="mse", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False): + def random_forest_regressor(self, data, outputs, test_size, n_estimators, criterion="mse", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False): data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1) kernel = sklearn.ensemble.RandomForestRegressor(n_estimators = n_estimators, criterion = criterion, max_depth = max_depth, min_samples_split = min_samples_split, min_weight_fraction_leaf = min_weight_fraction_leaf, 
max_features = max_features, max_leaf_nodes = max_leaf_nodes, min_impurity_decrease = min_impurity_decrease, min_impurity_split = min_impurity_split, bootstrap = bootstrap, oob_score = oob_score, n_jobs = n_jobs, random_state = random_state, verbose = verbose, warm_start = warm_start)

From cd869c0a8e9824cd3d4cfca680dcf81ce43712fe Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Sat, 19 Sep 2020 22:04:24 +0000
Subject: [PATCH 05/10] analysis.py v 2.3.0

Signed-off-by: Arthur Lu
---
 analysis-master/tra_analysis/analysis.py | 117 +++++++++++++--------
 1 file changed, 75 insertions(+), 42 deletions(-)

diff --git a/analysis-master/tra_analysis/analysis.py b/analysis-master/tra_analysis/analysis.py
index f71a8b8f..5d7d771a 100644
--- a/analysis-master/tra_analysis/analysis.py
+++ b/analysis-master/tra_analysis/analysis.py
@@ -7,10 +7,12 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "2.2.3"
+__version__ = "2.3.0"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+	2.3.0:
+		- overhauled Array class
 	2.2.3:
 		- fixed spelling of RandomForest
 		- made n_neighbors required for KNN
 		- made n_classifiers required for SVM
@@ -786,7 +788,7 @@ class CorrelationTest:
 	def anova_oneway(self, *args): #expects arrays of samples
 
 		results = scipy.stats.f_oneway(*args)
-		return {"F-value": results[0], "p-value": results[1]}
+		return {"f-value": results[0], "p-value": results[1]}
 
 	def pearson(self, x, y):
 
@@ -985,81 +987,112 @@ class StatisticalTest:
 		return {"z-score": results[0], "p-value": results[1]}
 
 class Array(): # tests on nd arrays independent of basic_stats
+
+	def __init__(self, narray):
+
+		self.array = np.array(narray)
+
+	def __str__(self):
+
+		return str(self.array)
 
-	def elementwise_mean(self, *args): # expects arrays that are size normalized
+	def elementwise_mean(self, *args, axis = 0): # expects arrays that are size normalized
+		if len(args) == 0:
+			return np.mean(self.array, axis = axis)
+		else:
+			return np.mean([*args], axis = axis)
 
-		return np.mean([*args], axis = 0)
+	def elementwise_median(self, *args, axis = 0):
 
-	def elementwise_median(self, *args):
+		if len(args) == 0:
+			return np.median(self.array, axis = axis)
+		else:
+			return np.median([*args], axis = axis)
 
-		return np.median([*args], axis = 0)
+	def elementwise_stdev(self, *args, axis = 0):
 
-	def elementwise_stdev(self, *args):
+		if len(args) == 0:
+			return np.std(self.array, axis = axis)
+		else:
+			return np.std([*args], axis = axis)
 
-		return np.std([*args], axis = 0)
+	def elementwise_variance(self, *args, axis = 0):
 
-	def elementwise_variance(self, *args):
+		if len(args) == 0:
+			return np.var(self.array, axis = axis)
+		else:
+			return np.var([*args], axis = axis)
 
-		return np.var([*args], axis = 0)
+	def elementwise_npmin(self, *args, axis = 0):
 
-	def elementwise_npmin(self, *args):
+		if len(args) == 0:
+			return np.amin(self.array, axis = axis)
+		else:
+			return np.amin([*args], axis = axis)
 
-		return np.amin([*args], axis = 0)
+	def elementwise_npmax(self, *args, axis = 0):
 
-	def elementwise_npmax(self, *args):
+		if len(args) == 0:
+			return np.amax(self.array, axis = axis)
+		else:
+			return np.amax([*args], axis = axis)
 
-		return np.amax([*args], axis = 0)
+	def elementwise_stats(self, *args, axis = 0):
 
-	def elementwise_stats(self, *args):
-
-		_mean = self.elementwise_mean(*args)
-		_median = self.elementwise_median(*args)
-		_stdev = self.elementwise_stdev(*args)
-		_variance = self.elementwise_variance(*args)
-		_min = self.elementwise_npmin(*args)
-		_max = self.elementwise_npmax(*args)
+		_mean = self.elementwise_mean(*args, axis = axis)
+		_median = self.elementwise_median(*args, axis = axis)
+		_stdev = self.elementwise_stdev(*args, axis = axis)
+		_variance = self.elementwise_variance(*args, axis = axis)
+		_min = self.elementwise_npmin(*args, axis = axis)
+		_max = self.elementwise_npmax(*args, axis = axis)
 
 		return _mean, _median, _stdev, _variance, _min, _max
 
+	def __getitem__(self, key):
+
+		return self.array[key]
+
+	def __setitem__(self, key, value):
+
+		self.array[key] = value
+
 	def normalize(self, array):
 
 		a = np.atleast_1d(np.linalg.norm(array))
 		a[a==0] = 1
 		return array / np.expand_dims(a, -1)
 
-	def add(self, *args):
+	def __add__(self, other):
 
-		temp = np.array([])
+		return self.array + other
 
-		for a in args:
-			temp += a
+	def __sub__(self, other):
+
+		return self.array - other
+
+	def __neg__(self):
 
-		return temp
+		return -self.array
 
-	def mul(self, *args):
+	def __abs__(self):
 
-		temp = np.array([])
+		return abs(self.array)
 
-		for a in args:
-			temp *= a
-
-		return temp
+	def __invert__(self):
 
-	def neg(self, array):
-
-		return -array
+		return 1/self.array
 
-	def inv(self, array):
+	def __mul__(self, other):
 
-		return 1/array
+		return self.array.dot(other)
 
-	def dot(self, a, b):
+	def __rmul__(self, other):
 
-		return np.dot(a, b)
+		return self.array.dot(other)
 
-	def cross(self, a, b):
+	def cross(self, other):
 
-		return np.cross(a, b)
+		return np.cross(self.array, b)
 
 	def sort(self, array): # depreciated
 		warnings.warn("Array.sort has been depreciated in favor of Sort")

From 97334d1f66cefa00d64a0e6add1fa31a28eafd74 Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Sat, 19 Sep 2020 22:40:20 +0000
Subject: [PATCH 06/10] edited README.md

Signed-off-by: Arthur Lu
---
 README.md | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 445ced6b..e3de715d 100644
--- a/README.md
+++ b/README.md
@@ -11,9 +11,19 @@ after installing python, or with a package manager on linux. Refer to the [pip i
 ### Standard Platforms
 For the latest version of tra-analysis, run `pip install tra-analysis` or `pip install tra_analysis`. The requirements for tra-analysis should be automatically installed.
 ### Exotic Platforms (Android)
-[Termux](https://termux.com/) is recommended for a linux environemnt on Android. Consult the [documentation]() for advice on installing the prerequisites. After installing the prerequisites, the package should be installed normally with `pip install tra-analysis` or `pip install tra_analysis`.
+[Termux](https://termux.com/) is recommended for a linux environment on Android. Consult the [documentation](https://titanscouting.github.io/analysis/general/installation#exotic-platforms-android) for advice on installing the prerequisites. After installing the prerequisites, the package should be installed normally with `pip install tra-analysis` or `pip install tra_analysis`.
 ## Use
-tra-analysis operates like any other python package. Consult the [documentation]() for more information.
+tra-analysis operates like any other python package. Consult the [documentation](https://titanscouting.github.io/analysis/tra_analysis/) for more information.
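+
+For example, a minimal usage sketch (illustrative only; as of analysis.py 2.2.2, `regression` returns a dictionary keyed by fit type):
+
+```python
+from tra_analysis import analysis as an
+
+data = [1, 2, 4, 8, 16]
+fits = an.regression(list(range(len(data))), data, ['lin', 'log'])
+print(fits["lin"])  # linear fit as a string, e.g. "a*x+b"
+```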
# Supported Platforms Although any modern 64 bit platform should be supported, the following platforms have been tested to be working: * AMD64 (Tested on Zen, Zen+, and Zen 2) From ff9ad078e5a67df0efacb3b00da02d8b5b2db3d1 Mon Sep 17 00:00:00 2001 From: Arthur Lu Date: Sat, 19 Sep 2020 23:14:46 +0000 Subject: [PATCH 07/10] analysis.py v 2.3.1 Signed-off-by: Arthur Lu --- analysis-master/tra_analysis/analysis.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/analysis-master/tra_analysis/analysis.py b/analysis-master/tra_analysis/analysis.py index 5d7d771a..14de855a 100644 --- a/analysis-master/tra_analysis/analysis.py +++ b/analysis-master/tra_analysis/analysis.py @@ -7,10 +7,12 @@ # current benchmark of optimization: 1.33 times faster # setup: -__version__ = "2.3.0" +__version__ = "2.3.1" # changelog should be viewed using print(analysis.__changelog__) __changelog__ = """changelog: + 2.3.1: + - fixed bugs in Array class 2.3.0: - overhauled Array class 2.2.3: @@ -1064,11 +1066,11 @@ class Array(): # tests on nd arrays independent of basic_stats def __add__(self, other): - return self.array + other + return self.array + other.array def __sub__(self, other): - return self.array - other + return self.array - other.array def __neg__(self): @@ -1084,15 +1086,15 @@ class Array(): # tests on nd arrays independent of basic_stats def __mul__(self, other): - return self.array.dot(other) + return self.array.dot(other.array) def __rmul__(self, other): - return self.array.dot(other) + return self.array.dot(other.array) def cross(self, other): - return np.cross(self.array, b) + return np.cross(self.array, other.array) def sort(self, array): # depreciated warnings.warn("Array.sort has been depreciated in favor of Sort") From 16502c525929fb5d78b844ecba42e145193d5c7e Mon Sep 17 00:00:00 2001 From: Arthur Lu Date: Sun, 20 Sep 2020 00:45:38 +0000 Subject: [PATCH 08/10] superscript.py v 0.7.0 Signed-off-by: Arthur Lu --- data-analysis/config.json | 2 +- data-analysis/superscript.py | 71 +++++++++++++++++++++++++++++++++++- 2 files changed, 70 insertions(+), 3 deletions(-) diff --git a/data-analysis/config.json b/data-analysis/config.json index 78930573..eca2d09a 100644 --- a/data-analysis/config.json +++ b/data-analysis/config.json @@ -1,6 +1,6 @@ { "team": "", - "competition": "", + "competition": "2020ilch", "key":{ "database":"", "tba":"" diff --git a/data-analysis/superscript.py b/data-analysis/superscript.py index 05d3e809..fdd4e5a5 100644 --- a/data-analysis/superscript.py +++ b/data-analysis/superscript.py @@ -3,10 +3,12 @@ # Notes: # setup: -__version__ = "0.6.2" +__version__ = "0.7.0" # changelog should be viewed using print(analysis.__changelog__) __changelog__ = """changelog: + 0.7.0: + - finished implementing main function 0.6.2: - integrated get_team_rankings.py as get_team_metrics() function - integrated visualize_pit.py as graph_pit_histogram() function @@ -120,6 +122,65 @@ import matplotlib.pyplot as plt import time import warnings +def main(): + + warnings.filterwarnings("ignore") + + while (True): + + current_time = time.time() + print("[OK] time: " + str(current_time)) + + config = load_config("config.json") + competition = config["competition"] + match_tests = config["statistics"]["match"] + pit_tests = config["statistics"]["pit"] + metrics_tests = config["statistics"]["metric"] + print("[OK] configs loaded") + + apikey = config["key"]["database"] + tbakey = config["key"]["tba"] + print("[OK] loaded keys") + + previous_time = get_previous_time(apikey) + 
print("[OK] analysis backtimed to: " + str(previous_time)) + + print("[OK] loading data") + start = time.time() + match_data = load_match(apikey, competition) + pit_data = load_pit(apikey, competition) + print("[OK] loaded data in " + str(time.time() - start) + " seconds") + + print("[OK] running tests") + start = time.time() + matchloop(apikey, competition, match_data, match_tests) + print("[OK] finished tests in " + str(time.time() - start) + " seconds") + + print("[OK] running metrics") + start = time.time() + metricloop(tbakey, apikey, competition, previous_time, metrics_tests) + print("[OK] finished metrics in " + str(time.time() - start) + " seconds") + + print("[OK] running pit analysis") + start = time.time() + pitloop(apikey, competition, pit_data, pit_tests) + print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds") + + set_current_time(apikey, current_time) + print("[OK] finished all tests, looping") + + clear() + +def clear(): + + # for windows + if name == 'nt': + _ = system('cls') + + # for mac and linux(here, os.name is 'posix') + else: + _ = system('clear') + def load_config(file): config_vector = {} @@ -148,6 +209,10 @@ def get_previous_time(apikey): return previous_time +def set_current_time(apikey, current_time): + + d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time}) + def load_match(apikey, competition): return d.get_match_data_formatted(apikey, competition) @@ -402,4 +467,6 @@ def graph_pit_histogram(apikey, competition, figsize=(80,15)): i+=1 - plt.show() \ No newline at end of file + plt.show() + +main() \ No newline at end of file From 27a86e568b47c1cf01f9671811851849a9ae233e Mon Sep 17 00:00:00 2001 From: Arthur Lu Date: Sun, 20 Sep 2020 00:47:33 +0000 Subject: [PATCH 09/10] depreciated nonfunctional scripts in data-analysis Signed-off-by: Arthur Lu --- data-analysis/config/competition.config | 1 - data-analysis/config/stats.config | 14 - data-analysis/superscript_old.py | 378 ------------------------ data-analysis/tasks.py | 188 ------------ data-analysis/test.py | 55 ---- data-analysis/tra-cli.py | 33 --- data-analysis/tra.py | 166 ----------- 7 files changed, 835 deletions(-) delete mode 100644 data-analysis/config/competition.config delete mode 100644 data-analysis/config/stats.config delete mode 100644 data-analysis/superscript_old.py delete mode 100644 data-analysis/tasks.py delete mode 100644 data-analysis/test.py delete mode 100644 data-analysis/tra-cli.py delete mode 100644 data-analysis/tra.py diff --git a/data-analysis/config/competition.config b/data-analysis/config/competition.config deleted file mode 100644 index 511e258a..00000000 --- a/data-analysis/config/competition.config +++ /dev/null @@ -1 +0,0 @@ -2020ilch \ No newline at end of file diff --git a/data-analysis/config/stats.config b/data-analysis/config/stats.config deleted file mode 100644 index 5b0501ac..00000000 --- a/data-analysis/config/stats.config +++ /dev/null @@ -1,14 +0,0 @@ -balls-blocked,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal -balls-collected,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal -balls-lower-teleop,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal 
-balls-lower-auto,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal -balls-started,basic_stats,historical_analyss,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal -balls-upper-teleop,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal -balls-upper-auto,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal -wheel-mechanism -low-balls -high-balls -wheel-success -strategic-focus -climb-mechanism -attitude \ No newline at end of file diff --git a/data-analysis/superscript_old.py b/data-analysis/superscript_old.py deleted file mode 100644 index 880320cc..00000000 --- a/data-analysis/superscript_old.py +++ /dev/null @@ -1,378 +0,0 @@ -# Titan Robotics Team 2022: Superscript Script -# Written by Arthur Lu & Jacob Levine -# Notes: -# setup: - -__version__ = "0.0.5.002" - -# changelog should be viewed using print(analysis.__changelog__) -__changelog__ = """changelog: - 0.0.5.002: - - made changes due to refactoring of analysis - 0.0.5.001: - - text fixes - - removed matplotlib requirement - 0.0.5.000: - - improved user interface - 0.0.4.002: - - removed unessasary code - 0.0.4.001: - - fixed bug where X range for regression was determined before sanitization - - better sanitized data - 0.0.4.000: - - fixed spelling issue in __changelog__ - - addressed nan bug in regression - - fixed errors on line 335 with metrics calling incorrect key "glicko2" - - fixed errors in metrics computing - 0.0.3.000: - - added analysis to pit data - 0.0.2.001: - - minor stability patches - - implemented db syncing for timestamps - - fixed bugs - 0.0.2.000: - - finalized testing and small fixes - 0.0.1.004: - - finished metrics implement, trueskill is bugged - 0.0.1.003: - - working - 0.0.1.002: - - started implement of metrics - 0.0.1.001: - - cleaned up imports - 0.0.1.000: - - tested working, can push to database - 0.0.0.009: - - tested working - - prints out stats for the time being, will push to database later - 0.0.0.008: - - added data import - - removed tba import - - finished main method - 0.0.0.007: - - added load_config - - optimized simpleloop for readibility - - added __all__ entries - - added simplestats engine - - pending testing - 0.0.0.006: - - fixes - 0.0.0.005: - - imported pickle - - created custom database object - 0.0.0.004: - - fixed simpleloop to actually return a vector - 0.0.0.003: - - added metricsloop which is unfinished - 0.0.0.002: - - added simpleloop which is untested until data is provided - 0.0.0.001: - - created script - - added analysis, numba, numpy imports -""" - -__author__ = ( - "Arthur Lu ", - "Jacob Levine ", -) - -__all__ = [ - "main", - "load_config", - "simpleloop", - "simplestats", - "metricsloop" -] - -# imports: - -from tra_analysis import analysis as an -import data as d -import numpy as np -from os import system, name -from pathlib import Path -import time -import warnings - -def main(): - warnings.filterwarnings("ignore") - while(True): - - current_time = time.time() - print("[OK] time: " + str(current_time)) - - start = time.time() - config = load_config(Path("config/stats.config")) - competition = an.load_csv(Path("config/competition.config"))[0][0] - print("[OK] configs loaded") - - apikey = an.load_csv(Path("config/keys.config"))[0][0] - tbakey = 
an.load_csv(Path("config/keys.config"))[1][0] - print("[OK] loaded keys") - - previous_time = d.get_analysis_flags(apikey, "latest_update") - - if(previous_time == None): - - d.set_analysis_flags(apikey, "latest_update", 0) - previous_time = 0 - - else: - - previous_time = previous_time["latest_update"] - - print("[OK] analysis backtimed to: " + str(previous_time)) - - print("[OK] loading data") - start = time.time() - data = d.get_match_data_formatted(apikey, competition) - pit_data = d.pit = d.get_pit_data_formatted(apikey, competition) - print("[OK] loaded data in " + str(time.time() - start) + " seconds") - - print("[OK] running tests") - start = time.time() - results = simpleloop(data, config) - print("[OK] finished tests in " + str(time.time() - start) + " seconds") - - print("[OK] running metrics") - start = time.time() - metricsloop(tbakey, apikey, competition, previous_time) - print("[OK] finished metrics in " + str(time.time() - start) + " seconds") - - print("[OK] running pit analysis") - start = time.time() - pit = pitloop(pit_data, config) - print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds") - - d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time}) - - print("[OK] pushing to database") - start = time.time() - push_to_database(apikey, competition, results, pit) - print("[OK] pushed to database in " + str(time.time() - start) + " seconds") - - clear() - -def clear(): - - # for windows - if name == 'nt': - _ = system('cls') - - # for mac and linux(here, os.name is 'posix') - else: - _ = system('clear') - -def load_config(file): - config_vector = {} - file = an.load_csv(file) - for line in file: - config_vector[line[0]] = line[1:] - - return config_vector - -def simpleloop(data, tests): # expects 3D array with [Team][Variable][Match] - - return_vector = {} - for team in data: - variable_vector = {} - for variable in data[team]: - test_vector = {} - variable_data = data[team][variable] - if(variable in tests): - for test in tests[variable]: - test_vector[test] = simplestats(variable_data, test) - else: - pass - variable_vector[variable] = test_vector - return_vector[team] = variable_vector - - return return_vector - -def simplestats(data, test): - - data = np.array(data) - data = data[np.isfinite(data)] - ranges = list(range(len(data))) - - if(test == "basic_stats"): - return an.basic_stats(data) - - if(test == "historical_analysis"): - return an.histo_analysis([ranges, data]) - - if(test == "regression_linear"): - return an.regression(ranges, data, ['lin']) - - if(test == "regression_logarithmic"): - return an.regression(ranges, data, ['log']) - - if(test == "regression_exponential"): - return an.regression(ranges, data, ['exp']) - - if(test == "regression_polynomial"): - return an.regression(ranges, data, ['ply']) - - if(test == "regression_sigmoidal"): - return an.regression(ranges, data, ['sig']) - -def push_to_database(apikey, competition, results, pit): - - for team in results: - - d.push_team_tests_data(apikey, competition, team, results[team]) - - for variable in pit: - - d.push_team_pit_data(apikey, competition, variable, pit[variable]) - -def metricsloop(tbakey, apikey, competition, timestamp): # listener based metrics update - - elo_N = 400 - elo_K = 24 - - matches = d.pull_new_tba_matches(tbakey, competition, timestamp) - - red = {} - blu = {} - - for match in matches: - - red = load_metrics(apikey, competition, match, "red") - blu = load_metrics(apikey, competition, match, "blue") - - elo_red_total = 0 - 
elo_blu_total = 0 - - gl2_red_score_total = 0 - gl2_blu_score_total = 0 - - gl2_red_rd_total = 0 - gl2_blu_rd_total = 0 - - gl2_red_vol_total = 0 - gl2_blu_vol_total = 0 - - for team in red: - - elo_red_total += red[team]["elo"]["score"] - - gl2_red_score_total += red[team]["gl2"]["score"] - gl2_red_rd_total += red[team]["gl2"]["rd"] - gl2_red_vol_total += red[team]["gl2"]["vol"] - - for team in blu: - - elo_blu_total += blu[team]["elo"]["score"] - - gl2_blu_score_total += blu[team]["gl2"]["score"] - gl2_blu_rd_total += blu[team]["gl2"]["rd"] - gl2_blu_vol_total += blu[team]["gl2"]["vol"] - - red_elo = {"score": elo_red_total / len(red)} - blu_elo = {"score": elo_blu_total / len(blu)} - - red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)} - blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)} - - - if(match["winner"] == "red"): - - observations = {"red": 1, "blu": 0} - - elif(match["winner"] == "blue"): - - observations = {"red": 0, "blu": 1} - - else: - - observations = {"red": 0.5, "blu": 0.5} - - red_elo_delta = an.Metrics.elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"] - blu_elo_delta = an.Metrics.elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"] - - new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metrics.glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]]) - new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metrics.glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]]) - - red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]} - blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]} - - for team in red: - - red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta - - red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"] - red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"] - red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"] - - for team in blu: - - blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta - - blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"] - blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"] - blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"] - - temp_vector = {} - temp_vector.update(red) - temp_vector.update(blu) - - for team in temp_vector: - - d.push_team_metrics_data(apikey, competition, team, temp_vector[team]) - -def load_metrics(apikey, competition, match, group_name): - - group = {} - - for team in match[group_name]: - - db_data = d.get_team_metrics_data(apikey, competition, team) - - if d.get_team_metrics_data(apikey, competition, team) == None: - - elo = {"score": 1500} - gl2 = {"score": 1500, "rd": 250, "vol": 0.06} - ts = {"mu": 25, "sigma": 25/3} - - #d.push_team_metrics_data(apikey, competition, team, {"elo":elo, "gl2":gl2,"trueskill":ts}) - - group[team] = {"elo": elo, "gl2": gl2, "ts": ts} - - else: - - metrics = db_data["metrics"] - - elo = metrics["elo"] - gl2 = metrics["gl2"] - ts = metrics["ts"] - - 
group[team] = {"elo": elo, "gl2": gl2, "ts": ts} - - return group - -def pitloop(pit, tests): - - return_vector = {} - for team in pit: - for variable in pit[team]: - if(variable in tests): - if(not variable in return_vector): - return_vector[variable] = [] - return_vector[variable].append(pit[team][variable]) - - return return_vector - -main() - -""" -Metrics Defaults: - -elo starting score = 1500 -elo N = 400 -elo K = 24 - -gl2 starting score = 1500 -gl2 starting rd = 350 -gl2 starting vol = 0.06 -""" \ No newline at end of file diff --git a/data-analysis/tasks.py b/data-analysis/tasks.py deleted file mode 100644 index 9b3f4ea9..00000000 --- a/data-analysis/tasks.py +++ /dev/null @@ -1,188 +0,0 @@ -import json -import superscript as su -import threading - -__author__ = ( - "Arthur Lu ", -) - -class Tasker(): - - match_ = False - metric_ = False - pit_ = False - - match_enable = True - metric_enable = True - pit_enable = True - - config = {} - - def __init__(self): - - self.config = su.load_config("config.json") - - def match(self): - - self.match_ = True - - apikey = self.config["key"]["database"] - competition = self.config["competition"] - tests = self.config["statistics"]["match"] - - data = su.load_match(apikey, competition) - su.matchloop(apikey, competition, data, tests) - - self.match_ = False - - if self.match_enable == True and self.match_ == False: - - task = threading.Thread(name = "match", target = match) - task.start() - - def metric(): - - self.metric_ = True - - apikey = self.config["key"]["database"] - tbakey = self.config["key"]["tba"] - competition = self.config["competition"] - metric = self.config["statistics"]["metric"] - - timestamp = su.get_previous_time(apikey) - - su.metricloop(tbakey, apikey, competition, timestamp, metric) - - self.metric_ = False - - if self.metric_enable == True and self.metric_ == False: - - task = threading.Thread(name = "match", target = metric) - task.start() - - def pit(): - - self.pit_ = True - - apikey = self.config["key"]["database"] - competition = self.config["competition"] - tests = self.config["statistics"]["pit"] - - data = su.load_pit(apikey, competition) - su.pitloop(apikey, competition, data, tests) - - self.pit_ = False - - if self.pit_enable == True and self.pit_ == False: - - task = threading.Thread(name = "pit", target = pit) - task.start() - - def start_match(): - task = threading.Thread(name = "match", target = match) - task.start() - - def start_metric(): - task = threading.Thread(name = "match", target = metric) - task.start() - - def start_pit(): - task = threading.Thread(name = "pit", target = pit) - task.start() - - def stop_match(): - self.match_enable = False - - def stop_metric(): - self.metric_enable = False - - def stop_pit(): - self.pit_enable = False - - def get_match(): - return self.match_ - - def get_metric(): - return self.metric_ - - def get_pit(): - return self.pit_ - - def get_match_enable(): - return self.match_enable - - def get_metric_enable(): - return self.metric_enable - - def get_pit_enable(): - return self.pit_enable -""" -def main(): - - init() - start_match() - start_metric() - start_pit() - - exit = False - while(not exit): - - i = input("> ") - cmds = i.split(" ") - cmds = [x for x in cmds if x != ""] - l = len(cmds) - - if(l == 0): - pass - else: - if(cmds[0] == "exit"): - if(l == 1): - exit = True - else: - print("exit command expected no arguments but encountered " + str(l - 1)) - if(cmds[0] == "status"): - if(l == 1): - print("status command expected 1 argument but encountered 
none\ntype status help for usage") - elif(l > 2): - print("status command expected 1 argument but encountered " + str(l - 1)) - elif(cmds[1] == "threads"): - threads = threading.enumerate() - threads = [x.getName() for x in threads] - print("running threads:") - for thread in threads: - print(" " + thread) - elif(cmds[1] == "flags"): - print("current flags:") - print(" match running: " + match_) - print(" metric running: " + metric_) - print(" pit running: " + pit_) - print(" match enable: " + match_enable) - print(" metric enable: " + metric_enable) - print(" pit enable: " + pit_enable) - elif(cmds[1] == "config"): - print("current config:") - print(json.dumps(config)) - elif(cmds[1] == "all"): - threads = threading.enumerate() - threads = [x.getName() for x in threads] - print("running threads:") - for thread in threads: - print(" " + thread) - print("current flags:") - print(" match running: " + match_) - print(" metric running: " + metric_) - print(" pit running: " + pit_) - print(" match enable: " + match_enable) - print(" metric enable: " + metric_enable) - print(" pit enable: " + pit_enable) - elif(cmds[1] == "help"): - print("usage: status [arg]\nDisplays the status of the tra data analysis threads.\nArguments:\n threads - prints the stuatus ofcurrently running threads\n flags - prints the status of control and indicator flags\n config - prints the current configuration information\n all - prints all statuses\n - prints the status of a specific thread") - else: - threads = threading.enumerate() - threads = [x.getName() for x in threads] - if(cmds[1] in threads): - print(cmds[1] + " is running") - -if(__name__ == "__main__"): - main() -""" \ No newline at end of file diff --git a/data-analysis/test.py b/data-analysis/test.py deleted file mode 100644 index d8d029a4..00000000 --- a/data-analysis/test.py +++ /dev/null @@ -1,55 +0,0 @@ -import threading -from multiprocessing import Process, Queue -import time -from os import system - -class testcls(): - - i = 0 - j = 0 - - t1_en = True - t2_en = True - - def main(self): - t1 = Process(name = "task1", target = self.task1) - t2 = Process(name = "task2", target = self.task2) - t1.start() - t2.start() - #print(self.i) - #print(self.j) - - def task1(self): - self.i += 1 - time.sleep(1) - if(self.i < 10): - t1 = Process(name = "task1", target = self.task1) - t1.start() - - def task2(self): - self.j -= 1 - time.sleep(1) - if(self.j > -10): - t2 = t2 = Process(name = "task2", target = self.task2) - t2.start() -""" -if __name__ == "__main__": - - tmain = threading.Thread(name = "main", target = main) - tmain.start() - - t = 0 - while(True): - system("clear") - for thread in threading.enumerate(): - if thread.getName() != "MainThread": - print(thread.getName()) - print(str(len(threading.enumerate()))) - print(i) - print(j) - time.sleep(0.1) - t += 1 - if(t == 100): - t1_en = False - t2_en = False -""" \ No newline at end of file diff --git a/data-analysis/tra-cli.py b/data-analysis/tra-cli.py deleted file mode 100644 index 356051d1..00000000 --- a/data-analysis/tra-cli.py +++ /dev/null @@ -1,33 +0,0 @@ -import argparse -from tasks import Tasker -import test -import threading -from multiprocessing import Process, Queue - -t = Tasker() - -task_map = {"match":None, "metric":None, "pit":None, "test":None} -status_map = {"match":None, "metric":None, "pit":None} -status_map.update(task_map) - -parser = argparse.ArgumentParser(prog = "TRA") -subparsers = parser.add_subparsers(title = "command", metavar = "C", help = "//commandhelp//") - -parser_start = 
subparsers.add_parser("start", help = "//starthelp//") -parser_start.add_argument("targets", metavar = "T", nargs = "*", choices = task_map.keys()) -parser_start.set_defaults(which = "start") - -parser_stop = subparsers.add_parser("stop", help = "//stophelp//") -parser_stop.add_argument("targets", metavar = "T", nargs = "*", choices = task_map.keys()) -parser_stop.set_defaults(which = "stop") - -parser_status = subparsers.add_parser("status", help = "//stophelp//") -parser_status.add_argument("targets", metavar = "T", nargs = "*", choices = status_map.keys()) -parser_status.set_defaults(which = "status") - -args = parser.parse_args() - -if(args.which == "start" and "test" in args.targets): - a = test.testcls() - tmain = Process(name = "main", target = a.main) - tmain.start() \ No newline at end of file diff --git a/data-analysis/tra.py b/data-analysis/tra.py deleted file mode 100644 index d790d57c..00000000 --- a/data-analysis/tra.py +++ /dev/null @@ -1,166 +0,0 @@ -import json -import superscript as su -import threading - -__author__ = ( - "Arthur Lu ", -) - -match_ = False -metric_ = False -pit_ = False - -match_enable = True -metric_enable = True -pit_enable = True - -config = {} - -def __init__(self): - - global match_ - global metric_ - global pit_ - - global match_enable - global metric_enable - global pit_enable - - config = su.load_config("config.json") - -def match(self): - - match_ = True - - apikey = config["key"]["database"] - competition = config["competition"] - tests = config["statistics"]["match"] - - data = su.load_match(apikey, competition) - su.matchloop(apikey, competition, data, tests) - - match_ = False - - if match_enable == True and match_ == False: - - task = threading.Thread(name = "match", target = match) - task.start() - -def metric(): - - metric_ = True - - apikey = config["key"]["database"] - tbakey = config["key"]["tba"] - competition = config["competition"] - metric = config["statistics"]["metric"] - - timestamp = su.get_previous_time(apikey) - - su.metricloop(tbakey, apikey, competition, timestamp, metric) - - metric_ = False - - if metric_enable == True and metric_ == False: - - task = threading.Thread(name = "match", target = metric) - task.start() - -def pit(): - - pit_ = True - - apikey = config["key"]["database"] - competition = config["competition"] - tests = config["statistics"]["pit"] - - data = su.load_pit(apikey, competition) - su.pitloop(apikey, competition, data, tests) - - pit_ = False - - if pit_enable == True and pit_ == False: - - task = threading.Thread(name = "pit", target = pit) - task.start() - -def start_match(): - task = threading.Thread(name = "match", target = match) - task.start() - -def start_metric(): - task = threading.Thread(name = "match", target = metric) - task.start() - -def start_pit(): - task = threading.Thread(name = "pit", target = pit) - task.start() - -def main(): - - init() - start_match() - start_metric() - start_pit() - - exit = False - while(not exit): - - i = input("> ") - cmds = i.split(" ") - cmds = [x for x in cmds if x != ""] - l = len(cmds) - - if(l == 0): - pass - else: - if(cmds[0] == "exit"): - if(l == 1): - exit = True - else: - print("exit command expected no arguments but encountered " + str(l - 1)) - if(cmds[0] == "status"): - if(l == 1): - print("status command expected 1 argument but encountered none\ntype status help for usage") - elif(l > 2): - print("status command expected 1 argument but encountered " + str(l - 1)) - elif(cmds[1] == "threads"): - threads = threading.enumerate() - threads = 
[x.getName() for x in threads] - print("running threads:") - for thread in threads: - print(" " + thread) - elif(cmds[1] == "flags"): - print("current flags:") - print(" match running: " + match_) - print(" metric running: " + metric_) - print(" pit running: " + pit_) - print(" match enable: " + match_enable) - print(" metric enable: " + metric_enable) - print(" pit enable: " + pit_enable) - elif(cmds[1] == "config"): - print("current config:") - print(json.dumps(config)) - elif(cmds[1] == "all"): - threads = threading.enumerate() - threads = [x.getName() for x in threads] - print("running threads:") - for thread in threads: - print(" " + thread) - print("current flags:") - print(" match running: " + match_) - print(" metric running: " + metric_) - print(" pit running: " + pit_) - print(" match enable: " + match_enable) - print(" metric enable: " + metric_enable) - print(" pit enable: " + pit_enable) - elif(cmds[1] == "help"): - print("usage: status [arg]\nDisplays the status of the tra data analysis threads.\nArguments:\n threads - prints the stuatus ofcurrently running threads\n flags - prints the status of control and indicator flags\n config - prints the current configuration information\n all - prints all statuses\n - prints the status of a specific thread") - else: - threads = threading.enumerate() - threads = [x.getName() for x in threads] - if(cmds[1] in threads): - print(cmds[1] + " is running") - -if(__name__ == "__main__"): - main() \ No newline at end of file From fa7216d4e05bebba39f39eed949c67c78959aca1 Mon Sep 17 00:00:00 2001 From: Arthur Lu Date: Sun, 20 Sep 2020 00:50:14 +0000 Subject: [PATCH 10/10] modified setup.py for analysis package v 2.1.0 Signed-off-by: Arthur Lu --- analysis-master/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/analysis-master/setup.py b/analysis-master/setup.py index ca9af33c..9c9cbf28 100644 --- a/analysis-master/setup.py +++ b/analysis-master/setup.py @@ -8,7 +8,7 @@ with open("requirements.txt", 'r') as file: setuptools.setup( name="tra_analysis", - version="2.0.3", + version="2.1.0", author="The Titan Scouting Team", author_email="titanscout2022@gmail.com", description="Analysis package developed by Titan Scouting for The Red Alliance",
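
Example: a short usage sketch of the overhauled Array class as of analysis.py 2.3.1 (patches 05 and 07). Illustrative only; the input values are hypothetical and the comments show the expected numpy results:

    from tra_analysis import analysis as an

    a = an.Array([1, 2, 3])
    b = an.Array([4, 5, 6])

    print(a + b)                 # elementwise sum via __add__ -> [5 7 9]
    print(a * b)                 # dot product via __mul__ -> 32
    print(a.cross(b))            # cross product -> [-3  6 -3]
    print(a.elementwise_mean())  # mean of the wrapped array along axis 0 -> 2.0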