Mirror of https://github.com/titanscouting/tra-analysis.git
Synced 2025-09-06 23:17:22 +00:00

Compare commits: improve-de...analysis-v (4 commits)

Author | SHA1 | Date
---|---|---
| df6362c52a |
| f793a77660 |
| 9187f1e7da |
| 6647dcfd72 |
.github/workflows/ut-analysis.yml (vendored): 4 changes

@@ -10,12 +10,12 @@ on:
   branches: [ master ]
 
 jobs:
-  build:
+  unittest:
 
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: [3.7, 3.8]
+        python-version: ["3.7", "3.8", "3.9", "3.10"]
 
     env:
       working-directory: ./analysis-master/
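Quoting the version numbers is load-bearing here: YAML 1.1 parsers read a bare 3.10 as the float 3.1, so an unquoted matrix entry would request Python 3.1 rather than 3.10. A minimal sketch of the pitfall, assuming the PyYAML package (which follows YAML 1.1) is installed:

```python
# Demonstrates why the workflow quotes its python-version entries.
# Assumes PyYAML is available: pip install pyyaml
import yaml

unquoted = yaml.safe_load('python-version: [3.7, 3.8, 3.9, 3.10]')
quoted = yaml.safe_load('python-version: ["3.7", "3.8", "3.9", "3.10"]')

print(unquoted)  # {'python-version': [3.7, 3.8, 3.9, 3.1]} -- 3.10 collapses to 3.1
print(quoted)    # {'python-version': ['3.7', '3.8', '3.9', '3.10']}
```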
analysis-master/requirements.txt (new file): 8 additions

@@ -0,0 +1,8 @@
+numpy
+scipy
+scikit-learn
+six
+pyparsing
+
+pylint
+pytest
@@ -153,14 +153,9 @@ def test_sort():
 	assert all(a == b for a, b in zip(sort(test_data_scrambled), test_data_sorted))
 
 def test_statisticaltest():
 
-	#print(StatisticalTest.tukey_multicomparison([test_data_linear, test_data_linear2, test_data_linear3]))
-
+	assert StatisticalTest.tukey_multicomparison([test_data_linear, test_data_linear2, test_data_linear3]) == \
+		{'group 1 and group 2': [0.32571517201527916, False], 'group 1 and group 3': [0.977145516045838, False], 'group 2 and group 3': [0.6514303440305589, False]}
-	#assert all(np.isclose([i[0] for i in list(StatisticalTest.tukey_multicomparison([test_data_linear, test_data_linear2, test_data_linear3]).values],
-	# [0.32571517201527916, 0.977145516045838, 0.6514303440305589]))
-	#assert [i[1] for i in StatisticalTest.tukey_multicomparison([test_data_linear, test_data_linear2, test_data_linear3]).values] == \
-	# [False, False, False]
-
 
 def test_svm():
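The commented-out checks removed here sketched a tolerance-based alternative to the exact-equality assert, which is worth keeping in mind since exact float comparisons can break across SciPy versions. A hedged reconstruction of that idea, assuming numpy and the test_data_linear* fixtures from the surrounding test module, with the missing parentheses on .values() fixed:

```python
# Hypothetical tolerance-based version of the Tukey check, mirroring the
# commented-out asserts in this hunk (their bare `.values` was a bug; it
# must be called as `.values()`).
import numpy as np

results = StatisticalTest.tukey_multicomparison(
	[test_data_linear, test_data_linear2, test_data_linear3]
)
expected = [0.32571517201527916, 0.977145516045838, 0.6514303440305589]

# Compare the statistics with a floating-point tolerance...
assert np.allclose([pair[0] for pair in results.values()], expected)
# ...and check the significance flags exactly, since they are booleans.
assert [pair[1] for pair in results.values()] == [False, False, False]
```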
tra_analysis/Clustering.py

@@ -4,10 +4,12 @@
 # this should be imported as a python module using 'from tra_analysis import Clustering'
 # setup:
 
-__version__ = "2.0.1"
+__version__ = "2.0.2"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+	2.0.2:
+		- generalized optional args to **kwargs
 	2.0.1:
 		- added normalization preprocessing to clustering, expects instance of sklearn.preprocessing.Normalizer()
 	2.0.0:

@@ -30,32 +32,32 @@ __all__ = [
 
 import sklearn
 
-def kmeans(data, normalizer = None, n_clusters=8, init="k-means++", n_init=10, max_iter=300, tol=0.0001, precompute_distances="auto", verbose=0, random_state=None, copy_x=True, n_jobs=None, algorithm="auto"):
+def kmeans(data, normalizer = None, **kwargs):
 
 	if normalizer != None:
 		data = normalizer.transform(data)
 
-	kernel = sklearn.cluster.KMeans(n_clusters = n_clusters, init = init, n_init = n_init, max_iter = max_iter, tol = tol, precompute_distances = precompute_distances, verbose = verbose, random_state = random_state, copy_x = copy_x, n_jobs = n_jobs, algorithm = algorithm)
+	kernel = sklearn.cluster.KMeans(**kwargs)
 	kernel.fit(data)
 	predictions = kernel.predict(data)
 	centers = kernel.cluster_centers_
 
 	return centers, predictions
 
-def dbscan(data, normalizer=None, eps=0.5, min_samples=5, metric='euclidean', metric_params=None, algorithm='auto', leaf_size=30, p=None, n_jobs=None):
+def dbscan(data, normalizer=None, **kwargs):
 
 	if normalizer != None:
 		data = normalizer.transform(data)
 
-	model = sklearn.cluster.DBSCAN(eps = eps, min_samples = min_samples, metric = metric, metric_params = metric_params, algorithm = algorithm, leaf_size = leaf_size, p = p, n_jobs = n_jobs).fit(data)
+	model = sklearn.cluster.DBSCAN(**kwargs).fit(data)
 
 	return model.labels_
 
-def spectral(data, normalizer=None, n_clusters=8, eigen_solver=None, n_components=None, random_state=None, n_init=10, gamma=1.0, affinity='rbf', n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1, kernel_params=None, n_jobs=None, verbose=False):
+def spectral(data, normalizer=None, **kwargs):
 
 	if normalizer != None:
 		data = normalizer.transform(data)
 
-	model = sklearn.cluster.SpectralClustering(n_clusters = n_clusters, eigen_solver = eigen_solver, n_components = n_components, random_state = random_state, n_init = n_init, gamma = gamma, affinity = affinity, n_neighbors = n_neighbors, eigen_tol = eigen_tol, assign_labels = assign_labels, degree = degree, coef0 = coef0, kernel_params = kernel_params, n_jobs = n_jobs).fit(data)
+	model = sklearn.cluster.SpectralClustering(**kwargs).fit(data)
 
 	return model.labels_
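With the wrappers reduced to **kwargs, anything the corresponding sklearn.cluster estimator accepts passes straight through, and since-deprecated options such as KMeans' precompute_distances are no longer pinned to old defaults. A usage sketch for the new kmeans signature, with illustrative data and cluster counts:

```python
# Usage sketch for the **kwargs form of Clustering.kmeans.
# The data array and keyword values here are illustrative only.
import numpy as np
from sklearn.preprocessing import Normalizer
from tra_analysis import Clustering

data = np.array([[1.0, 2.0], [1.5, 1.8], [8.0, 8.0], [8.2, 7.9]])
normalizer = Normalizer().fit(data)

# Extra keywords go straight to sklearn.cluster.KMeans.
centers, predictions = Clustering.kmeans(
	data, normalizer=normalizer, n_clusters=2, random_state=42
)
print(centers, predictions)
```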
tra_analysis/CorrelationTest.py

@@ -4,9 +4,11 @@
 # this should be imported as a python module using 'from tra_analysis import CorrelationTest'
 # setup:
 
-__version__ = "1.0.2"
+__version__ = "1.0.3"
 
 __changelog__ = """changelog:
+	1.0.3:
+		- generalized optional args to **kwargs
 	1.0.2:
 		- optimized imports
 	1.0.1:

@@ -42,9 +44,9 @@ def pearson(x, y):
 	results = scipy.stats.pearsonr(x, y)
 	return {"r-value": results[0], "p-value": results[1]}
 
-def spearman(a, b = None, axis = 0, nan_policy = 'propagate'):
+def spearman(a, b = None, **kwargs):
 
-	results = scipy.stats.spearmanr(a, b = b, axis = axis, nan_policy = nan_policy)
+	results = scipy.stats.spearmanr(a, b = b, **kwargs)
 	return {"r-value": results[0], "p-value": results[1]}
 
 def point_biserial(x, y):

@@ -52,17 +54,17 @@ def point_biserial(x, y):
 	results = scipy.stats.pointbiserialr(x, y)
 	return {"r-value": results[0], "p-value": results[1]}
 
-def kendall(x, y, initial_lexsort = None, nan_policy = 'propagate', method = 'auto'):
+def kendall(x, y, **kwargs):
 
-	results = scipy.stats.kendalltau(x, y, initial_lexsort = initial_lexsort, nan_policy = nan_policy, method = method)
+	results = scipy.stats.kendalltau(x, y, **kwargs)
 	return {"tau": results[0], "p-value": results[1]}
 
-def kendall_weighted(x, y, rank = True, weigher = None, additive = True):
+def kendall_weighted(x, y, **kwargs):
 
-	results = scipy.stats.weightedtau(x, y, rank = rank, weigher = weigher, additive = additive)
+	results = scipy.stats.weightedtau(x, y, **kwargs)
 	return {"tau": results[0], "p-value": results[1]}
 
-def mgc(x, y, compute_distance = None, reps = 1000, workers = 1, is_twosamp = False, random_state = None):
+def mgc(x, y, **kwargs):
 
-	results = scipy.stats.multiscale_graphcorr(x, y, compute_distance = compute_distance, reps = reps, workers = workers, is_twosamp = is_twosamp, random_state = random_state)
+	results = scipy.stats.multiscale_graphcorr(x, y, **kwargs)
 	return {"k-value": results[0], "p-value": results[1], "data": results[2]} # unsure if MGC test returns a k value
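The correlation wrappers now forward keywords verbatim to their scipy.stats counterparts, so callers use SciPy's own parameter names. A usage sketch for spearman, with illustrative arrays:

```python
# Usage sketch for the **kwargs form of CorrelationTest.spearman.
# The sample arrays are illustrative only.
from tra_analysis import CorrelationTest

x = [1, 2, 3, 4, 5]
y = [5, 6, 7, 8, 7]

# nan_policy is forwarded verbatim to scipy.stats.spearmanr.
result = CorrelationTest.spearman(x, b=y, nan_policy="omit")
print(result["r-value"], result["p-value"])
```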
tra_analysis/KNN.py

@@ -4,9 +4,11 @@
 # this should be imported as a python module using 'from tra_analysis import KNN'
 # setup:
 
-__version__ = "1.0.1"
+__version__ = "1.0.2"
 
 __changelog__ = """changelog:
+	1.0.2:
+		- generalized optional args to **kwargs
 	1.0.1:
 		- optimized imports
 	1.0.0:

@@ -27,19 +29,19 @@ __all__ = [
 import sklearn
 from . import ClassificationMetric, RegressionMetric
 
-def knn_classifier(data, labels, n_neighbors = 5, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
+def knn_classifier(data, labels, n_neighbors = 5, test_size = 0.3, **kwargs): #expects *2d data and 1d labels post-scaling
 
 	data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
-	model = sklearn.neighbors.KNeighborsClassifier(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
+	model = sklearn.neighbors.KNeighborsClassifier(n_neighbors = n_neighbors, **kwargs)
 	model.fit(data_train, labels_train)
 	predictions = model.predict(data_test)
 
 	return model, ClassificationMetric(predictions, labels_test)
 
-def knn_regressor(data, outputs, n_neighbors = 5, test_size = 0.3, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
+def knn_regressor(data, outputs, n_neighbors = 5, test_size = 0.3, **kwargs):
 
 	data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1)
-	model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
+	model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, **kwargs)
 	model.fit(data_train, outputs_train)
 	predictions = model.predict(data_test)
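Only n_neighbors and test_size remain explicit in the KNN wrappers; everything else reaches the underlying sklearn.neighbors estimator through **kwargs. A usage sketch on a stock dataset, with illustrative keyword choices:

```python
# Usage sketch for the **kwargs form of KNN.knn_classifier.
# The dataset and keyword values are illustrative only.
from sklearn.datasets import load_iris
from tra_analysis import KNN

data, labels = load_iris(return_X_y=True)

# weights and p are no longer explicit parameters; they pass through
# **kwargs to sklearn.neighbors.KNeighborsClassifier.
model, metrics = KNN.knn_classifier(
	data, labels, n_neighbors=3, test_size=0.3, weights="distance", p=1
)
```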
tra_analysis/NaiveBayes.py

@@ -4,9 +4,11 @@
 # this should be imported as a python module using 'from tra_analysis import NaiveBayes'
 # setup:
 
-__version__ = "1.0.1"
+__version__ = "1.0.2"
 
 __changelog__ = """changelog:
+	1.0.2:
+		- generalized optional args to **kwargs
 	1.0.1:
 		- optimized imports
 	1.0.0:

@@ -20,45 +22,45 @@ __author__ = (
 
 __all__ = [
 	'gaussian',
-	'multinomial'
+	'multinomial',
 	'bernoulli',
-	'complement'
+	'complement',
 ]
 
 import sklearn
 from . import ClassificationMetric
 
-def gaussian(data, labels, test_size = 0.3, priors = None, var_smoothing = 1e-09):
+def gaussian(data, labels, test_size = 0.3, **kwargs):
 
 	data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
-	model = sklearn.naive_bayes.GaussianNB(priors = priors, var_smoothing = var_smoothing)
+	model = sklearn.naive_bayes.GaussianNB(**kwargs)
 	model.fit(data_train, labels_train)
 	predictions = model.predict(data_test)
 
 	return model, ClassificationMetric(predictions, labels_test)
 
-def multinomial(data, labels, test_size = 0.3, alpha=1.0, fit_prior=True, class_prior=None):
+def multinomial(data, labels, test_size = 0.3, **kwargs):
 
 	data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
-	model = sklearn.naive_bayes.MultinomialNB(alpha = alpha, fit_prior = fit_prior, class_prior = class_prior)
+	model = sklearn.naive_bayes.MultinomialNB(**kwargs)
 	model.fit(data_train, labels_train)
 	predictions = model.predict(data_test)
 
 	return model, ClassificationMetric(predictions, labels_test)
 
-def bernoulli(data, labels, test_size = 0.3, alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None):
+def bernoulli(data, labels, test_size = 0.3, **kwargs):
 
 	data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
-	model = sklearn.naive_bayes.BernoulliNB(alpha = alpha, binarize = binarize, fit_prior = fit_prior, class_prior = class_prior)
+	model = sklearn.naive_bayes.BernoulliNB(**kwargs)
 	model.fit(data_train, labels_train)
 	predictions = model.predict(data_test)
 
 	return model, ClassificationMetric(predictions, labels_test)
 
-def complement(data, labels, test_size = 0.3, alpha=1.0, fit_prior=True, class_prior=None, norm=False):
+def complement(data, labels, test_size = 0.3, **kwargs):
 
 	data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
-	model = sklearn.naive_bayes.ComplementNB(alpha = alpha, fit_prior = fit_prior, class_prior = class_prior, norm = norm)
+	model = sklearn.naive_bayes.ComplementNB(**kwargs)
 	model.fit(data_train, labels_train)
 	predictions = model.predict(data_test)
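All four Naive Bayes wrappers follow the same pattern, so estimator options like var_smoothing or alpha now travel through **kwargs instead of being re-declared in each signature. A usage sketch for gaussian, with an illustrative smoothing value:

```python
# Usage sketch for the **kwargs form of NaiveBayes.gaussian.
# The dataset and var_smoothing value are illustrative only.
from sklearn.datasets import load_iris
from tra_analysis import NaiveBayes

data, labels = load_iris(return_X_y=True)

# var_smoothing reaches sklearn.naive_bayes.GaussianNB via **kwargs.
model, metrics = NaiveBayes.gaussian(data, labels, test_size=0.3, var_smoothing=1e-8)
```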