analysis.py v 1.1.0.004

This commit is contained in:
ltcptgeneral 2019-09-16 11:11:27 -05:00
parent 173f9b3460
commit 43d059b477
2 changed files with 28 additions and 6 deletions

View File

@ -7,10 +7,12 @@
# current benchmark of optimization: 1.33 times faster # current benchmark of optimization: 1.33 times faster
# setup: # setup:
__version__ = "1.1.0.003" __version__ = "1.1.0.004"
# changelog should be viewed using print(analysis.__changelog__) # changelog should be viewed using print(analysis.__changelog__)
__changelog__ = """changelog: __changelog__ = """changelog:
1.1.0.004:
- added performance metrics (r^2, mse, rms)
1.1.0.003: 1.1.0.003:
- resolved nopython mode for mean, median, stdev, variance - resolved nopython mode for mean, median, stdev, variance
1.1.0.002: 1.1.0.002:
@ -120,8 +122,8 @@ __changelog__ = """changelog:
""" """
__author__ = ( __author__ = (
"Arthur Lu <arthurlu@ttic.edu>, " "Arthur Lu <arthurlu@ttic.edu>",
"Jacob Levine <jlevine@ttic.edu>," "Jacob Levine <jlevine@ttic.edu>",
) )
__all__ = [ __all__ = [
@ -131,6 +133,9 @@ __all__ = [
'z_score', 'z_score',
'z_normalize', 'z_normalize',
'histo_analysis', 'histo_analysis',
'r_squared',
'mse',
'rms',
# all statistics functions left out due to integration in other functions # all statistics functions left out due to integration in other functions
] ]
@ -142,6 +147,8 @@ import csv
import numba import numba
from numba import jit from numba import jit
import numpy as np import numpy as np
import math
from sklearn import metrics
from sklearn import preprocessing from sklearn import preprocessing
class error(ValueError): class error(ValueError):
@ -212,10 +219,25 @@ def histo_analysis(hist_data):
derivative = t[1] / t[0] derivative = t[1] / t[0]
np.sort(derivative) np.sort(derivative)
mean_derivative = basic_stats(derivative)[0]
stdev_derivative = basic_stats(derivative)[3]
return mean_derivative, stdev_derivative return basic_stats(derivative)[0], basic_stats(derivative)[3]
#regressions
@jit(forceobj=True)
def r_squared(predictions, targets):
    """Return the coefficient of determination (r^2) of predictions against targets.

    Assumes *predictions* and *targets* are equal-length sequences; both are
    converted to numpy arrays before being handed to sklearn.
    """
    y_true = np.array(targets)
    y_pred = np.array(predictions)
    return metrics.r2_score(y_true, y_pred)
@jit(forceobj=True)
def mse(predictions, targets):
    """Return the mean squared error of predictions against targets.

    Both inputs are coerced to numpy arrays; targets are treated as the
    ground truth, matching sklearn's (y_true, y_pred) argument order.
    """
    y_true = np.array(targets)
    y_pred = np.array(predictions)
    return metrics.mean_squared_error(y_true, y_pred)
@jit(forceobj=True)
def rms(predictions, targets):
    """Return the root-mean-square error of predictions against targets.

    Computed as the square root of sklearn's mean squared error, with both
    inputs coerced to numpy arrays first.
    """
    squared_error = metrics.mean_squared_error(
        np.array(targets), np.array(predictions)
    )
    return math.sqrt(squared_error)
@jit(nopython=True) @jit(nopython=True)
def mean(data): def mean(data):