From 75be44f518f1ced38f377e4202487b34b807f6ad Mon Sep 17 00:00:00 2001
From: ltcptgeneral <35508619+ltcptgeneral@users.noreply.github.com>
Date: Tue, 6 Nov 2018 21:56:51 -0600
Subject: [PATCH] Initial commit

---
 .gitattributes                           |   2 +
 __pycache__/analysis.cpython-37.pyc      | Bin 0 -> 8361 bytes
 __pycache__/generate_data.cpython-37.pyc | Bin 0 -> 498 bytes
 analysis.py                              | 673 +++++++++++++++++++++++
 analysis_test.py                         |  43 ++
 generate_data.py                         |  16 +
 repack_json.py                           |  28 +
 7 files changed, 762 insertions(+)
 create mode 100644 .gitattributes
 create mode 100644 __pycache__/analysis.cpython-37.pyc
 create mode 100644 __pycache__/generate_data.cpython-37.pyc
 create mode 100644 analysis.py
 create mode 100644 analysis_test.py
 create mode 100644 generate_data.py
 create mode 100644 repack_json.py

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..dfe07704
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+# Auto detect text files and perform LF normalization
+* text=auto
diff --git a/__pycache__/analysis.cpython-37.pyc b/__pycache__/analysis.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c645d4e8834b7c0007e2fb032b9251be4f75633f
Binary files /dev/null and b/__pycache__/analysis.cpython-37.pyc differ
diff --git a/__pycache__/generate_data.cpython-37.pyc b/__pycache__/generate_data.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b4dda5a2d6e5bfcf668850b024d875c80079674f
Binary files /dev/null and b/__pycache__/generate_data.cpython-37.pyc differ
diff --git a/analysis.py b/analysis.py
new file mode 100644
index 00000000..2ec9c306
--- /dev/null
+++ b/analysis.py
@@ -0,0 +1,673 @@
+#this should be imported as a python module using 'import analysis'
+
+import statistics
+import math
+import csv
+import functools
+
+class c_entities:
+
+    c_names = []
+    c_ids = []
+    c_pos = []
+    c_properties = []
+    c_logic = []
+
+class nc_entities:
+
+    c_names = []
+    c_ids = []
+    c_pos = []
+    c_properties = []
+    c_effects = []
+
+    def debug(self):
+        print("nc_entities (non-controllable entities) has attributes names, ids, positions, properties, and effects. __init__ takes self, 1d array of names, 1d array of ids, 2d array of positions, 2d array of properties, and 2d array of effects.")
+        return [self.c_names, self.c_ids, self.c_pos, self.c_properties, self.c_effects]
+
+    def __init__(self, names, ids, pos, properties, effects):
+        self.c_names = names
+        self.c_ids = ids
+        self.c_pos = pos
+        self.c_properties = properties
+        self.c_effects = effects
+        return None
+
+    def append(self, n_name, n_id, n_pos, n_property, n_effect):
+        self.c_names.append(n_name)
+        self.c_ids.append(n_id)
+        self.c_pos.append(n_pos)
+        self.c_properties.append(n_property)
+        self.c_effects.append(n_effect)
+
+    def edit(self, search, n_name, n_id, n_pos, n_property, n_effect):
+        position = 0
+        for i in range(0, len(self.c_ids), 1):
+            if self.c_ids[i] == search:
+                position = i
+
+        if n_name != "null":
+            self.c_names[position] = n_name
+
+        if n_id != "null":
+            self.c_ids[position] = n_id
+
+        if n_pos != "null":
+            self.c_pos[position] = n_pos
+
+        if n_property != "null":
+            self.c_properties[position] = n_property
+
+        if n_effect != "null":
+            self.c_effects[position] = n_effect
+
+        return None
+
+    def search(self, search):
+        position = 0
+        for i in range(0, len(self.c_ids), 1):
+            if self.c_ids[i] == search:
+                position = i
+
+        return [self.c_names[position], self.c_ids[position], self.c_pos[position], self.c_properties[position], self.c_effects[position]]
+
+class obstacles:
+
+    c_names = []
+    c_ids = []
+    c_perim = []
+    c_effects = []
+
+    def debug(self):
+        print("obstacles has attributes names, ids, perimeters, and effects. __init__ takes self, 1d array of names, 1d array of ids, 3d array of perimeters, 2d array of effects.")
+        return [self.c_names, self.c_ids, self.c_perim, self.c_effects]
+
+    def __init__(self, names, ids, perims, effects):
+        self.c_names = names
+        self.c_ids = ids
+        self.c_perim = perims
+        self.c_effects = effects
+        return None
+
+    def append(self, n_name, n_id, n_perim, n_effect):
+        self.c_names.append(n_name)
+        self.c_ids.append(n_id)
+        self.c_perim.append(n_perim)
+        self.c_effects.append(n_effect)
+        return None
+
+    def edit(self, search, n_name, n_id, n_perim, n_effect):
+        position = 0
+        for i in range(0, len(self.c_ids), 1):
+            if self.c_ids[i] == search:
+                position = i
+
+        if n_name != "null":
+            self.c_names[position] = n_name
+
+        if n_id != "null":
+            self.c_ids[position] = n_id
+
+        if n_perim != "null":
+            self.c_perim[position] = n_perim
+
+        if n_effect != "null":
+            self.c_effects[position] = n_effect
+
+        return None
+
+    def search(self, search):
+        position = 0
+        for i in range(0, len(self.c_ids), 1):
+            if self.c_ids[i] == search:
+                position = i
+
+        return [self.c_names[position], self.c_ids[position], self.c_perim[position], self.c_effects[position]]
+
+class objectives:
+
+    c_names = []
+    c_ids = []
+    c_pos = []
+    c_effects = []
+
+    def debug(self):
+        print("objectives has attributes names, ids, positions, and effects. __init__ takes self, 1d array of names, 1d array of ids, 2d array of positions, 1d array of effects.")
+        return [self.c_names, self.c_ids, self.c_pos, self.c_effects]
+
+    def __init__(self, names, ids, pos, effects):
+        self.c_names = names
+        self.c_ids = ids
+        self.c_pos = pos
+        self.c_effects = effects
+        return None
+
+    def append(self, n_name, n_id, n_pos, n_effect):
+        self.c_names.append(n_name)
+        self.c_ids.append(n_id)
+        self.c_pos.append(n_pos)
+        self.c_effects.append(n_effect)
+        return None
+
+    def edit(self, search, n_name, n_id, n_pos, n_effect):
+        position = 0
+        for i in range(0, len(self.c_ids), 1):
+            if self.c_ids[i] == search:
+                position = i
+
+        if n_name != "null":
+            self.c_names[position] = n_name
+
+        if n_id != "null":
+            self.c_ids[position] = n_id
+
+        if n_pos != "null":
+            self.c_pos[position] = n_pos
+
+        if n_effect != "null":
+            self.c_effects[position] = n_effect
+
+        return None
+
+    def search(self, search):
+        position = 0
+        for i in range(0, len(self.c_ids), 1):
+            if self.c_ids[i] == search:
+                position = i
+
+        return [self.c_names[position], self.c_ids[position], self.c_pos[position], self.c_effects[position]]
+
+def load_csv(filepath):
+    with open(filepath, newline='') as csvfile:
+        file_array = list(csv.reader(csvfile))
+    return file_array
+
+def basic_stats(data, mode, arg): # data=array, mode = ['1d':1d_basic_stats, 'column':c_basic_stats, 'row':r_basic_stats], arg for mode 1 or mode 2 for column or row
+
+    if mode == 'debug':
+        out = "basic_stats requires 3 args: data, mode, arg; where data is data to be analyzed, mode is an int from 0 - 2 depending on type of analysis (by column or by row) and is only applicable to 2d arrays (for 1d arrays use mode 0), and arg is row/column number for mode 1 or mode 2; function returns: [mean, median, mode, stdev, variance]"
+        return out
+
+    if mode == "1d" or mode == 0:
+
+        data_t = []
+
+        for i in range(0, len(data), 1):
+
+            data_t.append(float(data[i]))
+
+        mean = statistics.mean(data_t)
+        median = statistics.median(data_t)
+        try:
+            mode = statistics.mode(data_t)
+        except statistics.StatisticsError:
+            mode = None
+        stdev = statistics.stdev(data_t)
+        variance = statistics.variance(data_t)
+
+        out = [mean, median, mode, stdev, variance]
+
+        return out
+
+    elif mode == "column" or mode == 1:
+
+        c_data = []
+
+        for i in data:
+            c_data.append(float(i[arg]))
+
+        mean = statistics.mean(c_data)
+        median = statistics.median(c_data)
+        try:
+            mode = statistics.mode(c_data)
+        except statistics.StatisticsError:
+            mode = None
+        stdev = statistics.stdev(c_data)
+        variance = statistics.variance(c_data)
+
+        out = [mean, median, mode, stdev, variance]
+
+        return out
+
+    elif mode == "row" or mode == 2:
+
+        r_data = []
+
+        for i in range(len(data[arg])):
+            r_data.append(float(data[arg][i]))
+
+        mean = statistics.mean(r_data)
+        median = statistics.median(r_data)
+        try:
+            mode = statistics.mode(r_data)
+        except statistics.StatisticsError:
+            mode = None
+        stdev = statistics.stdev(r_data)
+        variance = statistics.variance(r_data)
+
+        out = [mean, median, mode, stdev, variance]
+
+        return out
+    else:
+        return ["mode_error", "mode_error"]
+
+def z_score(point, mean, stdev):
+    score = (point - mean)/stdev
+    return score
+
+def stdev_z_split(mean, stdev, delta, low_bound, high_bound):
+
+    z_split = []
+
+    i = low_bound
+
+    while True:
+        z_split.append(float((1 / (stdev * math.sqrt(2 * math.pi))) * math.e ** (-0.5 * (((i - mean) / stdev) ** 2))))
+        i = i + delta
+        if i > high_bound:
+            break
+
+    return z_split
+
+def histo_analysis(hist_data): #note: deprecated
+
+    if hist_data == 'debug':
+        return ['lower estimate (5%)', 'lower middle estimate (25%)', 'middle estimate (50%)', 'higher middle estimate (75%)', 'high estimate (95%)', 'standard deviation', 'note: this has been deprecated']
+
+    derivative = []
+    for i in range(0, len(hist_data) - 1, 1):
+        derivative.append(float(hist_data[i+1]) - float(hist_data[i]))
+
+    derivative_sorted = sorted(derivative, key=int)
+    mean_derivative = basic_stats(derivative_sorted, "1d", 0)[0]
+    stdev_derivative = basic_stats(derivative_sorted, "1d", 0)[3]
+
+    low_bound = mean_derivative + -1.645 * stdev_derivative
+    lm_bound = mean_derivative + -0.674 * stdev_derivative
+    mid_bound = mean_derivative + 0 * stdev_derivative
+    hm_bound = mean_derivative + 0.674 * stdev_derivative
+    high_bound = mean_derivative + 1.645 * stdev_derivative
+
+    low_est = float(hist_data[-1:][0]) + low_bound
+    lm_est = float(hist_data[-1:][0]) + lm_bound
+    mid_est = float(hist_data[-1:][0]) + mid_bound
+    hm_est = float(hist_data[-1:][0]) + hm_bound
+    high_est = float(hist_data[-1:][0]) + high_bound
+
+    return [low_est, lm_est, mid_est, hm_est, high_est, stdev_derivative]
+
+def histo_analysis_2(hist_data, delta, low_bound, high_bound):
+
+    if hist_data == 'debug':
+        return ('returns list of predicted values based on historical data; input delta for delta step in z-score and lower and higher bounds in number of standard deviations')
+
+    derivative = []
+
+    for i in range(0, len(hist_data) - 1, 1):
+        derivative.append(float(hist_data[i + 1]) - float(hist_data[i]))
+
+    derivative_sorted = sorted(derivative, key=int)
+    mean_derivative = basic_stats(derivative_sorted, "1d", 0)[0]
+    stdev_derivative = basic_stats(derivative_sorted, "1d", 0)[3]
+
+    predictions = []
+    pred_change = 0
+
+    i = low_bound
+
+    while True:
+        pred_change = mean_derivative + i * stdev_derivative
+        predictions.append(float(hist_data[-1:][0]) + pred_change)
+        i = i + delta
+        if i > high_bound:
+            break
+
+    return predictions
diff --git a/analysis_test.py b/analysis_test.py
new file mode 100644
index 00000000..f9f34f2c
--- /dev/null
+++ b/analysis_test.py
@@ -0,0 +1,43 @@
+import analysis
+
+data = analysis.load_csv('data.txt')
+print(analysis.basic_stats(0, 'debug', 0))
+print(analysis.basic_stats(data, "column", 0))
+print(analysis.basic_stats(data, "row", 0))
+print(analysis.z_score(10, analysis.basic_stats(data, "column", 0)[0], analysis.basic_stats(data, "column", 0)[3]))
+print(analysis.histo_analysis(data[0]))
+print(analysis.histo_analysis_2(data[0], 0.01, -1, 1))
+print(analysis.stdev_z_split(3.3, 0.2, 0.1, -5, 5))
+
+game_nc_entities = analysis.nc_entities(["cube", "cube", "ball"], [0, 1, 2], [[0, 0.5], [1, 1.5], [2, 2]], ["1;1;1;10', '2;1;1;20", "r=0.5, 5"], ["1", "1", "0"])
+game_nc_entities.append("cone", 3, [1, -1], "property", "effect")
+game_nc_entities.edit(2, "sphere", 10, [5, -5], "new prop", "new effect")
+print(game_nc_entities.search(10))
+print(game_nc_entities.debug())
+
+game_obstacles = analysis.obstacles(["wall", "fortress", "castle"], [0, 1, 2], [[[10, 10], [10, 9], [9, 10], [9, 9]], [[-10, 9], [-10, -9], [-9, -10]], [[5, 0], [4, -1], [-4, -1]]], [0, 0.01, 10])
+game_obstacles.append("bastion", 3, [[50, 50], [49, 50], [50, 49], [49, 49]], 75)
+game_obstacles.edit(0, "motte and bailey", "null", [[10, 10], [9, 10], [10, 9], [9, 9]], 0.01)
+print(game_obstacles.search(0))
+print(game_obstacles.debug())
+
+game_objectives = analysis.objectives(["switch", "scale", "climb"], [0,1,2], [[0,0],[1,1],[2,0]], ["0,1", "1,1", "0,5"])
+game_objectives.append("auto", 3, [0, 10], "1, 10")
+game_objectives.edit(3, "null", 4, "null", "null")
+print(game_objectives.search(4))
+print(game_objectives.debug())
diff --git a/generate_data.py b/generate_data.py
new file mode 100644
index 00000000..1e25f8b4
--- /dev/null
+++ b/generate_data.py
@@ -0,0 +1,16 @@
+import random
+
+# writes x comma-separated random floats per line, y - 1 lines, to filename
+def generate(filename, x, y, low, high):
+
+    with open(filename, "w") as file:
+
+        for i in range(0, y - 1, 1):
+
+            temp = ""
+
+            for j in range(0, x - 1, 1):
+                temp = str(random.uniform(low, high)) + "," + temp
+
+            temp = temp + str(random.uniform(low, high))
+            file.write(temp + "\n")
diff --git a/repack_json.py b/repack_json.py
new file mode 100644
index 00000000..2abfa8a6
--- /dev/null
+++ b/repack_json.py
@@ -0,0 +1,28 @@
+# flatten the nested realtime database export (data -> teams -> match -> detail)
+# into one CSV row per detail entry, tagged with its uuid, match, and team
+import collections
+import csv
+import functools
+import json
+
+with open("realtimeDatabaseExport2018.json") as f:
+    dict_content = json.load(f)
+
+list_of_new_data = []
+
+for datak, datav in dict_content.items():
+    for teamk, teamv in datav["teams"].items():
+        for matchk, matchv in teamv.items():
+            for detailk, detailv in matchv.items():
+                new_data = collections.OrderedDict(detailv)
+                new_data["uuid"] = detailk
+                new_data["match"] = matchk
+                new_data["team"] = teamk
+
+                list_of_new_data.append(new_data)
+
+# union of every key that appears in any row, so no column is dropped
+allkey = functools.reduce(lambda x, y: x.union(y.keys()), list_of_new_data, set())
+
+with open('realtimeDatabaseExport2018.csv', 'w', newline='') as output_file:
+    dict_writer = csv.DictWriter(output_file, fieldnames=sorted(allkey))
+    dict_writer.writeheader()
+    dict_writer.writerows(list_of_new_data)
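
repack_json.py assumes a particular nesting in the exported JSON. The shape below is inferred from the loop structure only; every key name and value in it is an invented placeholder, not data from the real export.

# Illustrative only: the nesting repack_json.py walks, with made-up keys.
example_export = {
    "export2018": {                     # datak: outer grouping key in the export
        "teams": {
            "team0000": {               # teamk
                "qm1": {                # matchk
                    "uuid-0001": {      # detailk: one scouting entry
                        "autoCrossedLine": True,
                        "cubesScored": 3,
                    },
                },
            },
        },
    },
}
# Each innermost dict becomes one CSV row with uuid, match, and team columns added:
# {"autoCrossedLine": True, "cubesScored": 3, "uuid": "uuid-0001", "match": "qm1", "team": "team0000"}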
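Nothing in the commit actually calls generate_data.generate(), even though analysis_test.py expects a data.txt to exist. The snippet below is a minimal usage sketch, not part of the patch: the file name comes from analysis_test.py, while the dimensions and bounds are arbitrary.

import analysis
import generate_data

generate_data.generate("data.txt", 10, 20, 0.0, 10.0)   # 10 floats per line, 19 (y - 1) lines

data = analysis.load_csv("data.txt")                     # 2d list of strings
print(analysis.basic_stats(data, "column", 0))           # [mean, median, mode, stdev, variance] of column 0
print(analysis.histo_analysis_2(data[0], 0.01, -1, 1))   # predicted next values for row 0, z in [-1, 1]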