3 Commits

Author         SHA1         Message                                                                        Date
ltcptgeneral   04141bbec8   analysis.py v 1.1.13.006; regression.py v 1.0.0.003; analysis pkg v 1.0.0.8    2020-03-08 16:48:19 -05:00
ltcptgeneral   40e5899972   added get_team_rakings.py                                                      2020-03-08 14:26:21 -05:00
ltcptgeneral   025c7f9b3c   a                                                                              2020-03-06 21:39:46 -06:00
14 changed files with 129 additions and 53 deletions

View File

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: analysis
-Version: 1.0.0.7
+Version: 1.0.0.8
 Summary: analysis package developed by Titan Scouting for The Red Alliance
 Home-page: https://github.com/titanscout2022/tr2022-strategy
 Author: The Titan Scouting Team

View File

@@ -8,4 +8,5 @@ analysis/visualization.py
 analysis.egg-info/PKG-INFO
 analysis.egg-info/SOURCES.txt
 analysis.egg-info/dependency_links.txt
+analysis.egg-info/requires.txt
 analysis.egg-info/top_level.txt

View File

@@ -0,0 +1,6 @@
+numba
+numpy
+scipy
+scikit-learn
+six
+matplotlib
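The new requires.txt mirrors the install_requires list added to setup.py further down. A quick sanity check that the declared dependencies resolve under their import names (a sketch, not part of the commit; note that scikit-learn imports as sklearn):

# Sketch: confirm the dependencies declared in requires.txt are importable.
import importlib

for module in ("numba", "numpy", "scipy", "sklearn", "six", "matplotlib"):
    importlib.import_module(module)  # raises ImportError if one is missing

print("all analysis dependencies import cleanly")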

View File

@@ -7,10 +7,12 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "1.1.13.005"
+__version__ = "1.1.13.006"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    1.1.13.006:
+        - cleaned up imports
     1.1.13.005:
         - cleaned up package
     1.1.13.004:
@@ -283,10 +285,7 @@ import scipy
 from scipy import *
 import sklearn
 from sklearn import *
-try:
-    from analysis import trueskill as Trueskill
-except:
-    import trueskill as Trueskill
+from analysis import trueskill as Trueskill
 
 class error(ValueError):
     pass

View File

@@ -5,19 +5,22 @@
 # this module is cuda-optimized and vectorized (except for one small part)
 # setup:
 
-__version__ = "1.0.0.003"
+__version__ = "1.0.0.004"
 
 # changelog should be viewed using print(analysis.regression.__changelog__)
 __changelog__ = """
+    1.0.0.004:
+        - bug fixes
+        - fixed changelog
     1.0.0.003:
         - bug fixes
     1.0.0.002:
         -Added more parameters to log, exponential, polynomial
         -Added SigmoidalRegKernelArthur, because Arthur apparently needs
             to train the scaling and shifting of sigmoids
     1.0.0.001:
         -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
         -already vectorized (except for polynomial generation) and CUDA-optimized
 """
 
 __author__ = (
@@ -40,6 +43,8 @@ __all__ = [
     'CustomTrain'
 ]
 
+import torch
+
 global device
 device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"
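regression.py now imports torch directly and keeps the module-level device selection; torch.torch points back at the torch module, so the committed expression performs the same availability check as the usual spelling. A minimal sketch of the conventional form (an equivalent, not the committed code):

# Sketch (equivalent to the committed line, written the conventional way).
import torch

device = "cuda:0" if torch.cuda.is_available() else "cpu"
print(device)  # "cuda:0" on a CUDA-capable machine, otherwise "cpu"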

View File

@@ -7,10 +7,20 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "1.1.13.001"
+__version__ = "1.1.13.006"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    1.1.13.006:
+        - cleaned up imports
+    1.1.13.005:
+        - cleaned up package
+    1.1.13.004:
+        - small fixes to regression to improve performance
+    1.1.13.003:
+        - filtered nans from regression
+    1.1.13.002:
+        - removed torch requirement, and moved Regression back to regression.py
     1.1.13.001:
         - bug fix with linear regression not returning a proper value
         - cleaned up regression
@@ -239,7 +249,6 @@ __author__ = (
 )
 
 __all__ = [
-    '_init_device',
     'load_csv',
     'basic_stats',
     'z_score',
@@ -260,7 +269,6 @@ __all__ = [
     'SVM',
     'random_forest_classifier',
     'random_forest_regressor',
-    'Regression',
     'Glicko2',
     # all statistics functions left out due to integration in other functions
 ]
@@ -273,15 +281,11 @@ import csv
 import numba
 from numba import jit
 import numpy as np
-import math
 import scipy
 from scipy import *
 import sklearn
 from sklearn import *
-try:
-    from analysis import trueskill as Trueskill
-except:
-    import trueskill as Trueskill
+from analysis import trueskill as Trueskill
 
 class error(ValueError):
     pass
@@ -344,15 +348,15 @@ def histo_analysis(hist_data):
 def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 
+    X = np.array(inputs)
+    y = np.array(outputs)
+
     regressions = []
 
     if 'lin' in args: # formula: ax + b
 
         try:
-            X = np.array(inputs)
-            y = np.array(outputs)
-
             def func(x, a, b):
 
                 return a * x + b
@@ -369,9 +373,6 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
         try:
-            X = np.array(inputs)
-            y = np.array(outputs)
-
             def func(x, a, b, c, d):
 
                 return a * np.log(b*(x + c)) + d
@@ -386,10 +387,7 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
     if 'exp' in args: # formula: a e ^ (b(x + c)) + d
 
         try:
-            X = np.array(inputs)
-            y = np.array(outputs)
-
             def func(x, a, b, c, d):
@@ -405,8 +403,8 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
     if 'ply' in args: # formula: a + bx^1 + cx^2 + dx^3 + ...
 
-        inputs = [inputs]
-        outputs = [outputs]
+        inputs = np.array([inputs])
+        outputs = np.array([outputs])
 
         plys = []
 
         limit = len(outputs[0])
@@ -428,10 +426,7 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
     if 'sig' in args: # formula: a tanh (b(x + c)) + d
 
         try:
-            X = np.array(inputs)
-            y = np.array(outputs)
-
             def func(x, a, b, c, d):
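The refactor hoists the X/y array conversion to the top of regression() so every kernel shares it, and wraps the polynomial inputs/outputs in np.array. A usage sketch on made-up data (the exact structure of the returned fit objects is not shown in this diff, so printing them as an opaque list is an assumption):

# Sketch: request linear ('lin') and polynomial ('ply') fits, matching the
# argument strings checked inside regression().
from analysis import analysis as an

x = [0, 1, 2, 3, 4]
y = [1.0, 3.1, 4.9, 7.2, 8.8]  # placeholder data, roughly linear

fits = an.regression(x, y, ['lin', 'ply'])
print(fits)  # one entry per requested kernel (assumed format)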

View File

@@ -5,19 +5,22 @@
 # this module is cuda-optimized and vectorized (except for one small part)
 # setup:
 
-__version__ = "1.0.0.003"
+__version__ = "1.0.0.004"
 
 # changelog should be viewed using print(analysis.regression.__changelog__)
 __changelog__ = """
+    1.0.0.004:
+        - bug fixes
+        - fixed changelog
     1.0.0.003:
         - bug fixes
     1.0.0.002:
         -Added more parameters to log, exponential, polynomial
         -Added SigmoidalRegKernelArthur, because Arthur apparently needs
             to train the scaling and shifting of sigmoids
     1.0.0.001:
         -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
         -already vectorized (except for polynomial generation) and CUDA-optimized
 """
 
 __author__ = (
@@ -40,6 +43,8 @@ __all__ = [
     'CustomTrain'
 ]
 
+import torch
+
 global device
 device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -2,7 +2,7 @@ import setuptools
 
 setuptools.setup(
     name="analysis", # Replace with your own username
-    version="1.0.0.007",
+    version="1.0.0.008",
     author="The Titan Scouting Team",
     author_email="titanscout2022@gmail.com",
     description="analysis package developed by Titan Scouting for The Red Alliance",
@@ -10,6 +10,14 @@ setuptools.setup(
     long_description_content_type="text/markdown",
     url="https://github.com/titanscout2022/tr2022-strategy",
     packages=setuptools.find_packages(),
+    install_requires=[
+        "numba",
+        "numpy",
+        "scipy",
+        "scikit-learn",
+        "six",
+        "matplotlib"
+    ],
     license = "GNU General Public License v3.0",
     classifiers=[
         "Programming Language :: Python :: 3",

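With install_requires in place, installing the package pulls the listed dependencies in automatically. A minimal check (a sketch, assuming the package was installed with pip from the directory containing this setup.py; torch is deliberately absent from install_requires, so regression.py additionally needs it installed separately):

# Sketch: after installing the analysis package, its version metadata should
# match this commit.
from analysis import analysis as an

print(an.__version__)   # expected: "1.1.13.006"

# regression needs torch, which install_requires intentionally leaves out:
from analysis import regression as reg
print(reg.__version__)  # expected: "1.0.0.004"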
View File

@@ -0,0 +1,59 @@
+import data as d
+from analysis import analysis as an
+import pymongo
+import operator
+
+def load_config(file):
+    config_vector = {}
+    file = an.load_csv(file)
+    for line in file[1:]:
+        config_vector[line[0]] = line[1:]
+    return (file[0][0], config_vector)
+
+def get_metrics_processed_formatted(apikey, competition):
+    client = pymongo.MongoClient(apikey)
+    db = client.data_scouting
+    mdata = db.teamlist
+    x=mdata.find_one({"competition":competition})
+    out = {}
+    for i in x:
+        try:
+            out[int(i)] = d.get_team_metrics_data(apikey, competition, int(i))
+        except:
+            pass
+    return out
+
+def main():
+    apikey = an.load_csv("keys.txt")[0][0]
+    tbakey = an.load_csv("keys.txt")[1][0]
+    competition, config = load_config("config.csv")
+    metrics = get_metrics_processed_formatted(apikey, competition)
+
+    elo = {}
+    gl2 = {}
+
+    for team in metrics:
+        elo[team] = metrics[team]["metrics"]["elo"]["score"]
+        gl2[team] = metrics[team]["metrics"]["gl2"]["score"]
+
+    elo = {k: v for k, v in sorted(elo.items(), key=lambda item: item[1])}
+    gl2 = {k: v for k, v in sorted(gl2.items(), key=lambda item: item[1])}
+
+    for team in elo:
+        print("teams sorted by elo:")
+        print("" + str(team) + " | " + str(elo[team]))
+
+    print("*"*25)
+
+    for team in gl2:
+        print("teams sorted by glicko2:")
+        print("" + str(team) + " | " + str(gl2[team]))
+
+main()
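The ranking step sorts teams by raw score with sorted(), which is ascending, so the strongest team prints last. A standalone sketch of that step with placeholder scores (the real script pulls them from MongoDB via get_metrics_processed_formatted):

# Sketch: placeholder Elo scores stand in for the MongoDB-backed metrics.
elo = {2022: 1391.5, 16: 1520.0, 254: 1710.3}

elo = {k: v for k, v in sorted(elo.items(), key=lambda item: item[1])}

print("teams sorted by elo:")
for team in elo:
    print(str(team) + " | " + str(elo[team]))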

View File

@@ -208,8 +208,6 @@ def metricsloop(tbakey, apikey, competition, timestamp): # listener based metric
     matches = d.pull_new_tba_matches(tbakey, competition, timestamp)
 
-    return_vector = {}
-
     red = {}
     blu = {}