48 Commits

Author SHA1 Message Date
Arthur Lu
93091b6bd2 appeased pylint in config.py attr lookup 2022-03-31 02:18:30 +00:00
Arthur Lu
0024a94f4e added file logging with default,
added basic progress bars for each module
2022-03-30 04:53:40 +00:00
Arthur Lu
5885224231 removed match printing,
CLI args use argparse

Signed-off-by: Arthur Lu <learthurgo@gmail.com>
2022-03-29 23:09:37 +00:00
Arthur Lu
64ea7c227c removed commented code
Signed-off-by: Arthur Lu <learthurgo@gmail.com>
2022-03-29 22:23:24 +00:00
Arthur Lu
ddf6faeecf fixed metrics processing ordering,
added metrics logging
2022-03-29 21:15:24 +00:00
Arthur Lu
b4766d1b3e fixed Module template __init__ definition
Signed-off-by: Arthur Lu <learthurgo@gmail.com>
2022-03-29 16:49:38 +00:00
Arthur Lu
e04245952a merged data and pull functions into Client class,
removed pull.py dep.py,
modified existing code to work with new Client class
2022-03-29 05:48:39 +00:00
Arthur Lu
2ebaddb92c updated usage 2022-03-29 04:44:59 +00:00
Arthur Lu
8b09e155dc updated changelog 2022-03-29 04:42:26 +00:00
Arthur Lu
5ca474d158 finished removing socket functionality 2022-03-29 04:39:52 +00:00
Arthur Lu
e7a8a259fc finished removing daemon functionality 2022-03-29 04:35:01 +00:00
Arthur Lu
5553e3dddf fixed CLI options,
implemented better config attr search method,
fixed imports
2022-03-29 04:28:09 +00:00
Arthur Lu
0212e6b2ca pylint now uses tab indent 2022-03-29 04:15:47 +00:00
Arthur Lu
14f8901803 removed unnecessary imports 2022-03-28 23:22:42 +00:00
Arthur Lu
a5f9e55cf4 fixed build scripts 2022-03-28 23:15:13 +00:00
Arthur Lu
34f0b3f10c removed: daemonization,
socket messaging

added: CLI option to specify config file

Not working, requires data.py changes in competition branch
2022-03-28 22:42:04 +00:00
Arthur Lu
6b070c7b08 fixed merge changes 2022-03-15 05:31:51 +00:00
Arthur Lu
9279311664 Merge branch 'master' into superscript-v1 2022-03-15 05:27:11 +00:00
Dev Singh
de4d3d4967 Update README.md 2021-10-21 14:21:20 -05:00
Arthur Lu
d56411253c fixed badge url 2021-08-26 18:20:11 -07:00
Arthur Lu
c415225afe Update release badge 2021-08-26 18:11:25 -07:00
Arthur Lu
d684813ee0 Merge pull request #10 from titanscouting/automate-build
Automate build
2021-06-09 14:58:21 -07:00
Arthur Lu
26079f3180 fixed pathing for build-CLI.*
added temp directory to gitignore
2021-04-27 07:26:14 +00:00
Arthur Lu
99e722c400 removed ThreadPoolExecutor import 2021-04-25 06:05:33 +00:00
Arthur Lu
f5a0e0fe8c added sample build-cli workflow 2021-04-25 03:51:01 +00:00
Arthur Lu
28e423942f added .gitattributes 2021-04-15 19:41:10 +00:00
Arthur Lu
8977f8c277 added compiled binaries with no file endings
to gitignore
2021-04-13 04:05:46 +00:00
Arthur Lu
2b0f718aa5 removed compiled binaries
added compiled binaries in /dist/ to gitignore
2021-04-13 04:03:07 +00:00
Arthur Lu
30469a3211 removed matplotlib import
removed plotting pit analysis
fixed warning suppression for win exe
superscript v 0.8.6
2021-04-12 15:13:54 -07:00
Arthur Lu
391d4e1996 created batch script for windows compilation 2021-04-12 14:39:00 -07:00
Arthur Lu
224f64e8b7 better fix for devcontainer.json 2021-04-12 06:30:21 +00:00
Arthur Lu
aa7d7ca927 quick patch for devcontainer.json 2021-04-12 06:27:50 +00:00
Arthur Lu
d10c16d483 superscript v 0.8.5 2021-04-10 06:08:18 +00:00
Arthur Lu
f211d00f2d superscript v 0.8.4 2021-04-09 23:45:16 +00:00
Arthur Lu
69c707689b superscript v 0.8.3 2021-04-03 20:47:45 +00:00
Arthur Lu
d2f9c802b3 built and verified threading fixes 2021-04-02 22:04:06 +00:00
Arthur Lu
99e28f5e83 fixed .gitignore
added build-CLI script
fixed threading in superscript
2021-04-02 21:58:35 +00:00
Arthur Lu
18dbc174bd deleted config.json
changed superscript config lookup to relative path
added additional requirements to requirements.txt
added build spec file for superscript
2021-04-02 21:35:05 +00:00
Arthur Lu
79689d69c8 fixed spelling in default config,
added config to git ignore
2021-04-02 01:28:25 +00:00
Dev Singh
80c3f1224b Merge pull request #3 from titanscouting/superscript-main
Merge initial changes
2021-04-01 13:40:29 -05:00
Dev Singh
960a1b3165 fix ut and file structure 2021-04-01 13:38:53 -05:00
Arthur Lu
89fcd366d3 Merge branch 'master' into superscript-main 2021-04-01 11:34:44 -07:00
Dev Singh
79cde44108 Create SECURITY.md 2021-04-01 13:11:38 -05:00
Dev Singh
2b896db9a9 Create MAINTAINERS 2021-04-01 13:11:22 -05:00
Dev Singh
483897c011 Merge pull request #1 from titanscouting/add-license-1
Create LICENSE
2021-04-01 13:11:03 -05:00
Dev Singh
9287d98fe2 Create LICENSE 2021-04-01 13:10:50 -05:00
Dev Singh
991751a340 Create CONTRIBUTING.md 2021-04-01 13:10:14 -05:00
Dev Singh
9d2476b5eb Create README.md 2021-04-01 13:09:18 -05:00
26 changed files with 612 additions and 690 deletions

.devcontainer/devcontainer.json

@@ -1,7 +1,7 @@
 {
 	"name": "TRA Analysis Development Environment",
 	"build": {
-		"dockerfile": "Dockerfile"
+		"dockerfile": "Dockerfile",
 	},
 	"settings": {
 		"terminal.integrated.shell.linux": "/bin/bash",
@@ -9,6 +9,7 @@
 		"python.linting.enabled": true,
 		"python.linting.pylintEnabled": true,
 		"python.linting.pylintPath": "/usr/local/bin/pylint",
+		"python.linting.pylintArgs": ["--indent-string", "\t"],
 		"python.testing.pytestPath": "/usr/local/bin/pytest",
 		"editor.tabSize": 4,
 		"editor.insertSpaces": false
@@ -19,4 +20,4 @@
 		"waderyan.gitblame"
 	],
 	"postCreateCommand": ""
-}
+}
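Note: the added pylintArgs entry matches commit 0212e6b2ca ("pylint now uses tab indent") and the editor settings above (editor.insertSpaces false), so pylint accepts the repository's tab indentation instead of reporting bad-indentation warnings.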

requirements.txt

@@ -1,14 +1,11 @@
cerberus
dnspython
numpy
pandas
pyinstaller
pylint
pymongo
pyparsing
pytest
python-daemon
pyzmq
requests
scikit-learn
scipy

.github/ISSUE_TEMPLATE/bug_report.md (new file)

@@ -0,0 +1,38 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.

.github/ISSUE_TEMPLATE/feature_request.md (new file)

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

.github/workflows/build-cli.yml (new file)

@@ -0,0 +1,35 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Build Superscript Linux

on:
  release:
    types: [published, edited]

jobs:
  generate:
    name: Build Linux
    runs-on: ubuntu-latest
    steps:
      - name: Checkout master
        uses: actions/checkout@master
      - name: Install Dependencies
        run: pip install -r requirements.txt
        working-directory: src/
      - name: Give Execute Permission
        run: chmod +x build-CLI.sh
        working-directory: build/
      - name: Build Binary
        run: ./build-CLI.sh
        working-directory: build/
      - name: Copy Binary to Root Dir
        run: cp superscript ..
        working-directory: dist/
      - name: Upload Release Asset
        uses: svenstaro/upload-release-action@v2
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          file: superscript
          asset_name: superscript
          tag: ${{ github.ref }}

.github/workflows/superscript-unit.yml (new file)

@@ -0,0 +1,34 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Superscript Unit Tests

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [3.7, 3.8]
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          if [ -f src/requirements.txt ]; then pip install -r src/requirements.txt; fi
      - name: Test with pytest
        run: |
          pytest test/

.gitignore

@@ -16,6 +16,3 @@
**/*.log
**/errorlog.txt
/dist/*
slurm-tra-superscript.out
config*.json

build/build-CLI.bat (new file)

@@ -0,0 +1,5 @@
set pathtospec="superscript.spec"
set pathtodist="../dist/"
set pathtowork="temp/"
pyinstaller --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%

build/build-CLI.sh (new file)

@@ -0,0 +1,5 @@
pathtospec="superscript.spec"
pathtodist="../dist/"
pathtowork="temp/"
pyinstaller --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}

build/superscript.spec (new file)

@@ -0,0 +1,35 @@
# -*- mode: python ; coding: utf-8 -*-

block_cipher = None

a = Analysis(
	['../src/superscript.py'],
	pathex=[],
	binaries=[],
	datas=[],
	hiddenimports=['dnspython', 'sklearn.utils._weight_vector', 'sklearn.utils._typedefs', 'sklearn.neighbors._partition_nodes', 'requests'],
	hookspath=[],
	hooksconfig={},
	runtime_hooks=[],
	excludes=[],
	win_no_prefer_redirects=False,
	win_private_assemblies=False,
	cipher=block_cipher,
	noarchive=False
)

pyz = PYZ(a.pure, a.zipped_data,
	cipher=block_cipher)

exe = EXE(pyz,
	a.scripts,
	a.binaries,
	a.zipfiles,
	a.datas,
	[('W ignore', None, 'OPTION')],
	name='superscript',
	debug=False,
	bootloader_ignore_signals=False,
	strip=False,
	upx=True,
	upx_exclude=[],
	runtime_tmpdir=None,
	console=True )
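Note: the hiddenimports list forces PyInstaller to bundle modules its static import analysis commonly misses (dnspython and sklearn's compiled submodules), and the [('W ignore', None, 'OPTION')] entry passes -W ignore to the bundled interpreter to silence warnings in the frozen binary, presumably the "fixed warning suppression for win exe" change noted in the commit log.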

data.py (deleted)

@@ -1,218 +0,0 @@
from calendar import c
import requests
import pull
import pandas as pd
import json

def pull_new_tba_matches(apikey, competition, last_match):
	api_key= apikey
	x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
	json = x.json()
	out = []
	for i in json:
		if i["actual_time"] != None and i["comp_level"] == "qm" and i["match_number"] > last_match :
			out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
	out.sort(key=lambda x: x['match'])
	return out

def pull_new_tba_matches_manual(apikey, competition, cutoff):
	filename = competition+"-wins.json"
	with open(filename, 'r') as f:
		data = json.load(f)
	return data

def get_team_match_data(client, competition, team_num):
	db = client.data_scouting
	mdata = db.matchdata
	out = {}
	for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
		out[i['match']] = i['data']
	return pd.DataFrame(out)

def clear_metrics(client, competition):
	db = client.data_processing
	data = db.team_metrics
	data.delete_many({competition: competition})
	return True

def get_team_pit_data(client, competition, team_num):
	db = client.data_scouting
	mdata = db.pitdata
	out = {}
	return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]

def get_team_metrics_data(client, competition, team_num):
	db = client.data_processing
	mdata = db.team_metrics
	temp = mdata.find_one({"team": team_num})
	if temp != None:
		if competition in temp['metrics'].keys():
			temp = temp['metrics'][competition]
		else :
			temp = None
	else:
		temp = None
	return temp

def get_match_data_formatted(client, competition):
	teams_at_comp = pull.get_teams_at_competition(competition)
	out = {}
	for team in teams_at_comp:
		try:
			out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict())
		except:
			pass
	return out

def get_metrics_data_formatted(client, competition):
	teams_at_comp = pull.get_teams_at_competition(competition)
	out = {}
	for team in teams_at_comp:
		try:
			out[int(team)] = get_team_metrics_data(client, competition, int(team))
		except:
			pass
	return out

def get_pit_data_formatted(client, competition):
	x=requests.get("https://scouting.titanrobotics2022.com/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
	x = x.json()
	x = x['data']
	x = x.keys()
	out = {}
	for i in x:
		try:
			out[int(i)] = get_team_pit_data(client, competition, int(i))
		except:
			pass
	return out

def get_pit_variable_data(client, competition):
	db = client.data_processing
	mdata = db.team_pit
	out = {}
	return mdata.find()

def get_pit_variable_formatted(client, competition):
	temp = get_pit_variable_data(client, competition)
	out = {}
	for i in temp:
		out[i["variable"]] = i["data"]
	return out

def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
	db = client[dbname]
	mdata = db[colname]
	mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)

def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
	db = client[dbname]
	mdata = db[colname]
	mdata.update_one({"team": team_num}, {"$set": {"metrics.{}".format(competition): data}}, upsert=True)

def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
	db = client[dbname]
	mdata = db[colname]
	mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)

def get_analysis_flags(client, flag):
	db = client.data_processing
	mdata = db.flags
	return mdata.find_one({"_id": "2022"})

def set_analysis_flags(client, flag, data):
	db = client.data_processing
	mdata = db.flags
	return mdata.update_one({"_id": "2022"}, {"$set": data})

def unkeyify_2l(layered_dict):
	out = {}
	for i in layered_dict.keys():
		add = []
		sortkey = []
		for j in layered_dict[i].keys():
			add.append([j,layered_dict[i][j]])
		add.sort(key = lambda x: x[0])
		out[i] = list(map(lambda x: x[1], add))
	return out

def get_previous_time(client):
	previous_time = get_analysis_flags(client, "latest_update")
	if previous_time == None:
		set_analysis_flags(client, "latest_update", 0)
		previous_time = 0
	else:
		previous_time = previous_time["latest_update"]
	return previous_time

def set_current_time(client, current_time):
	set_analysis_flags(client, "latest_update", {"latest_update":current_time})

def get_database_config(client):
	remote_config = get_analysis_flags(client, "config")
	return remote_config["config"] if remote_config != None else None

def set_database_config(client, config):
	set_analysis_flags(client, "config", {"config": config})

def load_match(client, competition):
	return get_match_data_formatted(client, competition)

def load_metric(client, competition, match, group_name, metrics):
	group = {}
	for team in match[group_name]:
		db_data = get_team_metrics_data(client, competition, team)
		if db_data == None:
			gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
			group[team] = {"gl2": gl2}
		else:
			metrics = db_data
			gl2 = metrics["gl2"]
			group[team] = {"gl2": gl2}
	return group

def load_pit(client, competition):
	return get_pit_data_formatted(client, competition)

def push_match(client, competition, results):
	for team in results:
		push_team_tests_data(client, competition, team, results[team])

def push_metric(client, competition, metric):
	for team in metric:
		push_team_metrics_data(client, competition, team, metric[team])

def push_pit(client, competition, pit):
	for variable in pit:
		push_team_pit_data(client, competition, variable, pit[variable])

def check_new_database_matches(client, competition):
	return True
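For reference, unkeyify_2l (carried over into the new Client class below) collapses a {variable: {match: value}} mapping into per-variable value lists ordered by match. A minimal illustration with hypothetical data:

layered = {"balls-scored": {3: 5, 1: 2, 2: 4}}
# each inner dict is sorted by key and reduced to its values:
# unkeyify_2l(layered) == {"balls-scored": [2, 4, 5]}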

dep.py (deleted)

@@ -1,132 +0,0 @@
# contains deprecated functions, not to be used unless necessary!
import json

sample_json = """
{
	"persistent":{
		"key":{
			"database":"",
			"tba":"",
			"tra":{
				"CLIENT_ID":"",
				"CLIENT_SECRET":"",
				"url": ""
			}
		},
		"config-preference":"local",
		"synchronize-config":false
	},
	"variable":{
		"max-threads":0.5,
		"team":"",
		"event-delay":false,
		"loop-delay":0,
		"reportable":true,
		"teams":[
		],
		"modules":{
			"match":{
				"tests":{
					"balls-blocked":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-collected":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-lower-teleop":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-lower-auto":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-started":[
						"basic_stats",
						"historical_analyss",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-upper-teleop":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-upper-auto":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					]
				}
			},
			"metric":{
				"tests":{
					"gl2":{
						"score":1500,
						"rd":250,
						"vol":0.06
					},
				}
			},
			"pit":{
				"tests":{
					"wheel-mechanism":true,
					"low-balls":true,
					"high-balls":true,
					"wheel-success":true,
					"strategic-focus":true,
					"climb-mechanism":true,
					"attitude":true
				}
			}
		}
	}
}
"""

def load_config(path, config_vector):
	try:
		f = open(path, "r")
		config_vector.update(json.load(f))
		f.close()
		return 0
	except:
		f = open(path, "w")
		f.write(sample_json)
		f.close()
		return 1
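load_config returns 0 when the file at path parses as JSON, and otherwise writes sample_json to that path and returns 1. A minimal usage sketch (hypothetical path):

config = {}
if load_config("config.json", config) == 1:
	print("wrote a sample config.json; fill in the keys and rerun")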

pull.py (deleted)

@@ -1,63 +0,0 @@
import requests

from exceptions import APIError
from dep import load_config

url = "https://scouting.titanrobotics2022.com"
config_tra = {}
load_config("config.json", config_tra)
trakey = config_tra['persistent']['key']['tra']

def get_team_competition():
	endpoint = '/api/fetchTeamCompetition'
	params = {
		"CLIENT_ID": trakey['CLIENT_ID'],
		"CLIENT_SECRET": trakey['CLIENT_SECRET']
	}
	response = requests.request("GET", url + endpoint, params=params)
	json = response.json()
	if json['success']:
		return json['competition']
	else:
		raise APIError(json)

def get_team():
	endpoint = '/api/fetchTeamCompetition'
	params = {
		"CLIENT_ID": trakey['CLIENT_ID'],
		"CLIENT_SECRET": trakey['CLIENT_SECRET']
	}
	response = requests.request("GET", url + endpoint, params=params)
	json = response.json()
	if json['success']:
		return json['team']
	else:
		raise APIError(json)

def get_team_match_data(competition, team_num):
	endpoint = '/api/fetchAllTeamMatchData'
	params = {
		"competition": competition,
		"teamScouted": team_num,
		"CLIENT_ID": trakey['CLIENT_ID'],
		"CLIENT_SECRET": trakey['CLIENT_SECRET']
	}
	response = requests.request("GET", url + endpoint, params=params)
	json = response.json()
	if json['success']:
		return json['data'][team_num]
	else:
		raise APIError(json)

def get_teams_at_competition(competition):
	endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
	params = {
		"competition": competition,
		"CLIENT_ID": trakey['CLIENT_ID'],
		"CLIENT_SECRET": trakey['CLIENT_SECRET']
	}
	response = requests.request("GET", url + endpoint, params=params)
	json = response.json()
	if json['success']:
		return list(json['data'].keys())
	else:
		raise APIError(json)
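These endpoints are not dropped outright: they reappear as methods in the "API implementations" section of the new src/data.py Client class below, which takes CLIENT_ID/CLIENT_SECRET and the base url from the loaded config instead of calling load_config at import time.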

requirements.txt (deleted)

@@ -1,15 +0,0 @@
cerberus
dnspython
numpy
pandas
pyinstaller
pylint
pymongo
pyparsing
python-daemon
pyzmq
requests
scikit-learn
scipy
six
tra-analysis

src/config.py

@@ -2,8 +2,6 @@ import json
 from exceptions import ConfigurationError
 from cerberus import Validator
 
-from data import set_database_config, get_database_config
 
 class Configuration:
 
 	path = None
@@ -185,33 +183,24 @@ class Configuration:
 		if not isValidated:
 			raise ConfigurationError("config validation error: " + v.errors)
 
-	def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE
-		if name == "persistent":
-			return self.config["persistent"]
-		elif name == "key":
-			return self.config["persistent"]["key"]
-		elif name == "database":
-			# soon to be deprecated
-			return self.config["persistent"]["key"]["database"]
-		elif name == "tba":
-			return self.config["persistent"]["key"]["tba"]
-		elif name == "tra":
-			return self.config["persistent"]["key"]["tra"]
-		elif name == "priority":
-			return self.config["persistent"]["config-preference"]
-		elif name == "sync":
-			return self.config["persistent"]["synchronize-config"]
-		elif name == "variable":
-			return self.config["variable"]
-		elif name == "event_delay":
-			return self.config["variable"]["event-delay"]
-		elif name == "loop_delay":
-			return self.config["variable"]["loop-delay"]
-		elif name == "competition":
-			return self.config["variable"]["competition"]
-		elif name == "modules":
-			return self.config["variable"]["modules"]
-		else:
-			return None
+	def __getattr__(self, name): # better hashed lookup method for common multikey-value paths, TYPE UNSAFE
+		attr_lookup = {
+			"persistent": self.config["persistent"],
+			"key": self.config["persistent"]["key"],
+			"database": self.config["persistent"]["key"]["database"],
+			"tba": self.config["persistent"]["key"]["tba"],
+			"tra": self.config["persistent"]["key"]["tra"],
+			"priority": self.config["persistent"]["config-preference"],
+			"sync": self.config["persistent"]["synchronize-config"],
+			"variable": self.config["variable"],
+			"event_delay": self.config["variable"]["event-delay"],
+			"loop_delay": self.config["variable"]["loop-delay"],
+			"competition": self.config["variable"]["competition"],
+			"modules": self.config["variable"]["modules"]
+		}
+		try:
+			return attr_lookup[name]
+		except KeyError:
+			return None
 
 	def __getitem__(self, key):
@@ -224,14 +213,14 @@ class Configuration:
 		if sync:
 			if priority == "local" or priority == "client":
 				logger.info("config-preference set to local/client, loading local config information")
-				remote_config = get_database_config(client)
+				remote_config = client.get_database_config()
 				if remote_config != self.config["variable"]:
-					set_database_config(client, self.config["variable"])
+					client.set_database_config(self.config["variable"])
 					logger.info("database config was different and was updated")
 				# no change to config
 			elif priority == "remote" or priority == "database":
 				logger.info("config-preference set to remote/database, loading remote config information")
-				remote_config = get_database_config(client)
+				remote_config = client.get_database_config()
 				if remote_config != self.config["variable"]:
 					self.config["variable"] = remote_config
 					self.save_config()
@@ -245,7 +234,7 @@ class Configuration:
 				# no change to config
 			elif priority == "remote" or priority == "database":
 				logger.info("config-preference set to remote/database, loading database config information")
-				self.config["variable"] = get_database_config(client)
+				self.config["variable"] = client.get_database_config()
 				# change variable to match remote without updating local version
 			else:
 				raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")
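The rewritten __getattr__ swaps a dozen elif branches for one dict lookup. A standalone sketch of the same pattern (hypothetical minimal class, not the repository code):

class AttrView:

	def __init__(self, config):
		self.config = config

	def __getattr__(self, name): # only called for names not found through normal attribute lookup
		attr_lookup = {
			"key": self.config["persistent"]["key"],
			"modules": self.config["variable"]["modules"]
		}
		try:
			return attr_lookup[name]
		except KeyError:
			return None

view = AttrView({"persistent": {"key": "k"}, "variable": {"modules": ["match"]}})
print(view.key, view.modules, view.missing) # k ['match'] None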

src/data.py (new file)

@@ -0,0 +1,298 @@
import requests
import pandas as pd
import pymongo

from exceptions import APIError

class Client:

	def __init__(self, config):
		self.competition = config.competition
		self.tbakey = config.tba
		self.mongoclient = pymongo.MongoClient(config.database)
		self.trakey = config.tra

	def close(self):
		self.mongoclient.close()

	def pull_new_tba_matches(self, cutoff):
		competition = self.competition
		api_key= self.tbakey
		x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
		json = x.json()
		out = []
		for i in json:
			if i["actual_time"] != None and i["comp_level"] == "qm":
				out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
		out.sort(key=lambda x: x['match'])
		return out

	def get_team_match_data(self, team_num):
		client = self.mongoclient
		competition = self.competition
		db = client.data_scouting
		mdata = db.matchdata
		out = {}
		for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
			out[i['match']] = i['data']
		return pd.DataFrame(out)

	def get_team_metrics_data(self, team_num):
		client = self.mongoclient
		competition = self.competition
		db = client.data_processing
		mdata = db.team_metrics
		return mdata.find_one({"competition" : competition, "team": team_num})

	def get_team_pit_data(self, team_num):
		client = self.mongoclient
		competition = self.competition
		db = client.data_scouting
		mdata = db.pitdata
		return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]

	def unkeyify_2l(self, layered_dict):
		out = {}
		for i in layered_dict.keys():
			add = []
			sortkey = []
			for j in layered_dict[i].keys():
				add.append([j,layered_dict[i][j]])
			add.sort(key = lambda x: x[0])
			out[i] = list(map(lambda x: x[1], add))
		return out

	def get_match_data_formatted(self):
		teams_at_comp = self.get_teams_at_competition()
		out = {}
		for team in teams_at_comp:
			try:
				out[int(team)] = self.unkeyify_2l(self.get_team_match_data(team).transpose().to_dict())
			except:
				pass
		return out

	def get_metrics_data_formatted(self):
		competition = self.competition
		teams_at_comp = self.get_teams_at_competition()
		out = {}
		for team in teams_at_comp:
			try:
				out[int(team)] = self.get_team_metrics_data(int(team))
			except:
				pass
		return out

	def get_pit_data_formatted(self):
		client = self.mongoclient
		competition = self.competition
		x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
		x = x.json()
		x = x['data']
		x = x.keys()
		out = {}
		for i in x:
			try:
				out[int(i)] = self.get_team_pit_data(int(i))
			except:
				pass
		return out

	def get_pit_variable_data(self):
		client = self.mongoclient
		db = client.data_processing
		mdata = db.team_pit
		return mdata.find()

	def get_pit_variable_formatted(self):
		temp = self.get_pit_variable_data()
		out = {}
		for i in temp:
			out[i["variable"]] = i["data"]
		return out

	def push_team_tests_data(self, team_num, data, dbname = "data_processing", colname = "team_tests"):
		client = self.mongoclient
		competition = self.competition
		db = client[dbname]
		mdata = db[colname]
		mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)

	def push_team_metrics_data(self, team_num, data, dbname = "data_processing", colname = "team_metrics"):
		client = self.mongoclient
		competition = self.competition
		db = client[dbname]
		mdata = db[colname]
		mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)

	def push_team_pit_data(self, variable, data, dbname = "data_processing", colname = "team_pit"):
		client = self.mongoclient
		competition = self.competition
		db = client[dbname]
		mdata = db[colname]
		mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)

	def get_analysis_flags(self, flag):
		client = self.mongoclient
		db = client.data_processing
		mdata = db.flags
		return mdata.find_one({flag:{"$exists":True}})

	def set_analysis_flags(self, flag, data):
		client = self.mongoclient
		db = client.data_processing
		mdata = db.flags
		return mdata.replace_one({flag:{"$exists":True}}, data, True)

	def get_previous_time(self):
		previous_time = self.get_analysis_flags("latest_update")
		if previous_time == None:
			self.set_analysis_flags("latest_update", 0)
			previous_time = 0
		else:
			previous_time = previous_time["latest_update"]
		return previous_time

	def set_current_time(self, current_time):
		self.set_analysis_flags("latest_update", {"latest_update":current_time})

	def get_database_config(self):
		remote_config = self.get_analysis_flags("config")
		return remote_config["config"] if remote_config != None else None

	def set_database_config(self, config):
		self.set_analysis_flags("config", {"config": config})

	def load_match(self):
		return self.get_match_data_formatted()

	def load_metric(self, match, group_name, metrics):
		group = {}
		for team in match[group_name]:
			db_data = self.get_team_metrics_data(team)
			if db_data == None:
				elo = {"score": metrics["elo"]["score"]}
				gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
				ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
				group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
			else:
				metrics = db_data["metrics"]
				elo = metrics["elo"]
				gl2 = metrics["gl2"]
				ts = metrics["ts"]
				group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
		return group

	def load_pit(self):
		return self.get_pit_data_formatted()

	def push_match(self, results):
		for team in results:
			self.push_team_tests_data(team, results[team])

	def push_metric(self, metric):
		for team in metric:
			self.push_team_metrics_data(team, metric[team])

	def push_pit(self, pit):
		for variable in pit:
			self.push_team_pit_data(variable, pit[variable])

	def check_new_database_matches(self):
		return True

	#----- API implementations below -----#

	def get_team_competition(self):
		trakey = self.trakey
		url = self.trakey['url']
		endpoint = '/api/fetchTeamCompetition'
		params = {
			"CLIENT_ID": trakey['CLIENT_ID'],
			"CLIENT_SECRET": trakey['CLIENT_SECRET']
		}
		response = requests.request("GET", url + endpoint, params=params)
		json = response.json()
		if json['success']:
			return json['competition']
		else:
			raise APIError(json)

	def get_team(self):
		trakey = self.trakey
		url = self.trakey['url']
		endpoint = '/api/fetchTeamCompetition'
		params = {
			"CLIENT_ID": trakey['CLIENT_ID'],
			"CLIENT_SECRET": trakey['CLIENT_SECRET']
		}
		response = requests.request("GET", url + endpoint, params=params)
		json = response.json()
		if json['success']:
			return json['team']
		else:
			raise APIError(json)

	""" doesn't seem to be functional:
	def get_team_match_data(self, team_num):
		trakey = self.trakey
		url = self.trakey['url']
		competition = self.competition
		endpoint = '/api/fetchAllTeamMatchData'
		params = {
			"competition": competition,
			"teamScouted": team_num,
			"CLIENT_ID": trakey['CLIENT_ID'],
			"CLIENT_SECRET": trakey['CLIENT_SECRET']
		}
		response = requests.request("GET", url + endpoint, params=params)
		json = response.json()
		if json['success']:
			return json['data'][team_num]
		else:
			raise APIError(json)"""

	def get_teams_at_competition(self):
		trakey = self.trakey
		url = self.trakey['url']
		competition = self.competition
		endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
		params = {
			"competition": competition,
			"CLIENT_ID": trakey['CLIENT_ID'],
			"CLIENT_SECRET": trakey['CLIENT_SECRET']
		}
		response = requests.request("GET", url + endpoint, params=params)
		json = response.json()
		if json['success']:
			return list(json['data'].keys())
		else:
			raise APIError(json)
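A usage sketch of the new Client (hypothetical placeholder values; in superscript.py the object passed in is the loaded Configuration):

from data import Client

class StubConfig: # attribute names match what Client.__init__ reads
	competition = "2022example"                     # hypothetical event key
	tba = "TBA-API-KEY"                             # placeholder
	database = "mongodb://localhost:27017"          # placeholder MongoDB URI
	tra = {"CLIENT_ID": "", "CLIENT_SECRET": "", "url": ""}

client = Client(StubConfig())
previous_time = client.get_previous_time() # reads the latest_update flag, initializing it to 0 if absent
matches = client.load_match()              # {team: {variable: [values ordered by match]}}
client.close()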

src/interface.py

@@ -23,7 +23,7 @@ class Logger(L):
 		self.file = file
 
-		if file != None:
+		if file is not None:
 			self.targets.append(self._send_file)
 
 		if profile:

src/module.py

@@ -1,5 +1,4 @@
 import abc
-import data as d
 import signal
 import numpy as np
 from tra_analysis import Analysis as an
@@ -17,7 +16,7 @@ class Module(metaclass = abc.ABCMeta):
 			callable(subclass.run)
 		)
 
 	@abc.abstractmethod
-	def __init__(self, config, apikey, tbakey, timestamp, competition, *args, **kwargs):
+	def __init__(self, *args, **kwargs):
 		raise NotImplementedError
 
 	@abc.abstractmethod
 	def validate_config(self, *args, **kwargs):
@@ -29,20 +28,16 @@ class Module(metaclass = abc.ABCMeta):
 class Match (Module):
 
 	config = None
-	apikey = None
-	tbakey = None
 	timestamp = None
-	competition = None
+	client = None
 	data = None
 	results = None
 
-	def __init__(self, config, apikey, tbakey, timestamp, competition):
+	def __init__(self, config, timestamp, client):
 		self.config = config
-		self.apikey = apikey
-		self.tbakey = tbakey
 		self.timestamp = timestamp
-		self.competition = competition
+		self.client = client
 
 	def validate_config(self):
 		return True, ""
@@ -53,7 +48,7 @@ class Match (Module):
 		self._push_results()
 
 	def _load_data(self):
-		self.data = d.load_match(self.apikey, self.competition)
+		self.data = self.client.load_match()
 
 	def _simplestats(self, data_test):
@@ -93,7 +88,7 @@ class Match (Module):
 
 		input_vector = []
 
-		for team in data:
+		for team in tqdm(data, desc = "Match Module ", unit = " team"):
 
 			for variable in data[team]:
@@ -141,25 +136,21 @@ class Match (Module):
 
 		self.results = return_vector
 
-		d.push_match(self.apikey, self.competition, self.results)
+		self.client.push_match(self.results)
 
 class Metric (Module):
 
 	config = None
-	apikey = None
-	tbakey = None
 	timestamp = None
-	competition = None
+	client = None
 	data = None
 	results = None
 
-	def __init__(self, config, apikey, tbakey, timestamp, competition):
+	def __init__(self, config, timestamp, client):
 		self.config = config
-		self.apikey = apikey
-		self.tbakey = tbakey
 		self.timestamp = timestamp
-		self.competition = competition
+		self.client = client
 
 	def validate_config(self):
 		return True, ""
@@ -170,21 +161,27 @@ class Metric (Module):
 		self._push_results()
 
 	def _load_data(self):
-		self.last_match = d.get_analysis_flags(self.apikey, 'metrics_last_match')['metrics_last_match']
-		print("Previous last match", self.last_match)
-		self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.last_match)
+		self.data = self.client.pull_new_tba_matches(self.timestamp)
 
 	def _process_data(self):
 
 		self.results = {}
-		self.match = self.last_match
+		elo_N = self.config["tests"]["elo"]["N"]
+		elo_K = self.config["tests"]["elo"]["K"]
 
 		matches = self.data
 		red = {}
 		blu = {}
 
-		for match in tqdm(matches, desc="Metrics"): # grab matches and loop through each one
-			self.match = max(self.match, int(match['match']))
-			red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"]) # get the current ratings for red
-			blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"]) # get the current ratings for blue
+		for match in tqdm(matches, desc = "Metric Module ", unit = " match"):
+			red = self.client.load_metric(match, "red", self.config["tests"])
+			blu = self.client.load_metric(match, "blue", self.config["tests"])
 
+			elo_red_total = 0
+			elo_blu_total = 0
 			gl2_red_score_total = 0
 			gl2_blu_score_total = 0
@@ -195,83 +192,90 @@ class Metric (Module):
 			gl2_red_vol_total = 0
 			gl2_blu_vol_total = 0
 
-			for team in red: # for each team in red, add up gl2 score components
+			for team in red:
+				elo_red_total += red[team]["elo"]["score"]
 				gl2_red_score_total += red[team]["gl2"]["score"]
 				gl2_red_rd_total += red[team]["gl2"]["rd"]
 				gl2_red_vol_total += red[team]["gl2"]["vol"]
 
-			for team in blu: # for each team in blue, add up gl2 score components
+			for team in blu:
+				elo_blu_total += blu[team]["elo"]["score"]
 				gl2_blu_score_total += blu[team]["gl2"]["score"]
 				gl2_blu_rd_total += blu[team]["gl2"]["rd"]
 				gl2_blu_vol_total += blu[team]["gl2"]["vol"]
 
+			red_elo = {"score": elo_red_total / len(red)}
+			blu_elo = {"score": elo_blu_total / len(blu)}
-			red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)} # average the scores by dividing by 3
-			blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)} # average the scores by dividing by 3
+			red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
+			blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
 
-			if match["winner"] == "red": # if red won, set observations to {"red": 1, "blu": 0}
+			if match["winner"] == "red":
 				observations = {"red": 1, "blu": 0}
-			elif match["winner"] == "blue": # if blue won, set observations to {"red": 0, "blu": 1}
+			elif match["winner"] == "blue":
 				observations = {"red": 0, "blu": 1}
-			else: # otherwise it was a tie and observations is {"red": 0.5, "blu": 0.5}
+			else:
 				observations = {"red": 0.5, "blu": 0.5}
 
+			red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
+			blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
 
-			new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]]) # calculate new scores for gl2 for red
-			new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]]) # calculate new scores for gl2 for blue
+			new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
+			new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
 
-			red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]} # calculate gl2 deltas for red
-			blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]} # calculate gl2 deltas for blue
+			red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
+			blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
 
-			for team in red: # for each team on red, add the previous score with the delta to find the new score
+			for team in red:
+				red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
 				red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
 				red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
 				red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
 
-			for team in blu: # for each team on blue, add the previous score with the delta to find the new score
+			for team in blu:
+				blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
 				blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
 				blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
 				blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
 
 			temp_vector = {}
-			temp_vector.update(red) # update the team's score with the temporay vector
+			temp_vector.update(red)
 			temp_vector.update(blu)
 
 			self.results[match['match']] = temp_vector
 
-			d.push_metric(self.apikey, self.competition, temp_vector) # push new scores to db
-		print("New last match", self.match)
-		d.set_analysis_flags(self.apikey, 'metrics_last_match', {'metrics_last_match': self.match})
+			self.client.push_metric(temp_vector)
 
 	def _push_results(self):
 		pass
 
 class Pit (Module):
 
 	config = None
-	apikey = None
-	tbakey = None
 	timestamp = None
-	competition = None
+	client = None
 	data = None
 	results = None
 
-	def __init__(self, config, apikey, tbakey, timestamp, competition):
+	def __init__(self, config, timestamp, client):
		self.config = config
-		self.apikey = apikey
-		self.tbakey = tbakey
 		self.timestamp = timestamp
-		self.competition = competition
+		self.client = client
 
 	def validate_config(self):
 		return True, ""
@@ -282,12 +286,12 @@ class Pit (Module):
 		self._push_results()
 
 	def _load_data(self):
-		self.data = d.load_pit(self.apikey, self.competition)
+		self.data = self.client.load_pit()
 
 	def _process_data(self):
 
 		tests = self.config["tests"]
 		return_vector = {}
-		for team in self.data:
+		for team in tqdm(self.data, desc = "Pit Module ", unit = " team"):
 			for variable in self.data[team]:
 				if variable in tests:
 					if not variable in return_vector:
@@ -297,7 +301,7 @@ class Pit (Module):
 		self.results = return_vector
 
 	def _push_results(self):
-		d.push_pit(self.apikey, self.competition, self.results)
+		self.client.push_pit(self.results)
 
 class Rating (Module):
 	pass
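After this change every module shares one shape: construct with (config, timestamp, client), then validate_config() and run(), with run() chaining _load_data, _process_data and _push_results. A minimal conforming sketch (hypothetical module, not part of the diff):

class Example(Module):

	def __init__(self, config, timestamp, client):
		self.config = config
		self.timestamp = timestamp
		self.client = client

	def validate_config(self):
		return True, ""

	def run(self):
		self._load_data()
		self._process_data()
		self._push_results()

	def _load_data(self):
		self.data = self.client.load_match()

	def _process_data(self):
		self.results = {team: len(self.data[team]) for team in self.data} # placeholder statistic

	def _push_results(self):
		self.client.push_match(self.results)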

src/superscript.py

@@ -9,8 +9,7 @@ __version__ = "1.0.0"
 __changelog__ = """changelog:
 	1.0.0:
-		- superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems
-		- linux superscript daemon has integrated websocket output to monitor progress/status remotely
-		- linux daemon now sends stderr to errorlog.log
+		- removed daemon and socket functionality, user can implement using external software
 		- added verbose option to linux superscript to allow for interactive output
-		- moved pymongo import to superscript.py
 		- added profile option to linux superscript to profile runtime of script
@@ -149,19 +148,13 @@ __author__ = (
 # imports:
 
-import os, sys, time
-import pymongo # soon to be deprecated
-import traceback
-import warnings
+import argparse, sys, time, traceback, warnings
 
 from config import Configuration, ConfigurationError
-from data import get_previous_time, set_current_time, check_new_database_matches, clear_metrics
+from data import Client
 from interface import Logger
 from module import Match, Metric, Pit
-import zmq
 
-config_path = "config.json"
-
-def main(logger, verbose, profile, debug, socket_send = None):
+def main(logger, verbose, profile, debug, config_path):
 
 	def close_all():
 		if "client" in locals():
@@ -180,51 +173,42 @@ def main(logger, verbose, profile, debug, socket_send = None):
 		loop_start = time.time()
 
 		logger.info("current time: " + str(loop_start))
-		socket_send("current time: " + str(loop_start))
 
 		config = Configuration(config_path)
 
 		logger.info("found and loaded config at <" + config_path + ">")
-		socket_send("found and loaded config at <" + config_path + ">")
 
-		apikey, tbakey = config.database, config.tba
-		logger.info("found and loaded database and tba keys")
-		socket_send("found and loaded database and tba keys")
 
-		client = pymongo.MongoClient(apikey)
+		client = Client(config)
 
 		logger.info("established connection to database")
-		socket_send("established connection to database")
 
-		previous_time = get_previous_time(client)
+		previous_time = client.get_previous_time()
 
 		logger.info("analysis backtimed to: " + str(previous_time))
-		socket_send("analysis backtimed to: " + str(previous_time))
 
 		config.resolve_config_conflicts(logger, client)
 
 		config_modules, competition = config.modules, config.competition
+		client.competition = competition
 
 		for m in config_modules:
 			if m in modules:
 				start = time.time()
-				current_module = modules[m](config_modules[m], client, tbakey, previous_time, competition)
+				current_module = modules[m](config_modules[m], previous_time, client)
 				valid = current_module.validate_config()
 				if not valid:
 					continue
 				current_module.run()
 				logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
-				socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
 				if debug:
 					logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
 
-		set_current_time(client, loop_start)
+		client.set_current_time(loop_start)
 		close_all()
 
 		logger.info("closed threads and database client")
 		logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
-		socket_send("closed threads and database client")
-		socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
 
 		if profile:
 			return 0
@@ -235,33 +219,27 @@ def main(logger, verbose, profile, debug, socket_send = None):
 			event_delay = config["variable"]["event-delay"]
 			if event_delay:
 				logger.info("loop delayed until database returns new matches")
-				socket_send("loop delayed until database returns new matches")
 				new_match = False
 				while not new_match:
 					time.sleep(1)
-					new_match = check_new_database_matches(client, competition)
+					new_match = client.check_new_database_matches()
 				logger.info("database returned new matches")
-				socket_send("database returned new matches")
 			else:
 				loop_delay = float(config["variable"]["loop-delay"])
 				remaining_time = loop_delay - (time.time() - loop_start)
 				if remaining_time > 0:
 					logger.info("loop delayed by " + str(remaining_time) + " seconds")
-					socket_send("loop delayed by " + str(remaining_time) + " seconds")
 					time.sleep(remaining_time)
 
 	except KeyboardInterrupt:
 		close_all()
 		logger.info("detected KeyboardInterrupt, exiting")
-		socket_send("detected KeyboardInterrupt, exiting")
 		return 0
 
 	except ConfigurationError as e:
 		str_e = "".join(traceback.format_exception(e))
 		logger.error("encountered a configuration error: " + str(e))
 		logger.error(str_e)
-		socket_send("encountered a configuration error: " + str(e))
-		socket_send(str_e)
 		close_all()
 		return 1
@@ -269,134 +247,56 @@ def main(logger, verbose, profile, debug, socket_send = None):
 		str_e = "".join(traceback.format_exception(e))
 		logger.error("encountered an exception while running")
 		logger.error(str_e)
-		socket_send("encountered an exception while running")
-		socket_send(str_e)
 		close_all()
 		return 1
 
-def start(pid_path, verbose, profile, debug):
+def start(verbose, profile, debug, config_path, log_path):
+
+	logger = Logger(verbose, profile, debug, file = log_path)
 
 	if profile:
 
-		def send(msg):
-			pass
-
-		logger = Logger(verbose, profile, debug)
-
 		import cProfile, pstats, io
 		profile = cProfile.Profile()
 		profile.enable()
-		exit_code = main(logger, verbose, profile, debug, socket_send = send)
+		exit_code = main(logger, verbose, profile, debug, config_path)
 		profile.disable()
-		f = open("profile.txt", 'w+')
-		ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
+		f = open("profile.txt", "w+")
+		ps = pstats.Stats(profile, stream = f).sort_stats("cumtime")
 		ps.print_stats()
 		sys.exit(exit_code)
 
 	elif verbose:
 
-		def send(msg):
-			pass
-
-		logger = Logger(verbose, profile, debug)
-
-		exit_code = main(logger, verbose, profile, debug, socket_send = send)
+		exit_code = main(logger, verbose, profile, debug, config_path)
 		sys.exit(exit_code)
 
 	elif debug:
 
-		def send(msg):
-			pass
-
-		logger = Logger(verbose, profile, debug)
-
-		exit_code = main(logger, verbose, profile, debug, socket_send = send)
+		exit_code = main(logger, verbose, profile, debug, config_path)
 		sys.exit(exit_code)
 
 	else:
 
-		logfile = "logfile.log"
-		f = open(logfile, 'w+')
-		f.close()
-		e = open('errorlog.log', 'w+')
-		with daemon.DaemonContext(
-			working_directory = os.getcwd(),
-			pidfile = pidfile.TimeoutPIDLockFile(pid_path),
-			stderr = e
-		):
-			context = zmq.Context()
-			socket = context.socket(zmq.PUB)
-			socket.bind("tcp://*:5678")
-			socket.send(b'status')
-			def send(msg):
-				socket.send(bytes("status: " + msg, "utf-8"))
-			logger = Logger(verbose, profile, debug, file = logfile)
-			exit_code = main(logger, verbose, profile, debug, socket_send = send)
-			socket.close()
-			f.close()
-			sys.exit(exit_code)
-
-def stop(pid_path):
-	try:
-		pf = open(pid_path, 'r')
-		pid = int(pf.read().strip())
-		pf.close()
-	except IOError:
-		sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n")
-		return
-	try:
-		while True:
-			os.kill(pid, SIGTERM)
-			time.sleep(0.01)
-	except OSError as err:
-		err = str(err)
-		if err.find("No such process") > 0:
-			if os.path.exists(pid_path):
-				os.remove(pid_path)
-		else:
-			traceback.print_exc(file = sys.stderr)
-			sys.exit(1)
-
-def restart(pid_path):
-	stop(pid_path)
-	start(pid_path, False, False, False)
+		pass # must be verbose, debug or profile
 
 if __name__ == "__main__":
 
-	if sys.platform.startswith("win"):
-		start(None, verbose = True)
+	parser = argparse.ArgumentParser(description = "TRA data processing application.")
+	parser.add_argument("mode", metavar = "MODE", type = str, nargs = 1, choices = ["verbose", "profile", "debug"], help = "verbose, debug, profile")
+	parser.add_argument("--config", dest = "config", default = "config.json", type = str, help = "path to config file")
+	parser.add_argument("--logfile", dest = "logfile", default = "logfile.log", type = str, help = "path to log file")
 
-	else:
-		import daemon
-		from daemon import pidfile
-		from signal import SIGTERM
-		pid_path = "tra-daemon.pid"
-		if len(sys.argv) == 2:
-			if 'start' == sys.argv[1]:
-				start(pid_path, False, False, False)
-			elif 'stop' == sys.argv[1]:
-				stop(pid_path)
-			elif 'restart' == sys.argv[1]:
-				restart(pid_path)
-			elif 'verbose' == sys.argv[1]:
-				start(None, True, False, False)
-			elif 'profile' == sys.argv[1]:
-				start(None, False, True, False)
-			elif 'debug' == sys.argv[1]:
-				start(None, False, False, True)
-			else:
-				print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
-				sys.exit(2)
-			sys.exit(0)
-		else:
-			print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
-			sys.exit(2)
+	args = parser.parse_args()
+	mode = args.mode[0]
+	config_path = args.config
+	log_path = args.logfile
+
+	if mode == "verbose":
+		start(True, False, False, config_path = config_path, log_path = log_path)
+	elif mode == "profile":
+		start(False, True, False, config_path = config_path, log_path = log_path)
+	elif mode == "debug":
+		start(False, False, True, config_path = config_path, log_path = log_path)
 
+	exit(0)
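With argparse in place the entry point behaves the same on every platform, invoked for example as python superscript.py verbose --config config.json --logfile logfile.log. The start/stop/restart daemon subcommands are gone, matching the "CLI args use argparse" and "finished removing daemon functionality" commits above.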

SLURM batch script (deleted)

@@ -1,12 +0,0 @@
#!/bin/bash
#
#SBATCH --job-name=tra-superscript
#SBATCH --output=slurm-tra-superscript.out
#SBATCH --ntasks=8
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=256
#SBATCH --mail-user=dsingh@imsa.edu
#SBATCH -p cpu-long
cd competition
python superscript.py debug

SLURM batch script, PROD variant (deleted)

@@ -1,12 +0,0 @@
#!/bin/bash
#
#SBATCH --job-name=tra-superscript
#SBATCH --output=PROD_slurm-tra-superscript.out
#SBATCH --ntasks=8
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=256
#SBATCH --mail-user=dsingh@imsa.edu
#SBATCH -p cpu-long
cd competition
python superscript.py verbose

test/test_superscript.py (new file)

@@ -0,0 +1,2 @@
def test_():
	assert 1 == 1

test/test_zmq.py (new file)

@@ -0,0 +1,14 @@
import signal
import zmq

signal.signal(signal.SIGINT, signal.SIG_DFL)

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('tcp://localhost:5678')
socket.setsockopt(zmq.SUBSCRIBE, b'status')

while True:
	message = socket.recv_multipart()
	print(f'Received: {message}')
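Note: this subscriber is the counterpart of the PUB socket the removed daemon bound at tcp://*:5678 with topic b'status' (see the deleted else branch of start() above); it sits under test/ even though superscript itself no longer emits socket output.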