Compare commits
507 Commits
Commit list: 507 commits, from 73da5fa68b (first listed) to 0d111296af (last listed). The author, date, and message columns were empty in this export, so only the commit SHAs were captured.
.devcontainer/Dockerfile (new file)
@@ -0,0 +1,2 @@
FROM python
WORKDIR ~/
.devcontainer/devcontainer.json (new file)
@@ -0,0 +1,27 @@
{
    "name": "TRA Analysis Development Environment",
    "build": {
        "dockerfile": "Dockerfile",
    },
    "settings": {
        "terminal.integrated.shell.linux": "/bin/bash",
        "python.pythonPath": "/usr/local/bin/python",
        "python.linting.enabled": true,
        "python.linting.pylintEnabled": true,
        "python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8",
        "python.formatting.blackPath": "/usr/local/py-utils/bin/black",
        "python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf",
        "python.linting.banditPath": "/usr/local/py-utils/bin/bandit",
        "python.linting.flake8Path": "/usr/local/py-utils/bin/flake8",
        "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy",
        "python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle",
        "python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle",
        "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint",
        "python.testing.pytestPath": "/usr/local/py-utils/bin/pytest"
    },
    "extensions": [
        "mhutchie.git-graph",
        "donjayamanne.jupyter",
    ],
    "postCreateCommand": "pip install -r analysis-master/requirements.txt"
}
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file)
@@ -0,0 +1,38 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]

**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]

**Additional context**
Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file)
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.github/workflows/publish-analysis.yml (vendored, new file)
@@ -0,0 +1,36 @@
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

name: Upload Analysis Package

on:
  release:
    types: [published, edited]
jobs:
  deploy:

    runs-on: ubuntu-latest

    env:
      working-directory: ./analysis-master/
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python
      uses: actions/setup-python@v2
      with:
        python-version: '3.x'
    - name: Install dependencies
      working-directory: ${{env.working-directory}}
      run: |
        python -m pip install --upgrade pip
        pip install setuptools wheel twine
    - name: Build package
      working-directory: ${{env.working-directory}}
      run: |
        python setup.py sdist bdist_wheel
    - name: Publish package to PyPI
      uses: pypa/gh-action-pypi-publish@master
      with:
        user: __token__
        password: ${{ secrets.PYPI_TOKEN }}
        packages_dir: analysis-master/dist/
.github/workflows/ut-analysis.yml (vendored, new file)
@@ -0,0 +1,38 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Analysis Unit Tests

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [3.7, 3.8]

    env:
      working-directory: ./analysis-master/

    steps:
    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install pytest
        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      working-directory: ${{ env.working-directory }}
    - name: Test with pytest
      run: |
        pytest
      working-directory: ${{ env.working-directory }}
.github/workflows/ut-superscript.yml (vendored, new file)
@@ -0,0 +1,38 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Superscript Unit Tests

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: [3.7, 3.8]

    env:
      working-directory: ./data-analysis/

    steps:
    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install pytest
        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      working-directory: ${{ env.working-directory }}
    - name: Test with pytest
      run: |
        pytest
      working-directory: ${{ env.working-directory }}
.gitignore (vendored, 41 changed lines)
@@ -1,8 +1,39 @@

benchmark_data.csv


data analysis/keys/keytemp.json
data analysis/__pycache__/analysis.cpython-37.pyc
data-analysis/keys/keytemp.json
data-analysis/__pycache__/analysis.cpython-37.pyc
apps/android/source/app/src/main/res/drawable-v24/uuh.png
apps/android/source/app/src/main/java/com/example/titanscouting/tits.java

data-analysis/analysis.cp37-win_amd64.pyd
data-analysis/analysis/analysis.c
data-analysis/analysis/analysis.cp37-win_amd64.pyd
data-analysis/analysis/build/temp.win-amd64-3.7/Release/analysis.cp37-win_amd64.exp
data-analysis/analysis/build/temp.win-amd64-3.7/Release/analysis.cp37-win_amd64.lib
data-analysis/analysis/build/temp.win-amd64-3.7/Release/analysis.obj
data-analysis/test.ipynb
data-analysis/.ipynb_checkpoints/test-checkpoint.ipynb
.vscode/settings.json
.vscode
data-analysis/arthur_pull.ipynb
data-analysis/keys.txt
data-analysis/check_for_new_matches.ipynb
data-analysis/test.ipynb
data-analysis/visualize_pit.ipynb
data-analysis/config/keys.config
analysis-master/analysis/__pycache__/
analysis-master/analysis/metrics/__pycache__/
data-analysis/__pycache__/
analysis-master/analysis.egg-info/
analysis-master/build/
analysis-master/metrics/
data-analysis/config-pop.json
data-analysis/__pycache__/
analysis-master/__pycache__/
analysis-master/.pytest_cache/
data-analysis/.pytest_cache/
analysis-master/tra_analysis.egg-info
analysis-master/tra_analysis/__pycache__
analysis-master/tra_analysis/.ipynb_checkpoints
.pytest_cache
analysis-master/tra_analysis/metrics/__pycache__
analysis-master/dist
CONTRIBUTING.md (new file)
@@ -0,0 +1,66 @@
# Contributing Guidelines

This project accepts contributions via GitHub pull requests.
This document outlines some of the
conventions on development workflow, commit message formatting, contact points,
and other resources to make it easier to get your contribution accepted.

## Certificate of Origin

By contributing to this project, you agree to the [Developer Certificate of
Origin (DCO)](https://developercertificate.org/). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution.

In order to show your agreement with the DCO, you should include at the end of the commit message
the following line: `Signed-off-by: John Doe <john.doe@example.com>`, using your real name.

This can be done easily using the [`-s`](https://github.com/git/git/blob/b2c150d3aa82f6583b9aadfecc5f8fa1c74aca09/Documentation/git-commit.txt#L154-L161) flag on `git commit`.

Visual Studio Code also has a flag to enable sign-off on commits.

If you find you have pushed a few commits without `Signed-off-by`, you can still add it afterwards. Read this for help: [fix-DCO.md](https://github.com/src-d/guide/blob/master/developer-community/fix-DCO.md).

## Support Channels

The official support channel, for both users and contributors, is:

- GitHub issues: each repository has its own list of issues.

*Before opening a new issue or submitting a new pull request, it's helpful to
search the project - it's likely that another user has already reported the
issue you're facing, or it's a known issue that we're already aware of.*


## How to Contribute
In general, please use conventional approaches to development and contribution, such as:
* Create branches for additions, deletions, and side projects.
* Do not commit to master!
* Use Pull Requests (PRs) to indicate that an addition is ready to merge.
PRs are the main and exclusive way to contribute code to this project.
In order for a PR to be accepted it needs to pass this list of requirements:

- The contribution must be correctly explained in natural language, providing a minimum working example that reproduces it.
- All PRs must be written idiomatically:
  - for Node: formatted according to [AirBnB standards](https://github.com/airbnb/javascript), with no warnings from `eslint` using the AirBnB style guide
  - for other languages, similar constraints apply.
- They should in general include tests, and those shall pass.
- In any case, all PRs have to pass the personal evaluation of at least one of the [maintainers](MAINTAINERS) of the project.


### Format of the commit message

Every commit message should describe what was changed, under which context and, if applicable, the issue it relates to (mentioning a GitHub issue number when applicable).

For small changes, or changes to a testing or personal branch, the commit message should be a short changelog entry.

For larger changes or for changes on branches that are more widely used, the commit message should simply reference an entry in some other changelog system. It is encouraged to use some sort of versioning system to log changes. Example commit message:
```
superscript.py v 2.0.5.006
```

The format can be described more formally as follows:

```
<package> v <version number>
```
@@ -1,15 +1,21 @@
Copyright (c) 2015, Scott Motte
BSD 3-Clause License

Copyright (c) 2020, Titan Robotics FRC 2022
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
MAINTAINERS (new file)
@@ -0,0 +1,3 @@
Arthur Lu <learthurgo@gmail.com>
Jacob Levine <jacoblevine18@gmail.com>
Dev Singh <dev@devksingh.com>
@@ -1,2 +1,5 @@
# tr2022-strategy
Titan Robotics 2022 Strategy Team Repository
# red-alliance-analysis
Titan Robotics 2022 Strategy Team Repository for Data Analysis Tools. Included with these tools are the backend data analysis engine formatted as a python package, associated binaries for the analysis package, and premade scripts that can be pulled directly from this repository and will integrate with other Red Alliance applications to quickly deploy FRC scouting tools.

# Installing
`pip install tra_analysis`
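As a point of reference (not part of this diff), the installed package appears to be used like the sketch below. The function names, arguments, and expected values are taken from analysis-master/test_analysis.py later in this compare, so treat the exact signatures as assumptions drawn from that test rather than documented API.

```
# Minimal usage sketch; names mirror analysis-master/test_analysis.py in this diff.
from tra_analysis import analysis as an

data = [1, 3, 6, 7, 9]
print(an.basic_stats(data))                          # mean, median, standard deviation, variance, min, max
print(an.z_score(3.2, 6, 1.5))                       # -1.866... for value 3.2 given mean 6, sigma 1.5
print(an.Metric().elo(1500, 1500, [1, 0], 400, 24))  # 1512.0 per the test suite
```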
analysis-master/build.sh (new file)
@@ -0,0 +1 @@
python setup.py sdist bdist_wheel || python3 setup.py sdist bdist_wheel
analysis-master/docker/Dockerfile (new file)
@@ -0,0 +1,5 @@
FROM python
WORKDIR ~/
COPY ./ ./
RUN pip install -r requirements.txt
CMD ["bash"]
analysis-master/docker/start-docker.sh (new file)
@@ -0,0 +1,3 @@
cd ..
docker build -t tra-analysis-amd64-dev -f docker/Dockerfile .
docker run -it tra-analysis-amd64-dev
analysis-master/requirements.txt (new file)
@@ -0,0 +1,6 @@
numba
numpy
scipy
scikit-learn
six
matplotlib
analysis-master/setup.py (new file)
@@ -0,0 +1,26 @@
import setuptools

requirements = []

with open("requirements.txt", 'r') as file:
    for line in file:
        requirements.append(line)

setuptools.setup(
    name="tra_analysis",
    version="2.0.3",
    author="The Titan Scouting Team",
    author_email="titanscout2022@gmail.com",
    description="Analysis package developed by Titan Scouting for The Red Alliance",
    long_description="",
    long_description_content_type="text/markdown",
    url="https://github.com/titanscout2022/tr2022-strategy",
    packages=setuptools.find_packages(),
    install_requires=requirements,
    license = "GNU General Public License v3.0",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
analysis-master/test_analysis.py (new file)
@@ -0,0 +1,31 @@
from tra_analysis import analysis as an
from tra_analysis import metrics

def test_():
    test_data_linear = [1, 3, 6, 7, 9]
    y_data_ccu = [1, 3, 7, 14, 21]
    y_data_ccd = [1, 5, 7, 8.5, 8.66]
    test_data_scrambled = [-32, 34, 19, 72, -65, -11, -43, 6, 85, -17, -98, -26, 12, 20, 9, -92, -40, 98, -78, 17, -20, 49, 93, -27, -24, -66, 40, 84, 1, -64, -68, -25, -42, -46, -76, 43, -3, 30, -14, -34, -55, -13, 41, -30, 0, -61, 48, 23, 60, 87, 80, 77, 53, 73, 79, 24, -52, 82, 8, -44, 65, 47, -77, 94, 7, 37, -79, 36, -94, 91, 59, 10, 97, -38, -67, 83, 54, 31, -95, -63, 16, -45, 21, -12, 66, -48, -18, -96, -90, -21, -83, -74, 39, 64, 69, -97, 13, 55, 27, -39]
    test_data_sorted = [-98, -97, -96, -95, -94, -92, -90, -83, -79, -78, -77, -76, -74, -68, -67, -66, -65, -64, -63, -61, -55, -52, -48, -46, -45, -44, -43, -42, -40, -39, -38, -34, -32, -30, -27, -26, -25, -24, -21, -20, -18, -17, -14, -13, -12, -11, -3, 0, 1, 6, 7, 8, 9, 10, 12, 13, 16, 17, 19, 20, 21, 23, 24, 27, 30, 31, 34, 36, 37, 39, 40, 41, 43, 47, 48, 49, 53, 54, 55, 59, 60, 64, 65, 66, 69, 72, 73, 77, 79, 80, 82, 83, 84, 85, 87, 91, 93, 94, 97, 98]
    assert an.basic_stats(test_data_linear) == {"mean": 5.2, "median": 6.0, "standard-deviation": 2.85657137141714, "variance": 8.16, "minimum": 1.0, "maximum": 9.0}
    assert an.z_score(3.2, 6, 1.5) == -1.8666666666666665
    assert an.z_normalize([test_data_linear], 1).tolist() == [[0.07537783614444091, 0.22613350843332272, 0.45226701686664544, 0.5276448530110863, 0.6784005252999682]]
    assert all(isinstance(item, str) for item in an.regression(test_data_linear, y_data_ccu, ["lin"])) == True
    #assert all(isinstance(item, str) for item in an.regression(test_data_linear, y_data_ccd, ["log"])) == True
    #assert all(isinstance(item, str) for item in an.regression(test_data_linear, y_data_ccu, ["exp"])) == True
    #assert all(isinstance(item, str) for item in an.regression(test_data_linear, y_data_ccu, ["ply"])) == True
    #assert all(isinstance(item, str) for item in an.regression(test_data_linear, y_data_ccd, ["sig"])) == True
    assert an.Metric().elo(1500, 1500, [1, 0], 400, 24) == 1512.0
    assert an.Metric().glicko2(1500, 250, 0.06, [1500, 1400], [250, 240], [1, 0]) == (1478.864307445517, 195.99122679202452, 0.05999602937563585)
    #assert an.Metric().trueskill([[(25, 8.33), (24, 8.25), (32, 7.5)], [(25, 8.33), (25, 8.33), (21, 6.5)]], [1, 0]) == [(metrics.trueskill.Rating(mu=21.346, sigma=7.875), metrics.trueskill.Rating(mu=20.415, sigma=7.808), metrics.trueskill.Rating(mu=29.037, sigma=7.170)), (metrics.trueskill.Rating(mu=28.654, sigma=7.875), metrics.trueskill.Rating(mu=28.654, sigma=7.875), metrics.trueskill.Rating(mu=23.225, sigma=6.287))]
    assert all(a == b for a, b in zip(an.Sort().quicksort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().mergesort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().introsort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().heapsort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().insertionsort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().timsort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().selectionsort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().shellsort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().bubblesort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().cyclesort(test_data_scrambled), test_data_sorted))
    assert all(a == b for a, b in zip(an.Sort().cocktailsort(test_data_scrambled), test_data_sorted))
analysis-master/tra_analysis/analysis.py (new file, 1491 lines; contents not displayed in this view)
analysis-master/tra_analysis/equation.ipynb (new file)
@@ -0,0 +1,162 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import re\n",
|
||||
"from decimal import Decimal\n",
|
||||
"from functools import reduce"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def add(string):\n",
|
||||
" while(len(re.findall(\"[+]{1}[-]?\", string)) != 0):\n",
|
||||
" string = re.sub(\"[-]?\\d+[.]?\\d*[+]{1}[-]?\\d+[.]?\\d*\", str(\"%f\" % reduce((lambda x, y: x + y), [Decimal(i) for i in re.split(\"[+]{1}\", re.search(\"[-]?\\d+[.]?\\d*[+]{1}[-]?\\d+[.]?\\d*\", string).group())])), string, 1)\n",
|
||||
" return string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def sub(string):\n",
|
||||
" while(len(re.findall(\"\\d+[.]?\\d*[-]{1,2}\\d+[.]?\\d*\", string)) != 0):\n",
|
||||
" g = re.search(\"\\d+[.]?\\d*[-]{1,2}\\d+[.]?\\d*\", string).group()\n",
|
||||
" if(re.search(\"[-]{1,2}\", g).group() == \"-\"):\n",
|
||||
" r = re.sub(\"[-]{1}\", \"+-\", g, 1)\n",
|
||||
" string = re.sub(g, r, string, 1)\n",
|
||||
" elif(re.search(\"[-]{1,2}\", g).group() == \"--\"):\n",
|
||||
" r = re.sub(\"[-]{2}\", \"+\", g, 1)\n",
|
||||
" string = re.sub(g, r, string, 1)\n",
|
||||
" else:\n",
|
||||
" pass\n",
|
||||
" return string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def mul(string):\n",
|
||||
" while(len(re.findall(\"[*]{1}[-]?\", string)) != 0):\n",
|
||||
" string = re.sub(\"[-]?\\d+[.]?\\d*[*]{1}[-]?\\d+[.]?\\d*\", str(\"%f\" % reduce((lambda x, y: x * y), [Decimal(i) for i in re.split(\"[*]{1}\", re.search(\"[-]?\\d+[.]?\\d*[*]{1}[-]?\\d+[.]?\\d*\", string).group())])), string, 1)\n",
|
||||
" return string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def div(string):\n",
|
||||
" while(len(re.findall(\"[/]{1}[-]?\", string)) != 0):\n",
|
||||
" string = re.sub(\"[-]?\\d+[.]?\\d*[/]{1}[-]?\\d+[.]?\\d*\", str(\"%f\" % reduce((lambda x, y: x / y), [Decimal(i) for i in re.split(\"[/]{1}\", re.search(\"[-]?\\d+[.]?\\d*[/]{1}[-]?\\d+[.]?\\d*\", string).group())])), string, 1)\n",
|
||||
" return string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def exp(string):\n",
|
||||
" while(len(re.findall(\"[\\^]{1}[-]?\", string)) != 0):\n",
|
||||
" string = re.sub(\"[-]?\\d+[.]?\\d*[\\^]{1}[-]?\\d+[.]?\\d*\", str(\"%f\" % reduce((lambda x, y: x ** y), [Decimal(i) for i in re.split(\"[\\^]{1}\", re.search(\"[-]?\\d+[.]?\\d*[\\^]{1}[-]?\\d+[.]?\\d*\", string).group())])), string, 1)\n",
|
||||
" return string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def evaluate(string):\n",
|
||||
" string = exp(string)\n",
|
||||
" string = div(string)\n",
|
||||
" string = mul(string)\n",
|
||||
" string = sub(string)\n",
|
||||
" print(string)\n",
|
||||
" string = add(string)\n",
|
||||
" return string"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "error",
|
||||
"ename": "SyntaxError",
|
||||
"evalue": "unexpected EOF while parsing (<ipython-input-13-f9fb4aededd9>, line 1)",
|
||||
"traceback": [
|
||||
"\u001b[1;36m File \u001b[1;32m\"<ipython-input-13-f9fb4aededd9>\"\u001b[1;36m, line \u001b[1;32m1\u001b[0m\n\u001b[1;33m def parentheses(string):\u001b[0m\n\u001b[1;37m ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m unexpected EOF while parsing\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def parentheses(string):"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"output_type": "stream",
|
||||
"name": "stdout",
|
||||
"text": "-158456325028528675187087900672.000000+0.8\n"
|
||||
},
|
||||
{
|
||||
"output_type": "execute_result",
|
||||
"data": {
|
||||
"text/plain": "'-158456325028528675187087900672.000000'"
|
||||
},
|
||||
"metadata": {},
|
||||
"execution_count": 22
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"string = \"8^32*4/-2+0.8\"\n",
|
||||
"evaluate(string)"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.7.6-final"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 4
|
||||
}
|
analysis-master/tra_analysis/metrics/elo.py (new file)
@@ -0,0 +1,7 @@
import numpy as np

def calculate(starting_score, opposing_score, observed, N, K):

    expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N))

    return starting_score + K*(np.sum(observed) - np.sum(expected))
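For orientation (an illustration added here, not part of the diff): with two equal ratings of 1500, the expected score is 1 / (1 + 10^((1500 - 1500)/400)) = 0.5, and with observed results [1, 0] the update is 1500 + 24 * (1 - 0.5) = 1512, matching the elo assertion in test_analysis.py. Calling the module directly is assumed to be equivalent to the an.Metric().elo wrapper used there.

```
# Hypothetical direct call; the import path is assumed from this diff's package layout.
from tra_analysis.metrics import elo

# 1500-rated player vs. a 1500-rated opponent, one win and one loss observed,
# N = 400 rating scale, K = 24 update factor.
print(elo.calculate(1500, 1500, [1, 0], 400, 24))  # 1512.0 (matches test_analysis.py)
```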
analysis-master/tra_analysis/metrics/glicko2.py (new file)
@@ -0,0 +1,99 @@
import math

class Glicko2:
    _tau = 0.5

    def getRating(self):
        return (self.__rating * 173.7178) + 1500

    def setRating(self, rating):
        self.__rating = (rating - 1500) / 173.7178

    rating = property(getRating, setRating)

    def getRd(self):
        return self.__rd * 173.7178

    def setRd(self, rd):
        self.__rd = rd / 173.7178

    rd = property(getRd, setRd)

    def __init__(self, rating = 1500, rd = 350, vol = 0.06):

        self.setRating(rating)
        self.setRd(rd)
        self.vol = vol

    def _preRatingRD(self):

        self.__rd = math.sqrt(math.pow(self.__rd, 2) + math.pow(self.vol, 2))

    def update_player(self, rating_list, RD_list, outcome_list):

        rating_list = [(x - 1500) / 173.7178 for x in rating_list]
        RD_list = [x / 173.7178 for x in RD_list]

        v = self._v(rating_list, RD_list)
        self.vol = self._newVol(rating_list, RD_list, outcome_list, v)
        self._preRatingRD()

        self.__rd = 1 / math.sqrt((1 / math.pow(self.__rd, 2)) + (1 / v))

        tempSum = 0
        for i in range(len(rating_list)):
            tempSum += self._g(RD_list[i]) * \
                (outcome_list[i] - self._E(rating_list[i], RD_list[i]))
        self.__rating += math.pow(self.__rd, 2) * tempSum


    def _newVol(self, rating_list, RD_list, outcome_list, v):

        i = 0
        delta = self._delta(rating_list, RD_list, outcome_list, v)
        a = math.log(math.pow(self.vol, 2))
        tau = self._tau
        x0 = a
        x1 = 0

        while x0 != x1:
            # New iteration, so x(i) becomes x(i-1)
            x0 = x1
            d = math.pow(self.__rating, 2) + v + math.exp(x0)
            h1 = -(x0 - a) / math.pow(tau, 2) - 0.5 * math.exp(x0) \
                / d + 0.5 * math.exp(x0) * math.pow(delta / d, 2)
            h2 = -1 / math.pow(tau, 2) - 0.5 * math.exp(x0) * \
                (math.pow(self.__rating, 2) + v) \
                / math.pow(d, 2) + 0.5 * math.pow(delta, 2) * math.exp(x0) \
                * (math.pow(self.__rating, 2) + v - math.exp(x0)) / math.pow(d, 3)
            x1 = x0 - (h1 / h2)

        return math.exp(x1 / 2)

    def _delta(self, rating_list, RD_list, outcome_list, v):

        tempSum = 0
        for i in range(len(rating_list)):
            tempSum += self._g(RD_list[i]) * (outcome_list[i] - self._E(rating_list[i], RD_list[i]))
        return v * tempSum

    def _v(self, rating_list, RD_list):

        tempSum = 0
        for i in range(len(rating_list)):
            tempE = self._E(rating_list[i], RD_list[i])
            tempSum += math.pow(self._g(RD_list[i]), 2) * tempE * (1 - tempE)
        return 1 / tempSum

    def _E(self, p2rating, p2RD):

        return 1 / (1 + math.exp(-1 * self._g(p2RD) * \
            (self.__rating - p2rating)))

    def _g(self, RD):

        return 1 / math.sqrt(1 + 3 * math.pow(RD, 2) / math.pow(math.pi, 2))

    def did_not_compete(self):

        self._preRatingRD()
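For orientation (an added sketch, not part of the diff), the class above can presumably be driven directly in the same way test_analysis.py drives an.Metric().glicko2; the assumption that the wrapper simply constructs a Glicko2 and calls update_player is mine.

```
# Hypothetical direct use of the Glicko2 class; import path assumed from this diff's layout.
from tra_analysis.metrics.glicko2 import Glicko2

player = Glicko2(rating = 1500, rd = 250, vol = 0.06)
# One win against a 1500/250 opponent and one loss against a 1400/240 opponent.
player.update_player([1500, 1400], [250, 240], [1, 0])
print(player.rating, player.rd, player.vol)
# test_analysis.py expects approximately (1478.86, 195.99, 0.060) for these inputs.
```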
analysis-master/tra_analysis/metrics/trueskill.py (new file)
@@ -0,0 +1,907 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
from itertools import chain
|
||||
import math
|
||||
|
||||
from six import iteritems
|
||||
from six.moves import map, range, zip
|
||||
from six import iterkeys
|
||||
|
||||
import copy
|
||||
try:
|
||||
from numbers import Number
|
||||
except ImportError:
|
||||
Number = (int, long, float, complex)
|
||||
|
||||
inf = float('inf')
|
||||
|
||||
class Gaussian(object):
|
||||
#: Precision, the inverse of the variance.
|
||||
pi = 0
|
||||
#: Precision adjusted mean, the precision multiplied by the mean.
|
||||
tau = 0
|
||||
|
||||
def __init__(self, mu=None, sigma=None, pi=0, tau=0):
|
||||
if mu is not None:
|
||||
if sigma is None:
|
||||
raise TypeError('sigma argument is needed')
|
||||
elif sigma == 0:
|
||||
raise ValueError('sigma**2 should be greater than 0')
|
||||
pi = sigma ** -2
|
||||
tau = pi * mu
|
||||
self.pi = pi
|
||||
self.tau = tau
|
||||
|
||||
@property
|
||||
def mu(self):
|
||||
return self.pi and self.tau / self.pi
|
||||
|
||||
@property
|
||||
def sigma(self):
|
||||
return math.sqrt(1 / self.pi) if self.pi else inf
|
||||
|
||||
def __mul__(self, other):
|
||||
pi, tau = self.pi + other.pi, self.tau + other.tau
|
||||
return Gaussian(pi=pi, tau=tau)
|
||||
|
||||
def __truediv__(self, other):
|
||||
pi, tau = self.pi - other.pi, self.tau - other.tau
|
||||
return Gaussian(pi=pi, tau=tau)
|
||||
|
||||
__div__ = __truediv__ # for Python 2
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.pi == other.pi and self.tau == other.tau
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.mu < other.mu
|
||||
|
||||
def __le__(self, other):
|
||||
return self.mu <= other.mu
|
||||
|
||||
def __gt__(self, other):
|
||||
return self.mu > other.mu
|
||||
|
||||
def __ge__(self, other):
|
||||
return self.mu >= other.mu
|
||||
|
||||
def __repr__(self):
|
||||
return 'N(mu={:.3f}, sigma={:.3f})'.format(self.mu, self.sigma)
|
||||
|
||||
def _repr_latex_(self):
|
||||
latex = r'\mathcal{{ N }}( {:.3f}, {:.3f}^2 )'.format(self.mu, self.sigma)
|
||||
return '$%s$' % latex
|
||||
|
||||
class Matrix(list):
|
||||
def __init__(self, src, height=None, width=None):
|
||||
if callable(src):
|
||||
f, src = src, {}
|
||||
size = [height, width]
|
||||
if not height:
|
||||
def set_height(height):
|
||||
size[0] = height
|
||||
size[0] = set_height
|
||||
if not width:
|
||||
def set_width(width):
|
||||
size[1] = width
|
||||
size[1] = set_width
|
||||
try:
|
||||
for (r, c), val in f(*size):
|
||||
src[r, c] = val
|
||||
except TypeError:
|
||||
raise TypeError('A callable src must return an interable '
|
||||
'which generates a tuple containing '
|
||||
'coordinate and value')
|
||||
height, width = tuple(size)
|
||||
if height is None or width is None:
|
||||
raise TypeError('A callable src must call set_height and '
|
||||
'set_width if the size is non-deterministic')
|
||||
if isinstance(src, list):
|
||||
is_number = lambda x: isinstance(x, Number)
|
||||
unique_col_sizes = set(map(len, src))
|
||||
everything_are_number = filter(is_number, sum(src, []))
|
||||
if len(unique_col_sizes) != 1 or not everything_are_number:
|
||||
raise ValueError('src must be a rectangular array of numbers')
|
||||
two_dimensional_array = src
|
||||
elif isinstance(src, dict):
|
||||
if not height or not width:
|
||||
w = h = 0
|
||||
for r, c in iterkeys(src):
|
||||
if not height:
|
||||
h = max(h, r + 1)
|
||||
if not width:
|
||||
w = max(w, c + 1)
|
||||
if not height:
|
||||
height = h
|
||||
if not width:
|
||||
width = w
|
||||
two_dimensional_array = []
|
||||
for r in range(height):
|
||||
row = []
|
||||
two_dimensional_array.append(row)
|
||||
for c in range(width):
|
||||
row.append(src.get((r, c), 0))
|
||||
else:
|
||||
raise TypeError('src must be a list or dict or callable')
|
||||
super(Matrix, self).__init__(two_dimensional_array)
|
||||
|
||||
@property
|
||||
def height(self):
|
||||
return len(self)
|
||||
|
||||
@property
|
||||
def width(self):
|
||||
return len(self[0])
|
||||
|
||||
def transpose(self):
|
||||
height, width = self.height, self.width
|
||||
src = {}
|
||||
for c in range(width):
|
||||
for r in range(height):
|
||||
src[c, r] = self[r][c]
|
||||
return type(self)(src, height=width, width=height)
|
||||
|
||||
def minor(self, row_n, col_n):
|
||||
height, width = self.height, self.width
|
||||
if not (0 <= row_n < height):
|
||||
raise ValueError('row_n should be between 0 and %d' % height)
|
||||
elif not (0 <= col_n < width):
|
||||
raise ValueError('col_n should be between 0 and %d' % width)
|
||||
two_dimensional_array = []
|
||||
for r in range(height):
|
||||
if r == row_n:
|
||||
continue
|
||||
row = []
|
||||
two_dimensional_array.append(row)
|
||||
for c in range(width):
|
||||
if c == col_n:
|
||||
continue
|
||||
row.append(self[r][c])
|
||||
return type(self)(two_dimensional_array)
|
||||
|
||||
def determinant(self):
|
||||
height, width = self.height, self.width
|
||||
if height != width:
|
||||
raise ValueError('Only square matrix can calculate a determinant')
|
||||
tmp, rv = copy.deepcopy(self), 1.
|
||||
for c in range(width - 1, 0, -1):
|
||||
pivot, r = max((abs(tmp[r][c]), r) for r in range(c + 1))
|
||||
pivot = tmp[r][c]
|
||||
if not pivot:
|
||||
return 0.
|
||||
tmp[r], tmp[c] = tmp[c], tmp[r]
|
||||
if r != c:
|
||||
rv = -rv
|
||||
rv *= pivot
|
||||
fact = -1. / pivot
|
||||
for r in range(c):
|
||||
f = fact * tmp[r][c]
|
||||
for x in range(c):
|
||||
tmp[r][x] += f * tmp[c][x]
|
||||
return rv * tmp[0][0]
|
||||
|
||||
def adjugate(self):
|
||||
height, width = self.height, self.width
|
||||
if height != width:
|
||||
raise ValueError('Only square matrix can be adjugated')
|
||||
if height == 2:
|
||||
a, b = self[0][0], self[0][1]
|
||||
c, d = self[1][0], self[1][1]
|
||||
return type(self)([[d, -b], [-c, a]])
|
||||
src = {}
|
||||
for r in range(height):
|
||||
for c in range(width):
|
||||
sign = -1 if (r + c) % 2 else 1
|
||||
src[r, c] = self.minor(r, c).determinant() * sign
|
||||
return type(self)(src, height, width)
|
||||
|
||||
def inverse(self):
|
||||
if self.height == self.width == 1:
|
||||
return type(self)([[1. / self[0][0]]])
|
||||
return (1. / self.determinant()) * self.adjugate()
|
||||
|
||||
def __add__(self, other):
|
||||
height, width = self.height, self.width
|
||||
if (height, width) != (other.height, other.width):
|
||||
raise ValueError('Must be same size')
|
||||
src = {}
|
||||
for r in range(height):
|
||||
for c in range(width):
|
||||
src[r, c] = self[r][c] + other[r][c]
|
||||
return type(self)(src, height, width)
|
||||
|
||||
def __mul__(self, other):
|
||||
if self.width != other.height:
|
||||
raise ValueError('Bad size')
|
||||
height, width = self.height, other.width
|
||||
src = {}
|
||||
for r in range(height):
|
||||
for c in range(width):
|
||||
src[r, c] = sum(self[r][x] * other[x][c]
|
||||
for x in range(self.width))
|
||||
return type(self)(src, height, width)
|
||||
|
||||
def __rmul__(self, other):
|
||||
if not isinstance(other, Number):
|
||||
raise TypeError('The operand should be a number')
|
||||
height, width = self.height, self.width
|
||||
src = {}
|
||||
for r in range(height):
|
||||
for c in range(width):
|
||||
src[r, c] = other * self[r][c]
|
||||
return type(self)(src, height, width)
|
||||
|
||||
def __repr__(self):
|
||||
return '{}({})'.format(type(self).__name__, super(Matrix, self).__repr__())
|
||||
|
||||
def _repr_latex_(self):
|
||||
rows = [' && '.join(['%.3f' % cell for cell in row]) for row in self]
|
||||
latex = r'\begin{matrix} %s \end{matrix}' % r'\\'.join(rows)
|
||||
return '$%s$' % latex
|
||||
|
||||
def _gen_erfcinv(erfc, math=math):
|
||||
def erfcinv(y):
|
||||
"""The inverse function of erfc."""
|
||||
if y >= 2:
|
||||
return -100.
|
||||
elif y <= 0:
|
||||
return 100.
|
||||
zero_point = y < 1
|
||||
if not zero_point:
|
||||
y = 2 - y
|
||||
t = math.sqrt(-2 * math.log(y / 2.))
|
||||
x = -0.70711 * \
|
||||
((2.30753 + t * 0.27061) / (1. + t * (0.99229 + t * 0.04481)) - t)
|
||||
for i in range(2):
|
||||
err = erfc(x) - y
|
||||
x += err / (1.12837916709551257 * math.exp(-(x ** 2)) - x * err)
|
||||
return x if zero_point else -x
|
||||
return erfcinv
|
||||
|
||||
def _gen_ppf(erfc, math=math):
|
||||
erfcinv = _gen_erfcinv(erfc, math)
|
||||
def ppf(x, mu=0, sigma=1):
|
||||
return mu - sigma * math.sqrt(2) * erfcinv(2 * x)
|
||||
return ppf
|
||||
|
||||
def erfc(x):
|
||||
z = abs(x)
|
||||
t = 1. / (1. + z / 2.)
|
||||
r = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (
|
||||
0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * (
|
||||
0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * (
|
||||
-0.82215223 + t * 0.17087277
|
||||
)))
|
||||
)))
|
||||
)))
|
||||
return 2. - r if x < 0 else r
|
||||
|
||||
def cdf(x, mu=0, sigma=1):
|
||||
return 0.5 * erfc(-(x - mu) / (sigma * math.sqrt(2)))
|
||||
|
||||
|
||||
def pdf(x, mu=0, sigma=1):
|
||||
return (1 / math.sqrt(2 * math.pi) * abs(sigma) *
|
||||
math.exp(-(((x - mu) / abs(sigma)) ** 2 / 2)))
|
||||
|
||||
ppf = _gen_ppf(erfc)
|
||||
|
||||
def choose_backend(backend):
|
||||
if backend is None: # fallback
|
||||
return cdf, pdf, ppf
|
||||
elif backend == 'mpmath':
|
||||
try:
|
||||
import mpmath
|
||||
except ImportError:
|
||||
raise ImportError('Install "mpmath" to use this backend')
|
||||
return mpmath.ncdf, mpmath.npdf, _gen_ppf(mpmath.erfc, math=mpmath)
|
||||
elif backend == 'scipy':
|
||||
try:
|
||||
from scipy.stats import norm
|
||||
except ImportError:
|
||||
raise ImportError('Install "scipy" to use this backend')
|
||||
return norm.cdf, norm.pdf, norm.ppf
|
||||
raise ValueError('%r backend is not defined' % backend)
|
||||
|
||||
def available_backends():
|
||||
backends = [None]
|
||||
for backend in ['mpmath', 'scipy']:
|
||||
try:
|
||||
__import__(backend)
|
||||
except ImportError:
|
||||
continue
|
||||
backends.append(backend)
|
||||
return backends
|
||||
|
||||
class Node(object):
|
||||
|
||||
pass
|
||||
|
||||
class Variable(Node, Gaussian):
|
||||
|
||||
def __init__(self):
|
||||
self.messages = {}
|
||||
super(Variable, self).__init__()
|
||||
|
||||
def set(self, val):
|
||||
delta = self.delta(val)
|
||||
self.pi, self.tau = val.pi, val.tau
|
||||
return delta
|
||||
|
||||
def delta(self, other):
|
||||
pi_delta = abs(self.pi - other.pi)
|
||||
if pi_delta == inf:
|
||||
return 0.
|
||||
return max(abs(self.tau - other.tau), math.sqrt(pi_delta))
|
||||
|
||||
def update_message(self, factor, pi=0, tau=0, message=None):
|
||||
message = message or Gaussian(pi=pi, tau=tau)
|
||||
old_message, self[factor] = self[factor], message
|
||||
return self.set(self / old_message * message)
|
||||
|
||||
def update_value(self, factor, pi=0, tau=0, value=None):
|
||||
value = value or Gaussian(pi=pi, tau=tau)
|
||||
old_message = self[factor]
|
||||
self[factor] = value * old_message / self
|
||||
return self.set(value)
|
||||
|
||||
def __getitem__(self, factor):
|
||||
return self.messages[factor]
|
||||
|
||||
def __setitem__(self, factor, message):
|
||||
self.messages[factor] = message
|
||||
|
||||
def __repr__(self):
|
||||
args = (type(self).__name__, super(Variable, self).__repr__(),
|
||||
len(self.messages), '' if len(self.messages) == 1 else 's')
|
||||
return '<%s %s with %d connection%s>' % args
|
||||
|
||||
|
||||
class Factor(Node):
|
||||
|
||||
def __init__(self, variables):
|
||||
self.vars = variables
|
||||
for var in variables:
|
||||
var[self] = Gaussian()
|
||||
|
||||
def down(self):
|
||||
return 0
|
||||
|
||||
def up(self):
|
||||
return 0
|
||||
|
||||
@property
|
||||
def var(self):
|
||||
assert len(self.vars) == 1
|
||||
return self.vars[0]
|
||||
|
||||
def __repr__(self):
|
||||
args = (type(self).__name__, len(self.vars),
|
||||
'' if len(self.vars) == 1 else 's')
|
||||
return '<%s with %d connection%s>' % args
|
||||
|
||||
|
||||
class PriorFactor(Factor):
|
||||
|
||||
def __init__(self, var, val, dynamic=0):
|
||||
super(PriorFactor, self).__init__([var])
|
||||
self.val = val
|
||||
self.dynamic = dynamic
|
||||
|
||||
def down(self):
|
||||
sigma = math.sqrt(self.val.sigma ** 2 + self.dynamic ** 2)
|
||||
value = Gaussian(self.val.mu, sigma)
|
||||
return self.var.update_value(self, value=value)
|
||||
|
||||
|
||||
class LikelihoodFactor(Factor):
|
||||
|
||||
def __init__(self, mean_var, value_var, variance):
|
||||
super(LikelihoodFactor, self).__init__([mean_var, value_var])
|
||||
self.mean = mean_var
|
||||
self.value = value_var
|
||||
self.variance = variance
|
||||
|
||||
def calc_a(self, var):
|
||||
return 1. / (1. + self.variance * var.pi)
|
||||
|
||||
def down(self):
|
||||
# update value.
|
||||
msg = self.mean / self.mean[self]
|
||||
a = self.calc_a(msg)
|
||||
return self.value.update_message(self, a * msg.pi, a * msg.tau)
|
||||
|
||||
def up(self):
|
||||
# update mean.
|
||||
msg = self.value / self.value[self]
|
||||
a = self.calc_a(msg)
|
||||
return self.mean.update_message(self, a * msg.pi, a * msg.tau)
|
||||
|
||||
|
||||
class SumFactor(Factor):
|
||||
|
||||
def __init__(self, sum_var, term_vars, coeffs):
|
||||
super(SumFactor, self).__init__([sum_var] + term_vars)
|
||||
self.sum = sum_var
|
||||
self.terms = term_vars
|
||||
self.coeffs = coeffs
|
||||
|
||||
def down(self):
|
||||
vals = self.terms
|
||||
msgs = [var[self] for var in vals]
|
||||
return self.update(self.sum, vals, msgs, self.coeffs)
|
||||
|
||||
def up(self, index=0):
|
||||
coeff = self.coeffs[index]
|
||||
coeffs = []
|
||||
for x, c in enumerate(self.coeffs):
|
||||
try:
|
||||
if x == index:
|
||||
coeffs.append(1. / coeff)
|
||||
else:
|
||||
coeffs.append(-c / coeff)
|
||||
except ZeroDivisionError:
|
||||
coeffs.append(0.)
|
||||
vals = self.terms[:]
|
||||
vals[index] = self.sum
|
||||
msgs = [var[self] for var in vals]
|
||||
return self.update(self.terms[index], vals, msgs, coeffs)
|
||||
|
||||
def update(self, var, vals, msgs, coeffs):
|
||||
pi_inv = 0
|
||||
mu = 0
|
||||
for val, msg, coeff in zip(vals, msgs, coeffs):
|
||||
div = val / msg
|
||||
mu += coeff * div.mu
|
||||
if pi_inv == inf:
|
||||
continue
|
||||
try:
|
||||
# numpy.float64 handles floating-point error by different way.
|
||||
# For example, it can just warn RuntimeWarning on n/0 problem
|
||||
# instead of throwing ZeroDivisionError. So div.pi, the
|
||||
# denominator has to be a built-in float.
|
||||
pi_inv += coeff ** 2 / float(div.pi)
|
||||
except ZeroDivisionError:
|
||||
pi_inv = inf
|
||||
pi = 1. / pi_inv
|
||||
tau = pi * mu
|
||||
return var.update_message(self, pi, tau)
|
||||
|
||||
|
||||
class TruncateFactor(Factor):
|
||||
|
||||
def __init__(self, var, v_func, w_func, draw_margin):
|
||||
super(TruncateFactor, self).__init__([var])
|
||||
self.v_func = v_func
|
||||
self.w_func = w_func
|
||||
self.draw_margin = draw_margin
|
||||
|
||||
def up(self):
|
||||
val = self.var
|
||||
msg = self.var[self]
|
||||
div = val / msg
|
||||
sqrt_pi = math.sqrt(div.pi)
|
||||
args = (div.tau / sqrt_pi, self.draw_margin * sqrt_pi)
|
||||
v = self.v_func(*args)
|
||||
w = self.w_func(*args)
|
||||
denom = (1. - w)
|
||||
pi, tau = div.pi / denom, (div.tau + sqrt_pi * v) / denom
|
||||
return val.update_value(self, pi, tau)
|
||||
|
||||
#: Default initial mean of ratings.
|
||||
MU = 25.
|
||||
#: Default initial standard deviation of ratings.
|
||||
SIGMA = MU / 3
|
||||
#: Default distance that guarantees about 76% chance of winning.
|
||||
BETA = SIGMA / 2
|
||||
#: Default dynamic factor.
|
||||
TAU = SIGMA / 100
|
||||
#: Default draw probability of the game.
|
||||
DRAW_PROBABILITY = .10
|
||||
#: A basis to check reliability of the result.
|
||||
DELTA = 0.0001
|
||||
|
||||
|
||||
def calc_draw_probability(draw_margin, size, env=None):
|
||||
if env is None:
|
||||
env = global_env()
|
||||
return 2 * env.cdf(draw_margin / (math.sqrt(size) * env.beta)) - 1
|
||||
|
||||
|
||||
def calc_draw_margin(draw_probability, size, env=None):
|
||||
if env is None:
|
||||
env = global_env()
|
||||
return env.ppf((draw_probability + 1) / 2.) * math.sqrt(size) * env.beta
|
||||
|
||||
|
||||
def _team_sizes(rating_groups):
|
||||
team_sizes = [0]
|
||||
for group in rating_groups:
|
||||
team_sizes.append(len(group) + team_sizes[-1])
|
||||
del team_sizes[0]
|
||||
return team_sizes
|
||||
|
||||
|
||||
def _floating_point_error(env):
|
||||
if env.backend == 'mpmath':
|
||||
msg = 'Set "mpmath.mp.dps" to higher'
|
||||
else:
|
||||
msg = 'Cannot calculate correctly, set backend to "mpmath"'
|
||||
return FloatingPointError(msg)
|
||||
|
||||
|
||||
class Rating(Gaussian):
|
||||
def __init__(self, mu=None, sigma=None):
|
||||
if isinstance(mu, tuple):
|
||||
mu, sigma = mu
|
||||
elif isinstance(mu, Gaussian):
|
||||
mu, sigma = mu.mu, mu.sigma
|
||||
if mu is None:
|
||||
mu = global_env().mu
|
||||
if sigma is None:
|
||||
sigma = global_env().sigma
|
||||
super(Rating, self).__init__(mu, sigma)
|
||||
|
||||
def __int__(self):
|
||||
return int(self.mu)
|
||||
|
||||
def __long__(self):
|
||||
return long(self.mu)
|
||||
|
||||
def __float__(self):
|
||||
return float(self.mu)
|
||||
|
||||
def __iter__(self):
|
||||
return iter((self.mu, self.sigma))
|
||||
|
||||
def __repr__(self):
|
||||
c = type(self)
|
||||
args = ('.'.join([c.__module__, c.__name__]), self.mu, self.sigma)
|
||||
return '%s(mu=%.3f, sigma=%.3f)' % args
|
||||
|
||||
|
||||
class TrueSkill(object):
|
||||
def __init__(self, mu=MU, sigma=SIGMA, beta=BETA, tau=TAU,
|
||||
draw_probability=DRAW_PROBABILITY, backend=None):
|
||||
self.mu = mu
|
||||
self.sigma = sigma
|
||||
self.beta = beta
|
||||
self.tau = tau
|
||||
self.draw_probability = draw_probability
|
||||
self.backend = backend
|
||||
if isinstance(backend, tuple):
|
||||
self.cdf, self.pdf, self.ppf = backend
|
||||
else:
|
||||
self.cdf, self.pdf, self.ppf = choose_backend(backend)
|
||||
|
||||
def create_rating(self, mu=None, sigma=None):
|
||||
if mu is None:
|
||||
mu = self.mu
|
||||
if sigma is None:
|
||||
sigma = self.sigma
|
||||
return Rating(mu, sigma)
|
||||
|
||||
def v_win(self, diff, draw_margin):
|
||||
x = diff - draw_margin
|
||||
denom = self.cdf(x)
|
||||
return (self.pdf(x) / denom) if denom else -x
|
||||
|
||||
def v_draw(self, diff, draw_margin):
|
||||
abs_diff = abs(diff)
|
||||
a, b = draw_margin - abs_diff, -draw_margin - abs_diff
|
||||
denom = self.cdf(a) - self.cdf(b)
|
||||
numer = self.pdf(b) - self.pdf(a)
|
||||
return ((numer / denom) if denom else a) * (-1 if diff < 0 else +1)
|
||||
|
||||
def w_win(self, diff, draw_margin):
|
||||
x = diff - draw_margin
|
||||
v = self.v_win(diff, draw_margin)
|
||||
w = v * (v + x)
|
||||
if 0 < w < 1:
|
||||
return w
|
||||
raise _floating_point_error(self)
|
||||
|
||||
def w_draw(self, diff, draw_margin):
|
||||
abs_diff = abs(diff)
|
||||
a, b = draw_margin - abs_diff, -draw_margin - abs_diff
|
||||
denom = self.cdf(a) - self.cdf(b)
|
||||
if not denom:
|
||||
raise _floating_point_error(self)
|
||||
v = self.v_draw(abs_diff, draw_margin)
|
||||
return (v ** 2) + (a * self.pdf(a) - b * self.pdf(b)) / denom
|
||||
|
||||
def validate_rating_groups(self, rating_groups):
|
||||
# check group sizes
|
||||
if len(rating_groups) < 2:
|
||||
raise ValueError('Need multiple rating groups')
|
||||
elif not all(rating_groups):
|
||||
raise ValueError('Each group must contain multiple ratings')
|
||||
# check group types
|
||||
group_types = set(map(type, rating_groups))
|
||||
if len(group_types) != 1:
|
||||
raise TypeError('All groups should be same type')
|
||||
elif group_types.pop() is Rating:
|
||||
raise TypeError('Rating cannot be a rating group')
|
||||
# normalize rating_groups
|
||||
if isinstance(rating_groups[0], dict):
|
||||
dict_rating_groups = rating_groups
|
||||
rating_groups = []
|
||||
keys = []
|
||||
for dict_rating_group in dict_rating_groups:
|
||||
rating_group, key_group = [], []
|
||||
for key, rating in iteritems(dict_rating_group):
|
||||
rating_group.append(rating)
|
||||
key_group.append(key)
|
||||
rating_groups.append(tuple(rating_group))
|
||||
keys.append(tuple(key_group))
|
||||
else:
|
||||
rating_groups = list(rating_groups)
|
||||
keys = None
|
||||
return rating_groups, keys
|
||||
|
||||
def validate_weights(self, weights, rating_groups, keys=None):
|
||||
if weights is None:
|
||||
weights = [(1,) * len(g) for g in rating_groups]
|
||||
elif isinstance(weights, dict):
|
||||
weights_dict, weights = weights, []
|
||||
for x, group in enumerate(rating_groups):
|
||||
w = []
|
||||
weights.append(w)
|
||||
for y, rating in enumerate(group):
|
||||
if keys is not None:
|
||||
y = keys[x][y]
|
||||
w.append(weights_dict.get((x, y), 1))
|
||||
return weights
|
||||
|
||||
def factor_graph_builders(self, rating_groups, ranks, weights):
|
||||
flatten_ratings = sum(map(tuple, rating_groups), ())
|
||||
flatten_weights = sum(map(tuple, weights), ())
|
||||
size = len(flatten_ratings)
|
||||
group_size = len(rating_groups)
|
||||
# create variables
|
||||
rating_vars = [Variable() for x in range(size)]
|
||||
perf_vars = [Variable() for x in range(size)]
|
||||
team_perf_vars = [Variable() for x in range(group_size)]
|
||||
team_diff_vars = [Variable() for x in range(group_size - 1)]
|
||||
team_sizes = _team_sizes(rating_groups)
|
||||
# layer builders
|
||||
def build_rating_layer():
|
||||
for rating_var, rating in zip(rating_vars, flatten_ratings):
|
||||
yield PriorFactor(rating_var, rating, self.tau)
|
||||
def build_perf_layer():
|
||||
for rating_var, perf_var in zip(rating_vars, perf_vars):
|
||||
yield LikelihoodFactor(rating_var, perf_var, self.beta ** 2)
|
||||
def build_team_perf_layer():
|
||||
for team, team_perf_var in enumerate(team_perf_vars):
|
||||
if team > 0:
|
||||
start = team_sizes[team - 1]
|
||||
else:
|
||||
start = 0
|
||||
end = team_sizes[team]
|
||||
child_perf_vars = perf_vars[start:end]
|
||||
coeffs = flatten_weights[start:end]
|
||||
yield SumFactor(team_perf_var, child_perf_vars, coeffs)
|
||||
def build_team_diff_layer():
|
||||
for team, team_diff_var in enumerate(team_diff_vars):
|
||||
yield SumFactor(team_diff_var,
|
||||
team_perf_vars[team:team + 2], [+1, -1])
|
||||
def build_trunc_layer():
|
||||
for x, team_diff_var in enumerate(team_diff_vars):
|
||||
if callable(self.draw_probability):
|
||||
# dynamic draw probability
|
||||
team_perf1, team_perf2 = team_perf_vars[x:x + 2]
|
||||
args = (Rating(team_perf1), Rating(team_perf2), self)
|
||||
draw_probability = self.draw_probability(*args)
|
||||
else:
|
||||
# static draw probability
|
||||
draw_probability = self.draw_probability
|
||||
size = sum(map(len, rating_groups[x:x + 2]))
|
||||
draw_margin = calc_draw_margin(draw_probability, size, self)
|
||||
if ranks[x] == ranks[x + 1]: # is a tie?
|
||||
v_func, w_func = self.v_draw, self.w_draw
|
||||
else:
|
||||
v_func, w_func = self.v_win, self.w_win
|
||||
yield TruncateFactor(team_diff_var,
|
||||
v_func, w_func, draw_margin)
|
||||
# build layers
|
||||
return (build_rating_layer, build_perf_layer, build_team_perf_layer,
|
||||
build_team_diff_layer, build_trunc_layer)
|
||||
|
||||
def run_schedule(self, build_rating_layer, build_perf_layer,
|
||||
build_team_perf_layer, build_team_diff_layer,
|
||||
build_trunc_layer, min_delta=DELTA):
|
||||
if min_delta <= 0:
|
||||
raise ValueError('min_delta must be greater than 0')
|
||||
layers = []
|
||||
def build(builders):
|
||||
layers_built = [list(build()) for build in builders]
|
||||
layers.extend(layers_built)
|
||||
return layers_built
|
||||
# gray arrows
|
||||
layers_built = build([build_rating_layer,
|
||||
build_perf_layer,
|
||||
build_team_perf_layer])
|
||||
rating_layer, perf_layer, team_perf_layer = layers_built
|
||||
for f in chain(*layers_built):
|
||||
f.down()
|
||||
# arrow #1, #2, #3
|
||||
team_diff_layer, trunc_layer = build([build_team_diff_layer,
|
||||
build_trunc_layer])
|
||||
team_diff_len = len(team_diff_layer)
|
||||
for x in range(10):
|
||||
if team_diff_len == 1:
|
||||
# only two teams
|
||||
team_diff_layer[0].down()
|
||||
delta = trunc_layer[0].up()
|
||||
else:
|
||||
# multiple teams
|
||||
delta = 0
|
||||
for x in range(team_diff_len - 1):
|
||||
team_diff_layer[x].down()
|
||||
delta = max(delta, trunc_layer[x].up())
|
||||
team_diff_layer[x].up(1) # up to right variable
|
||||
for x in range(team_diff_len - 1, 0, -1):
|
||||
team_diff_layer[x].down()
|
||||
delta = max(delta, trunc_layer[x].up())
|
||||
team_diff_layer[x].up(0) # up to left variable
|
||||
# repeat until the update is small enough
|
||||
if delta <= min_delta:
|
||||
break
|
||||
# up both ends
|
||||
team_diff_layer[0].up(0)
|
||||
team_diff_layer[team_diff_len - 1].up(1)
|
||||
# up the remainder of the black arrows
|
||||
for f in team_perf_layer:
|
||||
for x in range(len(f.vars) - 1):
|
||||
f.up(x)
|
||||
for f in perf_layer:
|
||||
f.up()
|
||||
return layers
|
||||
|
||||
def rate(self, rating_groups, ranks=None, weights=None, min_delta=DELTA):
|
||||
rating_groups, keys = self.validate_rating_groups(rating_groups)
|
||||
weights = self.validate_weights(weights, rating_groups, keys)
|
||||
group_size = len(rating_groups)
|
||||
if ranks is None:
|
||||
ranks = range(group_size)
|
||||
elif len(ranks) != group_size:
|
||||
raise ValueError('Wrong ranks')
|
||||
# sort rating groups by rank
|
||||
by_rank = lambda x: x[1][1]
|
||||
sorting = sorted(enumerate(zip(rating_groups, ranks, weights)),
|
||||
key=by_rank)
|
||||
sorted_rating_groups, sorted_ranks, sorted_weights = [], [], []
|
||||
for x, (g, r, w) in sorting:
|
||||
sorted_rating_groups.append(g)
|
||||
sorted_ranks.append(r)
|
||||
# ensure each weight is at least min_delta (greater than 0)
|
||||
sorted_weights.append(max(min_delta, w_) for w_ in w)
|
||||
# build factor graph
|
||||
args = (sorted_rating_groups, sorted_ranks, sorted_weights)
|
||||
builders = self.factor_graph_builders(*args)
|
||||
args = builders + (min_delta,)
|
||||
layers = self.run_schedule(*args)
|
||||
# make result
|
||||
rating_layer, team_sizes = layers[0], _team_sizes(sorted_rating_groups)
|
||||
transformed_groups = []
|
||||
for start, end in zip([0] + team_sizes[:-1], team_sizes):
|
||||
group = []
|
||||
for f in rating_layer[start:end]:
|
||||
group.append(Rating(float(f.var.mu), float(f.var.sigma)))
|
||||
transformed_groups.append(tuple(group))
|
||||
by_hint = lambda x: x[0]
|
||||
unsorting = sorted(zip((x for x, __ in sorting), transformed_groups),
|
||||
key=by_hint)
|
||||
if keys is None:
|
||||
return [g for x, g in unsorting]
|
||||
# restore the structure with input dictionary keys
|
||||
return [dict(zip(keys[x], g)) for x, g in unsorting]
|
||||
|
||||
def quality(self, rating_groups, weights=None):
|
||||
rating_groups, keys = self.validate_rating_groups(rating_groups)
|
||||
weights = self.validate_weights(weights, rating_groups, keys)
|
||||
flatten_ratings = sum(map(tuple, rating_groups), ())
|
||||
flatten_weights = sum(map(tuple, weights), ())
|
||||
length = len(flatten_ratings)
|
||||
# a vector of all of the skill means
|
||||
mean_matrix = Matrix([[r.mu] for r in flatten_ratings])
|
||||
# a matrix whose diagonal values are the variances (sigma ** 2) of each
|
||||
# of the players.
|
||||
def variance_matrix(height, width):
|
||||
variances = (r.sigma ** 2 for r in flatten_ratings)
|
||||
for x, variance in enumerate(variances):
|
||||
yield (x, x), variance
|
||||
variance_matrix = Matrix(variance_matrix, length, length)
|
||||
# the player-team assignment and comparison matrix
|
||||
def rotated_a_matrix(set_height, set_width):
|
||||
t = 0
|
||||
for r, (cur, _next) in enumerate(zip(rating_groups[:-1],
|
||||
rating_groups[1:])):
|
||||
for x in range(t, t + len(cur)):
|
||||
yield (r, x), flatten_weights[x]
|
||||
t += 1
|
||||
x += 1
|
||||
for x in range(x, x + len(_next)):
|
||||
yield (r, x), -flatten_weights[x]
|
||||
set_height(r + 1)
|
||||
set_width(x + 1)
|
||||
rotated_a_matrix = Matrix(rotated_a_matrix)
|
||||
a_matrix = rotated_a_matrix.transpose()
|
||||
# match quality further derivation
|
||||
_ata = (self.beta ** 2) * rotated_a_matrix * a_matrix
|
||||
_atsa = rotated_a_matrix * variance_matrix * a_matrix
|
||||
start = mean_matrix.transpose() * a_matrix
|
||||
middle = _ata + _atsa
|
||||
end = rotated_a_matrix * mean_matrix
|
||||
# make result
|
||||
e_arg = (-0.5 * start * middle.inverse() * end).determinant()
|
||||
s_arg = _ata.determinant() / middle.determinant()
|
||||
return math.exp(e_arg) * math.sqrt(s_arg)
|
||||
|
||||
def expose(self, rating):
|
||||
k = self.mu / self.sigma
|
||||
return rating.mu - k * rating.sigma
|
||||
|
||||
def make_as_global(self):
|
||||
return setup(env=self)
|
||||
|
||||
def __repr__(self):
|
||||
c = type(self)
|
||||
if callable(self.draw_probability):
|
||||
f = self.draw_probability
|
||||
draw_probability = '.'.join([f.__module__, f.__name__])
|
||||
else:
|
||||
draw_probability = '%.1f%%' % (self.draw_probability * 100)
|
||||
if self.backend is None:
|
||||
backend = ''
|
||||
elif isinstance(self.backend, tuple):
|
||||
backend = ', backend=...'
|
||||
else:
|
||||
backend = ', backend=%r' % self.backend
|
||||
args = ('.'.join([c.__module__, c.__name__]), self.mu, self.sigma,
|
||||
self.beta, self.tau, draw_probability, backend)
|
||||
return ('%s(mu=%.3f, sigma=%.3f, beta=%.3f, tau=%.3f, '
|
||||
'draw_probability=%s%s)' % args)
|
||||
|
||||
|
||||
def rate_1vs1(rating1, rating2, drawn=False, min_delta=DELTA, env=None):
|
||||
if env is None:
|
||||
env = global_env()
|
||||
ranks = [0, 0 if drawn else 1]
|
||||
teams = env.rate([(rating1,), (rating2,)], ranks, min_delta=min_delta)
|
||||
return teams[0][0], teams[1][0]
|
||||
|
||||
|
||||
def quality_1vs1(rating1, rating2, env=None):
|
||||
if env is None:
|
||||
env = global_env()
|
||||
return env.quality([(rating1,), (rating2,)])
|
||||
|
||||
|
||||
def global_env():
|
||||
try:
|
||||
global_env.__trueskill__
|
||||
except AttributeError:
|
||||
# setup the default environment
|
||||
setup()
|
||||
return global_env.__trueskill__
|
||||
|
||||
|
||||
def setup(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU,
|
||||
draw_probability=DRAW_PROBABILITY, backend=None, env=None):
|
||||
if env is None:
|
||||
env = TrueSkill(mu, sigma, beta, tau, draw_probability, backend)
|
||||
global_env.__trueskill__ = env
|
||||
return env
|
||||
|
||||
|
||||
def rate(rating_groups, ranks=None, weights=None, min_delta=DELTA):
|
||||
return global_env().rate(rating_groups, ranks, weights, min_delta)
|
||||
|
||||
|
||||
def quality(rating_groups, weights=None):
|
||||
return global_env().quality(rating_groups, weights)
|
||||
|
||||
|
||||
def expose(rating):
|
||||
return global_env().expose(rating)
|
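The module-level helpers above (setup, rate_1vs1, quality_1vs1, expose) are the intended entry points into this rating code. A minimal usage sketch follows, assuming the file above is importable as `trueskill`; that import path is an assumption and may differ in this repository.

# minimal sketch, assuming the code above is importable as `trueskill`
import trueskill

trueskill.setup(draw_probability=0.10)        # configure the shared global environment
alice, bob = trueskill.Rating(), trueskill.Rating(mu=30.0, sigma=7.0)
print(trueskill.quality_1vs1(alice, bob))     # match quality: higher means a more even match
alice, bob = trueskill.rate_1vs1(alice, bob)  # first argument is treated as the winner
print(trueskill.expose(alice))                # conservative skill estimate (mu - k * sigma, k from the environment)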
220
analysis-master/tra_analysis/regression.py
Normal file
@@ -0,0 +1,220 @@
|
||||
# Titan Robotics Team 2022: CUDA-based Regressions Module
|
||||
# Written by Arthur Lu & Jacob Levine
|
||||
# Notes:
|
||||
# this module has been automatically integrated into analysis.py, and should be callable as a class from the package
|
||||
# this module is cuda-optimized and vectorized (except for one small part)
|
||||
# setup:
|
||||
|
||||
__version__ = "1.0.0.004"
|
||||
|
||||
# changelog should be viewed using print(analysis.regression.__changelog__)
|
||||
__changelog__ = """
|
||||
1.0.0.004:
|
||||
- bug fixes
|
||||
- fixed changelog
|
||||
1.0.0.003:
|
||||
- bug fixes
|
||||
1.0.0.002:
|
||||
-Added more parameters to log, exponential, polynomial
|
||||
-Added SigmoidalRegKernelArthur, because Arthur apparently needs
|
||||
to train the scaling and shifting of sigmoids
|
||||
1.0.0.001:
|
||||
-initial release, with linear, log, exponential, polynomial, and sigmoid kernels
|
||||
-already vectorized (except for polynomial generation) and CUDA-optimized
|
||||
"""
|
||||
|
||||
__author__ = (
|
||||
"Jacob Levine <jlevine@imsa.edu>",
|
||||
"Arthur Lu <learthurgo@gmail.com>"
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
'factorial',
|
||||
'take_all_pwrs',
|
||||
'num_poly_terms',
|
||||
'set_device',
|
||||
'LinearRegKernel',
|
||||
'SigmoidalRegKernel',
|
||||
'LogRegKernel',
|
||||
'PolyRegKernel',
|
||||
'ExpRegKernel',
|
||||
'SigmoidalRegKernelArthur',
|
||||
'SGDTrain',
|
||||
'CustomTrain'
|
||||
]
|
||||
|
||||
import torch
|
||||
|
||||
global device
|
||||
|
||||
device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"
|
||||
|
||||
#todo: document completely
|
||||
|
||||
def set_device(new_device):
|
||||
	global device
	device = new_device
|
||||
|
||||
class LinearRegKernel():
|
||||
parameters= []
|
||||
weights=None
|
||||
bias=None
|
||||
def __init__(self, num_vars):
|
||||
self.weights=torch.rand(num_vars, requires_grad=True, device=device)
|
||||
self.bias=torch.rand(1, requires_grad=True, device=device)
|
||||
self.parameters=[self.weights,self.bias]
|
||||
def forward(self,mtx):
|
||||
long_bias=self.bias.repeat([1,mtx.size()[1]])
|
||||
return torch.matmul(self.weights,mtx)+long_bias
|
||||
|
||||
class SigmoidalRegKernel():
|
||||
parameters= []
|
||||
weights=None
|
||||
bias=None
|
||||
sigmoid=torch.nn.Sigmoid()
|
||||
def __init__(self, num_vars):
|
||||
self.weights=torch.rand(num_vars, requires_grad=True, device=device)
|
||||
self.bias=torch.rand(1, requires_grad=True, device=device)
|
||||
self.parameters=[self.weights,self.bias]
|
||||
def forward(self,mtx):
|
||||
long_bias=self.bias.repeat([1,mtx.size()[1]])
|
||||
return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias)
|
||||
|
||||
class SigmoidalRegKernelArthur():
|
||||
parameters= []
|
||||
weights=None
|
||||
in_bias=None
|
||||
scal_mult=None
|
||||
out_bias=None
|
||||
sigmoid=torch.nn.Sigmoid()
|
||||
def __init__(self, num_vars):
|
||||
self.weights=torch.rand(num_vars, requires_grad=True, device=device)
|
||||
self.in_bias=torch.rand(1, requires_grad=True, device=device)
|
||||
self.scal_mult=torch.rand(1, requires_grad=True, device=device)
|
||||
self.out_bias=torch.rand(1, requires_grad=True, device=device)
|
||||
self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
|
||||
def forward(self,mtx):
|
||||
long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
|
||||
long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
|
||||
return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
|
||||
|
||||
class LogRegKernel():
|
||||
parameters= []
|
||||
weights=None
|
||||
in_bias=None
|
||||
scal_mult=None
|
||||
out_bias=None
|
||||
def __init__(self, num_vars):
|
||||
self.weights=torch.rand(num_vars, requires_grad=True, device=device)
|
||||
self.in_bias=torch.rand(1, requires_grad=True, device=device)
|
||||
self.scal_mult=torch.rand(1, requires_grad=True, device=device)
|
||||
self.out_bias=torch.rand(1, requires_grad=True, device=device)
|
||||
self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
|
||||
def forward(self,mtx):
|
||||
long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
|
||||
long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
|
||||
return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
|
||||
|
||||
class ExpRegKernel():
|
||||
parameters= []
|
||||
weights=None
|
||||
in_bias=None
|
||||
scal_mult=None
|
||||
out_bias=None
|
||||
def __init__(self, num_vars):
|
||||
self.weights=torch.rand(num_vars, requires_grad=True, device=device)
|
||||
self.in_bias=torch.rand(1, requires_grad=True, device=device)
|
||||
self.scal_mult=torch.rand(1, requires_grad=True, device=device)
|
||||
self.out_bias=torch.rand(1, requires_grad=True, device=device)
|
||||
self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
|
||||
def forward(self,mtx):
|
||||
long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
|
||||
long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
|
||||
return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
|
||||
|
||||
class PolyRegKernel():
|
||||
parameters= []
|
||||
weights=None
|
||||
bias=None
|
||||
power=None
|
||||
def __init__(self, num_vars, power):
|
||||
self.power=power
|
||||
num_terms=self.num_poly_terms(num_vars, power)
|
||||
self.weights=torch.rand(num_terms, requires_grad=True, device=device)
|
||||
self.bias=torch.rand(1, requires_grad=True, device=device)
|
||||
self.parameters=[self.weights,self.bias]
|
||||
def num_poly_terms(self,num_vars, power):
|
||||
if power == 0:
|
||||
return 0
|
||||
return int(self.factorial(num_vars+power-1) / self.factorial(power) / self.factorial(num_vars-1)) + self.num_poly_terms(num_vars, power-1)
|
||||
def factorial(self,n):
|
||||
if n==0:
|
||||
return 1
|
||||
else:
|
||||
return n*self.factorial(n-1)
|
||||
def take_all_pwrs(self, vec, pwr):
|
||||
#todo: vectorize (kinda)
|
||||
combins=torch.combinations(vec, r=pwr, with_replacement=True)
|
||||
out=torch.ones(combins.size()[0]).to(device).to(torch.float)
|
||||
for i in torch.t(combins).to(device).to(torch.float):
|
||||
out *= i
|
||||
if pwr == 1:
|
||||
return out
|
||||
else:
|
||||
return torch.cat((out,self.take_all_pwrs(vec, pwr-1)))
|
||||
def forward(self,mtx):
|
||||
#TODO: Vectorize the last part
|
||||
cols=[]
|
||||
for i in torch.t(mtx):
|
||||
cols.append(self.take_all_pwrs(i,self.power))
|
||||
new_mtx=torch.t(torch.stack(cols))
|
||||
long_bias=self.bias.repeat([1,mtx.size()[1]])
|
||||
return torch.matmul(self.weights,new_mtx)+long_bias
|
||||
|
||||
def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
|
||||
optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
|
||||
data_cuda=data.to(device)
|
||||
ground_cuda=ground.to(device)
|
||||
if (return_losses):
|
||||
losses=[]
|
||||
for i in range(iterations):
|
||||
with torch.set_grad_enabled(True):
|
||||
optim.zero_grad()
|
||||
pred=kernel.forward(data_cuda)
|
||||
ls=loss(pred,ground_cuda)
|
||||
losses.append(ls.item())
|
||||
ls.backward()
|
||||
optim.step()
|
||||
return [kernel,losses]
|
||||
else:
|
||||
for i in range(iterations):
|
||||
with torch.set_grad_enabled(True):
|
||||
optim.zero_grad()
|
||||
pred=kernel.forward(data_cuda)
|
||||
ls=loss(pred,ground_cuda)
|
||||
ls.backward()
|
||||
optim.step()
|
||||
return kernel
|
||||
|
||||
def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
|
||||
data_cuda=data.to(device)
|
||||
ground_cuda=ground.to(device)
|
||||
if (return_losses):
|
||||
losses=[]
|
||||
for i in range(iterations):
|
||||
with torch.set_grad_enabled(True):
|
||||
optim.zero_grad()
|
||||
pred=kernel.forward(data_cuda)
|
||||
ls=loss(pred,ground_cuda)
|
||||
losses.append(ls.item())
|
||||
ls.backward()
|
||||
optim.step()
|
||||
return [kernel,losses]
|
||||
else:
|
||||
for i in range(iterations):
|
||||
with torch.set_grad_enabled(True):
|
||||
optim.zero_grad()
|
||||
pred=kernel.forward(data_cuda)
|
||||
ls=loss(pred,ground_cuda)
|
||||
ls.backward()
|
||||
optim.step()
|
||||
return kernel
|
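A short usage sketch for the kernels and trainer above, assuming the module is importable as `regression` and that inputs are laid out as the kernels expect (one variable per row, one sample per column); the synthetic data and learning rate here are purely illustrative.

# minimal sketch, assuming the module above is importable as `regression`
import torch
import regression as reg

x = torch.rand(2, 100)                                   # 2 variables, 100 samples (one sample per column)
true_w = torch.tensor([3.0, -1.0])
y = (torch.matmul(true_w, x) + 0.5).unsqueeze(0)         # synthetic targets shaped like the kernel output

kernel = reg.LinearRegKernel(num_vars=2)
kernel, losses = reg.SGDTrain(kernel, x, y, iterations=2000, learning_rate=0.01, return_losses=True)
print(kernel.weights, kernel.bias, losses[-1])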
122
analysis-master/tra_analysis/titanlearn.py
Normal file
@@ -0,0 +1,122 @@
|
||||
# Titan Robotics Team 2022: ML Module
|
||||
# Written by Arthur Lu & Jacob Levine
|
||||
# Notes:
|
||||
# this should be imported as a python module using 'import titanlearn'
|
||||
# this should be included in the local directory or environment variable
|
||||
# this module is optimized for multithreaded computing
|
||||
# this module learns from its mistakes far faster than 2022's captains
|
||||
# setup:
|
||||
|
||||
__version__ = "2.0.1.001"
|
||||
|
||||
#changelog should be viewed using print(analysis.__changelog__)
|
||||
__changelog__ = """changelog:
|
||||
2.0.1.001:
|
||||
- removed matplotlib import
|
||||
- removed graphloss()
|
||||
2.0.1.000:
|
||||
- added net, dataset, dataloader, and stdtrain template definitions
|
||||
- added graphloss function
|
||||
2.0.0.001:
|
||||
- added clear functions
|
||||
2.0.0.000:
|
||||
- complete rewrite planned
|
||||
- deprecated 1.0.0.xxx versions
|
||||
- added simple training loop
|
||||
1.0.0.xxx:
|
||||
-added generation of ANNS, basic SGD training
|
||||
"""
|
||||
|
||||
__author__ = (
|
||||
"Arthur Lu <arthurlu@ttic.edu>,"
|
||||
"Jacob Levine <jlevine@ttic.edu>,"
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
'clear',
|
||||
'net',
|
||||
'dataset',
|
||||
'dataloader',
|
||||
'train',
|
||||
'stdtrainer',
|
||||
]
|
||||
|
||||
import torch
|
||||
from os import system, name
|
||||
import numpy as np
|
||||
|
||||
def clear():
|
||||
if name == 'nt':
|
||||
_ = system('cls')
|
||||
else:
|
||||
_ = system('clear')
|
||||
|
||||
class net(torch.nn.Module): #template for standard neural net
|
||||
def __init__(self):
|
||||
super(net, self).__init__()
|
||||
|
||||
def forward(self, input):
|
||||
pass
|
||||
|
||||
class dataset(torch.utils.data.Dataset): #template for standard dataset
|
||||
|
||||
def __init__(self):
|
||||
super(torch.utils.data.Dataset).__init__()
|
||||
|
||||
def __getitem__(self, index):
|
||||
pass
|
||||
|
||||
def __len__(self):
|
||||
pass
|
||||
|
||||
def dataloader(dataset, batch_size, num_workers, shuffle = True):
|
||||
|
||||
return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
|
||||
|
||||
def train(device, net, epochs, trainloader, optimizer, criterion): #expects standard dataloader, which returns (inputs, labels)
|
||||
|
||||
dataset_len = trainloader.dataset.__len__()
|
||||
iter_count = 0
|
||||
running_loss = 0
|
||||
running_loss_list = []
|
||||
|
||||
for epoch in range(epochs): # loop over the dataset multiple times
|
||||
|
||||
for i, data in enumerate(trainloader, 0):
|
||||
|
||||
inputs = data[0].to(device)
|
||||
labels = data[1].to(device)
|
||||
|
||||
optimizer.zero_grad()
|
||||
|
||||
outputs = net(inputs)
|
||||
loss = criterion(outputs, labels.to(torch.float))
|
||||
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
# monitoring steps below
|
||||
|
||||
iter_count += 1
|
||||
running_loss += loss.item()
|
||||
running_loss_list.append(running_loss)
|
||||
clear()
|
||||
|
||||
print("training on: " + device)
|
||||
print("iteration: " + str(i) + "/" + str(int(dataset_len / trainloader.batch_size)) + " | " + "epoch: " + str(epoch) + "/" + str(epochs))
|
||||
print("current batch loss: " + str(loss.item))
|
||||
print("running loss: " + str(running_loss / iter_count))
|
||||
|
||||
	print("finished training")
|
||||
	return net, running_loss_list
|
||||
|
||||
def stdtrainer(net, criterion, optimizer, dataloader, epochs, batch_size):
|
||||
|
||||
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
||||
|
||||
net = net.to(device)
|
||||
criterion = criterion.to(device)
|
||||
# note: torch optimizers have no .to(); optimizer state is created on the device the model parameters already live on
|
||||
trainloader = dataloader
|
||||
|
||||
return train(device, net, epochs, trainloader, optimizer, criterion)
|
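The train loop above expects a device, a model, and a standard DataLoader yielding (inputs, labels). A minimal sketch, assuming the module is importable as `titanlearn`; the Linear model and random tensors below are stand-ins for a real net and dataset.

# minimal sketch, assuming the module above is importable as `titanlearn`
import torch
import torch.utils.data
import titanlearn

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torch.nn.Linear(4, 1).to(device)                 # concrete stand-in for the `net` template above
inputs, labels = torch.rand(64, 4), torch.rand(64, 1)
loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(inputs, labels), batch_size=8)

criterion = torch.nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
model, loss_history = titanlearn.train(device, model, 5, loader, optimizer, criterion)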
58
analysis-master/tra_analysis/visualization.py
Normal file
@@ -0,0 +1,58 @@
|
||||
# Titan Robotics Team 2022: Visualization Module
|
||||
# Written by Arthur Lu & Jacob Levine
|
||||
# Notes:
|
||||
# this should be imported as a python module using 'import visualization'
|
||||
# this should be included in the local directory or environment variable
|
||||
# fancy
|
||||
# setup:
|
||||
|
||||
__version__ = "1.0.0.001"
|
||||
|
||||
#changelog should be viewed using print(analysis.__changelog__)
|
||||
__changelog__ = """changelog:
|
||||
1.0.0.001:
|
||||
- added graphhistogram function as a fragment of visualize_pit.py
|
||||
1.0.0.000:
|
||||
- created visualization.py
|
||||
- added graphloss()
|
||||
- added imports
|
||||
"""
|
||||
|
||||
__author__ = (
|
||||
"Arthur Lu <arthurlu@ttic.edu>,"
|
||||
"Jacob Levine <jlevine@ttic.edu>,"
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
'graphloss',
|
||||
]
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
def graphloss(losses):
|
||||
|
||||
x = range(0, len(losses))
|
||||
plt.plot(x, losses)
|
||||
plt.show()
|
||||
|
||||
def graphhistogram(data, figsize, sharey = True): # expects a dictionary with each variable as a key and its occurrences as the value
|
||||
|
||||
fig, ax = plt.subplots(1, len(data), sharey=sharey, figsize=figsize)
|
||||
|
||||
i = 0
|
||||
|
||||
for variable in data:
|
||||
|
||||
ax[i].hist(data[variable])
|
||||
ax[i].invert_xaxis()
|
||||
|
||||
ax[i].set_xlabel('Variable')
|
||||
ax[i].set_ylabel('Frequency')
|
||||
ax[i].set_title(variable)
|
||||
|
||||
plt.yticks(np.arange(len(data[variable])))
|
||||
|
||||
i+=1
|
||||
|
||||
plt.show()
|
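graphhistogram above draws one histogram panel per variable in the input dictionary. A minimal sketch, assuming the module is importable as `visualization`; the two scouting-style variables here are hypothetical.

# minimal sketch, assuming the module above is importable as `visualization`
import numpy as np
import visualization

data = {
	"auto_balls": np.random.randint(0, 10, size=50),      # hypothetical variable: occurrences per match
	"teleop_balls": np.random.randint(0, 20, size=50),
}
visualization.graphhistogram(data, figsize=(10, 4))       # one shared-y histogram per variable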
@@ -1 +0,0 @@
|
||||
[{"outputType":{"type":"APK"},"apkInfo":{"type":"MAIN","splits":[],"versionCode":1,"versionName":"1.0","enabled":true,"outputFile":"app-debug.apk","fullName":"debug","baseName":"debug"},"path":"app-debug.apk","properties":{}}]
|
13
apps/android/source/.gitignore
vendored
@@ -1,13 +0,0 @@
|
||||
*.iml
|
||||
.gradle
|
||||
/local.properties
|
||||
/.idea/caches
|
||||
/.idea/libraries
|
||||
/.idea/modules.xml
|
||||
/.idea/workspace.xml
|
||||
/.idea/navEditor.xml
|
||||
/.idea/assetWizardSettings.xml
|
||||
.DS_Store
|
||||
/build
|
||||
/captures
|
||||
.externalNativeBuild
|
29
apps/android/source/.idea/codeStyles/Project.xml
generated
@@ -1,29 +0,0 @@
|
||||
<component name="ProjectCodeStyleConfiguration">
|
||||
<code_scheme name="Project" version="173">
|
||||
<Objective-C-extensions>
|
||||
<file>
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Import" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Macro" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Typedef" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Enum" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Constant" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Global" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Struct" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="FunctionPredecl" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Function" />
|
||||
</file>
|
||||
<class>
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Property" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="Synthesize" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="InitMethod" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="StaticMethod" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="InstanceMethod" />
|
||||
<option name="com.jetbrains.cidr.lang.util.OCDeclarationKind" value="DeallocMethod" />
|
||||
</class>
|
||||
<extensions>
|
||||
<pair source="cpp" header="h" fileNamingConvention="NONE" />
|
||||
<pair source="c" header="h" fileNamingConvention="NONE" />
|
||||
</extensions>
|
||||
</Objective-C-extensions>
|
||||
</code_scheme>
|
||||
</component>
|
18
apps/android/source/.idea/gradle.xml
generated
@@ -1,18 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="GradleSettings">
|
||||
<option name="linkedExternalProjectsSettings">
|
||||
<GradleProjectSettings>
|
||||
<option name="distributionType" value="DEFAULT_WRAPPED" />
|
||||
<option name="externalProjectPath" value="$PROJECT_DIR$" />
|
||||
<option name="modules">
|
||||
<set>
|
||||
<option value="$PROJECT_DIR$" />
|
||||
<option value="$PROJECT_DIR$/app" />
|
||||
</set>
|
||||
</option>
|
||||
<option name="resolveModulePerSourceSet" value="false" />
|
||||
</GradleProjectSettings>
|
||||
</option>
|
||||
</component>
|
||||
</project>
|
9
apps/android/source/.idea/misc.xml
generated
@@ -1,9 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="ProjectRootManager" version="2" languageLevel="JDK_1_7" project-jdk-name="11" project-jdk-type="JavaSDK">
|
||||
<output url="file://$PROJECT_DIR$/build/classes" />
|
||||
</component>
|
||||
<component name="ProjectType">
|
||||
<option name="id" value="Android" />
|
||||
</component>
|
||||
</project>
|
12
apps/android/source/.idea/runConfigurations.xml
generated
@@ -1,12 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="RunConfigurationProducerService">
|
||||
<option name="ignoredProducers">
|
||||
<set>
|
||||
<option value="org.jetbrains.plugins.gradle.execution.test.runner.AllInPackageGradleConfigurationProducer" />
|
||||
<option value="org.jetbrains.plugins.gradle.execution.test.runner.TestClassGradleConfigurationProducer" />
|
||||
<option value="org.jetbrains.plugins.gradle.execution.test.runner.TestMethodGradleConfigurationProducer" />
|
||||
</set>
|
||||
</option>
|
||||
</component>
|
||||
</project>
|
6
apps/android/source/.idea/vcs.xml
generated
@@ -1,6 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<project version="4">
|
||||
<component name="VcsDirectoryMappings">
|
||||
<mapping directory="$PROJECT_DIR$/../../.." vcs="Git" />
|
||||
</component>
|
||||
</project>
|
1
apps/android/source/app/.gitignore
vendored
@@ -1 +0,0 @@
|
||||
/build
|
@@ -1,28 +0,0 @@
|
||||
apply plugin: 'com.android.application'
|
||||
|
||||
android {
|
||||
compileSdkVersion 28
|
||||
defaultConfig {
|
||||
applicationId "com.example.titanscouting"
|
||||
minSdkVersion 16
|
||||
targetSdkVersion 28
|
||||
versionCode 1
|
||||
versionName "1.0"
|
||||
testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
|
||||
}
|
||||
buildTypes {
|
||||
release {
|
||||
minifyEnabled false
|
||||
proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dependencies {
|
||||
implementation fileTree(dir: 'libs', include: ['*.jar'])
|
||||
implementation 'com.android.support:appcompat-v7:28.0.0'
|
||||
implementation 'com.android.support.constraint:constraint-layout:1.1.3'
|
||||
testImplementation 'junit:junit:4.12'
|
||||
androidTestImplementation 'com.android.support.test:runner:1.0.2'
|
||||
androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2'
|
||||
}
|
21
apps/android/source/app/proguard-rules.pro
vendored
@@ -1,21 +0,0 @@
|
||||
# Add project specific ProGuard rules here.
|
||||
# You can control the set of applied configuration files using the
|
||||
# proguardFiles setting in build.gradle.
|
||||
#
|
||||
# For more details, see
|
||||
# http://developer.android.com/guide/developing/tools/proguard.html
|
||||
|
||||
# If your project uses WebView with JS, uncomment the following
|
||||
# and specify the fully qualified class name to the JavaScript interface
|
||||
# class:
|
||||
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
|
||||
# public *;
|
||||
#}
|
||||
|
||||
# Uncomment this to preserve the line number information for
|
||||
# debugging stack traces.
|
||||
#-keepattributes SourceFile,LineNumberTable
|
||||
|
||||
# If you keep the line number information, uncomment this to
|
||||
# hide the original source file name.
|
||||
#-renamesourcefileattribute SourceFile
|
@@ -1 +0,0 @@
|
||||
[{"outputType":{"type":"APK"},"apkInfo":{"type":"MAIN","splits":[],"versionCode":1,"versionName":"1.0","enabled":true,"outputFile":"app-release.apk","fullName":"release","baseName":"release"},"path":"app-release.apk","properties":{}}]
|
@@ -1,26 +0,0 @@
|
||||
package com.example.titanscouting;
|
||||
|
||||
import android.content.Context;
|
||||
import android.support.test.InstrumentationRegistry;
|
||||
import android.support.test.runner.AndroidJUnit4;
|
||||
|
||||
import org.junit.Test;
|
||||
import org.junit.runner.RunWith;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
* Instrumented test, which will execute on an Android device.
|
||||
*
|
||||
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
|
||||
*/
|
||||
@RunWith(AndroidJUnit4.class)
|
||||
public class ExampleInstrumentedTest {
|
||||
@Test
|
||||
public void useAppContext() {
|
||||
// Context of the app under test.
|
||||
Context appContext = InstrumentationRegistry.getTargetContext();
|
||||
|
||||
assertEquals("com.example.titanscouting", appContext.getPackageName());
|
||||
}
|
||||
}
|
@@ -1,28 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
package="com.example.titanscouting">
|
||||
|
||||
<uses-permission android:name="android.permission.INTERNET" />
|
||||
|
||||
<application
|
||||
android:allowBackup="true"
|
||||
android:icon="@mipmap/ic_launcher"
|
||||
android:label="@string/app_name"
|
||||
android:roundIcon="@drawable/binoculars_big"
|
||||
android:supportsRtl="true"
|
||||
android:theme="@style/AppTheme"
|
||||
android:usesCleartextTraffic="true">
|
||||
<activity android:name=".tits"></activity>
|
||||
<activity android:name=".launcher">
|
||||
<intent-filter>
|
||||
<action android:name="android.intent.action.MAIN" />
|
||||
|
||||
<category android:name="android.intent.category.LAUNCHER" />
|
||||
</intent-filter>
|
||||
</activity>
|
||||
<activity android:name=".MainActivity">
|
||||
|
||||
</activity>
|
||||
</application>
|
||||
|
||||
</manifest>
|
@@ -1,32 +0,0 @@
|
||||
package com.example.titanscouting;
|
||||
|
||||
import android.support.v7.app.AppCompatActivity;
|
||||
import android.os.Bundle;
|
||||
import android.webkit.WebView;
|
||||
import android.webkit.WebSettings;
|
||||
import android.webkit.WebViewClient;
|
||||
|
||||
public class MainActivity extends AppCompatActivity {
|
||||
|
||||
@Override
|
||||
protected void onCreate(Bundle savedInstanceState) {
|
||||
super.onCreate(savedInstanceState);
|
||||
setContentView(R.layout.activity_main);
|
||||
|
||||
|
||||
|
||||
WebView myWebView = (WebView) findViewById(R.id.webview);
|
||||
|
||||
myWebView.getSettings().setJavaScriptEnabled(true);
|
||||
myWebView.setWebViewClient(new WebViewClient());
|
||||
myWebView.loadUrl("http://titanrobotics.ddns.net:60080/public/");
|
||||
|
||||
myWebView.getSettings().setJavaScriptEnabled(true);
|
||||
myWebView.getSettings().setJavaScriptCanOpenWindowsAutomatically(true);
|
||||
myWebView.getSettings().setDomStorageEnabled(true);
|
||||
myWebView.getSettings().setDomStorageEnabled(true);
|
||||
|
||||
|
||||
|
||||
}
|
||||
}
|
@@ -1,49 +0,0 @@
|
||||
package com.example.titanscouting;
|
||||
|
||||
import android.support.v7.app.AppCompatActivity;
|
||||
import android.os.Bundle;
|
||||
import android.app.Activity;
|
||||
import android.content.Intent;
|
||||
import android.view.Menu;
|
||||
import android.view.View;
|
||||
import android.view.View.OnClickListener;
|
||||
import android.widget.Button;
|
||||
import android.widget.EditText;
|
||||
public class launcher extends AppCompatActivity {
|
||||
|
||||
Button button;
|
||||
EditText passField;
|
||||
|
||||
@Override
|
||||
protected void onCreate(Bundle savedInstanceState) {
|
||||
super.onCreate(savedInstanceState);
|
||||
setContentView(R.layout.activity_launcher);
|
||||
|
||||
// Locate the button in activity_main.xml
|
||||
button = (Button) findViewById(R.id.launch_button);
|
||||
final EditText passField = (EditText)findViewById(R.id.editText);
|
||||
// Capture button clicks
|
||||
button.setOnClickListener(new OnClickListener() {
|
||||
public void onClick(View arg0) {
|
||||
|
||||
// Start NewActivity.class
|
||||
if(passField.getText().toString().equals("gimmetits")){
|
||||
|
||||
Intent myIntent = new Intent(launcher.this,
|
||||
tits.class);
|
||||
startActivity(myIntent);
|
||||
|
||||
}
|
||||
else {
|
||||
Intent myIntent = new Intent(launcher.this,
|
||||
MainActivity.class);
|
||||
startActivity(myIntent);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
}
|
@@ -1,30 +0,0 @@
|
||||
package com.example.titanscouting;
|
||||
|
||||
import android.content.Intent;
|
||||
import android.support.v7.app.AppCompatActivity;
|
||||
import android.os.Bundle;
|
||||
import android.view.View;
|
||||
import android.widget.Button;
|
||||
import android.widget.EditText;
|
||||
|
||||
public class tits extends AppCompatActivity {
|
||||
Button button;
|
||||
@Override
|
||||
protected void onCreate(Bundle savedInstanceState) {
|
||||
super.onCreate(savedInstanceState);
|
||||
setContentView(R.layout.activity_tits);
|
||||
|
||||
button = (Button) findViewById(R.id.button);
|
||||
// Capture button clicks
|
||||
button.setOnClickListener(new View.OnClickListener() {
|
||||
public void onClick(View arg0) {
|
||||
|
||||
|
||||
Intent myIntent = new Intent(tits.this,
|
||||
MainActivity.class);
|
||||
startActivity(myIntent);
|
||||
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
@@ -1,34 +0,0 @@
|
||||
<vector xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
xmlns:aapt="http://schemas.android.com/aapt"
|
||||
android:width="108dp"
|
||||
android:height="108dp"
|
||||
android:viewportWidth="108"
|
||||
android:viewportHeight="108">
|
||||
<path
|
||||
android:fillType="evenOdd"
|
||||
android:pathData="M32,64C32,64 38.39,52.99 44.13,50.95C51.37,48.37 70.14,49.57 70.14,49.57L108.26,87.69L108,109.01L75.97,107.97L32,64Z"
|
||||
android:strokeWidth="1"
|
||||
android:strokeColor="#00000000">
|
||||
<aapt:attr name="android:fillColor">
|
||||
<gradient
|
||||
android:endX="78.5885"
|
||||
android:endY="90.9159"
|
||||
android:startX="48.7653"
|
||||
android:startY="61.0927"
|
||||
android:type="linear">
|
||||
<item
|
||||
android:color="#44000000"
|
||||
android:offset="0.0" />
|
||||
<item
|
||||
android:color="#00000000"
|
||||
android:offset="1.0" />
|
||||
</gradient>
|
||||
</aapt:attr>
|
||||
</path>
|
||||
<path
|
||||
android:fillColor="#FFFFFF"
|
||||
android:fillType="nonZero"
|
||||
android:pathData="M66.94,46.02L66.94,46.02C72.44,50.07 76,56.61 76,64L32,64C32,56.61 35.56,50.11 40.98,46.06L36.18,41.19C35.45,40.45 35.45,39.3 36.18,38.56C36.91,37.81 38.05,37.81 38.78,38.56L44.25,44.05C47.18,42.57 50.48,41.71 54,41.71C57.48,41.71 60.78,42.57 63.68,44.05L69.11,38.56C69.84,37.81 70.98,37.81 71.71,38.56C72.44,39.3 72.44,40.45 71.71,41.19L66.94,46.02ZM62.94,56.92C64.08,56.92 65,56.01 65,54.88C65,53.76 64.08,52.85 62.94,52.85C61.8,52.85 60.88,53.76 60.88,54.88C60.88,56.01 61.8,56.92 62.94,56.92ZM45.06,56.92C46.2,56.92 47.13,56.01 47.13,54.88C47.13,53.76 46.2,52.85 45.06,52.85C43.92,52.85 43,53.76 43,54.88C43,56.01 43.92,56.92 45.06,56.92Z"
|
||||
android:strokeWidth="1"
|
||||
android:strokeColor="#00000000" />
|
||||
</vector>
|
@@ -1,170 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<vector xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
android:width="108dp"
|
||||
android:height="108dp"
|
||||
android:viewportWidth="108"
|
||||
android:viewportHeight="108">
|
||||
<path
|
||||
android:fillColor="#008577"
|
||||
android:pathData="M0,0h108v108h-108z" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M9,0L9,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M19,0L19,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M29,0L29,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M39,0L39,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M49,0L49,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M59,0L59,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M69,0L69,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M79,0L79,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M89,0L89,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M99,0L99,108"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,9L108,9"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,19L108,19"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,29L108,29"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,39L108,39"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,49L108,49"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,59L108,59"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,69L108,69"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,79L108,79"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,89L108,89"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M0,99L108,99"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M19,29L89,29"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M19,39L89,39"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M19,49L89,49"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M19,59L89,59"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M19,69L89,69"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M19,79L89,79"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M29,19L29,89"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M39,19L39,89"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M49,19L49,89"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M59,19L59,89"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M69,19L69,89"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
<path
|
||||
android:fillColor="#00000000"
|
||||
android:pathData="M79,19L79,89"
|
||||
android:strokeWidth="0.8"
|
||||
android:strokeColor="#33FFFFFF" />
|
||||
</vector>
|
@@ -1,42 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<android.support.constraint.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
xmlns:app="http://schemas.android.com/apk/res-auto"
|
||||
xmlns:tools="http://schemas.android.com/tools"
|
||||
android:layout_width="match_parent"
|
||||
android:layout_height="match_parent"
|
||||
tools:context=".launcher">
|
||||
|
||||
<Button
|
||||
android:id="@+id/launch_button"
|
||||
android:layout_width="253dp"
|
||||
android:layout_height="56dp"
|
||||
android:layout_marginStart="8dp"
|
||||
android:layout_marginLeft="8dp"
|
||||
android:layout_marginTop="8dp"
|
||||
android:layout_marginEnd="8dp"
|
||||
android:layout_marginRight="8dp"
|
||||
android:layout_marginBottom="8dp"
|
||||
android:text="Launch Titan Scouting"
|
||||
app:layout_constraintBottom_toBottomOf="parent"
|
||||
app:layout_constraintEnd_toEndOf="parent"
|
||||
app:layout_constraintStart_toStartOf="parent"
|
||||
app:layout_constraintTop_toTopOf="parent" />
|
||||
|
||||
<EditText
|
||||
android:id="@+id/editText"
|
||||
android:layout_width="wrap_content"
|
||||
android:layout_height="wrap_content"
|
||||
android:layout_marginStart="8dp"
|
||||
android:layout_marginLeft="8dp"
|
||||
android:layout_marginTop="8dp"
|
||||
android:layout_marginEnd="8dp"
|
||||
android:layout_marginRight="8dp"
|
||||
android:layout_marginBottom="8dp"
|
||||
android:ems="10"
|
||||
android:inputType="textPassword"
|
||||
app:layout_constraintBottom_toBottomOf="parent"
|
||||
app:layout_constraintEnd_toEndOf="parent"
|
||||
app:layout_constraintStart_toStartOf="parent"
|
||||
app:layout_constraintTop_toBottomOf="@+id/launch_button"
|
||||
app:layout_constraintVertical_bias="0.0" />
|
||||
</android.support.constraint.ConstraintLayout>
|
@@ -1,19 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<android.support.constraint.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
xmlns:app="http://schemas.android.com/apk/res-auto"
|
||||
xmlns:tools="http://schemas.android.com/tools"
|
||||
android:layout_width="match_parent"
|
||||
android:layout_height="match_parent"
|
||||
tools:context=".MainActivity">
|
||||
|
||||
<WebView
|
||||
android:id="@+id/webview"
|
||||
android:layout_width="0dp"
|
||||
android:layout_height="0dp"
|
||||
app:layout_constraintBottom_toBottomOf="parent"
|
||||
app:layout_constraintEnd_toEndOf="parent"
|
||||
app:layout_constraintStart_toStartOf="parent"
|
||||
app:layout_constraintTop_toTopOf="parent"
|
||||
app:layout_constraintVertical_bias="0.48000002" />
|
||||
|
||||
</android.support.constraint.ConstraintLayout>
|
@@ -1,36 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<android.support.constraint.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
|
||||
xmlns:app="http://schemas.android.com/apk/res-auto"
|
||||
xmlns:tools="http://schemas.android.com/tools"
|
||||
android:layout_width="match_parent"
|
||||
android:layout_height="match_parent"
|
||||
tools:context=".tits">
|
||||
|
||||
<ImageView
|
||||
android:id="@+id/imageView"
|
||||
android:layout_width="372dp"
|
||||
android:layout_height="487dp"
|
||||
android:layout_marginTop="4dp"
|
||||
android:layout_marginBottom="215dp"
|
||||
app:layout_constraintBottom_toBottomOf="parent"
|
||||
app:layout_constraintEnd_toEndOf="parent"
|
||||
app:layout_constraintStart_toStartOf="parent"
|
||||
app:layout_constraintTop_toTopOf="parent"
|
||||
app:srcCompat="@drawable/uuh" />
|
||||
|
||||
<Button
|
||||
android:id="@+id/button"
|
||||
android:layout_width="198dp"
|
||||
android:layout_height="86dp"
|
||||
android:layout_marginStart="8dp"
|
||||
android:layout_marginLeft="8dp"
|
||||
android:layout_marginTop="8dp"
|
||||
android:layout_marginEnd="8dp"
|
||||
android:layout_marginRight="8dp"
|
||||
android:layout_marginBottom="8dp"
|
||||
android:text="Fuck Get Me Out"
|
||||
app:layout_constraintBottom_toBottomOf="parent"
|
||||
app:layout_constraintEnd_toEndOf="parent"
|
||||
app:layout_constraintStart_toStartOf="parent"
|
||||
app:layout_constraintTop_toBottomOf="@+id/imageView" />
|
||||
</android.support.constraint.ConstraintLayout>
|
@@ -1,5 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
|
||||
<background android:drawable="@drawable/ic_launcher_background" />
|
||||
<foreground android:drawable="@drawable/ic_launcher_foreground" />
|
||||
</adaptive-icon>
|
@@ -1,5 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<adaptive-icon xmlns:android="http://schemas.android.com/apk/res/android">
|
||||
<background android:drawable="@drawable/ic_launcher_background" />
|
||||
<foreground android:drawable="@drawable/ic_launcher_foreground" />
|
||||
</adaptive-icon>
|
@@ -1,6 +0,0 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<resources>
|
||||
<color name="colorPrimary">#008577</color>
|
||||
<color name="colorPrimaryDark">#00574B</color>
|
||||
<color name="colorAccent">#D81B60</color>
|
||||
</resources>
|
@@ -1,3 +0,0 @@
|
||||
<resources>
|
||||
<string name="app_name">TitanScout</string>
|
||||
</resources>
|
@@ -1,11 +0,0 @@
|
||||
<resources>
|
||||
|
||||
<!-- Base application theme. -->
|
||||
<style name="AppTheme" parent="Theme.AppCompat.Light.NoActionBar">
|
||||
<!-- Customize your theme here. -->
|
||||
<item name="colorPrimary">@color/colorPrimary</item>
|
||||
<item name="colorPrimaryDark">@color/colorPrimaryDark</item>
|
||||
<item name="colorAccent">@color/colorAccent</item>
|
||||
</style>
|
||||
|
||||
</resources>
|
@@ -1,17 +0,0 @@
|
||||
package com.example.titanscouting;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
* Example local unit test, which will execute on the development machine (host).
|
||||
*
|
||||
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
|
||||
*/
|
||||
public class ExampleUnitTest {
|
||||
@Test
|
||||
public void addition_isCorrect() {
|
||||
assertEquals(4, 2 + 2);
|
||||
}
|
||||
}
|
@@ -1,27 +0,0 @@
|
||||
// Top-level build file where you can add configuration options common to all sub-projects/modules.
|
||||
|
||||
buildscript {
|
||||
repositories {
|
||||
google()
|
||||
jcenter()
|
||||
|
||||
}
|
||||
dependencies {
|
||||
classpath 'com.android.tools.build:gradle:3.3.0'
|
||||
|
||||
// NOTE: Do not place your application dependencies here; they belong
|
||||
// in the individual module build.gradle files
|
||||
}
|
||||
}
|
||||
|
||||
allprojects {
|
||||
repositories {
|
||||
google()
|
||||
jcenter()
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
task clean(type: Delete) {
|
||||
delete rootProject.buildDir
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
# Project-wide Gradle settings.
|
||||
# IDE (e.g. Android Studio) users:
|
||||
# Gradle settings configured through the IDE *will override*
|
||||
# any settings specified in this file.
|
||||
# For more details on how to configure your build environment visit
|
||||
# http://www.gradle.org/docs/current/userguide/build_environment.html
|
||||
# Specifies the JVM arguments used for the daemon process.
|
||||
# The setting is particularly useful for tweaking memory settings.
|
||||
org.gradle.jvmargs=-Xmx1536m
|
||||
# When configured, Gradle will run in incubating parallel mode.
|
||||
# This option should only be used with decoupled projects. More details, visit
|
||||
# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
|
||||
# org.gradle.parallel=true
|
||||
|
||||
|
@@ -1,6 +0,0 @@
|
||||
#Wed Feb 06 15:44:44 CST 2019
|
||||
distributionBase=GRADLE_USER_HOME
|
||||
distributionPath=wrapper/dists
|
||||
zipStoreBase=GRADLE_USER_HOME
|
||||
zipStorePath=wrapper/dists
|
||||
distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.1-all.zip
|
172
apps/android/source/gradlew
vendored
@@ -1,172 +0,0 @@
#!/usr/bin/env sh

##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################

# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
while [ -h "$PRG" ] ; do
    ls=`ls -ld "$PRG"`
    link=`expr "$ls" : '.*-> \(.*\)$'`
    if expr "$link" : '/.*' > /dev/null; then
        PRG="$link"
    else
        PRG=`dirname "$PRG"`"/$link"
    fi
done
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null

APP_NAME="Gradle"
APP_BASE_NAME=`basename "$0"`

# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""

# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"

warn () {
    echo "$*"
}

die () {
    echo
    echo "$*"
    echo
    exit 1
}

# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "`uname`" in
  CYGWIN* )
    cygwin=true
    ;;
  Darwin* )
    darwin=true
    ;;
  MINGW* )
    msys=true
    ;;
  NONSTOP* )
    nonstop=true
    ;;
esac

CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar

# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
        # IBM's JDK on AIX uses strange locations for the executables
        JAVACMD="$JAVA_HOME/jre/sh/java"
    else
        JAVACMD="$JAVA_HOME/bin/java"
    fi
    if [ ! -x "$JAVACMD" ] ; then
        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
    fi
else
    JAVACMD="java"
    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi

# Increase the maximum file descriptors if we can.
if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
    MAX_FD_LIMIT=`ulimit -H -n`
    if [ $? -eq 0 ] ; then
        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
            MAX_FD="$MAX_FD_LIMIT"
        fi
        ulimit -n $MAX_FD
        if [ $? -ne 0 ] ; then
            warn "Could not set maximum file descriptor limit: $MAX_FD"
        fi
    else
        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
    fi
fi

# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi

# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
    JAVACMD=`cygpath --unix "$JAVACMD"`

    # We build the pattern for arguments to be converted via cygpath
    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
    SEP=""
    for dir in $ROOTDIRSRAW ; do
        ROOTDIRS="$ROOTDIRS$SEP$dir"
        SEP="|"
    done
    OURCYGPATTERN="(^($ROOTDIRS))"
    # Add a user-defined pattern to the cygpath arguments
    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
    fi
    # Now convert the arguments - kludge to limit ourselves to /bin/sh
    i=0
    for arg in "$@" ; do
        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
        CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option

        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
        else
            eval `echo args$i`="\"$arg\""
        fi
        i=$((i+1))
    done
    case $i in
        (0) set -- ;;
        (1) set -- "$args0" ;;
        (2) set -- "$args0" "$args1" ;;
        (3) set -- "$args0" "$args1" "$args2" ;;
        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
    esac
fi

# Escape application args
save () {
    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
    echo " "
}
APP_ARGS=$(save "$@")

# Collect all arguments for the java command, following the shell quoting and substitution rules
eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"

# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
    cd "$(dirname "$0")"
fi

exec "$JAVACMD" "$@"
apps/android/source/gradlew.bat (84 lines, vendored)
@@ -1,84 +0,0 @@
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################

@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal

set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%

@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=

@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome

set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init

echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe

if exist "%JAVA_EXE%" goto init

echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:init
@rem Get command-line arguments, handling Windows variants

if not "%OS%" == "Windows_NT" goto win9xME_args

:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2

:win9xME_args_slurp
if "x%~1" == "x" goto execute

set CMD_LINE_ARGS=%*

:execute
@rem Setup the command line

set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar

@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%

:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd

:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega
@@ -1 +0,0 @@
include ':app'
@@ -1,6 +0,0 @@
{
 "cells": [],
 "metadata": {},
 "nbformat": 4,
 "nbformat_minor": 2
}
@@ -1,6 +0,0 @@
2,3
6,5
5,5
5,8
6,6
6,8
@@ -1,53 +0,0 @@
4,6,4,5
3,4,8,8
2,8,3,0
5,8,4,7
4,0,9,6
2,0,1,6
5,3,5,0
2,0,2,2
4,8
6,6,5,1
2,1,5,1
2,0,3,9
2,2,5,2
1,1,1
6,8,2,3
2,1,3,6
4,2,9,2
5,8,2,2
7,6,0,8
2,3,5,8
4,2,4,1
3,1,1,0
7,6,0,9
5,1,2,5
6,9,0,6
9,3,0
2,0,6,2
3,0,6,7
4,7,8,7
2,7,0,9
7,5,6,0
5,9,3,4
4,7,0,2
5,1,4,8
1,6
4,1,5,6
1,8,8,4
1,6,7,5
1,7,9,7
2,4,5,1
7,7,3,8
1,0,1
1,7,3,9
3,7,3,4
1,7,3,6
3,0,6,1
2,7,2,5
7,2,3,7
3,6,9,5
6,9,6,8
1,7,8,1
4,2,9,6
2,3,3,8
@@ -1 +0,0 @@
21, 23, 39, 50, 89, 97, 191, 213, 233, 236, 272, 289, 308, 310, 314, 317, 329, 355, 428, 436
@@ -1 +0,0 @@
2022
@@ -1,178 +0,0 @@
Documentation of python module: analysis.py

revision version: 1.0.8.003


analysis.py{

    analysis.py should be imported as a python module using "import analysis" in the tr2022 directory, or using "from tr2022 import analysis" if the tr2022 modules are installed in the python Libs directory

    analysis.py is a module designed for statistical analyses and artificial neural network analyses

    functions{

        _init_device{

            initiates the device for tensorflow, using either a cuda device (the device number is specified via the "arg" argument) or the cpu (the "arg" argument is ignored)

            usage{

                analysis._init_device("cuda", arg) , where arg is the cuda device number

                analysis._init_device("cpu", 0) , which initiates the cpu as the tensorflow device

            }

        }

        load_csv{

            loads a csv file as a 2 dimensional array

            usage{

                analysis.load_csv(filepath) , where filepath is the path to the csv file to be loaded

            }

        }

        basic_stats{

            performs basic stats such as mean, median, mode, standard deviation, and variance on a set of data

            the function can run stats on a 1 dimensional array, or on a specified row or column of a 2 dimensional array

            how the statistics are run is specified by the "method" argument

            usage{

                analysis.basic_stats(data, "1d", 0) , where data is a 1 dimensional array

                analysis.basic_stats(data, "row", rownum) , where data is a 2 dimensional array and "rownum" is the row to run statistics on

                analysis.basic_stats(data, "column", columnnum) , where data is a 2 dimensional array and "columnnum" is the column to run statistics on

            }

        }

        z_score{

            returns the z score of a point relative to the population mean and standard deviation

            usage{

                analysis.z_score(datapoint, mean, stdev) , where "datapoint" is the specific data point to assign a z score, mean is the mean of the entire data set, and stdev is the standard deviation of the data set

            }

        }

        z_normalize{

            used in other functions, not important

        }

        stdev_z_split{

            used in other functions, not important

        }

        histo_analysis{

            returns an analysis of historical data; the analysis predicts a range of possible next data points given the historical data

            usage{

                analysis.histo_analysis(data, delta, low, high) , where data is the historical data to be predicted, delta is the step size (in standard deviations) that the predictor uses, and low and high are the bounds (in standard deviations) that the function predicts within

            }

        }

        poly_regression{

            used in other functions, not important

        }

        log_regression{

            used in other functions, not important

        }

        exp_regression{

            used in other functions, not important

        }

        tanh_regression{

            used in other functions, not important

        }

        r_squared{

            used in other functions

            returns the r^2 score of a curve and corresponding data

        }

        rms{

            used in other functions

            returns the root mean squared score of a curve and corresponding data

        }

        calc_overfit{

            used in other functions, not important

        }

        optimize_regression{

            returns a list of possible regressions given the x and y coordinates of the data

            usage{

                analysis.optimize_regression(x, y, range, resolution) , where x and y are the x and y values of each data point, range is the range of polynomial equations tried, and resolution is the detail of the bases used for exponential and logarithmic regressions

            }

        }

        select_best_regression{

            takes a list of equations and returns the best equation, either based on minimizing overfit or based on maximizing the root mean squared score

        }

        p_value{

            returns the p value of two data sets

        }

        basic_analysis{

            runs every stat on a given file

        }

    }

}
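Since analysis.py itself is not part of this hunk, here is a minimal, self-contained sketch of the statistics the documentation above describes. It does not call analysis.py; the helper names (z_score, basic_stats_1d), the use of the standard statistics module, and the sample data are assumptions made only for illustration.

import statistics

def z_score(datapoint, mean, stdev):
    # z = (datapoint - mean) / standard deviation, mirroring the documented analysis.z_score call
    return (datapoint - mean) / stdev

def basic_stats_1d(data):
    # rough stand-in for analysis.basic_stats(data, "1d", 0) on a 1 dimensional array
    return {
        "mean": statistics.mean(data),
        "median": statistics.median(data),
        "stdev": statistics.stdev(data),
        "variance": statistics.variance(data),
    }

if __name__ == "__main__":
    sample = [2, 3, 6, 5, 5, 8]  # made-up data
    stats = basic_stats_1d(sample)
    print(stats)
    print(z_score(8, stats["mean"], stats["stdev"]))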
@@ -1,12 +0,0 @@
{
"type": "service_account",
"project_id": "titanscoutandroid",
"private_key_id": "e7cde706a13a6fade555cce2bc46ee53f05a0b11",
"private_key": "-----BEGIN PRIVATE KEY-----\(gottem)/B\ntvFNxi7l6IsHUa+ijrDKGP3O2jbQCWjfBS0gNxpx65JXdKw4l+5p1lIyO5xe5b2m\nKOGQQf9Vd3X6xP9ttHD9ILjvdDRGvtR/bkD3e1ZdFSvt1PDddcLLPnIeDgkNQHXd\nzAYv0TIspJe6bUL3a5+HGK7nyfH7dGXZksNB/hiy3WS/eAgAnL6xzCRsdjK40Cf4\nP7B79bCNNnxnOy/GBpXG/CE8H+xGRr1Xuj5pmJFTc6GbaDbLc8bKMvVOzbCPYKgu\nbCaidtDoiMEJqy8AakrvN39DrlUOT3+kbAhJpw/fk9Rq4A2Mo+J2BuApze2hoYET\noI5HysuLAgMBAAECggEAGYkXgTTrxFmKLUC1+yFI3YO6yaIxrH4bdEStgF6Rq784\nWX+SZCjBKAYC5BrDOrp66/pavEJDo2Oi3WU9su2OqTu3nRJyD+2Uplan//3VnH+p\nOg06XVtGMQxoKghIcvRtj03z4K2CeQsGYXs/juIF4MOUCmMMezbVpkrn0CvyMZGM\n5vrFXvOwdKHyZaDXvql8nQIq6b46RC6ozLUBidEW37pHvuZm+QWD0W7VS2na4DKw\n+jIJz8zjsg3vCLpdTOMFxymW/LmusFTubn3evv4/8BLvw69seWPOyNi/PEjZWwgR\npQA7VYkETlZopZ6paHutmD5Yy4N0FjcJ6PMocwgKQQKBgQDnf6oFvZFV/QO1RACi\nhc0skytc7h96ePUWLwIMSMed3Jdr5ANC6tD4OIwGyrCDfKuLvsUCyEjHKhW8tarb\nTioaqgzM8Jwn+HMTyLJjzU4j8KhxgQWoLWri2HgRlqZV2Y1XNO3fRA8Zs3CsT7Fa\nIyEnKylWM6u0kQ2mMQicgQpulQKBgQC/BjSELv43ZGZKBg5m+Ps+PEFxJArvJgqA\nd+lXSHYkALWynyvukAuhmciAEKN1NKL7/DvxzfNRRXB32kmQkcjcsFZnnqbEkpq+\nzCOIJcesYN0k3kiCJuoNENdQXtAKGJrtHF1ilJfpt5Yuw67VC/B/JwkPF2wCsSfU\nHusyguFpnwKBgGKzVaRY7KxC0d/o/HROo+nLXYOjqxwmkihBJphiN2mg8ZZ4gsN3\nJl2OjnUe2h9VejZ8wbar+gugb+AjfJNAQkdYFVkThSCtlzLqMNTIZfaA1vB92BGa\nO6Y4MQkeuBCGTvLNiFXWyLFmhjWRTMZnj+0JQ/iS0zSLW8xtv4QqqG35AoGBAIee\n3zAtsP0gweKyNA11neLMouWx4jVx+6jD+Z2na4EaI+YiTe18xVVBOnF53qM68LAY\nn3KIdsRvmW7uQqZqaoIMi/vbTqlnMIhfpKZntEC1MKyZSD9nY2pNV6DO/8L7Pxsy\ntTZlKwma9vxSn9DQPjn4O91EEsJChnV6Uh+1flYfAoGADfomBP+kLm0jdvKm3Q+u\nA5S4ng3erDbCbZK0ADeVY5H0fNNJihx1yXx12g02T0biH6Efj+VpCeYC6W0wb2A1\nT/HqY1JSSsKQ7cPe1VEPKbbfn6PPrs+HbsHB8DDVPi9pysVfG7351PgNX/tb+iz/\nvJCSRvjRtxyFafuX4YQzWu0=\n-----END PRIVATE KEY-----\n",
"client_email": "firebase-adminsdk-wpsvx@titanscoutandroid.iam.gserviceaccount.com",
"client_id": "114864465329268712237",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/firebase-adminsdk-wpsvx%40titanscoutandroid.iam.gserviceaccount.com"
}
@@ -1,132 +0,0 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import firebase_admin\n",
    "from firebase_admin import credentials\n",
    "from firebase_admin import firestore\n",
    "import csv\n",
    "import numpy as np\n",
    "# Use a service account\n",
    "cred = credentials.Certificate(r'../keys/fsk.json')\n",
    "#add your own key as this is public. email me for details\n",
    "firebase_admin.initialize_app(cred)\n",
    "\n",
    "db = firestore.client()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "teams=db.collection('data').document('team-2022').collection(\"Midwest 2019\").get()\n",
    "full=[]\n",
    "tms=[]\n",
    "for team in teams:\n",
    "    data=[]\n",
    "    tms.append(team.id)\n",
    "    reports=db.collection('data').document('team-2022').collection(\"Midwest 2019\").document(team.id).collection(\"matches\").get()\n",
    "    for report in reports:\n",
    "        data.append(db.collection('data').document('team-2022').collection(\"Midwest 2019\").document(team.id).collection(\"matches\").document(report.id).get().to_dict())\n",
    "    full.append(data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def expcsv(loc,data):\n",
    "    with open(loc+'.csv', 'w', newline='', encoding='utf-8') as csvfile:\n",
    "        w = csv.writer(csvfile, delimiter=',', quotechar=\"\\\"\", quoting=csv.QUOTE_MINIMAL)\n",
    "        for i in data:\n",
    "            w.writerow(i)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def keymatch(ld):\n",
    "    keys=set([])\n",
    "    for i in ld:\n",
    "        for j in i.keys():\n",
    "            keys.add(j)\n",
    "    kl=list(keys)\n",
    "    data=[]\n",
    "    for i in kl:\n",
    "        data.append([i])\n",
    "    for i in kl:\n",
    "        for j in ld:\n",
    "            try:\n",
    "                (data[kl.index(i)]).append(j[i])\n",
    "            except:\n",
    "                (data[kl.index(i)]).append(\"\")\n",
    "    return data\n",
    "wn=[]\n",
    "for i in full:\n",
    "    wn.append(np.transpose(np.array(keymatch(i))).tolist())\n",
    "for i in range(len(wn)):\n",
    "    expcsv(tms[i],wn[i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
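For context on the notebook above: keymatch() takes the union of keys across one team's report dicts, builds one column per key, pads missing values with empty strings, and the result is transposed and written out with expcsv() as one CSV per team. A hedged sketch of the same idea using the standard library's csv.DictWriter follows; it is not what the notebook uses, and the export_reports name and the sample data are invented for illustration.

import csv

def export_reports(path, reports):
    # union of keys across all report dicts, as keymatch() collects
    fieldnames = sorted({key for report in reports for key in report})
    with open(path + ".csv", "w", newline="", encoding="utf-8") as csvfile:
        # restval="" pads any key a report is missing, matching the notebook's empty-string fallback
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, restval="")
        writer.writeheader()
        writer.writerows(reports)

if __name__ == "__main__":
    sample = [{"match": "match-1", "speed": "Medium"}, {"match": "match-2", "endingHab": "L1"}]
    export_reports("team-demo", sample)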
@@ -1,191 +0,0 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import operator\n",
    "import csv\n",
    "#constants\n",
    "k=100\n",
    "rdf=400\n",
    "\n",
    "def win_prob(yas,oas):\n",
    "    return 1/(1+10**(1/rdf*(oas-yas)))\n",
    "def new_score(oscore,yas,oas,outcome):\n",
    "    return (oscore)+k*(outcome-win_prob(yas,oas))\n",
    "\n",
    "def readFile(filepath):\n",
    "\n",
    "    with open(filepath) as csvfile:\n",
    "        lines = csv.reader(csvfile, delimiter=',', quotechar='|')\n",
    "        data = []\n",
    "        try:\n",
    "            for row in lines:\n",
    "                data.append((', '.join(row)).split(\", \"))\n",
    "        except:\n",
    "            pass\n",
    "\n",
    "    return data\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "sb=readFile('scoreboard.csv')\n",
    "teams=set([])\n",
    "for i in sb:\n",
    "    teams.add(i[2])\n",
    "    teams.add(i[3])\n",
    "    teams.add(i[4])\n",
    "    teams.add(i[5])\n",
    "    teams.add(i[6])\n",
    "    teams.add(i[7])\n",
    "list(teams)\n",
    "tsd={}\n",
    "for i in list(teams):\n",
    "    tsd[i]=500\n",
    "for i in sb:\n",
    "    ras=tsd[i[2]]+tsd[i[3]]+tsd[i[4]]\n",
    "    bas=tsd[i[5]]+tsd[i[6]]+tsd[i[7]]\n",
    "    outcome=0\n",
    "    if i[8]>i[9]:\n",
    "        outcome=1\n",
    "    elif i[9]==i[8]:\n",
    "        outcome=.5\n",
    "    for j in range(2,5,1):\n",
    "        tsd[i[j]]=new_score(tsd[i[j]],ras,bas,outcome)\n",
    "    for j in range(5,8,1):\n",
    "        tsd[i[j]]=new_score(tsd[i[j]],bas,ras,1-outcome)\n",
    "    \n",
    "rankinfs = sorted(tsd.items(), key=operator.itemgetter(1), reverse=True) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('5934', 833.1830859510761),\n",
       " ('48', 739.9728094005745),\n",
       " ('16', 705.9551102513088),\n",
       " ('3061', 702.9024075826381),\n",
       " ('3695', 700.1366129603175),\n",
       " ('2338', 696.932652524603),\n",
       " ('4096', 652.7038522070818),\n",
       " ('3488', 648.9766694246662),\n",
       " ('4156', 638.0881039843185),\n",
       " ('101', 626.9019952260375),\n",
       " ('6823', 613.1453027540894),\n",
       " ('930', 610.7992869961017),\n",
       " ('2062', 608.0647276785079),\n",
       " ('2830', 600.0239706519325),\n",
       " ('5847', 589.0350788865741),\n",
       " ('1736', 584.367394696335),\n",
       " ('2358', 577.5524744241919),\n",
       " ('5822', 575.4792058357157),\n",
       " ('1675', 569.9944280943398),\n",
       " ('111', 559.5150813478114),\n",
       " ('1797', 537.9429025884093),\n",
       " ('5148', 533.9623603303631),\n",
       " ('1781', 519.5609268991466),\n",
       " ('6651', 516.3195829730869),\n",
       " ('6906', 501.7408783344565),\n",
       " ('2022', 482.2765218696747),\n",
       " ('7237', 474.4616019824547),\n",
       " ('1884', 468.87487164611116),\n",
       " ('2039', 467.0990375388428),\n",
       " ('2451', 462.70812165138807),\n",
       " ('7608', 462.0188420364676),\n",
       " ('1739', 459.00590084129664),\n",
       " ('2252', 456.43201385653043),\n",
       " ('2151', 439.4118535382677),\n",
       " ('4702', 435.5729578944645),\n",
       " ('7738', 423.16353418538296),\n",
       " ('4296', 420.5085609998351),\n",
       " ('3734', 418.47615429198186),\n",
       " ('7609', 409.29347746836567),\n",
       " ('2709', 403.9793052336144),\n",
       " ('3067', 402.77020998279653),\n",
       " ('2136', 386.0798688817299),\n",
       " ('5350', 383.4109800245315),\n",
       " ('5125', 377.1609505922246),\n",
       " ('4292', 357.43188113820975),\n",
       " ('3110', 344.8643460008074),\n",
       " ('2725', 332.21429556184444),\n",
       " ('4645', 329.6452389079341),\n",
       " ('6968', 329.08368400289095),\n",
       " ('4241', 315.12115012426335),\n",
       " ('4787', 288.64374620808815),\n",
       " ('7560', 279.7779164676232),\n",
       " ('2016', 247.25607506869346)]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "rankinfs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
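The ranking cells above implement an Elo-style update: an alliance's expected win probability is 1/(1 + 10^((opponent_total - own_total)/rdf)), and each team on that alliance then moves by k * (outcome - expected). The standalone sketch below restates that update outside the notebook; the constants match the notebook, but the alliance totals in the worked example are made up.

# Elo-style update in the same form as the notebook's win_prob()/new_score()
K = 100
RDF = 400

def win_prob(your_alliance_total, opp_alliance_total):
    # expected probability that your alliance wins
    return 1 / (1 + 10 ** ((opp_alliance_total - your_alliance_total) / RDF))

def new_score(old_score, your_alliance_total, opp_alliance_total, outcome):
    # outcome is 1 for a win, 0.5 for a tie, 0 for a loss
    return old_score + K * (outcome - win_prob(your_alliance_total, opp_alliance_total))

if __name__ == "__main__":
    # an alliance rated 1500 in total upsets one rated 1600
    print(win_prob(1500, 1600))            # about 0.36
    print(new_score(500, 1500, 1600, 1))   # a 500-rated team on the winning side gains about 64 points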
@@ -1,57 +0,0 @@
Match,Time,Red1,Red2,Red3,Blue1,Blue2,Blue3,RedScore,BlueScore
Qualification 1 ,Fri 3/8 - 9:00 AM,7560,3061,3488,1675,2451,5148,37,26
Qualification 2 ,Fri 3/8 - 9:09 AM,7608,1884,2338,4702,2151,111,23,33
Qualification 3 ,Fri 3/8 - 9:18 AM,2830,7609,101,2039,4292,16,45,35
Qualification 4 ,Fri 3/8 - 9:27 AM,6968,1736,4096,4645,3067,2252,40,20
Qualification 5 ,Fri 3/8 - 9:36 AM,4156,2016,3695,2022,3110,3734,46,29
Qualification 6 ,Fri 3/8 - 9:45 AM,5934,5822,1739,7738,2062,4296,38,33
Qualification 7 ,Fri 3/8 - 9:54 AM,930,5125,4787,7237,6906,1781,37,41
Qualification 8 ,Fri 3/8 - 10:03 AM,2725,5847,5350,2709,1797,4241,25,24
Qualification 9 ,Fri 3/8 - 10:12 AM,2358,6823,2136,48,6651,3734,17,40
Qualification 10 ,Fri 3/8 - 10:21 AM,3067,7608,5148,2062,4292,5822,15,39
Qualification 11 ,Fri 3/8 - 10:30 AM,111,6968,4787,16,2022,1675,41,63
Qualification 12 ,Fri 3/8 - 10:39 AM,4702,7738,2830,6906,2725,2016,52,24
Qualification 13 ,Fri 3/8 - 10:48 AM,2709,2358,7609,101,930,6823,16,42
Qualification 14 ,Fri 3/8 - 10:56 AM,2136,5934,3695,1736,7237,2151,45,25
Qualification 15 ,Fri 3/8 - 11:04 AM,3110,1781,2252,1797,2338,3488,35,65
Qualification 16 ,Fri 3/8 - 11:12 AM,48,4156,4241,4296,1884,3061,48,34
Qualification 17 ,Fri 3/8 - 11:20 AM,2039,6651,5125,4096,7560,5350,31,23
Qualification 18 ,Fri 3/8 - 11:28 AM,5847,2451,16,4645,1739,7237,62,15
Qualification 19 ,Fri 3/8 - 11:36 AM,3734,3067,1797,7609,5148,5934,18,31
Qualification 20 ,Fri 3/8 - 11:44 AM,5822,2725,4241,2338,4156,930,20,55
Qualification 21 ,Fri 3/8 - 11:52 AM,6968,2016,2709,7608,2151,6823,12,14
Qualification 22,Fri 3/8 - 1:00 PM,1736,7560,1739,4292,5350,48,43,58
Qualification 23,Fri 3/8 - 1:09 PM,2062,1781,2022,2451,4096,6651,35,45
Qualification 24,Fri 3/8 - 1:18 PM,111,4296,3488,4787,2136,2039,49,27
Qualification 25,Fri 3/8 - 1:27 PM,101,3061,5847,2252,2830,6906,53,40
Qualification 26,Fri 3/8 - 1:36 PM,1675,4645,4702,3695,3110,7738,15,71
Qualification 27,Fri 3/8 - 1:44 PM,1736,1884,2358,2016,5125,7560,25,23
Qualification 28,Fri 3/8 - 1:52 PM,4156,2725,6651,3488,7237,3067,42,39
Qualification 29,Fri 3/8 - 2:00 PM,3734,5350,2151,6906,2062,101,18,36
Qualification 30,Fri 3/8 - 2:08 PM,5847,7738,6823,2338,111,4096,54,58
Qualification 31,Fri 3/8 - 2:16 PM,2709,48,4702,5934,2039,2252,20,49
Qualification 32,Fri 3/8 - 2:24 PM,1884,930,2830,1797,1675,6968,61,49
Qualification 33,Fri 3/8 - 2:32 PM,7609,1739,3695,5148,4241,4787,85,54
Qualification 34,Fri 3/8 - 2:40 PM,5125,4645,2022,3061,2136,4292,37,39
Qualification 35,Fri 3/8 - 2:48 PM,2451,2358,7608,4296,16,3110,37,18
Qualification 36,Fri 3/8 - 2:56 PM,1781,2039,3734,5822,7237,5847,30,61
Qualification 37,Fri 3/8 - 3:04 PM,3488,5350,930,1884,3695,111,52,54
Qualification 38,Fri 3/8 - 3:12 PM,2016,5934,2338,7609,7560,4156,66,24
Qualification 39,Fri 3/8 - 3:20 PM,2252,6651,2136,4787,7608,1739,27,23
Qualification 40,Fri 3/8 - 3:28 PM,4096,4702,5148,2358,4241,101,37,28
Qualification 41,Fri 3/8 - 3:36 PM,3110,5822,2451,48,6968,6906,42,68
Qualification 42,Fri 3/8 - 3:44 PM,16,1736,1781,7738,3061,2725,56,43
Qualification 43,Fri 3/8 - 3:52 PM,1797,5125,4292,6823,2709,2062,32,42
Qualification 44,Fri 3/8 - 4:00 PM,2022,4296,3067,2151,2830,1675,26,31
Qualification 45,Fri 3/8 - 4:08 PM,4645,48,5847,5148,3488,2016,63,48
Qualification 46,Fri 3/8 - 4:16 PM,3110,4096,930,3061,4787,5934,42,56
Qualification 47,Fri 3/8 - 4:24 PM,2725,6823,2451,7608,3695,2039,29,57
Qualification 48,Fri 3/8 - 4:32 PM,2062,1675,4156,101,4702,2136,40,31
Qualification 49,Fri 3/8 - 4:40 PM,2022,7738,7237,5350,2252,7609,51,37
Qualification 50,Fri 3/8 - 4:48 PM,7560,4296,2151,1781,1797,4645,21,39
Qualification 51,Fri 3/8 - 4:56 PM,2338,1736,5822,2830,2709,6651,68,37
Qualification 52,Fri 3/8 - 5:04 PM,6906,1739,2358,4292,6968,1884,33,29
Qualification 53,Fri 3/8 - 5:12 PM,111,16,3067,4241,3734,5125,65,41
Qualification 54,Fri 3/8 - 5:20 PM,3061,1675,48,7609,5847,7608,65,42
Qualification 55,Fri 3/8 - 5:28 PM,6651,2016,2062,930,2252,4296,43,77
Qualification 56,Fri 3/8 - 5:36 PM,4292,5148,2725,2151,4787,3110,19,3
@@ -1,4 +0,0 @@
gmkR7hN4D1fQguey5X5V48d3PhO2,sandstormRocketCargoFailure,teleOpRocketCargoSuccess,sandstormRocketHatchSuccess,strongMediumTeleop,teleOpCargoShipHatchFailure,sandstormRocketCargoSuccess,size,speed,strategy,teamDBRef,sandstormCross,sandstormCrossBonus,teleOpCargoShipHatchSuccess,teleOpRocketHatchSuccess,sandstormCargoShipCargoFailure,lowRocketSuccessTeleop,teleOpRocketHatchFailure,teleOpRocketCargoFailure,cargoSuccessTeleop,sandstormCargoShipHatchSuccess,startingHatch,9fv7QhcLPsfU59sRrPq7LcJlD8J3,fillChoice,sandstormCargoShipCargoSuccess,strongMedium,functional,teleOpCargoShipCargoFailure,sandstormRocketHatchFailure,sandstormCargoShipHatchFailure,fillChoiceTeleop,contrubution,HABClimb,match,hiRocketSuccessTeleop,endingHab,teleOpCargoShipCargoSuccess
,0,0,0,,1,0,,slow,,team-101,L2,,1,2,0,,0,0,,0,,"{'notes': 'not good with balls, was able to reach high with hatch', 'contribution': '', 'fillChoice': 'Cargo', 'strategy': 'place hatches, shoe off long arm. not defensive', 'strongMedium': 'Hatch', 'cargoSuccessTeleop': '', 'teamDBRef': 'team-101', 'speed': 'Medium', 'hiRocketSuccessTeleop': 'High', 'size': '', 'match': 'match-13', 'fillChoiceTeleop': 'High Rocket', 'strongMediumTeleop': 'Hatch', 'endingHab': '', 'functional': 'Yes', 'lowRocketSuccessTeleop': '', 'startingHatch': 'Hab I'}",,0,,,0,0,0,,,None,match-13,,,0
,,,,Neither,,,Medium,Medium,"Cargo, but couldn't pick up",team-101,L1,L1,,,,High,,,Mid,,Hab I,,Cargo,,Hatch,Yes,,,,Low Rocket,Weak,,match-25,N/A,L1,
"{'notes': '', 'contribution': 'Great', 'fillChoice': '', 'strategy': 'quick bottom hatches but still slow', 'strongMedium': 'Hatch', 'cargoSuccessTeleop': 'High', 'teamDBRef': 'team-101', 'speed': 'Medium', 'hiRocketSuccessTeleop': '', 'size': 'Medium', 'match': 'match-29', 'fillChoiceTeleop': 'Low Rocket', 'strongMediumTeleop': 'Both', 'endingHab': 'None', 'functional': 'No', 'lowRocketSuccessTeleop': 'Mid', 'startingHatch': 'Hab I'}",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,