mirror of https://github.com/xbgmsharp/postgsail.git (synced 2025-09-17 03:07:47 +00:00)

Compare commits: v0.7.1...ee44420e30 (246 commits)
.env.example

@@ -6,17 +6,23 @@ POSTGRES_DB=postgres
PGSAIL_AUTHENTICATOR_PASSWORD=password
PGSAIL_GRAFANA_PASSWORD=password
PGSAIL_GRAFANA_AUTH_PASSWORD=password
# SMTP server settings
PGSAIL_EMAIL_FROM=root@localhost
PGSAIL_EMAIL_SERVER=localhost
#PGSAIL_EMAIL_USER= Comment if not use
#PGSAIL_EMAIL_PASS= Comment if not use
# Pushover settings
#PGSAIL_PUSHOVER_APP_TOKEN= Comment if not use
#PGSAIL_PUSHOVER_APP_URL= Comment if not use
# TELEGRAM BOT, ask BotFather
#PGSAIL_TELEGRAM_BOT_TOKEN= Comment if not use
# webapp entrypoint, typically the public DNS or IP
PGSAIL_APP_URL=http://localhost:8080
# API entrypoint from the webapp, typically the public DNS or IP
PGSAIL_API_URL=http://localhost:3000
# POSTGREST ENV Settings
PGRST_DB_URI=postgres://authenticator:${PGSAIL_AUTHENTICATOR_PASSWORD}@db:5432/signalk
# % cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 42 | head -n 1
PGRST_JWT_SECRET=_at_least_32__char__long__random
# Grafana ENV Settings
GF_SECURITY_ADMIN_PASSWORD=password
.github/FUNDING.yml (vendored, new file, +14)

@@ -0,0 +1,14 @@
# These are supported funding model platforms

github: [xbgmsharp]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
polar: # Replace with a single Polar username
buy_me_a_coffee: # Replace with a single Buy Me a Coffee username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
.github/workflows/db-lint.yml (vendored, 12 changes)

@@ -27,7 +27,7 @@ jobs:
run: cp .env.example .env

- name: Pull Docker images
run: docker-compose pull db api
run: docker compose pull db api

- name: Run PostgSail Database & schemalint
# Environment variables
@@ -41,10 +41,10 @@ jobs:
run: |
set -eu
source .env
docker-compose stop || true
docker-compose rm || true
docker-compose up -d db && sleep 30 && docker-compose up -d api && sleep 5
docker-compose ps -a
docker compose stop || true
docker compose rm || true
docker compose up -d db && sleep 30 && docker compose up -d api && sleep 5
docker compose ps -a
echo ${PGSAIL_API_URL}
curl ${PGSAIL_API_URL}
npm i -D schemalint
@@ -52,4 +52,4 @@
- name: Show the logs
if: always()
run: |
docker-compose logs
docker compose logs
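The hunks above (and the matching ones in the workflows below) replace the legacy `docker-compose` V1 binary with the Compose V2 plugin (`docker compose`). A quick hedged check that a runner or host actually provides V2 (assumes only that Docker is installed):

```bash
# Prefer the Compose V2 plugin; fall back to the legacy binary if absent.
if docker compose version >/dev/null 2>&1; then
  echo "Compose V2 plugin available"
else
  echo "Falling back to legacy docker-compose" >&2
  docker-compose version
fi
```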
.github/workflows/db-test.yml (vendored, 14 changes)

@@ -29,10 +29,10 @@ jobs:
run: cp .env.example .env

- name: Pull Docker images
run: docker-compose pull db api
run: docker compose pull db api

- name: Build Docker images
run: docker-compose -f docker-compose.dev.yml -f docker-compose.yml build tests
run: docker compose -f docker-compose.dev.yml -f docker-compose.yml build tests

- name: Install psql
run: sudo apt install postgresql-client
@@ -49,10 +49,10 @@ jobs:
run: |
set -eu
source .env
docker-compose stop || true
docker-compose rm || true
docker-compose up -d db && sleep 30 && docker-compose up -d api && sleep 5
docker-compose ps -a
docker compose stop || true
docker compose rm || true
docker compose up -d db && sleep 30 && docker compose up -d api && sleep 5
docker compose ps -a
echo ${PGSAIL_API_URL}
curl ${PGSAIL_API_URL}
psql -c "select 1"
@@ -70,4 +70,4 @@
- name: Show the logs
if: always()
run: |
docker-compose logs
docker compose logs
.github/workflows/frontend-test.yml (vendored, 10 changes)

@@ -49,10 +49,10 @@ jobs:
run: |
set -eu
source .env
docker-compose stop || true
docker-compose rm || true
docker-compose up -d db && sleep 30 && docker-compose up -d api && sleep 5
docker-compose ps -a
docker compose stop || true
docker compose rm || true
docker compose up -d db && sleep 30 && docker compose up -d api && sleep 5
docker compose ps -a
echo "Test PostgSail Web Unit Test"
docker compose -f docker-compose.dev.yml -f docker-compose.yml up -d web_dev && sleep 100
docker compose -f docker-compose.dev.yml -f docker-compose.yml logs web_dev
@@ -67,4 +67,4 @@
- name: Show the logs
if: always()
run: |
docker-compose logs
docker compose logs
.github/workflows/grafana-test.yml (vendored, 17 changes)

@@ -26,7 +26,7 @@ jobs:
run: cp .env.example .env

- name: Pull Docker images
run: docker-compose pull db app
run: docker compose pull db app

- name: Run PostgSail Grafana test
# Environment variables
@@ -40,15 +40,16 @@ jobs:
run: |
set -eu
source .env
docker-compose stop || true
docker-compose rm || true
docker-compose up -d db && sleep 30
docker-compose ps -a
docker compose stop || true
docker compose rm || true
docker compose up -d db && sleep 30
docker compose ps -a
echo "Test PostgSail Grafana Unit Test"
docker-compose up -d app && sleep 5
docker-compose ps -a
docker compose up -d app && sleep 5
docker compose ps -a
curl http://localhost:3001/
docker compose exec -i db psql -Uusername signalk -c "select public.cron_process_grafana_fn();"
- name: Show the logs
if: always()
run: |
docker-compose logs
docker compose logs
.gitignore (vendored, 1 change)

@@ -1,5 +1,6 @@
.DS_Store
.env
docker-compose.mm.yml
initdb/*.csv
initdb/*.no
initdb/*.jwk
CHANGELOG.md (new file, +1)

@@ -0,0 +1 @@
## Please see [Releases](https://github.com/xbgmsharp/postgsail/releases) for the release notes.
CODE_OF_CONDUCT.md (new file, +45)

@@ -0,0 +1,45 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainer using any of the [private contact addresses](https://github.com/dec0dOS/amazing-github-template#support). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4, available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>

For answers to common questions about this code of conduct, see <https://www.contributor-covenant.org/faq>
CONTRIBUTING.md (new file, +3)

@@ -0,0 +1,3 @@
Styleguides

Ensure your code follows the lint formatting.
README.md (271 changes)

@@ -1,27 +1,89 @@
# PostgSail
<br/>
<p align="center">
  <a href="https://github.com/xbgmsharp/postgsail">
    <img src="https://iot.openplotter.cloud/android-chrome-192x192.png" alt="Logo" width="80" height="80">
  </a>

Effortless cloud-based solution for storing and sharing your SignalK data. Allows you to effortlessly log your sails and monitor your boat with historical data.
  <h3 align="center">PostgSail</h3>

  <p align="center">
    PostgSail is an open-source alternative to traditional vessel data management!
    <br/>
    <br/>
    <a href="https://github.com/xbgmsharp/postgsail/blob/main/docs/README.md"><strong>Explore the docs »</strong></a>
    <br/>
    <br/>
    <a href="#about-the-project">View Demo</a>
    .
    <a href="https://github.com/xbgmsharp/postgsail/issues">Report Bug</a>
    .
    <a href="https://github.com/xbgmsharp/postgsail/issues">Request Feature</a>
    .
    <a href="https://xbgmsharp.github.io/postgsail/">Website</a>
    .
    <a href="https://github.com/sponsors/xbgmsharp">Sponsors</a>
    .
    <a href="https://discord.gg/uuZrwz4dCS">Discord</a>
    .
    <a href="https://deepwiki.com/xbgmsharp/postgsail/">DeepWiki</a>
  </p>
</p>

[](https://github.com/xbgmsharp/postgsail/releases/latest)
[](#license)
[](https://github.com/xbgmsharp/postgsail/issues)
[](http://makeapullrequest.com)

[](https://github.com/xbgmsharp/postgsail/stargazers)
[](https://deepwiki.com/xbgmsharp/postgsail)

[](https://github.com/xbgmsharp/postgsail/actions/workflows/db-test.yml)
[](https://github.com/xbgmsharp/postgsail/actions/workflows/frontend-test.yml)
[](https://github.com/xbgmsharp/postgsail/actions/workflows/grafana-test.yml)

signalk-postgsail:
[](https://github.com/xbgmsharp/signalk-postgsail/releases/latest)
[](https://github.com/xbgmsharp/signalk-postgsail/releases/latest)

postgsail-backend:
[](https://github.com/xbgmsharp/postgsail/releases/latest)

postgsail-frontend:
[](https://github.com/xbgmsharp/vuestic-postgsail/releases/latest)
[](https://github.com/xbgmsharp/vuestic-postgsail/releases/latest)

postgsail-telegram-bot:
[](https://github.com/xbgmsharp/postgsail-telegram-bot/releases/latest)
[](https://github.com/xbgmsharp/postgsail-telegram-bot/releases/latest)

[](https://www.bestpractices.dev/projects/8124)

## Table Of Contents

- [Table Of Contents](#table-of-contents)
- [About The Project](#about-the-project)
- [Features](#features)
- [Cloud-hosted PostgSail](#cloud-hosted-postgsail)
- [On-Premise](#on-premise)
- [Roadmap](#roadmap)
- [Contributing](#contributing)
- [Creating A Pull Request](#creating-a-pull-request)
- [License](#license)
- [Acknowledgements](#acknowledgements)

## About The Project

https://github.com/xbgmsharp/signalk-postgsail/assets/1498985/b2669c39-11ad-4a50-9f91-9397f9057ee8

Effortless cloud-based solution for storing and sharing your SignalK data. Allows you to effortlessly log your sails and monitor your boat with historical data.

Here's how:

It is all about SQL, object-relational, time-series, spatial databases with a bit of python.

PostgSail is an open-source alternative to traditional vessel data management.
It is based on a well-known open-source technology stack: SignalK, PostgreSQL, TimescaleDB, PostGIS, and PostgREST. It integrates perfectly with a standard monitoring stack like Grafana.

To understand the why and how, you might want to read [Why.md](https://github.com/xbgmsharp/postgsail/blob/main/Why.md)

## Features

- Automatically log your voyages without manually starting or stopping a trip.
@@ -29,6 +91,7 @@ postgsail-telegram-bot:
- Timelapse video your trips, with or without time control.
- Add custom notes to your logs.
- Export to CSV, GPX, GeoJSON, KML and download your logs.
- Export your logs as image (PNG) or video (MP4).
- Aggregate your trip statistics: longest voyage, time spent at anchorages, home ports, etc.
- See your moorages on a global map, with incoming and outgoing voyages from each trip.
- Monitor your boat (position, depth, wind, temperature, battery charge status, etc.) remotely.
@@ -44,178 +107,47 @@ postgsail-telegram-bot:
- Polar performance.
- Anything missing? just ask!

## Context
## Cloud-hosted PostgSail

It is all about SQL, object-relational, time-series, spatial databases with a bit of python.
Remove the hassle of running PostgSail yourself. Here you can skip the technical setup, the maintenance work and server costs by getting PostgSail on our reliable and secure PostgSail Cloud. Register and try for free at [iot.openplotter.cloud](https://iot.openplotter.cloud/).

PostgSail is an open-source alternative to traditional vessel data management.
It is based on a well-known open-source technology stack: SignalK, PostgreSQL, TimescaleDB, PostGIS, and PostgREST. It integrates perfectly with a standard monitoring stack like Grafana.
PostgSail Cloud is Open Source and free for personal use with a single vessel. If you wish to manage multiple boats, contact us.

To understand the why and how, you might want to read [Why.md](https://github.com/xbgmsharp/postgsail/tree/main/Why.md)
PostgSail is free to use, but is not free to make or host. The stability and accuracy of PostgSail depends on its volunteers and donations from its users. Please consider making an annual recurring gift to PostgSail.

## Architecture
A simple scalable architecture:
## On-Premise

For more clarity and visibility the complete [Architecture overview](https://github.com/xbgmsharp/postgsail/blob/main/docs/README.md).
Self-host PostgSail where you want and how you want. There are no restrictions; you're in full control. [Install Guide](https://github.com/xbgmsharp/postgsail/blob/main/docs/README.md)

For more clarity and visibility the complete [Entity-Relationship Diagram (ERD)](https://github.com/xbgmsharp/postgsail/blob/main/docs/ERD/README.md) is exported as Mermaid, PNG and SVG files.
PostgSail is free to use, but is not free to make or host. The stability and accuracy of PostgSail depends on its volunteers and donations from its users. Please consider making an annual recurring gift to PostgSail.

## Cloud
## Roadmap

If you prefer not to install or administer your instance of PostgSail, hosted versions of PostgSail are available in the cloud of your choice.
See the [open issues](https://github.com/xbgmsharp/postgsail/issues) for a list of proposed features (and known issues).

### The cloud advantage.
Join the community, get support and exchange on [Discord](https://discord.gg/uuZrwz4dCS). Missing a feature? just ask!

Hosted and fully-managed options for PostgSail, designed for all your deployment and business needs. Register and try for free at https://iot.openplotter.cloud/.
## Contributing

## Using PostgSail
Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
* If you have suggestions for features, feel free to [open an issue](https://github.com/xbgmsharp/postgsail/issues/new) to discuss it, or directly create a pull request with the necessary changes.
* Please make sure you check your spelling and grammar.
* Create an individual PR for each suggestion.
* Please also read through the [Code Of Conduct](https://github.com/xbgmsharp/postgsail/blob/main/CODE_OF_CONDUCT.md) before posting your first idea as well.

A full-featured development environment.
### Creating A Pull Request

#### With CodeSandbox
1. Fork the Project
2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
4. Push to the Branch (`git push origin feature/AmazingFeature`)
5. Open a Pull Request

- Develop on [](https://codesandbox.io/p/github/xbgmsharp/postgsail/main)
- or via [direct link](https://codesandbox.io/p/github/xbgmsharp/postgsail/main)
## License

#### With DevPod
Distributed under the Apache License Version 2.0. See [LICENSE](https://github.com/xbgmsharp/postgsail/blob/main/LICENSE) for more information.

- [](https://devpod.sh/open#https://github.com/xbgmsharp/postgsail/&workspace=postgsail&provider=docker&ide=openvscode)
- or via [direct link](https://devpod.sh/open#https://github.com/xbgmsharp/postgsail&workspace=postgsail&provider=docker&ide=openvscode)

#### With Docker Dev Environments
- [Open in Docker dev-envs!](https://open.docker.com/dashboard/dev-envs?url=https://github.com/xbgmsharp/postgsail/)

### pre-deploy configuration

To get these running, copy `.env.example` and rename to `.env` then set the values accordingly.

```bash
# cp .env.example .env
```

Notice that `PGRST_JWT_SECRET` must be at least 32 characters long.

`$ head /dev/urandom | tr -dc A-Za-z0-9 | head -c 42 ; echo ''`

```bash
# nano .env
```
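For reference, a minimal sketch of the relevant `.env` values for a localhost deployment (placeholder values, not project defaults):

```bash
# Hypothetical minimal .env -- adjust every value to your setup.
PGSAIL_AUTHENTICATOR_PASSWORD=change_me_strong_password
PGSAIL_APP_URL=http://localhost:8080
PGSAIL_API_URL=http://localhost:3000
PGRST_DB_URI=postgres://authenticator:${PGSAIL_AUTHENTICATOR_PASSWORD}@db:5432/signalk
# Must be at least 32 characters; generate with the one-liner above.
PGRST_JWT_SECRET=0123456789abcdefghijklmnopqrstuvwxyzABCDEF
```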

### Deploy

By default there is no network set and all data is stored in a Docker volume.
You can update the default settings by editing `docker-compose.yml` and `docker-compose.dev.yml` to your needs.

First let's initialize the database.

#### Step 1. Initialize database

First let's import the SQL schema, execute:

```bash
$ docker compose up db
```

#### Step 2. Start backend (db, api)

Then launch the full backend stack (db, api), execute:

```bash
$ docker compose up db api
```

The API should be accessible via port HTTP/3000.
The database should be accessible via port TCP/5432.

You can connect to the database via a web GUI like [pgadmin](https://www.pgadmin.org/) or you can use a client like [dbeaver](https://dbeaver.io/).

#### Step 3. Start frontend (web)

Then launch the web frontend, execute:

```bash
$ docker compose up web
```
The frontend should be accessible via port HTTP/8080.

### SQL Configuration

Check and update your postgsail settings via SQL in the table `app_settings`:

```sql
SELECT * FROM app_settings;
```

```sql
UPDATE app_settings
    SET
        value = 'new_value'
    WHERE name = 'app.email_server';
```

### Ingest data

Next, to ingest data from SignalK, you need to install the [signalk-postgsail](https://github.com/xbgmsharp/signalk-postgsail) plugin on your SignalK server instance.

Also, if you like, you can import saillogger data using the postgsail helpers, [postgsail-helpers](https://github.com/xbgmsharp/postgsail-helpers).

You might want to import your influxdb1 data as well, [outflux](https://github.com/timescale/outflux).
For InfluxDB 2.x and 3.x, you will need to enable the 1.x APIs to use them. Consult the InfluxDB documentation for more details.

Last, if you like, you can import the sample data from SignalK NMEA Plaka by running the tests.
If everything goes well, all tests pass successfully and you should receive a few notifications by email, Pushover or Telegram.
[End-to-End (E2E) Testing.](https://github.com/xbgmsharp/postgsail/blob/main/tests/)

```
$ docker-compose up tests
```

### API Documentation

The OpenAPI description output depends on the permissions of the role that is contained in the JWT role claim.

Other applications can also use the [PostgSAIL API](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/xbgmsharp/postgsail/main/openapi.json).

API anonymous:

```
$ curl http://localhost:3000/
```

API user_role:

```
$ curl http://localhost:3000/ -H 'Authorization: Bearer my_token_from_login_or_signup_fn'
```

API vessel_role:

```
$ curl http://localhost:3000/ -H 'Authorization: Bearer my_token_from_register_vessel_fn'
```

#### API main workflow

Check the [End-to-End (E2E) test sample](https://github.com/xbgmsharp/postgsail/blob/main/tests/).

### Docker dependencies

`docker-compose` is used to start environment dependencies. Dependencies consist of 3 containers:

- `timescaledb-postgis` alias `db`, PostgreSQL with the TimescaleDB extension along with the PostGIS extension.
- `postgrest` alias `api`, a standalone web server that turns your PostgreSQL database directly into a RESTful API.
- `grafana` alias `app`, visualize and monitor your data

### Optional docker images

- [pgAdmin](https://hub.docker.com/r/dpage/pgadmin4), web UI to monitor and manage multiple PostgreSQL
- [Swagger](https://hub.docker.com/r/swaggerapi/swagger-ui), web UI to visualize documentation from PostgREST

```
docker-compose -f docker-compose-optional.yml up
```

### Software reference
## Acknowledgements

An out of the box IoT platform using Docker (could be extended to K3s or K8s) with the following software:

@@ -223,19 +155,6 @@ An out of the box IoT platform using Docker
- [PostgreSQL, open source object-relational database system](https://postgresql.org)
- [TimescaleDB, Time-series data extends PostgreSQL](https://www.timescale.com)
- [PostGIS, a spatial database extender for PostgreSQL object-relational database.](https://postgis.net/)
- [MobilityDB, An open source geospatial trajectory data management & analysis platform.](https://mobilitydb.com/)
- [Grafana, open observability platform | Grafana Labs](https://grafana.com)

### Support

To get support, please create a new [issue](https://github.com/xbgmsharp/postgsail/issues).

There are most likely security flaws and bugs.

### Contribution

I'm happy to accept Pull Requests!
Feel free to contribute.

### License

This is free software, Apache License Version 2.0.
- And many more
docker-compose.yml

@@ -1,5 +1,3 @@
version: "3.9"

services:
  db:
    image: xbgmsharp/timescaledb-postgis
@@ -18,7 +16,7 @@ services:
    ports:
      - "5432:5432"
    volumes:
      - ./db-data:/var/lib/postgresql/data
      - postgres-data:/var/lib/postgresql/data
      - ./initdb:/docker-entrypoint-initdb.d
    logging:
      options:
@@ -47,6 +45,8 @@ services:
      PGRST_OPENAPI_SERVER_PROXY_URI: http://127.0.0.1:3000
      PGRST_DB_PRE_REQUEST: public.check_jwt
      PGRST_DB_POOL: 20
      PGRST_DB_POOL_MAX_IDLETIME: 60
      PGRST_DB_POOL_ACQUISITION_TIMEOUT: 20
      PGRST_DB_URI: ${PGRST_DB_URI}
      PGRST_JWT_SECRET: ${PGRST_JWT_SECRET}
      PGRST_SERVER_TIMING_ENABLED: 1
@@ -71,8 +71,8 @@ services:
    links:
      - "db:database"
    volumes:
      - data:/var/lib/grafana
      - data:/var/log/grafana
      - grafana-data:/var/lib/grafana
      - grafana-data:/var/log/grafana
      - ./grafana:/etc/grafana
    ports:
      - "3001:3000"
@@ -145,4 +145,5 @@ services:
        max-size: 10m

volumes:
  data: {}
  grafana-data: {}
  postgres-data: {}
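The compose diff above moves the database from a `./db-data` bind mount to a `postgres-data` named volume. A hedged migration sketch for existing installs (assumes the compose project is named `postgsail`, so the volume is created as `postgsail_postgres-data`; verify with `docker volume ls`):

```bash
# Stop the stack, let compose create the new named volume, then copy the data.
docker compose down
docker compose create db
docker run --rm \
  -v "$(pwd)/db-data:/from:ro" \
  -v postgsail_postgres-data:/to \
  alpine sh -c "cp -a /from/. /to/"
docker compose up -d db
```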
docs/ERD/README.md

@@ -7,7 +7,7 @@ Auto generated Mermaid diagram using [mermerd](https://github.com/KarnerTh/mermerd)
[PostgSail SQL Schema](https://github.com/xbgmsharp/postgsail/tree/main/docs/ERD/postgsail.md "PostgSail SQL Schema")

## Further
There are 3 main schemas:
There are 3 main schemas in the signalk database:
- API Schema:
  - tables
    - metrics
@@ -72,4 +72,65 @@ flowchart TD
    B --> O{Update account,vessel,otp}
    F --> P{Update metadata}
    G --> P
    A --> Q((cron_post_logbook))
    Q --> R{QGIS and notification}
    A --> S((cron_video))
    A --> U((cron_alert))
    S --> T{notification}
    U --> T{notification}
```
Cron jobs are not processed by default: if you don't have the correct settings (SMTP, Pushover, Telegram), you might enter an error loop and could be blocked or banned by the external services.

Therefore, by default there are no active jobs, as they require external configuration settings (SMTP, Pushover, Telegram).
To activate all cron.job entries, run the following SQL command:
```sql
UPDATE cron.job SET active = True;
```
Be sure to review your postgsail settings via SQL in the table `app_settings`:
```sql
SELECT * FROM public.app_settings;
```
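Before flipping every job on, it can help to review what is scheduled. A hedged sketch against the standard pg_cron catalog (column names per pg_cron; the `jobname` pattern below is hypothetical, check the values the first query returns):

```sql
-- List all scheduled jobs and their current state before enabling them.
SELECT jobid, jobname, schedule, active FROM cron.job ORDER BY jobid;
-- Enable jobs selectively rather than all at once, e.g. by name pattern.
UPDATE cron.job SET active = True WHERE jobname ILIKE 'cron_%';
```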

### How to bypass OTP for a local install?

To skip the OTP process, add or update the following JSON key/value in the account preferences.
```json
"email_valid": true
```
SQL query:
```sql
UPDATE auth.accounts
SET preferences='{"email_valid": true}'::jsonb || preferences
WHERE email='your.email@domain.com';
```

OTP is created and sent by email using a cron in postgres/cron/job.
```sql
SELECT * FROM auth.otp;
```

Accounts are stored in the table auth.accounts.
```sql
SELECT * FROM auth.accounts;
```

You should have a history in the table public.process_queue.
```sql
SELECT * from public.process_queue;
```

### How to turn off signups

If you just want to use this as a standalone application and don't want people to be able to sign up for an account, revoke execute on the signup function:

```SQL
REVOKE EXECUTE ON FUNCTION api.signup(text,text,text,text) FROM api_anonymous;
```

### How to disable completely anonymous access

If you just want to use this as a standalone application and don't want people to be able to access the public account, revoke anonymous read access:

```SQL
REVOKE SELECT ON ALL TABLES IN SCHEMA api FROM api_anonymous;
```
docs/ERD/postgsail.md

@@ -13,31 +13,50 @@ erDiagram
    timestamp_with_time_zone _to_time
    boolean active
    double_precision avg_speed
    numeric distance "in NM"
    interval duration "Best to use standard ISO 8601"
    jsonb extra "computed signalk metrics of interest, runTime, currentLevel, etc"
    numeric distance "Distance in nautical miles (NM)"
    interval duration "Duration in ISO 8601 format"
    jsonb extra "Computed SignalK metrics such as runtime, current level, etc."
    integer id "{NOT_NULL}"
    double_precision max_speed
    double_precision max_wind_speed
    text name
    text notes
    geography track_geog "postgis geography type default SRID 4326 Unit: degrees"
    jsonb track_geojson "store generated geojson with track metrics data using with LineString and Point features, we can not depend api.metrics table"
    geometry track_geom "postgis geometry type EPSG:4326 Unit: degrees"
    tgeogpoint trip "MobilityDB trajectory"
    tfloat trip_batt_charge "Battery Charge"
    tfloat trip_batt_voltage "Battery Voltage"
    tfloat trip_cog "courseovergroundtrue"
    tfloat trip_depth "Depth"
    tfloat trip_heading "heading True"
    tfloat trip_hum_out "Humidity outside"
    ttext trip_notes
    tfloat trip_pres_out "Pressure outside"
    tfloat trip_sog "speedoverground"
    tfloat trip_solar_power "solar powerPanel"
    tfloat trip_solar_voltage "solar voltage"
    ttext trip_status
    tfloat trip_tank_level "Tank currentLevel"
    tfloat trip_temp_out "Temperature outside"
    tfloat trip_temp_water "Temperature water"
    tfloat trip_twa "windspeedapparent"
    tfloat trip_twd "truewinddirection"
    tfloat trip_tws "truewindspeed"
    text vessel_id "{NOT_NULL}"
  }

  api_metadata {
    boolean active "trigger monitor online/offline"
    boolean active
    jsonb available_keys "Signalk paths with unit for custom mapping"
    jsonb available_keys
    double_precision beam
    text client_id
    text configuration
    jsonb configuration "Signalk path mapping for metrics"
    jsonb configuration
    timestamp_with_time_zone created_at "{NOT_NULL}"
    double_precision height
    integer id "{NOT_NULL}"
    text ip "Store vessel ip address"
    text ip
    double_precision length
    numeric mmsi
    text mmsi
    text name
    text platform
    text plugin_version "{NOT_NULL}"
@@ -49,9 +68,22 @@ erDiagram
    text vessel_id "{NOT_NULL}"
  }

  api_metadata_ext {
    timestamp_with_time_zone created_at "{NOT_NULL}"
    bytea image "Store user boat image in bytea format"
    text image_b64
    text image_type "Store user boat image type in text format"
    timestamp_with_time_zone image_updated_at
    text image_url
    text make_model "Store user make & model in text format"
    text polar "Store polar data in CSV notation as used on ORC sailboat data"
    timestamp_with_time_zone polar_updated_at
    text vessel_id "{NOT_NULL}"
  }

  api_metrics {
    double_precision anglespeedapparent
    text client_id
    text client_id "Deprecated client_id to be removed"
    double_precision courseovergroundtrue
    double_precision latitude "With CONSTRAINT but allow NULL value to be ignored silently by trigger"
    double_precision longitude "With CONSTRAINT but allow NULL value to be ignored silently by trigger"
@@ -74,9 +106,7 @@ erDiagram
    jsonb nominatim
    text notes
    jsonb overpass
    integer reference_count
    integer stay_code "Link api.stays_at with api.moorages via FOREIGN KEY and REFERENCES"
    interval stay_duration "Best to use standard ISO 8601"
    text vessel_id "{NOT_NULL}"
  }

@@ -101,6 +131,17 @@ erDiagram
    integer stay_code "{NOT_NULL}"
  }

  api_stays_ext {
    timestamp_with_time_zone created_at "{NOT_NULL}"
    bytea image "Store stays image in bytea format"
    text image_b64
    text image_type "Store stays image type in text format"
    timestamp_with_time_zone image_updated_at
    text image_url
    integer stay_id "{NOT_NULL}"
    text vessel_id "{NOT_NULL}"
  }

  auth_accounts {
    timestamp_with_time_zone connected_at "{NOT_NULL}"
    timestamp_with_time_zone created_at "{NOT_NULL}"
@@ -187,6 +228,13 @@ erDiagram
    numeric id
  }

  public_mobilitydb_opcache {
    integer ltypnum
    oid opid
    integer opnum
    integer rtypnum
  }

  public_ne_10m_geography_marine_polys {
    text changed
    text featurecla
@@ -249,13 +297,18 @@ erDiagram
  api_logbook }o--|| api_metadata : ""
  api_logbook }o--|| api_moorages : ""
  api_logbook }o--|| api_moorages : ""
  api_logbook }o--|| api_moorages : ""
  api_logbook }o--|| api_moorages : ""
  api_metadata }o--|| auth_vessels : ""
  api_metadata_ext |o--|| api_metadata : ""
  api_metrics }o--|| api_metadata : ""
  api_moorages }o--|| api_metadata : ""
  api_stays }o--|| api_metadata : ""
  api_stays_ext }o--|| api_metadata : ""
  api_moorages }o--|| api_stays_at : ""
  api_stays }o--|| api_moorages : ""
  api_stays }o--|| api_stays_at : ""
  api_stays_ext |o--|| api_stays : ""
  auth_otp |o--|| auth_accounts : ""
  auth_vessels |o--|| auth_accounts : ""
  auth_vessels }o--|| auth_accounts : ""
```
docs/README.md (211 changes)

@@ -1,4 +1,213 @@

Simple and scalable architecture.

## Architecture

Efficient, simple and scalable architecture.



For more clarity and visibility the complete [Entity-Relationship Diagram (ERD)](https://github.com/xbgmsharp/postgsail/blob/main/docs/ERD/README.md) is exported as Mermaid, PNG and SVG files.

## Using PostgSail
### Development

A full-featured development environment.

#### With CodeSandbox

- Develop on [](https://codesandbox.io/p/github/xbgmsharp/postgsail/main)
- or via [direct link](https://codesandbox.io/p/github/xbgmsharp/postgsail/main)

#### With DevPod

- [](https://devpod.sh/open#https://github.com/xbgmsharp/postgsail/&workspace=postgsail&provider=docker&ide=openvscode)
- or via [direct link](https://devpod.sh/open#https://github.com/xbgmsharp/postgsail&workspace=postgsail&provider=docker&ide=openvscode)

#### With Docker Dev Environments
- [Open in Docker dev-envs!](https://open.docker.com/dashboard/dev-envs?url=https://github.com/xbgmsharp/postgsail/)


### On-premise (self-hosted)

This kind of deployment needs the [docker application](https://www.docker.com/) to be installed and running. Check this [tutorial](https://www.docker.com/101-tutorial).

Docker runs pre-packaged applications (images), which can be retrieved as sources (Dockerfile and resources) to build, or pulled already built from registries (private or public).

PostgSail depends heavily on [PostgreSQL](https://www.postgresql.org/). Check this [tutorial](https://www.postgresql.org/docs/current/tutorial.html).

#### pre-deploy configuration

To get these running, copy `.env.example` and rename to `.env` then set the values accordingly.

```bash
# cp .env.example .env
```

```bash
# nano .env
```

Notice that `PGRST_JWT_SECRET` must be at least 32 characters long.

`$ cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 42 | head -n 1`

`PGSAIL_APP_URL` is the URL you connect to from your browser.

`PGSAIL_API_URL` is the URL that `PGSAIL_APP_URL` connects to.

`PGRST_DB_URI` is the URI that `PGSAIL_API_URL` connects to.

To summarize:
```mermaid
flowchart LR
    subgraph frontend
    direction TB
    A(PGSAIL_APP_URL)
    B(PGSAIL_API_URL)
    end
    subgraph backend
    direction TB
    B(PGSAIL_API_URL) -- SQL --> C(PGRST_DB_URI)
    end
    %% ^ These subgraphs are identical, except for the links to them:

    %% Link *to* subgraph1: subgraph1 direction is maintained

    User -- HTTP --> A
    User -- HTTP --> B
    %% Link *within* subgraph2:
    %% subgraph2 inherits the direction of the top-level graph (LR)

    Boat -- HTTP --> B
```
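After editing `.env`, a quick hedged sanity check of the two HTTP endpoints (assumes the localhost defaults from `.env.example`; `PGRST_DB_URI` is only resolvable from inside the compose network, where the host is `db`):

```bash
source .env
# Both should answer once the db, api and web services are up.
curl -fsS "$PGSAIL_API_URL" >/dev/null && echo "API reachable"
curl -fsS "$PGSAIL_APP_URL" >/dev/null && echo "Webapp reachable"
```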

### Deploy

There are two compose files used. You can update the default settings by editing `docker-compose.yml` and `docker-compose.dev.yml` to your needs.

Now let's initialize the database.

#### Step 1. Initialize database

First let's import the SQL schema, execute:

```bash
$ docker compose up db
```

#### Step 2. Start backend (db, api)

Then launch the full backend stack (db, api), execute:

```bash
$ docker compose up db api
```

The API should be accessible via port HTTP/3000.
The database should be accessible via port TCP/5432.

You can connect to the database via a web GUI like [pgadmin](https://www.pgadmin.org/) or you can use a client like [dbeaver](https://dbeaver.io/).
```bash
$ docker compose -f docker-compose.yml -f docker-compose.dev.yml up pgadmin
```
Then connect to the web UI on port HTTP/5050.
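Alternatively, a hedged psql one-liner from the host (the username and password are placeholders; use the credentials from your `.env`, the default database is signalk):

```bash
# Replace username/password with the credentials from your .env file.
psql "postgres://username:password@localhost:5432/signalk" -c "SELECT version();"
```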

#### Step 3. Start frontend (web)

Last, build and launch the web frontend, execute:

```bash
docker compose build web
docker compose up web
```

The first step can take some time as it will first run a build to generate the static website based on your settings.

The frontend is a SPA (Single-Page Application). With a SPA, the server provides the user with an empty HTML page and Javascript. The latter is where the magic happens. When the browser receives the HTML + Javascript, it loads the Javascript. Once loaded, the JS takes over and, through a set of operations in the DOM, renders the necessary components to the page. The routing is then handled by the browser itself, not hitting the server.

The frontend should be accessible via port HTTP/8080.

Users are collaborating on installation guides:
- [Self-hosted-installation-guide](https://github.com/xbgmsharp/postgsail/blob/main/docs/install_guide.md)
- [Self-hosted-installation-guide on AWS EC2](https://github.com/xbgmsharp/postgsail/blob/main/docs/Self%E2%80%90hosted-installation-guide%20on%20AWS.md)
- [Self-hosted-installation-guide](https://github.com/xbgmsharp/postgsail/blob/main/docs/Self%E2%80%90hosted-installation-guide.md)

### SQL Configuration

Check and update your postgsail settings via SQL in the table `app_settings`:

```sql
SELECT * FROM app_settings;
```

```sql
UPDATE app_settings
    SET
        value = 'new_value'
    WHERE name = 'app.email_server';
```

As it is all about SQL, [read more](https://github.com/xbgmsharp/postgsail/blob/main/docs/ERD/README.md) about the database to configure your instance and explore your data.

### Ingest data

Next, to ingest data from SignalK, you need to install the [signalk-postgsail](https://github.com/xbgmsharp/signalk-postgsail) plugin on your SignalK server instance.

Also, if you like, you can import saillogger data using the postgsail helpers, [postgsail-helpers](https://github.com/xbgmsharp/postgsail-helpers).

You might want to import your influxdb1 data as well, [outflux](https://github.com/timescale/outflux).
For InfluxDB 2.x and 3.x, you will need to enable the 1.x APIs to use them. Consult the InfluxDB documentation for more details.

Last, if you like, you can import the sample data from SignalK NMEA Plaka by running the tests.
If everything goes well, all tests pass successfully and you should receive a few notifications by email, Pushover or Telegram.
[End-to-End (E2E) Testing.](https://github.com/xbgmsharp/postgsail/blob/main/tests/)

```
$ docker-compose up tests
```

### API Documentation

The OpenAPI description output depends on the permissions of the role that is contained in the JWT role claim.

Other applications can also use the [PostgSAIL API](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/xbgmsharp/postgsail/main/openapi.json).

API anonymous:

```
$ curl http://localhost:3000/
```

API user_role:

```
$ curl http://localhost:3000/ -H 'Authorization: Bearer my_token_from_login_or_signup_fn'
```

API vessel_role:

```
$ curl http://localhost:3000/ -H 'Authorization: Bearer my_token_from_register_vessel_fn'
```

#### API main workflow

Check the [End-to-End (E2E) test sample](https://github.com/xbgmsharp/postgsail/blob/main/tests/).

### Docker dependencies

`docker compose` is used to start environment dependencies. Dependencies consist of 3 containers:

- `timescaledb-postgis` alias `db`, PostgreSQL with the TimescaleDB extension along with the PostGIS extension.
- `postgrest` alias `api`, a standalone web server that turns your PostgreSQL database directly into a RESTful API.
- `grafana` alias `app`, visualize and monitor your data

### Optional docker images

- [pgAdmin](https://hub.docker.com/r/dpage/pgadmin4), web UI to monitor and manage multiple PostgreSQL
- [Swagger](https://hub.docker.com/r/swaggerapi/swagger-ui), web UI to visualize documentation from PostgREST

```
docker-compose -f docker-compose-optional.yml up
```
docs/Self-hosted-update-guide.md (new file, +277)

@@ -0,0 +1,277 @@
# Self hosted update guide

In this guide we are updating a self-hosted installation from version 0.7.2 to version 0.9.3. When updating from or to other versions, the principle remains the same.

The installation we are upgrading was installed in April 2024 using the installation instructions found on the PostgSail GitHub site. The platform is an Ubuntu 22.04 virtual machine.
Before the upgrade, around 120 trips were logged. Needless to say, we don't want to lose our data.

Unfortunately, there is no automatic update path available; this may change, but for now we had to follow the general update instructions.

## General update instructions

- Make a backup.
- Update the containers.
- Update possible extensions.
- Run database migrations.
- Additional data migration.
- Update SignalK client.

## Let's go

### Tools used

In addition to the tools that are already installed as part of Ubuntu and PostgSail, I used DBeaver to examine the database from my Windows desktop.

<https://dbeaver.io/download/>

### Make a backup

Start by making a backup of the database, the docker-compose.yml and .env files. Note that in my case the database was stored in a host folder; later versions use a Docker volume. To copy the database it is necessary that the containers are stopped.

```bash
cd postgsail
mkdir backup
docker compose stop
cp .env docker-compose.yml backup/
docker compose cp -a db:/var/lib/postgresql/data backup/db-data
```
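A logical backup is a useful complement to the raw file copy; a hedged sketch using pg_dump inside the db container (assumes the signalk database and the credentials already present in the container environment):

```bash
# Dump the signalk database to a compressed archive on the host.
docker compose exec -T db sh -c 'pg_dump -U "$POSTGRES_USER" -Fc signalk' > backup/signalk.dump
# Restore later with: pg_restore -U "$POSTGRES_USER" -d signalk backup/signalk.dump
```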

### Update the containers

Make a note of the last migration in the initdb folder; in my case this was 99_migrations_202404.sql. Because I used git clone, the migration file was a bit in between 0.7.1 and 0.7.2, therefore I decided 99_migrations_202404.sql was the first migration to run.

Remove the containers:

```bash
docker compose down
```

Get the latest PostgSail from GitHub; we check out a specific tag to ensure we have a stable release version. If you installed it from a binary release, just update from the latest binary release.

```bash
git pull remote main
git fetch --all --tags
git checkout tags/v0.9.3
```

```text
Note: switching to 'tags/v0.9.3'.

You are in 'detached HEAD' state. You can look around, make experimental
changes and commit them, and you can discard any commits you make in this
state without impacting any branches by switching back to a branch.

If you want to create a new branch to retain commits you create, you may
do so (now or later) by using -c with the switch command. Example:

  git switch -c <new-branch-name>

Or undo this operation with:

  git switch -

Turn off this advice by setting config variable advice.detachedHead to false

HEAD is now at 12e4baf Release PostgSail 0.9.3
```

**Ensure the new docker-compose.yml file matches your database folder or volume setting; adjust as needed.**

Get the latest containers.

```bash
docker compose pull
```

### Update possible extensions

Start the database container.

```bash
docker compose up -d db
```

Exec a psql shell in the database container.

```bash
docker compose exec db sh
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB"
\c signalk;
```

Check extensions which can be updated; be sure to run from the signalk database:

```sql
SELECT name, default_version, installed_version FROM pg_available_extensions where default_version <> installed_version;
```

The postgis extension can be upgraded with this SQL query:

```sql
SELECT postgis_extensions_upgrade();
```

Updating timescaledb requires running from a new session; use the following commands (note the -X option, that is necessary):

```bash
docker compose exec db sh
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" -X
```

Then run the following SQL commands from the psql shell:

```sql
ALTER EXTENSION timescaledb UPDATE;
CREATE EXTENSION IF NOT EXISTS timescaledb_toolkit;
ALTER EXTENSION timescaledb_toolkit UPDATE;
```
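After the updates, you can confirm the installed versions from the same psql session; a minimal sketch against the standard PostgreSQL catalogs:

```sql
-- Compare installed versions against the packaged default versions.
SELECT e.extname, e.extversion, a.default_version
FROM pg_extension e
JOIN pg_available_extensions a ON a.name = e.extname
ORDER BY e.extname;
```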

For others, to be checked. In my case, the postgis extension was essential.

### Run database migrations

Then run the migrations; adjust start and end for the first and last migration file to execute.

```bash
start=202404; end=202507; for f in $(ls ./docker-entrypoint-initdb.d/99_migrations_*.sql | sort); do s=$(basename "$f" | sed -E 's/^99_migrations_([0-9]{6})\.sql$/\1/'); if [[ "$s" < "$start" || "$s" > "$end" ]]; then continue; fi; echo "Running $f"; psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < "$f"; done
```

Or line by line:

```bash
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202404.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202405.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202406.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202407.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202408.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202409.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202410.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202411.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202412.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202501.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202504.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202505.sql
psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < ./docker-entrypoint-initdb.d/99_migrations_202507.sql
```
||||
|
||||
Now rebuild the web app.
|
||||
|
||||
```bash
|
||||
docker compose build web
|
||||
```
|
||||
|
||||
You may also need to run 99env.sh; check whether it applies to your setup.
|
||||
|
||||
Then we can start the other containers.
|
||||
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
After everything is started, the web site should be accessible.
|
||||
|
||||
### Additional data migration
|
||||
|
||||
Depending on the starting version, additional data migration may be needed.
|
||||
If the old trips are visible but their routes are not, run the following SQL script to re-calculate the trip metadata.
|
||||
|
||||
```sql
|
||||
DO $$
|
||||
declare
|
||||
-- Re calculate the trip metadata
|
||||
logbook_rec record;
|
||||
avg_rec record;
|
||||
t_rec record;
|
||||
batch_size INTEGER := 20;
|
||||
offset_value INTEGER := 0;
|
||||
done BOOLEAN := FALSE;
|
||||
processed INTEGER := 0;
|
||||
begin
|
||||
WHILE NOT done LOOP
|
||||
processed := 0;
|
||||
FOR logbook_rec IN
|
||||
SELECT *
|
||||
FROM api.logbook
|
||||
WHERE _from IS NOT NULL
|
||||
AND _to IS NOT NULL
|
||||
AND active IS FALSE
|
||||
AND trip IS NULL
|
||||
--AND trip_heading IS NULL
|
||||
--AND vessel_id = '06b6d311ccfe'
|
||||
ORDER BY id DESC
|
||||
LIMIT batch_size -- OFFSET offset_value -- don't use OFFSET as it causes entries to be skipped
|
||||
LOOP
|
||||
processed := processed + 1;
|
||||
-- Update logbook entry with the latest metric data and calculate data
|
||||
PERFORM set_config('vessel.id', logbook_rec.vessel_id, false);
|
||||
|
||||
-- Calculate trip metadata
|
||||
avg_rec := logbook_update_avg_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
|
||||
--UPDATE api.logbook
|
||||
-- SET extra = jsonb_recursive_merge(extra, jsonb_build_object('avg_wind_speed', avg_rec.avg_wind_speed))
|
||||
-- WHERE id = logbook_rec.id;
|
||||
if avg_rec.count_metric IS NULL OR avg_rec.count_metric = 0 then
|
||||
-- We don't have the original metrics; we should read the geojson
|
||||
continue; -- skip to the next logbook record
|
||||
end if;
|
||||
|
||||
-- mobilitydb, add spatiotemporal sequence
-- reduce the number of metrics by skipping rows or aggregating the time series
-- By default the signalk plugin reports one entry every minute.
|
||||
IF avg_rec.count_metric < 30 THEN -- trips under ~30 min: keep all the data
|
||||
t_rec := logbook_update_metrics_short_fn(avg_rec.count_metric, logbook_rec._from_time, logbook_rec._to_time);
|
||||
ELSIF avg_rec.count_metric < 2000 THEN -- trips under ~33 h: skip rows
|
||||
t_rec := logbook_update_metrics_fn(avg_rec.count_metric, logbook_rec._from_time, logbook_rec._to_time);
|
||||
ELSE -- too many data points: aggregate the time series
|
||||
t_rec := logbook_update_metrics_timebucket_fn(avg_rec.count_metric, logbook_rec._from_time, logbook_rec._to_time);
|
||||
END IF;
|
||||
--RAISE NOTICE 'mobilitydb [%]', t_rec;
|
||||
IF t_rec.trajectory IS NULL THEN
|
||||
RAISE WARNING '-> process_logbook_queue_fn, vessel_id [%], invalid mobilitydb data [%] [%]', logbook_rec.vessel_id, logbook_rec.id, t_rec;
|
||||
RETURN;
|
||||
END IF;
|
||||
|
||||
RAISE NOTICE '-> process_logbook_queue_fn, vessel_id [%], update entry logbook id:[%] start:[%] end:[%]', logbook_rec.vessel_id, logbook_rec.id, logbook_rec._from_time, logbook_rec._to_time;
|
||||
UPDATE api.logbook
|
||||
SET
|
||||
trip = t_rec.trajectory,
|
||||
trip_cog = t_rec.courseovergroundtrue,
|
||||
trip_sog = t_rec.speedoverground,
|
||||
trip_twa = t_rec.windspeedapparent,
|
||||
trip_tws = t_rec.truewindspeed,
|
||||
trip_twd = t_rec.truewinddirection,
|
||||
trip_notes = t_rec.notes, -- don't overwrite existing user notes; trip_notes must be set, otherwise replay does not work
|
||||
trip_status = t_rec.status,
|
||||
trip_depth = t_rec.depth,
|
||||
trip_batt_charge = t_rec.stateofcharge,
|
||||
trip_batt_voltage = t_rec.voltage,
|
||||
trip_temp_water = t_rec.watertemperature,
|
||||
trip_temp_out = t_rec.outsidetemperature,
|
||||
trip_pres_out = t_rec.outsidepressure,
|
||||
trip_hum_out = t_rec.outsidehumidity,
|
||||
trip_heading = t_rec.heading, -- heading True
|
||||
trip_tank_level = t_rec.tankLevel, -- Tank currentLevel
|
||||
trip_solar_voltage = t_rec.solarVoltage, -- solar voltage
|
||||
trip_solar_power = t_rec.solarPower -- solar powerPanel
|
||||
WHERE id = logbook_rec.id;
|
||||
|
||||
END LOOP;
|
||||
|
||||
RAISE NOTICE '-> Processed:[%]', processed;
|
||||
IF processed = 0 THEN
|
||||
done := TRUE;
|
||||
ELSE
|
||||
offset_value := offset_value + batch_size;
|
||||
END IF;
|
||||
END LOOP;
|
||||
|
||||
END $$;
|
||||
```
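
Once the script has finished, a quick sanity check is to count the closed logs still missing trip data (the same filter the loop above uses); it should return 0:

```sql
SELECT count(*) FROM api.logbook
WHERE _from IS NOT NULL AND _to IS NOT NULL
  AND active IS FALSE AND trip IS NULL;
```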
|
||||
|
||||
### Update SignalK client
|
||||
|
||||
The SignalK client can be updated from the SignalK Web UI. After the migration we updated it to version v0.5.0.
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
During this migration, several issues came up; they eventually boiled down to an extension that had not been updated and to permission issues.
|
New file: docs/Self‐hosted-installation-guide on AWS.md (288 lines)
|
||||
## Self-hosted AWS cloud setup example
|
||||
|
||||
In this guide we install, set up, and run a PostgSail project on an AWS instance in the cloud.
|
||||
|
||||
## On AWS Console
|
||||
***Launch an instance on AWS EC2***
|
||||
With the following settings:
|
||||
+ Ubuntu
|
||||
+ Instance type: t2.small
|
||||
+ Create a new key pair:
|
||||
+ key pair type: RSA
|
||||
+ Private key file format: .pem
|
||||
+ The key file is stored for later use
|
||||
|
||||
+ Allow SSH traffic from: Anywhere
|
||||
+ Allow HTTPS traffic from the internet
|
||||
+ Allow HTTP traffic from the internet
|
||||
|
||||
Configure storage:
|
||||
The standard storage of 8 GiB is too small, so change this to 16 GiB.
|
||||
|
||||
Create a new security group
|
||||
+ Go to: EC2>Security groups>Create security group
|
||||
Add inbound rules for the following ports: 443, 8080, 80, 3000, 5432, 22, 5050
|
||||
+ Go to your instance>select your instance>Actions>security>change security group
|
||||
+ And add the correct security group to the instance.
|
||||
|
||||
## Connect to instance with SSH
|
||||
|
||||
+ Copy the key file to your default SSH configuration location (the one VSCode will use)
|
||||
+ In terminal, go to the folder and run this command to ensure your key is not publicly viewable:
|
||||
```
|
||||
chmod 600 "privatekey.pem"
|
||||
```
|
||||
|
||||
We are using VSCode to connect to the instance:
|
||||
+ Install the Remote - SSH Extension for VSCode
|
||||
+ Open the Command Palette (Ctrl+Shift+P) and type Remote-SSH: Add New SSH Host:
|
||||
```
|
||||
ssh -i "privatekey.pem" ubuntu@ec2-111-22-33-44.eu-west-1.compute.amazonaws.com
|
||||
```
|
||||
When prompted, select the default SSH configuration file location.
|
||||
Open the config file and add the private key location:
|
||||
```
|
||||
IdentityFile ~/.ssh/privatekey.pem
|
||||
```
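
Alternatively, a complete Host entry keeps everything in one place; a sketch using the example hostname from above (the alias `postgsail-aws` is a placeholder):

```
Host postgsail-aws
    HostName ec2-111-22-33-44.eu-west-1.compute.amazonaws.com
    User ubuntu
    IdentityFile ~/.ssh/privatekey.pem
```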
|
||||
|
||||
|
||||
## Install Docker on your instance
|
||||
To install Docker on your new EC2 Ubuntu instance via SSH, follow these steps:
|
||||
|
||||
Update your package list:
|
||||
```
|
||||
sudo apt-get update
|
||||
```
|
||||
Install required dependencies:
|
||||
```
|
||||
sudo apt-get install apt-transport-https ca-certificates curl software-properties-common
|
||||
```
|
||||
Add Docker's official GPG key:
|
||||
```
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
```
|
||||
Add Docker's official repository:
|
||||
```
|
||||
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
```
|
||||
Update the package list again:
|
||||
```
|
||||
sudo apt-get update
|
||||
```
|
||||
Install Docker:
|
||||
```
|
||||
sudo apt-get install docker-ce docker-ce-cli containerd.io
|
||||
```
|
||||
Verify Docker installation:
|
||||
```
|
||||
sudo docker --version
|
||||
```
|
||||
Add your user to the docker group to run Docker without sudo:
|
||||
```
|
||||
sudo usermod -aG docker ubuntu
|
||||
```
|
||||
Then, log out and back in or use the following to apply the changes:
|
||||
```
|
||||
newgrp docker
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Install Postgsail
|
||||
Git clone the postgsail repo:
|
||||
```
|
||||
git clone https://github.com/xbgmsharp/postgsail.git
|
||||
```
|
||||
|
||||
## Edit environment variables
|
||||
Copy the .env.example file and edit the environment variables:
|
||||
```
|
||||
cd postgsail
|
||||
cp .env.example .env
|
||||
nano .env
|
||||
```
|
||||
|
||||
+ POSTGRES_USER
|
||||
Come up with a unique username for the database user. This will be used in the docker image when it’s started up. Nothing beyond creating a unique username and password is required here.
|
||||
This environment variable is used in conjunction with `POSTGRES_PASSWORD` to set a user and its password. This variable will create the specified user with superuser power and a database with the same name.
|
||||
|
||||
https://github.com/docker-library/docs/blob/master/postgres/README.md
|
||||
|
||||
+ POSTGRES_PASSWORD
|
||||
This should be a good password. It will be used for the postgres user above. Again this is used in the docker image.
|
||||
This environment variable is required for you to use the PostgreSQL image. It must not be empty or undefined. This environment variable sets the superuser password for PostgreSQL. The default superuser is defined by the POSTGRES_USER environment variable.
|
||||
|
||||
+ POSTGRES_DB
|
||||
This is the name of the database within postgres. You can leave it named postgres but give it a unique name if you like. The schema will be loaded into this database and all data will be stored within it. Since this is used inside the docker image the name really doesn’t matter. If you plan to run additional databases within the image, then you might care.
|
||||
This environment variable can be used to define a different name for the default database that is created when the image is first started. If it is not specified, then the value of `POSTGRES_USER` will be used.
|
||||
|
||||
+ PGSAIL_APP_URL
|
||||
This is the webapp (webui) entrypoint, typically the public DNS or IP
|
||||
```
|
||||
PGSAIL_APP_URL=http://localhost:8080
|
||||
```
|
||||
|
||||
|
||||
+ PGSAIL_API_URL
|
||||
This is the URL to your API on your instance on port 3000:
|
||||
```
|
||||
PGSAIL_API_URL=http://localhost:3000
|
||||
```
|
||||
|
||||
+ PGSAIL_AUTHENTICATOR_PASSWORD
|
||||
This password is used as part of the database access configuration. It’s used as part of the access URI later on. (Put the same password in both lines.)
|
||||
|
||||
+ PGSAIL_GRAFANA_PASSWORD
|
||||
This password is used for the grafana service
|
||||
|
||||
+ PGSAIL_GRAFANA_AUTH_PASSWORD
|
||||
This password is presumably used for user authentication on Grafana (to be confirmed).
|
||||
|
||||
+ PGSAIL_EMAIL_FROM, PGSAIL_EMAIL_SERVER, PGSAIL_EMAIL_USER, PGSAIL_EMAIL_PASS: PostgSail does not include a built-in email service, only hooks to send email via an existing server.
|
||||
We use gmail as a third party email service:
|
||||
```
|
||||
PGSAIL_EMAIL_FROM=email@gmail.com
|
||||
PGSAIL_EMAIL_SERVER=smtp.gmail.com
|
||||
PGSAIL_EMAIL_USER=email@gmail.com
|
||||
```
|
||||
You need to get the PGSAIL_EMAIL_PASS from your Gmail account security settings: it is not the account password; instead you need to create an "App password".
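
The resulting .env entry is just the generated app password (the value below is a placeholder):

```
PGSAIL_EMAIL_PASS=abcdefghijklmnop
```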
|
||||
|
||||
+ PGRST_JWT_SECRET
|
||||
This secret key must be at least 32 characters long; you can create a random key with the following command:
|
||||
```
|
||||
cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 42 | head -n 1
|
||||
```
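
You can also write the generated secret straight into the .env file in one step (a sketch; it rewrites the PGRST_JWT_SECRET line in place):

```
sed -i "s|^PGRST_JWT_SECRET=.*|PGRST_JWT_SECRET=$(cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 42 | head -n 1)|" .env
```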
|
||||
|
||||
+ Other ENV variables
|
||||
```
|
||||
PGSAIL_PUSHOVER_APP_TOKEN
|
||||
PGSAIL_PUSHOVER_APP
|
||||
PGSAIL_TELEGRAM_BOT_TOKEN
|
||||
PGSAIL_AUTHENTICATOR_PASSWORD=password
|
||||
PGSAIL_GRAFANA_PASSWORD=password
|
||||
PGSAIL_GRAFANA_AUTH_PASSWORD=password
|
||||
#PGSAIL_PUSHOVER_APP_TOKEN= Comment if not used
|
||||
#PGSAIL_PUSHOVER_APP_URL= Comment if not used
|
||||
#PGSAIL_TELEGRAM_BOT_TOKEN= Comment if not used
|
||||
```
|
||||
|
||||
## Run the project
|
||||
If needed, add your user to the docker group to run Docker without sudo:
|
||||
```
|
||||
sudo usermod -aG docker ubuntu
|
||||
```
|
||||
Then, log out and back in or use the following to apply the changes:
|
||||
```
|
||||
newgrp docker
|
||||
```
|
||||
|
||||
|
||||
Step 1. Import the SQL schema, execute:
|
||||
```
|
||||
docker compose up db
|
||||
```
|
||||
Step 2. Launch the full backend stack (db, api), execute:
|
||||
```
|
||||
docker compose up db api
|
||||
```
|
||||
Step 3. Launch the frontend webapp
|
||||
```
|
||||
docker compose up web
|
||||
```
|
||||
|
||||
Open a browser and navigate to your PGSAIL_APP_URL; you should see the PostgSail login screen now:
|
||||
```
|
||||
http://ec2-11-234-567-890.eu-west-1.compute.amazonaws.com:8080
|
||||
```
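
If the login screen does not load, check from the instance itself whether the web container is serving (assumes curl is installed):

```
curl -I http://localhost:8080
```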
|
||||
|
||||
## Additional database setup
|
||||
Additional setup is required.
|
||||
There is no user account yet, and the cron jobs need to be activated.
|
||||
We'll do that by using pgadmin.
|
||||
|
||||
### Run Pgadmin & connect to database
|
||||
First add two more variables to your .env file:
|
||||
```
|
||||
PGADMIN_DEFAULT_EMAIL=setup@setup.com
|
||||
PGADMIN_DEFAULT_PASSWORD=123456
|
||||
```
|
||||
|
||||
Pgadmin is defined in docker-compose.dev.yml so we need to start the service:
|
||||
```
|
||||
docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d pgadmin
|
||||
```
|
||||
|
||||
All services should be up now: api, db, web and pgadmin. Check that all services are running with:
|
||||
```
|
||||
docker ps
|
||||
```
|
||||
|
||||
To open pgAdmin, navigate to your AWS URL on port 5050:
|
||||
```
|
||||
http://ec2-11-234-567-890.eu-west-1.compute.amazonaws.com:5050
|
||||
```
|
||||
|
||||
<p>
|
||||
You are now able to log in with your credentials: PGADMIN_DEFAULT_EMAIL & PGADMIN_DEFAULT_PASSWORD.<br>
|
||||
</p>
|
||||
<p>
|
||||
In the right-side panel you will see "Servers(1)"; by clicking you'll see the Server: "PostgSail dev db"<br>
|
||||
</p>
|
||||
<p>
|
||||
**Warning:** A dialog box will open prompting for the password, but it shows the wrong username (postgres). Change this username by right-clicking on the server "PostgSail dev db" > Properties > Connection > enter username: POSTGRES_USER > Save
|
||||
</p>
|
||||
<p>
|
||||
Now right-click, select Connect to Server, and enter your password: POSTGRES_PASSWORD
|
||||
</p>
|
||||
<p>
|
||||
You'll see 2 databases: "postgres" and "signalk"
|
||||
</p>
|
||||
|
||||
### Enabling cron jobs by SQL query
|
||||
<p>
|
||||
Cron jobs are not active by default: if you don't have the correct settings (for SMTP, Pushover, Telegram), you might enter an error loop and could be blocked or banned by the external services.
|
||||
</p>
|
||||
<p>
|
||||
Once you have set up the services correctly (entered the credentials in the .env file), you can activate the cron jobs in the "postgres" database. (We are only using the SMTP email service in this example.)
|
||||
</p>
|
||||
+ Right-click on "postgres" database and select "Query Tool"
|
||||
+ Execute the following SQL query:
|
||||
|
||||
```
|
||||
UPDATE cron.job SET active = True;
|
||||
```
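
You can verify that the jobs are now active (column names per recent pg_cron versions):

```
SELECT jobid, jobname, active FROM cron.job;
```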
|
||||
|
||||
### Adding a user by SQL query
|
||||
I was not able to create a new user through the web application (still figuring out what is going on). Therefore I added a new user by SQL in the "signalk" database.
|
||||
+ Right-click on "signalk" database and select "Query Tool"
|
||||
+ Check the current users in your database by executing the query:
|
||||
```
|
||||
SELECT * FROM auth.accounts;
|
||||
```
|
||||
|
||||
|
||||
|
||||
+ To add a new user, execute the query:
|
||||
|
||||
```
|
||||
INSERT INTO auth.accounts (
|
||||
email, first, last, pass, role) VALUES (
|
||||
'your.email@domain.com'::citext, 'Test'::text, 'your_username'::text, 'your_password'::text, 'user_role'::name)
|
||||
returning email;
|
||||
```
|
||||
|
||||
When SMTP is correctly set up, you will receive two emails: "Welcome" and "Email verification".
|
||||
<p>
|
||||
You will be able to login with these credentials on the web
|
||||
</p>
|
||||
<p>
|
||||
Each time you log in, you will receive an "Email verification" email. This is the OTP process; you can bypass it by updating the JSON key value of "preferences":
|
||||
</p>
|
||||
|
||||
```
|
||||
UPDATE auth.accounts
|
||||
SET preferences='{"email_valid": true}'::jsonb || preferences
|
||||
WHERE email='your.email@domain.com';
|
||||
```
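
To confirm the flag was stored:

```
SELECT email, preferences->>'email_valid' AS email_valid
FROM auth.accounts
WHERE email='your.email@domain.com';
```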
|
||||
|
||||
Now you are able to use PostGSail on the web on your own AWS server!
|
New file: docs/Self‐hosted-installation-guide.md (166 lines)
|
||||
# Self-hosted setup example environment:
|
||||
|
||||
Virtual machine with Ubuntu 22.04 LTS minimal server installation.
|
||||
|
||||
Install openssh, update, and install docker-ce manually (the stock Ubuntu docker repo lags behind).
|
||||
The following ports are exposed to the internet, either using a static public IP address or port forwarding via your favorite firewall platform (not needed by default: Docker will expose all ports on all IPs).
The base install uses ports 5432 (db), 3000 (api) and 8080 (web).
|
||||
|
||||
We’ll add https using Apache or Nginx proxy once everything is tested. At that point you’ll want to open 443 or whatever other port you want to use for secure communication.
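
A minimal Nginx reverse-proxy sketch for the webapp (server name and certificate paths are placeholders; a similar server block can proxy the API on port 3000):

```
server {
    listen 443 ssl;
    server_name sail.example.com;
    ssl_certificate     /etc/ssl/certs/sail.example.com.pem;
    ssl_certificate_key /etc/ssl/private/sail.example.com.key;

    location / {
        proxy_pass http://127.0.0.1:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
```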
|
||||
|
||||
For docker-ce installation, this is a decent guide to installation:
|
||||
https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04
|
||||
|
||||
Third party services and options:
|
||||
Emails
|
||||
For email notifications you may want to install a local email handler like postfix or use a third party service like gmail.
|
||||
|
||||
Pushover
|
||||
Add more here
|
||||
|
||||
Telegram Bot
|
||||
Add more here
|
||||
|
||||
|
||||
```
|
||||
$ git clone https://github.com/xbgmsharp/postgsail
|
||||
cd postgsail
|
||||
cp .env.example .env
|
||||
nano .env
|
||||
```
|
||||
|
||||
Log in to your docker host once it's set up.
|
||||
Clone the repo to your user directory:
|
||||
|
||||
Copy the example file and edit the environment variables
|
||||
|
||||
The example has the following:
|
||||
```
|
||||
# POSTGRESQL ENV Settings
|
||||
POSTGRES_USER=username
|
||||
POSTGRES_PASSWORD=password
|
||||
POSTGRES_DB=postgres
|
||||
# PostgSail ENV Settings
|
||||
PGSAIL_AUTHENTICATOR_PASSWORD=password
|
||||
PGSAIL_GRAFANA_PASSWORD=password
|
||||
PGSAIL_GRAFANA_AUTH_PASSWORD=password
|
||||
# SMTP server settings
|
||||
PGSAIL_EMAIL_FROM=root@localhost
|
||||
PGSAIL_EMAIL_SERVER=localhost
|
||||
#PGSAIL_EMAIL_USER= Comment if not used
#PGSAIL_EMAIL_PASS= Comment if not used
|
||||
# Pushover settings
|
||||
#PGSAIL_PUSHOVER_APP_TOKEN= Comment if not used
#PGSAIL_PUSHOVER_APP_URL= Comment if not used
|
||||
# TELEGRAM BOT, ask BotFather
|
||||
#PGSAIL_TELEGRAM_BOT_TOKEN= Comment if not used
|
||||
# webapp entrypoint, typically the public DNS or IP
|
||||
PGSAIL_APP_URL=http://localhost:8080
|
||||
# API entrypoint from the webapp, typically the public DNS or IP
|
||||
PGSAIL_API_URL=http://localhost:3000
|
||||
# POSTGREST ENV Settings
|
||||
PGRST_DB_URI=postgres://authenticator:${PGSAIL_AUTHENTICATOR_PASSWORD}@db:5432/signalk
|
||||
# % cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 42 | head -n 1
|
||||
PGRST_JWT_SECRET=_at_least_32__char__long__random
|
||||
# Grafana ENV Settings
|
||||
GF_SECURITY_ADMIN_PASSWORD=password
|
||||
```
|
||||
|
||||
All of these need to be configured.
|
||||
|
||||
Step by step:
|
||||
|
||||
## POSTGRESQL ENV Settings
|
||||
|
||||
***POSTGRES_USER***
|
||||
Come up with a unique username for the database user. This will be used in the docker image when it’s started up. Nothing beyond creating a unique username and password is required here.
|
||||
This environment variable is used in conjunction with `POSTGRES_PASSWORD` to set a user and its password. This variable will create the specified user with superuser power and a database with the same name.
|
||||
|
||||
https://github.com/docker-library/docs/blob/master/postgres/README.md
|
||||
|
||||
***POSTGRES_PASSWORD***
|
||||
This should be a good password. It will be used for the postgres user above. Again this is used in the docker image.
|
||||
This environment variable is required for you to use the PostgreSQL image. It must not be empty or undefined. This environment variable sets the superuser password for PostgreSQL. The default superuser is defined by the POSTGRES_USER environment variable.
|
||||
|
||||
***POSTGRES_DB***
|
||||
This is the name of the database within postgres. Give it a unique name if you like. The schema will be loaded into this database and all data will be stored within it. Since this is used inside the docker image the name really doesn’t matter. If you plan to run additional databases within the image, then you might care.
|
||||
This environment variable can be used to define a different name for the default database that is created when the image is first started. If it is not specified, then the value of `POSTGRES_USER` will be used.
|
||||
|
||||
|
||||
```
|
||||
# PostgSail ENV Settings
|
||||
PGSAIL_AUTHENTICATOR_PASSWORD=password
|
||||
PGSAIL_GRAFANA_PASSWORD=password
|
||||
PGSAIL_GRAFANA_AUTH_PASSWORD=password
|
||||
PGSAIL_EMAIL_FROM=root@localhost
|
||||
PGSAIL_EMAIL_SERVER=localhost
|
||||
#PGSAIL_EMAIL_USER= Comment if not used
#PGSAIL_EMAIL_PASS= Comment if not used
#PGSAIL_PUSHOVER_APP_TOKEN= Comment if not used
#PGSAIL_PUSHOVER_APP_URL= Comment if not used
#PGSAIL_TELEGRAM_BOT_TOKEN= Comment if not used
|
||||
PGSAIL_APP_URL=http://localhost:8080
|
||||
PGSAIL_API_URL=http://localhost:3000
|
||||
```
|
||||
|
||||
PGSAIL_AUTHENTICATOR_PASSWORD
|
||||
This password is used as part of the database access configuration. It’s used as part of the access URI later on. (Put the same password in both lines.)
|
||||
|
||||
PGSAIL_GRAFANA_PASSWORD
|
||||
This password is used for the grafana service
|
||||
|
||||
PGSAIL_GRAFANA_AUTH_PASSWORD
|
||||
This password is presumably used for user authentication on Grafana (to be confirmed).
|
||||
|
||||
PGSAIL_EMAIL_FROM
|
||||
PGSAIL_EMAIL_SERVER
|
||||
Pgsail does not include a built in email service - only hooks to send email via an existing server.
|
||||
You can install an email service on the Ubuntu host or use a third party service like Gmail. If you choose to use a local service, be aware that some email providers will filter it as spam unless you've properly configured it.
|
||||
|
||||
PGSAIL_PUSHOVER_APP_TOKEN
|
||||
PGSAIL_PUSHOVER_APP
|
||||
PGSAIL_TELEGRAM_BOT_TOKEN
|
||||
|
||||
Add more info here
|
||||
PGSAIL_APP_URL
|
||||
This is the full URL (with domain name or IP) that you access PostgSail via. Once the nginx SSL proxy is added this may need to be updated. (A service restart is likely required after changing it; see the sketch below.)
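
If you change these URLs later, recreating the affected containers is the safe way to pick up the new values (Compose substitutes .env variables when containers are created); you may also need to rebuild the web image as shown elsewhere in this guide:

```
docker compose up -d --force-recreate web api
```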
|
||||
|
||||
|
||||
PGSAIL_API_URL
|
||||
This is the API URL used for boat and user access. Once the Apache or Nginx SSL proxy is added this may need to be updated (with the same restart caveat).
|
||||
|
||||
Network configuration example:
|
||||
This is a Docker question, but in general no special network configuration should be needed; Docker creates and assigns one automatically. All containers will be bound to all IPs on the host.
|
||||
The data volume can be a folder on disk, but a Docker volume is preferred.
|
||||
```
|
||||
# docker compose -f docker-compose.yml -f docker-compose.dev.yml ps -a
|
||||
NAME IMAGE COMMAND SERVICE CREATED STATUS PORTS
|
||||
api postgrest/postgrest "/bin/postgrest" api 2 months ago Up 2 months 0.0.0.0:3000->3000/tcp, :::3000->3000/tcp, 0.0.0.0:3003->3003/tcp, :::3003->3003/tcp
|
||||
app grafana/grafana:latest "/run.sh" app 3 months ago Up 12 days 0.0.0.0:3001->3000/tcp, :::3001->3000/tcp
|
||||
db xbgmsharp/timescaledb-postgis "docker-entrypoint.sh postgres" db 2 months ago Up 2 months (healthy) 0.0.0.0:5432->5432/tcp, :::5432->5432/tcp
|
||||
```
|
||||
All services (db, api, web) will be accessible via localhost and other IPs with the default configuration.
|
||||
|
||||
```bash
|
||||
# telnet localhost 5432
|
||||
```
|
||||
and
|
||||
```bash
|
||||
# curl localhost:3000
|
||||
```
|
||||
|
||||
```bash
|
||||
# docker network ls
|
||||
NETWORK ID NAME DRIVER SCOPE
|
||||
...
|
||||
14f30223ebf2 postgsail_default bridge local
|
||||
```
|
||||
|
||||
Volumes:
|
||||
```bash
|
||||
% docker volume ls
|
||||
DRIVER VOLUME NAME
|
||||
local postgsail_grafana-data
|
||||
local postgsail_postgres-data
|
||||
```
|
New file: docs/install_guide.md (84 lines)
|
||||
|
||||
## Connect to the server
|
||||
```bash
|
||||
% ssh root@my.server.com
|
||||
```
|
||||
|
||||
# Clone the git repo
|
||||
```bash
|
||||
% git clone https://github.com/xbgmsharp/postgsail
|
||||
Cloning into 'postgsail'...
|
||||
...
|
||||
```
|
||||
|
||||
## Edit the configuration
|
||||
```bash
|
||||
% cd postgsail
|
||||
% cp .env.example .env
|
||||
% cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 42 | head -n 1
|
||||
..
|
||||
% nano .env
|
||||
```
|
||||
|
||||
## Install Docker
|
||||
From https://docs.docker.com/engine/install/ubuntu/
|
||||
```bash
|
||||
% apt-get update
|
||||
...
|
||||
% apt-get install -y ca-certificates curl
|
||||
...
|
||||
% install -m 0755 -d /etc/apt/keyrings
|
||||
% curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
|
||||
% chmod a+r /etc/apt/keyrings/docker.asc
|
||||
% echo \
|
||||
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
|
||||
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
|
||||
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
% apt-get update
|
||||
...
|
||||
% apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
||||
...
|
||||
```
|
||||
|
||||
## Init the database
|
||||
```bash
|
||||
% docker compose up db
|
||||
...
|
||||
Gracefully stopping... (press Ctrl+C again to force)
|
||||
[+] Stopping 1/1
|
||||
✔ Container db Stopped
|
||||
```
|
||||
|
||||
## Start the db with the api
|
||||
```bash
|
||||
% docker compose pull api
|
||||
...
|
||||
% docker compose up -d db api
|
||||
```
|
||||
|
||||
## Checks
|
||||
Making sure it works.
|
||||
```bash
|
||||
% telnet localhost 5432
|
||||
...
|
||||
telnet> quit
|
||||
Connection closed.
|
||||
% curl localhost:3000
|
||||
...
|
||||
% docker ps
|
||||
...
|
||||
% docker logs api
|
||||
...
|
||||
```
|
||||
|
||||
# Run the web instance
|
||||
```bash
|
||||
% docker compose -f docker-compose.yml -f docker-compose.dev.yml build web  # be patient
|
||||
...
|
||||
|
||||
% docker compose -f docker-compose.yml -f docker-compose.dev.yml up web  # be patient
|
||||
...
|
||||
web |
|
||||
web | ➜ Local: http://localhost:8080/
|
||||
web | ➜ Network: http://172.18.0.4:8080/
|
||||
```
|
Submodule frontend updated: 31579e219d...44c270ea8b
New file: initdb/99_migrations_202404.sql (625 lines)
|
||||
---------------------------------------------------------------------------
|
||||
-- Copyright 2021-2024 Francois Lacroix <xbgmsharp@gmail.com>
|
||||
-- This file is part of PostgSail which is released under Apache License, Version 2.0 (the "License").
|
||||
-- See file LICENSE or go to http://www.apache.org/licenses/LICENSE-2.0 for full license details.
|
||||
--
|
||||
-- Migration April 2024
|
||||
--
|
||||
-- List current database
|
||||
select current_database();
|
||||
|
||||
-- connect to the DB
|
||||
\c signalk
|
||||
|
||||
\echo 'Timing mode is enabled'
|
||||
\timing
|
||||
|
||||
\echo 'Force timezone, just in case'
|
||||
set timezone to 'UTC';
|
||||
|
||||
UPDATE public.email_templates
|
||||
SET email_content='Hello __RECIPIENT__,
|
||||
Sorry! We could not convert your boat into a Windy Personal Weather Station due to missing data (temperature, wind or pressure).
|
||||
Windy Personal Weather Station is now disabled.'
|
||||
WHERE "name"='windy_error';
|
||||
|
||||
CREATE OR REPLACE FUNCTION public.cron_windy_fn() RETURNS void AS $$
|
||||
DECLARE
|
||||
windy_rec record;
|
||||
default_last_metric TIMESTAMPTZ := NOW() - interval '1 day';
|
||||
last_metric TIMESTAMPTZ := NOW();
|
||||
metric_rec record;
|
||||
windy_metric jsonb;
|
||||
app_settings jsonb;
|
||||
user_settings jsonb;
|
||||
windy_pws jsonb;
|
||||
BEGIN
|
||||
-- Check for new observations pending update
|
||||
RAISE NOTICE 'cron_process_windy_fn';
|
||||
-- Gather url from app settings
|
||||
app_settings := get_app_settings_fn();
|
||||
-- Find users with Windy active and with an active vessel
|
||||
-- Map account id to Windy Station ID
|
||||
FOR windy_rec in
|
||||
SELECT
|
||||
a.id,a.email,v.vessel_id,v.name,
|
||||
COALESCE((a.preferences->'windy_last_metric')::TEXT, default_last_metric::TEXT) as last_metric
|
||||
FROM auth.accounts a
|
||||
LEFT JOIN auth.vessels AS v ON v.owner_email = a.email
|
||||
LEFT JOIN api.metadata AS m ON m.vessel_id = v.vessel_id
|
||||
WHERE (a.preferences->'public_windy')::boolean = True
|
||||
AND m.active = True
|
||||
LOOP
|
||||
RAISE NOTICE '-> cron_process_windy_fn for [%]', windy_rec;
|
||||
PERFORM set_config('vessel.id', windy_rec.vessel_id, false);
|
||||
--RAISE WARNING 'public.cron_process_windy_rec_fn() scheduler vessel.id %, user.id', current_setting('vessel.id', false), current_setting('user.id', false);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(windy_rec.vessel_id::TEXT);
|
||||
RAISE NOTICE '-> cron_process_windy_fn checking user_settings [%]', user_settings;
|
||||
-- Get all metrics from the last windy_last_metric avg by 5 minutes
|
||||
-- TODO: use json_agg to send all the data at once, but there is an issue with the Python jsonb transformation of decimals.
|
||||
FOR metric_rec in
|
||||
SELECT time_bucket('5 minutes', m.time) AS time_bucket,
|
||||
avg((m.metrics->'environment.outside.temperature')::numeric) AS temperature,
|
||||
avg((m.metrics->'environment.outside.pressure')::numeric) AS pressure,
|
||||
avg((m.metrics->'environment.outside.relativeHumidity')::numeric) AS rh,
|
||||
avg((m.metrics->'environment.wind.directionTrue')::numeric) AS winddir,
|
||||
avg((m.metrics->'environment.wind.speedTrue')::numeric) AS wind,
|
||||
max((m.metrics->'environment.wind.speedTrue')::numeric) AS gust,
|
||||
last(latitude, time) AS lat,
|
||||
last(longitude, time) AS lng
|
||||
FROM api.metrics m
|
||||
WHERE vessel_id = windy_rec.vessel_id
|
||||
AND m.time >= windy_rec.last_metric::TIMESTAMPTZ
|
||||
GROUP BY time_bucket
|
||||
ORDER BY time_bucket ASC LIMIT 100
|
||||
LOOP
|
||||
RAISE NOTICE '-> cron_process_windy_fn checking metrics [%]', metric_rec;
|
||||
if metric_rec.wind is null or metric_rec.temperature is null
|
||||
or metric_rec.pressure is null or metric_rec.rh is null then
|
||||
-- Ignore when there is no metrics.
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('windy_error'::TEXT, user_settings::JSONB);
|
||||
-- Disable windy
|
||||
PERFORM api.update_user_preferences_fn('{public_windy}'::TEXT, 'false'::TEXT);
|
||||
RETURN;
|
||||
end if;
|
||||
-- https://community.windy.com/topic/8168/report-your-weather-station-data-to-windy
|
||||
-- temp from kelvin to Celsius
|
||||
-- winddir from radiant to degrees
|
||||
-- rh from ratio to percentage
|
||||
SELECT jsonb_build_object(
|
||||
'dateutc', metric_rec.time_bucket,
|
||||
'station', windy_rec.id,
|
||||
'name', windy_rec.name,
|
||||
'lat', metric_rec.lat,
|
||||
'lon', metric_rec.lng,
|
||||
'wind', metric_rec.wind,
|
||||
'gust', metric_rec.gust,
|
||||
'pressure', metric_rec.pressure,
|
||||
'winddir', radiantToDegrees(metric_rec.winddir::numeric),
|
||||
'temp', kelvinToCel(metric_rec.temperature::numeric),
|
||||
'rh', valToPercent(metric_rec.rh::numeric)
|
||||
) INTO windy_metric;
|
||||
RAISE NOTICE '-> cron_process_windy_fn checking windy_metrics [%]', windy_metric;
|
||||
SELECT windy_pws_py_fn(windy_metric, user_settings, app_settings) into windy_pws;
|
||||
RAISE NOTICE '-> cron_process_windy_fn Windy PWS [%]', ((windy_pws->'header')::JSONB ? 'id');
|
||||
IF NOT((user_settings->'settings')::JSONB ? 'windy') and ((windy_pws->'header')::JSONB ? 'id') then
|
||||
RAISE NOTICE '-> cron_process_windy_fn new Windy PWS [%]', (windy_pws->'header')::JSONB->>'id';
|
||||
-- Send metrics to Windy
|
||||
PERFORM api.update_user_preferences_fn('{windy}'::TEXT, ((windy_pws->'header')::JSONB->>'id')::TEXT);
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('windy'::TEXT, user_settings::JSONB);
|
||||
-- Refresh user settings after first success
|
||||
user_settings := get_user_settings_from_vesselid_fn(windy_rec.vessel_id::TEXT);
|
||||
END IF;
|
||||
-- Record last metrics time
|
||||
SELECT metric_rec.time_bucket INTO last_metric;
|
||||
END LOOP;
|
||||
PERFORM api.update_user_preferences_fn('{windy_last_metric}'::TEXT, last_metric::TEXT);
|
||||
END LOOP;
|
||||
END;
|
||||
$$ language plpgsql;
|
||||
|
||||
-- Add security definer, run this function as admin to avoid a weird bug
|
||||
-- ERROR: variable not found in subplan target list
|
||||
CREATE OR REPLACE FUNCTION api.delete_logbook_fn(IN _id integer) RETURNS BOOLEAN AS $delete_logbook$
|
||||
DECLARE
|
||||
logbook_rec record;
|
||||
previous_stays_id numeric;
|
||||
current_stays_departed text;
|
||||
current_stays_id numeric;
|
||||
current_stays_active boolean;
|
||||
BEGIN
|
||||
-- If _id is not NULL
|
||||
IF _id IS NULL OR _id < 1 THEN
|
||||
RAISE WARNING '-> delete_logbook_fn invalid input %', _id;
|
||||
RETURN FALSE;
|
||||
END IF;
|
||||
-- Get the logbook record and ensure all necessary fields exist
|
||||
SELECT * INTO logbook_rec
|
||||
FROM api.logbook
|
||||
WHERE id = _id;
|
||||
-- Ensure the query is successful
|
||||
IF logbook_rec.vessel_id IS NULL THEN
|
||||
RAISE WARNING '-> delete_logbook_fn invalid logbook %', _id;
|
||||
RETURN FALSE;
|
||||
END IF;
|
||||
-- Update logbook
|
||||
UPDATE api.logbook l
|
||||
SET notes = 'mark for deletion'
|
||||
WHERE l.vessel_id = current_setting('vessel.id', false)
|
||||
AND id = logbook_rec.id;
|
||||
-- Update metrics status to moored
|
||||
-- This generates an error when run as user_role: "variable not found in subplan target list"
|
||||
UPDATE api.metrics
|
||||
SET status = 'moored'
|
||||
WHERE time >= logbook_rec._from_time
|
||||
AND time <= logbook_rec._to_time
|
||||
AND vessel_id = current_setting('vessel.id', false);
|
||||
-- Get related stays
|
||||
SELECT id,departed,active INTO current_stays_id,current_stays_departed,current_stays_active
|
||||
FROM api.stays s
|
||||
WHERE s.vessel_id = current_setting('vessel.id', false)
|
||||
AND s.arrived = logbook_rec._to_time;
|
||||
-- Update related stays
|
||||
UPDATE api.stays s
|
||||
SET notes = 'mark for deletion'
|
||||
WHERE s.vessel_id = current_setting('vessel.id', false)
|
||||
AND s.arrived = logbook_rec._to_time;
|
||||
-- Find previous stays
|
||||
SELECT id INTO previous_stays_id
|
||||
FROM api.stays s
|
||||
WHERE s.vessel_id = current_setting('vessel.id', false)
|
||||
AND s.arrived < logbook_rec._to_time
|
||||
ORDER BY s.arrived DESC LIMIT 1;
|
||||
-- Update previous stays with the departed time from current stays
|
||||
-- and set the active state from current stays
|
||||
UPDATE api.stays
|
||||
SET departed = current_stays_departed::TIMESTAMPTZ,
|
||||
active = current_stays_active
|
||||
WHERE vessel_id = current_setting('vessel.id', false)
|
||||
AND id = previous_stays_id;
|
||||
-- Clean up, remove invalid logbook and stay entry
|
||||
DELETE FROM api.logbook WHERE id = logbook_rec.id;
|
||||
RAISE WARNING '-> delete_logbook_fn delete logbook [%]', logbook_rec.id;
|
||||
DELETE FROM api.stays WHERE id = current_stays_id;
|
||||
RAISE WARNING '-> delete_logbook_fn delete stays [%]', current_stays_id;
|
||||
-- Clean up, Subtract (-1) moorages ref count
|
||||
UPDATE api.moorages
|
||||
SET reference_count = reference_count - 1
|
||||
WHERE vessel_id = current_setting('vessel.id', false)
|
||||
AND id = previous_stays_id;
|
||||
RETURN TRUE;
|
||||
END;
|
||||
$delete_logbook$ LANGUAGE plpgsql security definer;
|
||||
|
||||
-- Allow users to update certain columns on specific TABLES in the API schema; add reference_count, needed when deleting a log
|
||||
GRANT UPDATE (name, notes, stay_code, home_flag, reference_count) ON api.moorages TO user_role;
|
||||
|
||||
-- Allow users to update certain columns on specific TABLES in the API schema; add track_geojson
|
||||
GRANT UPDATE (name, _from, _to, notes, track_geojson) ON api.logbook TO user_role;
|
||||
|
||||
DROP FUNCTION IF EXISTS api.timelapse2_fn;
|
||||
CREATE OR REPLACE FUNCTION api.timelapse2_fn(
|
||||
IN start_log INTEGER DEFAULT NULL,
|
||||
IN end_log INTEGER DEFAULT NULL,
|
||||
IN start_date TEXT DEFAULT NULL,
|
||||
IN end_date TEXT DEFAULT NULL,
|
||||
OUT geojson JSONB) RETURNS JSONB AS $timelapse2$
|
||||
DECLARE
|
||||
_geojson jsonb;
|
||||
BEGIN
|
||||
-- Using sub query to force id order by time
|
||||
-- User can now directly edit the json to add comment or remove track point
|
||||
-- Merge json track_geojson with Geometry Point into a single GeoJSON Points
|
||||
--raise WARNING 'input % % %' , start_log, end_log, public.isnumeric(end_log::text);
|
||||
IF start_log IS NOT NULL AND end_log IS NULL THEN
|
||||
end_log := start_log;
|
||||
END IF;
|
||||
IF start_date IS NOT NULL AND end_date IS NULL THEN
|
||||
end_date := start_date;
|
||||
END IF;
|
||||
--raise WARNING 'input % % %' , start_log, end_log, public.isnumeric(end_log::text);
|
||||
IF start_log IS NOT NULL AND public.isnumeric(start_log::text) AND public.isnumeric(end_log::text) THEN
|
||||
SELECT jsonb_agg(
|
||||
jsonb_build_object('type', 'Feature',
|
||||
'properties', f->'properties',
|
||||
'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'Point'))
|
||||
) INTO _geojson
|
||||
FROM (
|
||||
SELECT jsonb_array_elements(track_geojson->'features') AS f
|
||||
FROM api.logbook l
|
||||
WHERE l.id >= start_log
|
||||
AND l.id <= end_log
|
||||
AND l.track_geojson IS NOT NULL
|
||||
ORDER BY l._from_time ASC
|
||||
) AS sub
|
||||
WHERE (f->'geometry'->>'type') = 'Point';
|
||||
ELSIF start_date IS NOT NULL AND public.isdate(start_date::text) AND public.isdate(end_date::text) THEN
|
||||
SELECT jsonb_agg(
|
||||
jsonb_build_object('type', 'Feature',
|
||||
'properties', f->'properties',
|
||||
'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'Point'))
|
||||
) INTO _geojson
|
||||
FROM (
|
||||
SELECT jsonb_array_elements(track_geojson->'features') AS f
|
||||
FROM api.logbook l
|
||||
WHERE l._from_time >= start_date::TIMESTAMPTZ
|
||||
AND l._to_time <= end_date::TIMESTAMPTZ + interval '23 hours 59 minutes'
|
||||
AND l.track_geojson IS NOT NULL
|
||||
ORDER BY l._from_time ASC
|
||||
) AS sub
|
||||
WHERE (f->'geometry'->>'type') = 'Point';
|
||||
ELSE
|
||||
SELECT jsonb_agg(
|
||||
jsonb_build_object('type', 'Feature',
|
||||
'properties', f->'properties',
|
||||
'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'Point'))
|
||||
) INTO _geojson
|
||||
FROM (
|
||||
SELECT jsonb_array_elements(track_geojson->'features') AS f
|
||||
FROM api.logbook l
|
||||
WHERE l.track_geojson IS NOT NULL
|
||||
ORDER BY l._from_time ASC
|
||||
) AS sub
|
||||
WHERE (f->'geometry'->>'type') = 'Point';
|
||||
END IF;
|
||||
-- Return a GeoJSON MultiLineString
|
||||
-- result _geojson [null, null]
|
||||
--RAISE WARNING 'result _geojson %' , _geojson;
|
||||
SELECT jsonb_build_object(
|
||||
'type', 'FeatureCollection',
|
||||
'features', _geojson ) INTO geojson;
|
||||
END;
|
||||
$timelapse2$ LANGUAGE plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
api.timelapse2_fn
|
||||
IS 'Export all selected logs geojson `track_geojson` to a geojson as points including properties';
|
||||
|
||||
-- Allow timelapse2_fn execution for user_role and api_anonymous (public replay)
|
||||
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO user_role;
|
||||
GRANT EXECUTE ON FUNCTION api.timelapse2_fn TO api_anonymous;
|
||||
|
||||
DROP FUNCTION IF EXISTS public.process_logbook_queue_fn;
|
||||
CREATE OR REPLACE FUNCTION public.process_logbook_queue_fn(IN _id integer) RETURNS void AS $process_logbook_queue$
|
||||
DECLARE
|
||||
logbook_rec record;
|
||||
from_name text;
|
||||
to_name text;
|
||||
log_name text;
|
||||
from_moorage record;
|
||||
to_moorage record;
|
||||
avg_rec record;
|
||||
geo_rec record;
|
||||
log_settings jsonb;
|
||||
user_settings jsonb;
|
||||
geojson jsonb;
|
||||
extra_json jsonb;
|
||||
trip_note jsonb;
|
||||
from_moorage_note jsonb;
|
||||
to_moorage_note jsonb;
|
||||
BEGIN
|
||||
-- If _id is not NULL
|
||||
IF _id IS NULL OR _id < 1 THEN
|
||||
RAISE WARNING '-> process_logbook_queue_fn invalid input %', _id;
|
||||
RETURN;
|
||||
END IF;
|
||||
-- Get the logbook record and ensure all necessary fields exist
|
||||
SELECT * INTO logbook_rec
|
||||
FROM api.logbook
|
||||
WHERE active IS false
|
||||
AND id = _id
|
||||
AND _from_lng IS NOT NULL
|
||||
AND _from_lat IS NOT NULL
|
||||
AND _to_lng IS NOT NULL
|
||||
AND _to_lat IS NOT NULL;
|
||||
-- Ensure the query is successful
|
||||
IF logbook_rec.vessel_id IS NULL THEN
|
||||
RAISE WARNING '-> process_logbook_queue_fn invalid logbook %', _id;
|
||||
RETURN;
|
||||
END IF;
|
||||
|
||||
PERFORM set_config('vessel.id', logbook_rec.vessel_id, false);
|
||||
--RAISE WARNING 'public.process_logbook_queue_fn() scheduler vessel.id %, user.id', current_setting('vessel.id', false), current_setting('user.id', false);
|
||||
|
||||
-- Calculate logbook data average and geo
|
||||
-- Update logbook entry with the latest metric data and calculate data
|
||||
avg_rec := logbook_update_avg_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
|
||||
geo_rec := logbook_update_geom_distance_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
|
||||
|
||||
-- Do we have an existing moorage within 300m of the new log
|
||||
-- generate logbook name, concat _from_location and _to_location from moorage name
|
||||
from_moorage := process_lat_lon_fn(logbook_rec._from_lng::NUMERIC, logbook_rec._from_lat::NUMERIC);
|
||||
to_moorage := process_lat_lon_fn(logbook_rec._to_lng::NUMERIC, logbook_rec._to_lat::NUMERIC);
|
||||
SELECT CONCAT(from_moorage.moorage_name, ' to ' , to_moorage.moorage_name) INTO log_name;
|
||||
|
||||
-- Process `propulsion.*.runTime` and `navigation.log`
|
||||
-- Calculate extra json
|
||||
extra_json := logbook_update_extra_json_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
|
||||
|
||||
RAISE NOTICE 'Updating valid logbook entry logbook id:[%] start:[%] end:[%]', logbook_rec.id, logbook_rec._from_time, logbook_rec._to_time;
|
||||
UPDATE api.logbook
|
||||
SET
|
||||
duration = (logbook_rec._to_time::TIMESTAMPTZ - logbook_rec._from_time::TIMESTAMPTZ),
|
||||
avg_speed = avg_rec.avg_speed,
|
||||
max_speed = avg_rec.max_speed,
|
||||
max_wind_speed = avg_rec.max_wind_speed,
|
||||
_from = from_moorage.moorage_name,
|
||||
_from_moorage_id = from_moorage.moorage_id,
|
||||
_to_moorage_id = to_moorage.moorage_id,
|
||||
_to = to_moorage.moorage_name,
|
||||
name = log_name,
|
||||
track_geom = geo_rec._track_geom,
|
||||
distance = geo_rec._track_distance,
|
||||
extra = extra_json,
|
||||
notes = NULL -- reset pre_log process
|
||||
WHERE id = logbook_rec.id;
|
||||
|
||||
-- GeoJSON requires the track_geom field
|
||||
geojson := logbook_update_geojson_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
|
||||
UPDATE api.logbook
|
||||
SET
|
||||
track_geojson = geojson
|
||||
WHERE id = logbook_rec.id;
|
||||
|
||||
-- Add trip details name as note for the first geometry point entry from the GeoJSON
|
||||
SELECT format('{"trip": { "name": "%s", "duration": "%s", "distance": "%s" }}', logbook_rec.name, logbook_rec.duration, logbook_rec.distance) into trip_note;
|
||||
-- Update the properties of the first feature
|
||||
UPDATE api.logbook
|
||||
SET track_geojson = jsonb_set(
|
||||
track_geojson,
|
||||
'{features, 1, properties}',
|
||||
(track_geojson -> 'features' -> 1 -> 'properties' || trip_note)::jsonb
|
||||
)
|
||||
WHERE id = logbook_rec.id
|
||||
and track_geojson -> 'features' -> 1 -> 'geometry' ->> 'type' = 'Point';
|
||||
|
||||
-- Add moorage name as note for the third and last entry of the GeoJSON
|
||||
SELECT format('{"notes": "%s"}', from_moorage.moorage_name) into from_moorage_note;
|
||||
-- Update the properties of the third feature, the second with geometry point
|
||||
UPDATE api.logbook
|
||||
SET track_geojson = jsonb_set(
|
||||
track_geojson,
|
||||
'{features, 2, properties}',
|
||||
(track_geojson -> 'features' -> 2 -> 'properties' || from_moorage_note)::jsonb
|
||||
)
|
||||
WHERE id = logbook_rec.id
|
||||
AND track_geojson -> 'features' -> 2 -> 'geometry' ->> 'type' = 'Point';
|
||||
|
||||
-- Update the note properties of the last feature with geometry point
|
||||
SELECT format('{"notes": "%s"}', to_moorage.moorage_name) into to_moorage_note;
|
||||
UPDATE api.logbook
|
||||
SET track_geojson = jsonb_set(
|
||||
track_geojson,
|
||||
'{features, -1, properties}',
|
||||
CASE
|
||||
WHEN COALESCE((track_geojson -> 'features' -> -1 -> 'properties' ->> 'notes'), '') = '' THEN
|
||||
(track_geojson -> 'features' -> -1 -> 'properties' || to_moorage_note)::jsonb
|
||||
ELSE
|
||||
track_geojson -> 'features' -> -1 -> 'properties'
|
||||
END
|
||||
)
|
||||
WHERE id = logbook_rec.id
|
||||
AND track_geojson -> 'features' -> -1 -> 'geometry' ->> 'type' = 'Point';
|
||||
|
||||
-- Prepare notification, gather user settings
|
||||
SELECT json_build_object('logbook_name', log_name, 'logbook_link', logbook_rec.id) into log_settings;
|
||||
user_settings := get_user_settings_from_vesselid_fn(logbook_rec.vessel_id::TEXT);
|
||||
SELECT user_settings::JSONB || log_settings::JSONB into user_settings;
|
||||
RAISE NOTICE '-> debug process_logbook_queue_fn get_user_settings_from_vesselid_fn [%]', user_settings;
|
||||
RAISE NOTICE '-> debug process_logbook_queue_fn log_settings [%]', log_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('logbook'::TEXT, user_settings::JSONB);
|
||||
-- Process badges
|
||||
RAISE NOTICE '-> debug process_logbook_queue_fn user_settings [%]', user_settings->>'email'::TEXT;
|
||||
PERFORM set_config('user.email', user_settings->>'email'::TEXT, false);
|
||||
PERFORM badges_logbook_fn(logbook_rec.id, logbook_rec._to_time::TEXT);
|
||||
PERFORM badges_geom_fn(logbook_rec.id, logbook_rec._to_time::TEXT);
|
||||
END;
|
||||
$process_logbook_queue$ LANGUAGE plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.process_logbook_queue_fn
|
||||
IS 'Update logbook details when completed, logbook_update_avg_fn, logbook_update_geom_distance_fn, reverse_geocode_py_fn';
|
||||
|
||||
-- Update the pre.check for the new timelapse function
|
||||
CREATE OR REPLACE FUNCTION public.check_jwt() RETURNS void AS $$
|
||||
-- Prevent unregister user or unregister vessel access
|
||||
-- Allow anonymous access
|
||||
-- Needs to be refactored and simplified, especially the anonymous part.
|
||||
DECLARE
|
||||
_role name;
|
||||
_email text;
|
||||
anonymous record;
|
||||
_path name;
|
||||
_vid text;
|
||||
_vname text;
|
||||
boat TEXT;
|
||||
_pid INTEGER := 0; -- public_id
|
||||
_pvessel TEXT := NULL; -- public_type
|
||||
_ptype TEXT := NULL; -- public_type
|
||||
_ppath BOOLEAN := False; -- public_path
|
||||
_pvalid BOOLEAN := False; -- public_valid
|
||||
_pheader text := NULL; -- public_header
|
||||
valid_public_type BOOLEAN := False;
|
||||
account_rec record;
|
||||
vessel_rec record;
|
||||
BEGIN
|
||||
-- Extract email and role from jwt token
|
||||
--RAISE WARNING 'check_jwt jwt %', current_setting('request.jwt.claims', true);
|
||||
SELECT current_setting('request.jwt.claims', true)::json->>'email' INTO _email;
|
||||
PERFORM set_config('user.email', _email, false);
|
||||
SELECT current_setting('request.jwt.claims', true)::json->>'role' INTO _role;
|
||||
--RAISE WARNING 'jwt email %', current_setting('request.jwt.claims', true)::json->>'email';
|
||||
--RAISE WARNING 'jwt role %', current_setting('request.jwt.claims', true)::json->>'role';
|
||||
--RAISE WARNING 'cur_user %', current_user;
|
||||
|
||||
--TODO SELECT current_setting('request.jwt.uid', true)::json->>'uid' INTO _user_id;
|
||||
--TODO RAISE WARNING 'jwt user_id %', current_setting('request.jwt.uid', true)::json->>'uid';
|
||||
--TODO SELECT current_setting('request.jwt.vid', true)::json->>'vid' INTO _vessel_id;
|
||||
--TODO RAISE WARNING 'jwt vessel_id %', current_setting('request.jwt.vid', true)::json->>'vid';
|
||||
IF _role = 'user_role' THEN
|
||||
-- Check the user exist in the accounts table
|
||||
SELECT * INTO account_rec
|
||||
FROM auth.accounts
|
||||
WHERE auth.accounts.email = _email;
|
||||
IF account_rec.email IS NULL THEN
|
||||
RAISE EXCEPTION 'Invalid user'
|
||||
USING HINT = 'Unknown user or password';
|
||||
END IF;
|
||||
-- Set session variables
|
||||
PERFORM set_config('user.id', account_rec.user_id, false);
|
||||
SELECT current_setting('request.path', true) into _path;
|
||||
--RAISE WARNING 'req path %', current_setting('request.path', true);
|
||||
-- Function allow without defined vessel like for anonymous role
|
||||
IF _path ~ '^\/rpc\/(login|signup|recover|reset)$' THEN
|
||||
RETURN;
|
||||
END IF;
|
||||
-- Function allow without defined vessel as user role
|
||||
-- openapi doc, user settings, otp code and vessel registration
|
||||
IF _path = '/rpc/settings_fn'
|
||||
OR _path = '/rpc/register_vessel'
|
||||
OR _path = '/rpc/update_user_preferences_fn'
|
||||
OR _path = '/rpc/versions_fn'
|
||||
OR _path = '/rpc/email_fn'
|
||||
OR _path = '/' THEN
|
||||
RETURN;
|
||||
END IF;
|
||||
-- Check a vessel and user exist
|
||||
SELECT auth.vessels.* INTO vessel_rec
|
||||
FROM auth.vessels, auth.accounts
|
||||
WHERE auth.vessels.owner_email = auth.accounts.email
|
||||
AND auth.accounts.email = _email;
|
||||
-- check if boat exist yet?
|
||||
IF vessel_rec.owner_email IS NULL THEN
|
||||
-- Return http status code 551 with message
|
||||
RAISE sqlstate 'PT551' using
|
||||
message = 'Vessel Required',
|
||||
detail = 'Invalid vessel',
|
||||
hint = 'Unknown vessel';
|
||||
--RETURN; -- ignore if not exist
|
||||
END IF;
|
||||
-- Redundant?
|
||||
IF vessel_rec.vessel_id IS NULL THEN
|
||||
RAISE EXCEPTION 'Invalid vessel'
|
||||
USING HINT = 'Unknown vessel id';
|
||||
END IF;
|
||||
-- Set session variables
|
||||
PERFORM set_config('vessel.id', vessel_rec.vessel_id, false);
|
||||
PERFORM set_config('vessel.name', vessel_rec.name, false);
|
||||
--RAISE WARNING 'public.check_jwt() user_role vessel.id [%]', current_setting('vessel.id', false);
|
||||
--RAISE WARNING 'public.check_jwt() user_role vessel.name [%]', current_setting('vessel.name', false);
|
||||
ELSIF _role = 'vessel_role' THEN
|
||||
SELECT current_setting('request.path', true) into _path;
|
||||
--RAISE WARNING 'req path %', current_setting('request.path', true);
|
||||
-- Function allow without defined vessel like for anonymous role
|
||||
IF _path ~ '^\/rpc\/(oauth_\w+)$' THEN
|
||||
RETURN;
|
||||
END IF;
|
||||
-- Extract vessel_id from jwt token
|
||||
SELECT current_setting('request.jwt.claims', true)::json->>'vid' INTO _vid;
|
||||
-- Check the vessel and user exist
|
||||
SELECT auth.vessels.* INTO vessel_rec
|
||||
FROM auth.vessels, auth.accounts
|
||||
WHERE auth.vessels.owner_email = auth.accounts.email
|
||||
AND auth.accounts.email = _email
|
||||
AND auth.vessels.vessel_id = _vid;
|
||||
IF vessel_rec.owner_email IS NULL THEN
|
||||
RAISE EXCEPTION 'Invalid vessel'
|
||||
USING HINT = 'Unknown vessel owner_email';
|
||||
END IF;
|
||||
PERFORM set_config('vessel.id', vessel_rec.vessel_id, false);
|
||||
PERFORM set_config('vessel.name', vessel_rec.name, false);
|
||||
--RAISE WARNING 'public.check_jwt() user_role vessel.name %', current_setting('vessel.name', false);
|
||||
--RAISE WARNING 'public.check_jwt() user_role vessel.id %', current_setting('vessel.id', false);
|
||||
ELSIF _role = 'api_anonymous' THEN
|
||||
--RAISE WARNING 'public.check_jwt() api_anonymous';
|
||||
-- Check if path is a valid allow anonymous path
|
||||
SELECT current_setting('request.path', true) ~ '^/(logs_view|log_view|rpc/timelapse_fn|rpc/timelapse2_fn|monitoring_view|stats_logs_view|stats_moorages_view|rpc/stats_logs_fn)$' INTO _ppath;
|
||||
if _ppath is True then
|
||||
-- Check if the custom header is present and valid
|
||||
SELECT current_setting('request.headers', true)::json->>'x-is-public' into _pheader;
|
||||
RAISE WARNING 'public.check_jwt() api_anonymous _pheader [%]', _pheader;
|
||||
if _pheader is null then
|
||||
RAISE EXCEPTION 'Invalid public_header'
|
||||
USING HINT = 'Stop being so evil and maybe you can log in';
|
||||
end if;
|
||||
SELECT convert_from(decode(_pheader, 'base64'), 'utf-8')
|
||||
~ '\w+,public_(logs|logs_list|stats|timelapse|monitoring),\d+$' into _pvalid;
|
||||
RAISE WARNING 'public.check_jwt() api_anonymous _pvalid [%]', _pvalid;
|
||||
if _pvalid is null or _pvalid is False then
|
||||
RAISE EXCEPTION 'Invalid public_valid'
|
||||
USING HINT = 'Stop being so evil and maybe you can log in';
|
||||
end if;
|
||||
WITH regex AS (
|
||||
SELECT regexp_match(
|
||||
convert_from(
|
||||
decode(_pheader, 'base64'), 'utf-8'),
|
||||
'(\w+),(public_(logs|logs_list|stats|timelapse|monitoring)),(\d+)$') AS match
|
||||
)
|
||||
SELECT match[1], match[2], match[4] into _pvessel, _ptype, _pid
|
||||
FROM regex;
|
||||
RAISE WARNING 'public.check_jwt() api_anonymous [%] [%] [%]', _pvessel, _ptype, _pid;
|
||||
if _pvessel is not null and _ptype is not null then
|
||||
-- Everything seems fine; get the vessel_id based on the vessel name.
|
||||
SELECT _ptype::name = any(enum_range(null::public_type)::name[]) INTO valid_public_type;
|
||||
IF valid_public_type IS False THEN
|
||||
-- Ignore entry if type is invalid
|
||||
RAISE EXCEPTION 'Invalid public_type'
|
||||
USING HINT = 'Stop being so evil and maybe you can log in';
|
||||
END IF;
|
||||
-- Check if boat name match public_vessel name
|
||||
boat := '^' || _pvessel || '$';
|
||||
IF _ptype ~ '^public_(logs|timelapse)$' AND _pid > 0 THEN
|
||||
WITH log as (
|
||||
SELECT vessel_id from api.logbook l where l.id = _pid
|
||||
)
|
||||
SELECT v.vessel_id, v.name into anonymous
|
||||
FROM auth.accounts a, auth.vessels v, jsonb_each_text(a.preferences) as prefs, log l
|
||||
WHERE v.vessel_id = l.vessel_id
|
||||
AND a.email = v.owner_email
|
||||
AND a.preferences->>'public_vessel'::text ~* boat
|
||||
AND prefs.key = _ptype::TEXT
|
||||
AND prefs.value::BOOLEAN = true;
|
||||
RAISE WARNING '-> ispublic_fn public_logs output boat:[%], type:[%], result:[%]', _pvessel, _ptype, anonymous;
|
||||
IF anonymous.vessel_id IS NOT NULL THEN
|
||||
PERFORM set_config('vessel.id', anonymous.vessel_id, false);
|
||||
PERFORM set_config('vessel.name', anonymous.name, false);
|
||||
RETURN;
|
||||
END IF;
|
||||
ELSE
|
||||
SELECT v.vessel_id, v.name into anonymous
|
||||
FROM auth.accounts a, auth.vessels v, jsonb_each_text(a.preferences) as prefs
|
||||
WHERE a.email = v.owner_email
|
||||
AND a.preferences->>'public_vessel'::text ~* boat
|
||||
AND prefs.key = _ptype::TEXT
|
||||
AND prefs.value::BOOLEAN = true;
|
||||
RAISE WARNING '-> ispublic_fn output boat:[%], type:[%], result:[%]', _pvessel, _ptype, anonymous;
|
||||
IF anonymous.vessel_id IS NOT NULL THEN
|
||||
PERFORM set_config('vessel.id', anonymous.vessel_id, false);
|
||||
PERFORM set_config('vessel.name', anonymous.name, false);
|
||||
RETURN;
|
||||
END IF;
|
||||
END IF;
|
||||
RAISE sqlstate 'PT404' using message = 'unknown resource';
|
||||
END IF; -- end anonymous path
|
||||
END IF;
|
||||
ELSIF _role <> 'api_anonymous' THEN
|
||||
RAISE EXCEPTION 'Invalid role'
|
||||
USING HINT = 'Stop being so evil and maybe you can log in';
|
||||
END IF;
|
||||
END
|
||||
$$ language plpgsql security definer;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.check_jwt
|
||||
IS 'PostgREST API db-pre-request check, set_config according to role (api_anonymous,vessel_role,user_role)';
|
||||
|
||||
GRANT EXECUTE ON FUNCTION public.check_jwt() TO api_anonymous;
|
||||
|
||||
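-- Example (illustrative sketch, not part of the migration): the x-is-public
-- header checked above is the base64 encoding of '<vessel>,<public_type>,<id>'.
-- For a hypothetical vessel "aava" sharing log 123 publicly:
SELECT encode(convert_to('aava,public_logs,123', 'utf-8'), 'base64');
-- => YWF2YSxwdWJsaWNfbG9ncywxMjM=
-- Sending that value as the x-is-public header on an allowed anonymous path
-- such as /logs_view grants read access, provided the owner enabled the
-- matching public_* preference.
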
-- Update version
UPDATE public.app_settings
    SET value='0.7.2'
    WHERE "name"='app.version';
initdb/99_migrations_202405.sql (new file, 784 lines)
@@ -0,0 +1,784 @@
---------------------------------------------------------------------------
-- Copyright 2021-2024 Francois Lacroix <xbgmsharp@gmail.com>
-- This file is part of PostgSail which is released under Apache License, Version 2.0 (the "License").
-- See file LICENSE or go to http://www.apache.org/licenses/LICENSE-2.0 for full license details.
--
-- Migration May 2024
--
-- List current database
select current_database();

-- connect to the DB
\c signalk

\echo 'Timing mode is enabled'
\timing

\echo 'Force timezone, just in case'
set timezone to 'UTC';

INSERT INTO public.email_templates ("name",email_subject,email_content,pushover_title,pushover_message)
    VALUES ('account_disable','PostgSail Account disabled',E'Hello __RECIPIENT__,\nSorry! Your account is disabled. Please contact me to solve the issue.','PostgSail Account disabled!',E'Sorry!\nYour account is disabled. Please contact me to solve the issue.');

-- Check if user is disabled due to abuse
-- Track IP per user to avoid abuse
create or replace function
api.login(in email text, in pass text) returns auth.jwt_token as $$
declare
    _role name;
    result auth.jwt_token;
    app_jwt_secret text;
    _email_valid boolean := false;
    _email text := email;
    _user_id text := null;
    _user_disable boolean := false;
    headers json := current_setting('request.headers', true)::json;
    client_ip text := coalesce(headers->>'x-client-ip', NULL);
begin
    -- check email and password
    select auth.user_role(email, pass) into _role;
    if _role is null then
        -- HTTP/403
        --raise invalid_password using message = 'invalid user or password';
        -- HTTP/401
        raise insufficient_privilege using message = 'invalid user or password';
    end if;

    -- Check if user is disabled due to abuse
    SELECT preferences['disable'],user_id INTO _user_disable,_user_id
        FROM auth.accounts a
        WHERE a.email = _email;
    IF _user_disable is True then
        -- due to the raise, the insert is never committed.
        --INSERT INTO process_queue (channel, payload, stored, ref_id)
        --    VALUES ('account_disable', _email, now(), _user_id);
        RAISE sqlstate 'PT402' using message = 'Account disabled, contact us',
            detail = 'Quota exceeded',
            hint = 'Upgrade your plan';
    END IF;

    -- Check email_valid and generate OTP
    SELECT preferences['email_valid'],user_id INTO _email_valid,_user_id
        FROM auth.accounts a
        WHERE a.email = _email;
    IF _email_valid is null or _email_valid is False THEN
        INSERT INTO process_queue (channel, payload, stored, ref_id)
            VALUES ('email_otp', _email, now(), _user_id);
    END IF;

    -- Track IP per user to avoid abuse
    --RAISE WARNING 'api.login debug: [%],[%]', client_ip, login.email;
    IF client_ip IS NOT NULL THEN
        UPDATE auth.accounts a SET preferences = jsonb_recursive_merge(a.preferences, jsonb_build_object('ip', client_ip)) WHERE a.email = login.email;
    END IF;

    -- Get app_jwt_secret
    SELECT value INTO app_jwt_secret
        FROM app_settings
        WHERE name = 'app.jwt_secret';

    --RAISE WARNING 'api.login debug: [%],[%],[%]', app_jwt_secret, _role, login.email;
    -- Generate jwt
    select jwt.sign(
        -- row_to_json(r), ''
        -- row_to_json(r)::json, current_setting('app.jwt_secret')::text
        row_to_json(r)::json, app_jwt_secret
        ) as token
        from (
        select _role as role, login.email as email, -- TODO replace with user_id
            -- select _role as role, user_id as uid, -- add support in check_jwt
            extract(epoch from now())::integer + 60*60 as exp
        ) r
        into result;
    return result;
end;
$$ language plpgsql security definer;

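-- Usage sketch (credentials are hypothetical, not part of the migration):
-- a successful call returns a signed JWT valid for one hour; a bad password
-- raises insufficient_privilege, which PostgREST maps to HTTP/401.
-- SELECT api.login('demo@example.com', 'my-secret-password');
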
-- Add moorage name to view
DROP VIEW IF EXISTS api.moorages_stays_view;
CREATE OR REPLACE VIEW api.moorages_stays_view WITH (security_invoker=true,security_barrier=true) AS
    select
        _to.name AS _to_name,
        _to.id AS _to_id,
        _to._to_time,
        _from.id AS _from_id,
        _from.name AS _from_name,
        _from._from_time,
        s.stay_code,s.duration,m.id,m.name
    FROM api.stays_at sa, api.moorages m, api.stays s
        LEFT JOIN api.logbook AS _from ON _from._from_time = s.departed
        LEFT JOIN api.logbook AS _to ON _to._to_time = s.arrived
    WHERE s.departed IS NOT NULL
        AND s.name IS NOT NULL
        AND s.stay_code = sa.stay_code
        AND s.moorage_id = m.id
    ORDER BY _to._to_time DESC;
-- Description
COMMENT ON VIEW
    api.moorages_stays_view
    IS 'Moorages stay listing web view';

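-- Usage sketch (not part of the migration): each row is a stay bracketed by
-- the log that arrived at the moorage and the log that departed from it.
-- SELECT _from_name, _to_name, duration, name FROM api.moorages_stays_view LIMIT 10;
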
-- Create a merge_logbook_fn
CREATE OR REPLACE FUNCTION api.merge_logbook_fn(IN id_start integer, IN id_end integer) RETURNS void AS $merge_logbook$
    DECLARE
        logbook_rec_start record;
        logbook_rec_end record;
        log_name text;
        avg_rec record;
        geo_rec record;
        geojson jsonb;
        extra_json jsonb;
    BEGIN
        -- Validate input: both ids must be positive integers
        IF (id_start IS NULL OR id_start < 1) OR (id_end IS NULL OR id_end < 1) THEN
            RAISE WARNING '-> merge_logbook_fn invalid input % %', id_start, id_end;
            RETURN;
        END IF;
        -- id_end must be greater than id_start
        IF id_end <= id_start THEN
            RAISE WARNING '-> merge_logbook_fn invalid input % < %', id_end, id_start;
            RETURN;
        END IF;
        -- Get the start logbook record, ensuring all necessary fields exist
        SELECT * INTO logbook_rec_start
            FROM api.logbook
            WHERE active IS false
                AND id = id_start
                AND _from_lng IS NOT NULL
                AND _from_lat IS NOT NULL
                AND _to_lng IS NOT NULL
                AND _to_lat IS NOT NULL;
        -- Ensure the query is successful
        IF logbook_rec_start.vessel_id IS NULL THEN
            RAISE WARNING '-> merge_logbook_fn invalid logbook %', id_start;
            RETURN;
        END IF;
        -- Get the end logbook record, ensuring all necessary fields exist
        SELECT * INTO logbook_rec_end
            FROM api.logbook
            WHERE active IS false
                AND id = id_end
                AND _from_lng IS NOT NULL
                AND _from_lat IS NOT NULL
                AND _to_lng IS NOT NULL
                AND _to_lat IS NOT NULL;
        -- Ensure the query is successful
        IF logbook_rec_end.vessel_id IS NULL THEN
            RAISE WARNING '-> merge_logbook_fn invalid logbook %', id_end;
            RETURN;
        END IF;

        RAISE WARNING '-> merge_logbook_fn logbook start:% end:%', id_start, id_end;
        PERFORM set_config('vessel.id', logbook_rec_start.vessel_id, false);

        -- Calculate logbook data average and geo
        -- Update logbook entry with the latest metric data and calculated data
        avg_rec := logbook_update_avg_fn(logbook_rec_start.id, logbook_rec_start._from_time::TEXT, logbook_rec_end._to_time::TEXT);
        geo_rec := logbook_update_geom_distance_fn(logbook_rec_start.id, logbook_rec_start._from_time::TEXT, logbook_rec_end._to_time::TEXT);

        -- Process `propulsion.*.runTime` and `navigation.log`
        -- Calculate extra json
        extra_json := logbook_update_extra_json_fn(logbook_rec_start.id, logbook_rec_start._from_time::TEXT, logbook_rec_end._to_time::TEXT);
        -- add the avg_wind_speed
        extra_json := extra_json || jsonb_build_object('avg_wind_speed', avg_rec.avg_wind_speed);

        -- Generate the logbook name: concat the _from and _to moorage names
        SELECT CONCAT(logbook_rec_start._from, ' to ', logbook_rec_end._to) INTO log_name;
        RAISE NOTICE 'Updating valid logbook entry logbook id:[%] start:[%] end:[%]', logbook_rec_start.id, logbook_rec_start._from_time, logbook_rec_end._to_time;
        UPDATE api.logbook
            SET
                -- Update the start logbook with the newly calculated metrics
                duration = (logbook_rec_end._to_time::TIMESTAMPTZ - logbook_rec_start._from_time::TIMESTAMPTZ),
                avg_speed = avg_rec.avg_speed,
                max_speed = avg_rec.max_speed,
                max_wind_speed = avg_rec.max_wind_speed,
                name = log_name,
                track_geom = geo_rec._track_geom,
                distance = geo_rec._track_distance,
                extra = extra_json,
                -- Set _to metrics from the end logbook
                _to = logbook_rec_end._to,
                _to_moorage_id = logbook_rec_end._to_moorage_id,
                _to_lat = logbook_rec_end._to_lat,
                _to_lng = logbook_rec_end._to_lng,
                _to_time = logbook_rec_end._to_time
            WHERE id = logbook_rec_start.id;

        -- GeoJSON requires the track_geom field
        geojson := logbook_update_geojson_fn(logbook_rec_start.id, logbook_rec_start._from_time::TEXT, logbook_rec_end._to_time::TEXT);
        UPDATE api.logbook
            SET
                track_geojson = geojson
            WHERE id = logbook_rec_start.id;

        -- Mark the merged (end) logbook for deletion
        UPDATE api.logbook
            SET notes = 'mark for deletion'
            WHERE id = logbook_rec_end.id;
        -- Mark the related stay for deletion
        UPDATE api.stays
            SET notes = 'mark for deletion'
            WHERE arrived = logbook_rec_start._to_time;
        -- Mark the related moorage for deletion
        UPDATE api.moorages
            SET notes = 'mark for deletion'
            WHERE id = logbook_rec_start._to_moorage_id;

        -- Clean up: remove the now-invalid logbook, stay and moorage entries
        DELETE FROM api.logbook WHERE id = logbook_rec_end.id;
        RAISE WARNING '-> merge_logbook_fn delete logbook id [%]', logbook_rec_end.id;
        DELETE FROM api.stays WHERE arrived = logbook_rec_start._to_time;
        RAISE WARNING '-> merge_logbook_fn delete stay arrived [%]', logbook_rec_start._to_time;
        DELETE FROM api.moorages WHERE id = logbook_rec_start._to_moorage_id;
        RAISE WARNING '-> merge_logbook_fn delete moorage id [%]', logbook_rec_start._to_moorage_id;
    END;
$merge_logbook$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    api.merge_logbook_fn
    IS 'Merge 2 logbooks by id, from the start of the lower log id to the end of the higher log id, and update the calculated data as well (avg, geojson)';

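-- Usage sketch (ids are hypothetical, not part of the migration): keep log 12,
-- absorb log 13, then delete log 13 and the stay/moorage between them.
-- SELECT api.merge_logbook_fn(12, 13);
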
-- Add tags to view
DROP VIEW IF EXISTS api.logs_view;
CREATE OR REPLACE VIEW api.logs_view
    WITH(security_invoker=true,security_barrier=true)
    AS SELECT id,
        name,
        _from AS "from",
        _from_time AS started,
        _to AS "to",
        _to_time AS ended,
        distance,
        duration,
        _from_moorage_id,
        _to_moorage_id,
        extra->'tags' AS tags
    FROM api.logbook l
    WHERE name IS NOT NULL AND _to_time IS NOT NULL
    ORDER BY _from_time DESC;
-- Description
COMMENT ON VIEW api.logs_view IS 'Logs web view';

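-- Usage sketch (tag value hypothetical): tags is a jsonb array taken from
-- extra->'tags', so the jsonb existence operator applies.
-- SELECT id, name, tags FROM api.logs_view WHERE tags ? 'racing';
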
-- Update a logbook with avg wind speed
DROP FUNCTION IF EXISTS public.logbook_update_avg_fn;
CREATE OR REPLACE FUNCTION public.logbook_update_avg_fn(
    IN _id integer,
    IN _start TEXT,
    IN _end TEXT,
    OUT avg_speed double precision,
    OUT max_speed double precision,
    OUT max_wind_speed double precision,
    OUT avg_wind_speed double precision,
    OUT count_metric integer
) AS $logbook_update_avg$
    BEGIN
        RAISE NOTICE '-> logbook_update_avg_fn calculate avg for logbook id=%, start:"%", end:"%"', _id, _start, _end;
        SELECT AVG(speedoverground), MAX(speedoverground), MAX(windspeedapparent), AVG(windspeedapparent), COUNT(*) INTO
                avg_speed, max_speed, max_wind_speed, avg_wind_speed, count_metric
            FROM api.metrics m
            WHERE m.latitude IS NOT NULL
                AND m.longitude IS NOT NULL
                AND m.time >= _start::TIMESTAMPTZ
                AND m.time <= _end::TIMESTAMPTZ
                AND vessel_id = current_setting('vessel.id', false);
        RAISE NOTICE '-> logbook_update_avg_fn avg for logbook id=%, avg_speed:%, max_speed:%, avg_wind_speed:%, max_wind_speed:%, count:%', _id, avg_speed, max_speed, avg_wind_speed, max_wind_speed, count_metric;
    END;
$logbook_update_avg$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    public.logbook_update_avg_fn
    IS 'Update logbook details with calculated average and max data, AVG(speedOverGround), MAX(speedOverGround), MAX(windspeedapparent), count_metric';

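-- Usage sketch (id and time window hypothetical): requires vessel.id to be set
-- in the session, as the metrics query filters on it.
-- SELECT * FROM public.logbook_update_avg_fn(42, '2024-05-01T10:00:00Z', '2024-05-01T16:00:00Z');
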
-- Update pending new logbook from process queue
DROP FUNCTION IF EXISTS process_logbook_queue_fn;
CREATE OR REPLACE FUNCTION process_logbook_queue_fn(IN _id integer) RETURNS void AS $process_logbook_queue$
    DECLARE
        logbook_rec record;
        from_name text;
        to_name text;
        log_name text;
        from_moorage record;
        to_moorage record;
        avg_rec record;
        geo_rec record;
        log_settings jsonb;
        user_settings jsonb;
        geojson jsonb;
        extra_json jsonb;
    BEGIN
        -- Validate input: _id must be a positive integer
        IF _id IS NULL OR _id < 1 THEN
            RAISE WARNING '-> process_logbook_queue_fn invalid input %', _id;
            RETURN;
        END IF;
        -- Get the logbook record, ensuring all necessary fields exist
        SELECT * INTO logbook_rec
            FROM api.logbook
            WHERE active IS false
                AND id = _id
                AND _from_lng IS NOT NULL
                AND _from_lat IS NOT NULL
                AND _to_lng IS NOT NULL
                AND _to_lat IS NOT NULL;
        -- Ensure the query is successful
        IF logbook_rec.vessel_id IS NULL THEN
            RAISE WARNING '-> process_logbook_queue_fn invalid logbook %', _id;
            RETURN;
        END IF;

        PERFORM set_config('vessel.id', logbook_rec.vessel_id, false);
        --RAISE WARNING 'public.process_logbook_queue_fn() scheduler vessel.id %, user.id', current_setting('vessel.id', false), current_setting('user.id', false);

        -- Calculate logbook data average and geo
        -- Update logbook entry with the latest metric data and calculated data
        avg_rec := logbook_update_avg_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
        geo_rec := logbook_update_geom_distance_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);

        -- Do we have an existing moorage within 300m of the new log?
        -- Generate the logbook name: concat the _from and _to moorage names
        from_moorage := process_lat_lon_fn(logbook_rec._from_lng::NUMERIC, logbook_rec._from_lat::NUMERIC);
        to_moorage := process_lat_lon_fn(logbook_rec._to_lng::NUMERIC, logbook_rec._to_lat::NUMERIC);
        SELECT CONCAT(from_moorage.moorage_name, ' to ' , to_moorage.moorage_name) INTO log_name;

        -- Process `propulsion.*.runTime` and `navigation.log`
        -- Calculate extra json
        extra_json := logbook_update_extra_json_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
        -- add the avg_wind_speed
        extra_json := extra_json || jsonb_build_object('avg_wind_speed', avg_rec.avg_wind_speed);

        RAISE NOTICE 'Updating valid logbook entry logbook id:[%] start:[%] end:[%]', logbook_rec.id, logbook_rec._from_time, logbook_rec._to_time;
        UPDATE api.logbook
            SET
                duration = (logbook_rec._to_time::TIMESTAMPTZ - logbook_rec._from_time::TIMESTAMPTZ),
                avg_speed = avg_rec.avg_speed,
                max_speed = avg_rec.max_speed,
                max_wind_speed = avg_rec.max_wind_speed,
                _from = from_moorage.moorage_name,
                _from_moorage_id = from_moorage.moorage_id,
                _to_moorage_id = to_moorage.moorage_id,
                _to = to_moorage.moorage_name,
                name = log_name,
                track_geom = geo_rec._track_geom,
                distance = geo_rec._track_distance,
                extra = extra_json,
                notes = NULL -- reset pre_log process
            WHERE id = logbook_rec.id;

        -- GeoJSON requires the track_geom LineString geometry field
        geojson := logbook_update_geojson_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
        UPDATE api.logbook
            SET
                track_geojson = geojson
            WHERE id = logbook_rec.id;

        -- The GeoJSON timelapse requires track_geojson Point geometry features
        -- Add properties to the geojson for timelapse purpose
        PERFORM public.logbook_timelapse_geojson_fn(logbook_rec.id);

        -- Add a post_logbook entry to the process queue for notification and QGIS processing
        -- Required, as the logbook update above must be committed first
        INSERT INTO process_queue (channel, payload, stored, ref_id)
            VALUES ('post_logbook', logbook_rec.id, NOW(), current_setting('vessel.id', true));

    END;
$process_logbook_queue$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    public.process_logbook_queue_fn
    IS 'Update logbook details when completed, logbook_update_avg_fn, logbook_update_geom_distance_fn, reverse_geocode_py_fn';

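-- Usage sketch (id hypothetical, not part of the migration): normally driven
-- by the process queue, but a single closed logbook can be reprocessed by hand.
-- SELECT public.process_logbook_queue_fn(42);
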
-- Add avg_wind_speed to logbook geojson
-- Add back truewindspeed and truewinddirection to logbook geojson
DROP FUNCTION IF EXISTS public.logbook_update_geojson_fn;
CREATE FUNCTION public.logbook_update_geojson_fn(IN _id integer, IN _start text, IN _end text,
    OUT _track_geojson JSON
    ) AS $logbook_geojson$
    declare
        log_geojson jsonb;
        metrics_geojson jsonb;
        _map jsonb;
    begin
        -- GeoJson Feature Logbook linestring
        SELECT
            ST_AsGeoJSON(log.*) into log_geojson
            FROM
                ( SELECT
                    id,name,
                    distance,
                    duration,
                    avg_speed,
                    max_speed,
                    max_wind_speed,
                    _from_time,
                    _to_time,
                    _from_moorage_id,
                    _to_moorage_id,
                    notes,
                    extra['avg_wind_speed'] as avg_wind_speed,
                    track_geom
                    FROM api.logbook
                    WHERE id = _id
                ) AS log;
        -- GeoJson Feature Metrics point
        SELECT
            json_agg(ST_AsGeoJSON(t.*)::json) into metrics_geojson
            FROM (
                ( SELECT
                    time,
                    courseovergroundtrue,
                    speedoverground,
                    windspeedapparent,
                    longitude,latitude,
                    '' AS notes,
                    coalesce(metersToKnots((metrics->'environment.wind.speedTrue')::NUMERIC), null) as truewindspeed,
                    coalesce(radiantToDegrees((metrics->'environment.wind.directionTrue')::NUMERIC), null) as truewinddirection,
                    coalesce(status, null) as status,
                    st_makepoint(longitude,latitude) AS geo_point
                    FROM api.metrics m
                    WHERE m.latitude IS NOT NULL
                        AND m.longitude IS NOT NULL
                        AND time >= _start::TIMESTAMPTZ
                        AND time <= _end::TIMESTAMPTZ
                        AND vessel_id = current_setting('vessel.id', false)
                    ORDER BY m.time ASC
                )
            ) AS t;

        -- Merge jsonb
        SELECT log_geojson::jsonb || metrics_geojson::jsonb into _map;
        -- output
        SELECT
            json_build_object(
                'type', 'FeatureCollection',
                'features', _map
            ) into _track_geojson;
    END;
$logbook_geojson$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    public.logbook_update_geojson_fn
    IS 'Update log details with geojson';

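-- Usage sketch (id and window hypothetical): returns a FeatureCollection whose
-- first feature is the logbook LineString and whose remaining features are the
-- per-metric Points; vessel.id must be set in the session.
-- SELECT public.logbook_update_geojson_fn(42, '2024-05-01T10:00:00Z', '2024-05-01T16:00:00Z');
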
-- Add properties to the geojson for timelapse purpose
DROP FUNCTION IF EXISTS public.logbook_timelapse_geojson_fn;
CREATE FUNCTION public.logbook_timelapse_geojson_fn(IN _id INT) returns void
AS $logbook_timelapse$
    declare
        first_feature_note JSONB;
        second_feature_note JSONB;
        last_feature_note JSONB;
        logbook_rec record;
    begin
        -- We need to fetch the processed logbook data.
        SELECT name,duration,distance,_from,_to INTO logbook_rec
            FROM api.logbook
            WHERE active IS false
                AND id = _id
                AND _from_lng IS NOT NULL
                AND _from_lat IS NOT NULL
                AND _to_lng IS NOT NULL
                AND _to_lat IS NOT NULL;
        --raise warning '-> logbook_rec: %', logbook_rec;
        select format('{"trip": { "name": "%s", "duration": "%s", "distance": "%s" }}', logbook_rec.name, logbook_rec.duration, logbook_rec.distance) into first_feature_note;
        select format('{"notes": "%s"}', logbook_rec._from) into second_feature_note;
        select format('{"notes": "%s"}', logbook_rec._to) into last_feature_note;
        --raise warning '-> logbook_rec: % % %', first_feature_note, second_feature_note, last_feature_note;

        -- Add the trip properties to the second feature (features[1], the first Point geometry)
        UPDATE api.logbook
            SET track_geojson = jsonb_set(
                track_geojson,
                '{features, 1, properties}',
                (track_geojson -> 'features' -> 1 -> 'properties' || first_feature_note)::jsonb
            )
            WHERE id = _id
                and track_geojson -> 'features' -> 1 -> 'geometry' ->> 'type' = 'Point';

        -- Add the departure note to the third feature (features[2], the second Point geometry)
        UPDATE api.logbook
            SET track_geojson = jsonb_set(
                track_geojson,
                '{features, 2, properties}',
                (track_geojson -> 'features' -> 2 -> 'properties' || second_feature_note)::jsonb
            )
            where id = _id
                and track_geojson -> 'features' -> 2 -> 'geometry' ->> 'type' = 'Point';

        -- Add the arrival note to the last feature with a Point geometry
        UPDATE api.logbook
            SET track_geojson = jsonb_set(
                track_geojson,
                '{features, -1, properties}',
                CASE
                    WHEN COALESCE((track_geojson -> 'features' -> -1 -> 'properties' ->> 'notes'), '') = '' THEN
                        (track_geojson -> 'features' -> -1 -> 'properties' || last_feature_note)::jsonb
                    ELSE
                        track_geojson -> 'features' -> -1 -> 'properties'
                END
            )
            WHERE id = _id
                and track_geojson -> 'features' -> -1 -> 'geometry' ->> 'type' = 'Point';
    end;
$logbook_timelapse$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    public.logbook_timelapse_geojson_fn
    IS 'Update logbook geojson, Add properties to some geojson features for timelapse purpose';

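-- Quick check sketch (id hypothetical, not part of the migration): inspect the
-- injected trip properties on the first Point feature after processing.
-- SELECT track_geojson->'features'->1->'properties' FROM api.logbook WHERE id = 42;
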
-- CRON for signalk plugin upgrade
-- The goal is to avoid errors from old plugin versions by enforcing an upgrade.
-- ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
-- "POST /metadata?on_conflict=client_id HTTP/1.1" 400 137 "-" "postgsail.signalk v0.0.9"
DROP FUNCTION IF EXISTS public.cron_process_skplugin_upgrade_fn;
CREATE FUNCTION public.cron_process_skplugin_upgrade_fn() RETURNS void AS $skplugin_upgrade$
    DECLARE
        skplugin_upgrade_rec record;
        user_settings jsonb;
    BEGIN
        -- Check for signalk plugin version
        RAISE NOTICE 'cron_process_skplugin_upgrade_fn';
        FOR skplugin_upgrade_rec in
            SELECT
                v.owner_email,m.name,m.vessel_id,m.plugin_version,a.first
                FROM api.metadata m
                LEFT JOIN auth.vessels v ON v.vessel_id = m.vessel_id
                LEFT JOIN auth.accounts a ON v.owner_email = a.email
                WHERE m.plugin_version <= '0.3.0'
        LOOP
            RAISE NOTICE '-> cron_process_skplugin_upgrade_rec_fn for [%]', skplugin_upgrade_rec;
            SELECT json_build_object('email', skplugin_upgrade_rec.owner_email, 'recipient', skplugin_upgrade_rec.first) into user_settings;
            RAISE NOTICE '-> debug cron_process_skplugin_upgrade_rec_fn [%]', user_settings;
            -- Send notification
            PERFORM send_notification_fn('skplugin_upgrade'::TEXT, user_settings::JSONB);
        END LOOP;
    END;
$skplugin_upgrade$ language plpgsql;
-- Description
COMMENT ON FUNCTION
    public.cron_process_skplugin_upgrade_fn
    IS 'init by pg_cron, check for signalk plugin version and notify for upgrade';

INSERT INTO public.email_templates ("name",email_subject,email_content,pushover_title,pushover_message)
    VALUES ('skplugin_upgrade','PostgSail Signalk plugin upgrade',E'Hello __RECIPIENT__,\nPlease upgrade your postgsail signalk plugin. Be sure to contact me if you encounter any issue.','PostgSail Signalk plugin upgrade!',E'Please upgrade your postgsail signalk plugin.');

DROP FUNCTION IF EXISTS public.metadata_ip_trigger_fn;
-- Track IP per vessel to avoid abuse
CREATE FUNCTION public.metadata_ip_trigger_fn() RETURNS trigger
AS $metadata_ip_trigger$
    DECLARE
        headers json := current_setting('request.headers', true)::json;
        client_ip text := coalesce(headers->>'x-client-ip', NULL);
    BEGIN
        RAISE WARNING 'metadata_ip_trigger_fn [%] [%]', current_setting('vessel.id', true), client_ip;
        IF client_ip IS NOT NULL THEN
            UPDATE api.metadata
                SET
                    configuration = NEW.configuration || jsonb_build_object('ip', client_ip)
                WHERE id = NEW.id;
        END IF;
        RETURN NULL;
    END;
$metadata_ip_trigger$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION public.metadata_ip_trigger_fn() IS 'Add IP from vessel in metadata, track abuse';

DROP TRIGGER IF EXISTS metadata_ip_trigger ON api.metadata;
-- The trigger stays disabled for now: enabling it generates an error
--CREATE TRIGGER metadata_ip_trigger BEFORE UPDATE ON api.metadata
--    FOR EACH ROW EXECUTE FUNCTION metadata_ip_trigger_fn();
-- Description
--COMMENT ON TRIGGER
--    metadata_ip_trigger ON api.metadata
--    IS 'AFTER UPDATE ON api.metadata run function metadata_ip_trigger_fn for tracking vessel IP';

DROP FUNCTION IF EXISTS public.logbook_active_geojson_fn;
CREATE FUNCTION public.logbook_active_geojson_fn(
    OUT _track_geojson jsonb
 ) AS $logbook_active_geojson$
    BEGIN
        WITH log_active AS (
            SELECT * FROM api.logbook WHERE active IS True
        ),
        log_gis_line AS (
            SELECT ST_MakeLine(
                ARRAY(
                    SELECT st_makepoint(longitude,latitude) AS geo_point
                        FROM api.metrics m, log_active l
                        WHERE m.latitude IS NOT NULL
                            AND m.longitude IS NOT NULL
                            AND m.time >= l._from_time::TIMESTAMPTZ
                            AND m.time <= l._to_time::TIMESTAMPTZ
                        ORDER BY m.time ASC
                )
            )
        ),
        log_gis_point AS (
            SELECT
                ST_AsGeoJSON(t.*)::json AS GeoJSONPoint
            FROM (
                ( SELECT
                    time,
                    courseovergroundtrue,
                    speedoverground,
                    windspeedapparent,
                    longitude,latitude,
                    '' AS notes,
                    coalesce(metersToKnots((metrics->'environment.wind.speedTrue')::NUMERIC), null) as truewindspeed,
                    coalesce(radiantToDegrees((metrics->'environment.wind.directionTrue')::NUMERIC), null) as truewinddirection,
                    coalesce(status, null) AS status,
                    st_makepoint(longitude,latitude) AS geo_point
                    FROM api.metrics m
                    WHERE m.latitude IS NOT NULL
                        AND m.longitude IS NOT NULL
                    ORDER BY m.time DESC LIMIT 1
                )
            ) as t
        ),
        log_agg as (
            SELECT
                CASE WHEN log_gis_line.st_makeline IS NOT NULL THEN
                    ( SELECT jsonb_agg(ST_AsGeoJSON(log_gis_line.*)::json)::jsonb AS GeoJSONLine FROM log_gis_line )
                ELSE
                    ( SELECT '[]'::json AS GeoJSONLine )::jsonb
                END AS GeoJSONLine -- name the CASE result so log_agg.GeoJSONLine resolves below
            FROM log_gis_line
        )
        SELECT
            jsonb_build_object(
                'type', 'FeatureCollection',
                'features', log_agg.GeoJSONLine::jsonb || log_gis_point.GeoJSONPoint::jsonb
            ) INTO _track_geojson FROM log_agg, log_gis_point;
    END;
$logbook_active_geojson$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    public.logbook_active_geojson_fn
    IS 'Create a GeoJSON with 2 features, LineString with a current active log and Point with the last position';

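-- Usage sketch (not part of the migration): while a trip is active this returns
-- a FeatureCollection with the in-progress LineString plus the last Point.
-- SELECT public.logbook_active_geojson_fn();
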
-- Update the monitoring view to support live trips, and add truewindspeed and truewinddirection to the stationary GeoJSON.
DROP VIEW IF EXISTS api.monitoring_view;
CREATE VIEW api.monitoring_view WITH (security_invoker=true,security_barrier=true) AS
    SELECT
        time AS "time",
        (NOW() AT TIME ZONE 'UTC' - time) > INTERVAL '70 MINUTES' as offline,
        metrics-> 'environment.water.temperature' AS waterTemperature,
        metrics-> 'environment.inside.temperature' AS insideTemperature,
        metrics-> 'environment.outside.temperature' AS outsideTemperature,
        metrics-> 'environment.wind.speedOverGround' AS windSpeedOverGround,
        metrics-> 'environment.wind.directionTrue' AS windDirectionTrue,
        metrics-> 'environment.inside.relativeHumidity' AS insideHumidity,
        metrics-> 'environment.outside.relativeHumidity' AS outsideHumidity,
        metrics-> 'environment.outside.pressure' AS outsidePressure,
        metrics-> 'environment.inside.pressure' AS insidePressure,
        metrics-> 'electrical.batteries.House.capacity.stateOfCharge' AS batteryCharge,
        metrics-> 'electrical.batteries.House.voltage' AS batteryVoltage,
        metrics-> 'environment.depth.belowTransducer' AS depth,
        jsonb_build_object(
            'type', 'Feature',
            'geometry', ST_AsGeoJSON(st_makepoint(longitude,latitude))::jsonb,
            'properties', jsonb_build_object(
                'name', current_setting('vessel.name', false),
                'latitude', m.latitude,
                'longitude', m.longitude,
                'time', m.time,
                'speedoverground', m.speedoverground,
                'windspeedapparent', m.windspeedapparent,
                'truewindspeed', coalesce(metersToKnots((metrics->'environment.wind.speedTrue')::NUMERIC), null),
                'truewinddirection', coalesce(radiantToDegrees((metrics->'environment.wind.directionTrue')::NUMERIC), null),
                'status', coalesce(m.status, null)
                )::jsonb ) AS geojson,
        current_setting('vessel.name', false) AS name,
        m.status,
        CASE WHEN m.status <> 'moored' THEN (
            SELECT public.logbook_active_geojson_fn() )
        END AS live
    FROM api.metrics m
    ORDER BY time DESC LIMIT 1;
-- Description
COMMENT ON VIEW
    api.monitoring_view
    IS 'Monitoring static web view';

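-- Usage sketch (not part of the migration): the web client polls this
-- single-row view; 'live' is non-null only while the vessel is underway.
-- SELECT time, offline, status, geojson, live FROM api.monitoring_view;
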
-- Allow access to tables for user_role, grafana and api_anonymous
GRANT SELECT ON ALL TABLES IN SCHEMA api TO user_role;
GRANT SELECT ON ALL TABLES IN SCHEMA api TO grafana;
GRANT SELECT ON TABLE api.monitoring_view TO user_role;
GRANT SELECT ON TABLE api.monitoring_view TO api_anonymous;
GRANT SELECT ON TABLE api.monitoring_view TO grafana;

-- Allow execution of functions for user_role, grafana and api_anonymous
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO user_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO grafana;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO user_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO grafana;
GRANT EXECUTE ON FUNCTION public.logbook_active_geojson_fn TO api_anonymous;
GRANT EXECUTE ON FUNCTION public.metersToKnots TO api_anonymous;
GRANT EXECUTE ON FUNCTION public.radiantToDegrees TO api_anonymous;

-- Fix vessel name (Organization): ensure we have a value, either from the metadata table (signalk) or from the vessels table
DROP FUNCTION IF EXISTS public.cron_process_grafana_fn;
CREATE OR REPLACE FUNCTION public.cron_process_grafana_fn() RETURNS void
AS $cron_process_grafana_fn$
DECLARE
    process_rec record;
    data_rec record;
    app_settings jsonb;
    user_settings jsonb;
BEGIN
    -- We run grafana provisioning only after the first received vessel metadata
    -- Check for new vessel metadata pending grafana provisioning
    RAISE NOTICE 'cron_process_grafana_fn';
    FOR process_rec in
        SELECT * from process_queue
            where channel = 'grafana' and processed is null
            order by stored asc
    LOOP
        RAISE NOTICE '-> cron_process_grafana_fn [%]', process_rec.payload;
        -- Gather url from app settings
        app_settings := get_app_settings_fn();
        -- Get vessel details based on the metadata id
        SELECT
            v.owner_email,coalesce(m.name,v.name) as name,m.vessel_id into data_rec
            FROM auth.vessels v
            LEFT JOIN api.metadata m ON v.vessel_id = m.vessel_id
            WHERE m.id = process_rec.payload::INTEGER;
        IF data_rec.vessel_id IS NULL OR data_rec.name IS NULL THEN
            RAISE WARNING '-> DEBUG cron_process_grafana_fn grafana_py_fn error [%]', data_rec;
            RETURN;
        END IF;
        -- As we got data from the vessel we can do the grafana provisioning.
        RAISE DEBUG '-> DEBUG cron_process_grafana_fn grafana_py_fn provisioning [%]', data_rec;
        PERFORM grafana_py_fn(data_rec.name, data_rec.vessel_id, data_rec.owner_email, app_settings);
        -- Gather user settings
        user_settings := get_user_settings_from_vesselid_fn(data_rec.vessel_id::TEXT);
        RAISE DEBUG '-> DEBUG cron_process_grafana_fn get_user_settings_from_vesselid_fn [%]', user_settings;
        -- add user in keycloak
        PERFORM keycloak_auth_py_fn(data_rec.vessel_id, user_settings, app_settings);
        -- Send notification
        PERFORM send_notification_fn('grafana'::TEXT, user_settings::JSONB);
        -- update process_queue entry as processed
        UPDATE process_queue
            SET
                processed = NOW()
            WHERE id = process_rec.id;
        RAISE NOTICE '-> cron_process_grafana_fn updated process_queue table [%]', process_rec.id;
    END LOOP;
END;
$cron_process_grafana_fn$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    public.cron_process_grafana_fn
    IS 'init by pg_cron to check for new vessel pending grafana provisioning, if so perform grafana_py_fn';

-- Update version
UPDATE public.app_settings
    SET value='0.7.3'
    WHERE "name"='app.version';

\c postgres

-- Notifications/Reminders for old signalk plugin
-- At 08:06 on every 4th day-of-month and on Sunday (standard cron OR semantics).
SELECT cron.schedule('cron_skplugin_upgrade', '6 8 */4 * 0', 'select public.cron_process_skplugin_upgrade_fn()');
UPDATE cron.job SET database = 'postgres' WHERE jobname = 'cron_skplugin_upgrade';
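-- Quick check sketch (not part of the migration): confirm the job registration in pg_cron.
-- SELECT jobname, schedule, database FROM cron.job WHERE jobname = 'cron_skplugin_upgrade';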
initdb/99_migrations_202406.sql (new file, 879 lines)
@@ -0,0 +1,879 @@
---------------------------------------------------------------------------
-- Copyright 2021-2024 Francois Lacroix <xbgmsharp@gmail.com>
-- This file is part of PostgSail which is released under Apache License, Version 2.0 (the "License").
-- See file LICENSE or go to http://www.apache.org/licenses/LICENSE-2.0 for full license details.
--
-- Migration June 2024
--
-- List current database
select current_database();

-- connect to the DB
\c signalk

\echo 'Timing mode is enabled'
\timing

\echo 'Force timezone, just in case'
set timezone to 'UTC';

-- Add video timelapse notification message
INSERT INTO public.email_templates ("name",email_subject,email_content,pushover_title,pushover_message)
    VALUES ('video_ready','PostgSail Video ready',E'Hey,\nYour video is available at __VIDEO_LINK__.\nPlease make sure you download your video as it will be deleted in 7 days.','PostgSail Video ready!',E'Your video is ready __VIDEO_LINK__.');

-- Generate and request the logbook image url, to be cached on the QGIS server.
DROP FUNCTION IF EXISTS public.qgis_getmap_py_fn;
CREATE OR REPLACE FUNCTION public.qgis_getmap_py_fn(IN vessel_id TEXT DEFAULT NULL, IN log_id NUMERIC DEFAULT NULL, IN extent TEXT DEFAULT NULL, IN logs_url BOOLEAN DEFAULT False) RETURNS VOID
AS $qgis_getmap_py$
    import requests

    # Extract extent
    def parse_extent_from_db(extent_raw):
        # Parse the extent_raw to extract coordinates
        extent = extent_raw.replace('BOX(', '').replace(')', '').split(',')
        min_x, min_y = map(float, extent[0].split())
        max_x, max_y = map(float, extent[1].split())
        return min_x, min_y, max_x, max_y

    # Zoom out from the linestring extent
    def apply_scale_factor(extent, scale_factor=1.125):
        min_x, min_y, max_x, max_y = extent
        center_x = (min_x + max_x) / 2
        center_y = (min_y + max_y) / 2
        width = max_x - min_x
        height = max_y - min_y
        new_width = width * scale_factor
        new_height = height * scale_factor
        scaled_extent = (
            round(center_x - new_width / 2),
            round(center_y - new_height / 2),
            round(center_x + new_width / 2),
            round(center_y + new_height / 2),
        )
        return scaled_extent

    def adjust_image_size_to_bbox(extent, width, height):
        min_x, min_y, max_x, max_y = extent
        bbox_aspect_ratio = (max_x - min_x) / (max_y - min_y)
        image_aspect_ratio = width / height

        if bbox_aspect_ratio > image_aspect_ratio:
            # Adjust height to match aspect ratio
            height = width / bbox_aspect_ratio
        else:
            # Adjust width to match aspect ratio
            width = height * bbox_aspect_ratio

        return int(width), int(height)

    def calculate_width(extent, fixed_height):
        min_x, min_y, max_x, max_y = extent
        bbox_aspect_ratio = (max_x - min_x) / (max_y - min_y)
        width = fixed_height * bbox_aspect_ratio
        return int(width)

    def adjust_bbox_to_fixed_size(scaled_extent, fixed_width, fixed_height):
        min_x, min_y, max_x, max_y = scaled_extent
        bbox_width = max_x - min_x
        bbox_height = max_y - min_y
        bbox_aspect_ratio = bbox_width / bbox_height
        image_aspect_ratio = fixed_width / fixed_height

        if bbox_aspect_ratio > image_aspect_ratio:
            # Adjust height to match aspect ratio
            new_bbox_height = bbox_width / image_aspect_ratio
            height_diff = new_bbox_height - bbox_height
            min_y -= height_diff / 2
            max_y += height_diff / 2
        else:
            # Adjust width to match aspect ratio
            new_bbox_width = bbox_height * image_aspect_ratio
            width_diff = new_bbox_width - bbox_width
            min_x -= width_diff / 2
            max_x += width_diff / 2

        adjusted_extent = (min_x, min_y, max_x, max_y)
        return adjusted_extent

    def generate_getmap_url(server_url, project_path, layer_name, extent, width=1080, height=566, crs="EPSG:3857", format="image/png"):
        min_x, min_y, max_x, max_y = extent
        bbox = f"{min_x},{min_y},{max_x},{max_y}"

        # Adjust image size to match BBOX aspect ratio
        #width, height = adjust_image_size_to_bbox(extent, width, height)

        # Calculate width to maintain aspect ratio with fixed height
        #width = calculate_width(extent, height)

        url = (
            f"{server_url}?SERVICE=WMS&VERSION=1.3.0&REQUEST=GetMap&FORMAT={format}&CRS={crs}"
            f"&BBOX={bbox}&WIDTH={width}&HEIGHT={height}&LAYERS={layer_name}&MAP={project_path}"
        )
        return url

    if not logs_url:
        server_url = f"https://gis.openplotter.cloud/log_{vessel_id}_{log_id}.png"
    else:
        server_url = f"https://gis.openplotter.cloud/logs_{vessel_id}_{log_id}.png"
    project_path = "/projects/postgsail5.qgz"
    layer_name = "OpenStreetMap,SQLLayer"
    #plpy.notice('qgis_getmap_py vessel_id [{}], log_id [{}], extent [{}]'.format(vessel_id, log_id, extent))

    # Parse extent and apply the scale factor
    scaled_extent = apply_scale_factor(parse_extent_from_db(extent))
    #plpy.notice('qgis_getmap_py scaled_extent [{}]'.format(scaled_extent))

    fixed_width = 1080
    fixed_height = 566
    adjusted_extent = adjust_bbox_to_fixed_size(scaled_extent, fixed_width, fixed_height)
    #plpy.notice('qgis_getmap_py adjusted_extent [{}]'.format(adjusted_extent))

    getmap_url = generate_getmap_url(server_url, project_path, layer_name, adjusted_extent)
    if not logs_url:
        filter_url = f"{getmap_url}&FILTER=SQLLayer:\"vessel_id\" = '{vessel_id}' AND \"id\" = {log_id}"
    else:
        filter_url = f"{getmap_url}&FILTER=SQLLayer:\"vessel_id\" = '{vessel_id}'"
    #plpy.notice('qgis_getmap_py getmap_url [{}]'.format(filter_url))

    # Fetch the image so it gets cached on the QGIS server
    headers = {"User-Agent": "PostgSail", "From": "xbgmsharp@gmail.com"}
    r = requests.get(filter_url, headers=headers, timeout=100)
    # Parse response
    if r.status_code != 200:
        plpy.warning('Failed to get WMS image, url[{}]'.format(filter_url))
$qgis_getmap_py$ LANGUAGE plpython3u;
-- Description
COMMENT ON FUNCTION
    public.qgis_getmap_py_fn
    IS 'Generate a log map, to generate the cache data for faster access later';

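-- Usage sketch (ids and extent hypothetical, not part of the migration): warm
-- the cache for a single log image; the extent is a PostGIS BOX in EPSG:3857.
-- SELECT public.qgis_getmap_py_fn('vessel-uuid', 42, 'BOX(224035 6040759,1756546 7577548)', False);
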
-- Generate the logbook extent for the logbook image to access the QGIS server.
DROP FUNCTION IF EXISTS public.qgis_bbox_py_fn;
CREATE OR REPLACE FUNCTION public.qgis_bbox_py_fn(IN vessel_id TEXT DEFAULT NULL, IN log_id NUMERIC DEFAULT NULL, IN width NUMERIC DEFAULT 1080, IN height NUMERIC DEFAULT 566, IN scaleout BOOLEAN DEFAULT True, OUT bbox TEXT)
AS $qgis_bbox_py$
    log_extent = None
    # If we have a vessel_id then it is a logs image map
    if vessel_id:
        # Use the shared cache to avoid re-preparing the extent statement
        if vessel_id in SD:
            plan = SD[vessel_id]
        # A prepared statement from Python
        else:
            plan = plpy.prepare("WITH merged AS ( SELECT ST_Union(track_geom) AS merged_geometry FROM api.logbook WHERE vessel_id = $1 ) SELECT ST_Extent(ST_Transform(merged_geometry, 3857))::TEXT FROM merged;", ["text"])
            SD[vessel_id] = plan
        # Execute the statement with the vessel_id param and limit to 1 result
        rv = plpy.execute(plan, [vessel_id], 1)
        log_extent = rv[0]['st_extent']
    # Else we have a log_id, so it is a single log image map
    else:
        # Use the shared cache to avoid re-preparing the extent statement
        if log_id in SD:
            plan = SD[log_id]
        # A prepared statement from Python
        else:
            plan = plpy.prepare("SELECT ST_Extent(ST_Transform(track_geom, 3857)) FROM api.logbook WHERE id = $1::NUMERIC", ["text"])
            SD[log_id] = plan
        # Execute the statement with the log_id param and limit to 1 result
        rv = plpy.execute(plan, [log_id], 1)
        log_extent = rv[0]['st_extent']

    # Extract extent
    def parse_extent_from_db(extent_raw):
        # Parse the extent_raw to extract coordinates
        extent = extent_raw.replace('BOX(', '').replace(')', '').split(',')
        min_x, min_y = map(float, extent[0].split())
        max_x, max_y = map(float, extent[1].split())
        return min_x, min_y, max_x, max_y

    # Zoom out from the linestring extent
    def apply_scale_factor(extent, scale_factor=1.125):
        min_x, min_y, max_x, max_y = extent
        center_x = (min_x + max_x) / 2
        center_y = (min_y + max_y) / 2
        width = max_x - min_x
        height = max_y - min_y
        new_width = width * scale_factor
        new_height = height * scale_factor
        scaled_extent = (
            round(center_x - new_width / 2),
            round(center_y - new_height / 2),
            round(center_x + new_width / 2),
            round(center_y + new_height / 2),
        )
        return scaled_extent

    def adjust_bbox_to_fixed_size(scaled_extent, fixed_width, fixed_height):
        min_x, min_y, max_x, max_y = scaled_extent
        bbox_width = float(max_x - min_x)
        bbox_height = float(max_y - min_y)
        bbox_aspect_ratio = float(bbox_width / bbox_height)
        image_aspect_ratio = float(fixed_width / fixed_height)

        if bbox_aspect_ratio > image_aspect_ratio:
            # Adjust height to match aspect ratio
            new_bbox_height = bbox_width / image_aspect_ratio
            height_diff = new_bbox_height - bbox_height
            min_y -= height_diff / 2
            max_y += height_diff / 2
        else:
            # Adjust width to match aspect ratio
            new_bbox_width = bbox_height * image_aspect_ratio
            width_diff = new_bbox_width - bbox_width
            min_x -= width_diff / 2
            max_x += width_diff / 2

        adjusted_extent = (min_x, min_y, max_x, max_y)
        return adjusted_extent

    if not log_extent:
        plpy.warning('Failed to get sql qgis_bbox_py log_id [{}], extent [{}]'.format(log_id, log_extent))
    #plpy.notice('qgis_bbox_py log_id [{}], extent [{}]'.format(log_id, log_extent))
    # Parse extent and apply the zoom-out scale factor
    if scaleout:
        scaled_extent = apply_scale_factor(parse_extent_from_db(log_extent))
    else:
        scaled_extent = parse_extent_from_db(log_extent)
    #plpy.notice('qgis_bbox_py log_id [{}], scaled_extent [{}]'.format(log_id, scaled_extent))
    fixed_width = width # default 1080
    fixed_height = height # default 566
    adjusted_extent = adjust_bbox_to_fixed_size(scaled_extent, fixed_width, fixed_height)
    #plpy.notice('qgis_bbox_py log_id [{}], adjusted_extent [{}]'.format(log_id, adjusted_extent))
    min_x, min_y, max_x, max_y = adjusted_extent
    return f"{min_x},{min_y},{max_x},{max_y}"
$qgis_bbox_py$ LANGUAGE plpython3u;
-- Description
COMMENT ON FUNCTION
    public.qgis_bbox_py_fn
    IS 'Generate the BBOX based on the log extent and adapt it to the image size for QGIS Server';

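-- Usage sketch (log id hypothetical, not part of the migration): returns
-- "min_x,min_y,max_x,max_y" in EPSG:3857, sized for a 1080x566 image.
-- SELECT public.qgis_bbox_py_fn(NULL, 42);
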
-- qgis_role: role with login, read-only on api.logbook, limited to 20 connections
CREATE ROLE qgis_role WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOBYPASSRLS NOREPLICATION CONNECTION LIMIT 20 LOGIN PASSWORD 'mysecretpassword';
COMMENT ON ROLE qgis_role IS
    'Role used by QGIS server and Apache to connect and look up the logbook table.';
-- Allow read on VIEWS on API schema
GRANT USAGE ON SCHEMA api TO qgis_role;
GRANT SELECT ON TABLE api.logbook TO qgis_role;
GRANT USAGE ON SCHEMA public TO qgis_role;
-- For all postgis fn, st_extent, st_transform
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO qgis_role;
-- Allow qgis_role to select all logbook records
CREATE POLICY logbook_qgis_role ON api.logbook TO qgis_role
    USING (true)
    WITH CHECK (false);

-- Add support for HTML email with image inline for logbook
-- Add support for video link for maplapse
DROP FUNCTION IF EXISTS public.send_email_py_fn;
CREATE OR REPLACE FUNCTION public.send_email_py_fn(IN email_type TEXT, IN _user JSONB, IN app JSONB) RETURNS void
AS $send_email_py$
    # Import smtplib for the actual sending function
    import smtplib
    import requests

    # Import the email modules we need
    from email.message import EmailMessage
    from email.utils import formatdate,make_msgid
    from email.mime.text import MIMEText

    # Use the shared cache to avoid re-preparing the email metadata statement
    if email_type in SD:
        plan = SD[email_type]
    # A prepared statement from Python
    else:
        plan = plpy.prepare("SELECT * FROM email_templates WHERE name = $1", ["text"])
        SD[email_type] = plan

    # Execute the statement with the email_type param and limit to 1 result
    rv = plpy.execute(plan, [email_type], 1)
    email_subject = rv[0]['email_subject']
    email_content = rv[0]['email_content']

    # Replace fields using the input jsonb objects
    if not _user or not app:
        plpy.notice('send_email_py_fn Parameters [{}] [{}]'.format(_user, app))
        plpy.error('Error missing parameters')
        return None
    if 'logbook_name' in _user and _user['logbook_name']:
        email_content = email_content.replace('__LOGBOOK_NAME__', _user['logbook_name'])
    if 'logbook_link' in _user and _user['logbook_link']:
        email_content = email_content.replace('__LOGBOOK_LINK__', str(_user['logbook_link']))
    if 'logbook_img' in _user and _user['logbook_img']:
        email_content = email_content.replace('__LOGBOOK_IMG__', str(_user['logbook_img']))
    if 'video_link' in _user and _user['video_link']:
        email_content = email_content.replace('__VIDEO_LINK__', str( _user['video_link']))
    if 'recipient' in _user and _user['recipient']:
        email_content = email_content.replace('__RECIPIENT__', _user['recipient'])
    if 'boat' in _user and _user['boat']:
        email_content = email_content.replace('__BOAT__', _user['boat'])
    if 'badge' in _user and _user['badge']:
        email_content = email_content.replace('__BADGE_NAME__', _user['badge'])
    if 'otp_code' in _user and _user['otp_code']:
        email_content = email_content.replace('__OTP_CODE__', _user['otp_code'])
    if 'reset_qs' in _user and _user['reset_qs']:
        email_content = email_content.replace('__RESET_QS__', _user['reset_qs'])
    if 'alert' in _user and _user['alert']:
        email_content = email_content.replace('__ALERT__', _user['alert'])

    if 'app.url' in app and app['app.url']:
        email_content = email_content.replace('__APP_URL__', app['app.url'])

    email_from = 'root@localhost'
    if 'app.email_from' in app and app['app.email_from']:
        email_from = 'PostgSail <' + app['app.email_from'] + '>'
    #plpy.notice('Sending email from [{}] [{}]'.format(email_from, app['app.email_from']))

    email_to = 'root@localhost'
    if 'email' in _user and _user['email']:
        email_to = _user['email']
        #plpy.notice('Sending email to [{}] [{}]'.format(email_to, _user['email']))
    else:
        plpy.error('Error email to')
        return None

    if email_type == 'logbook':
        msg = EmailMessage()
        msg.set_content(email_content)
    else:
        msg = MIMEText(email_content, 'plain', 'utf-8')
    msg["Subject"] = email_subject
    msg["From"] = email_from
    msg["To"] = email_to
    msg["Date"] = formatdate()
    msg["Message-ID"] = make_msgid()

    if email_type == 'logbook' and 'logbook_img' in _user and _user['logbook_img']:
        # Create a Content-ID for the image
        image_cid = make_msgid()
        # Set an alternative html body
        msg.add_alternative("""\
<html>
    <body>
        <p>{email_content}</p>
        <img src="cid:{image_cid}">
    </body>
</html>
""".format(email_content=email_content, image_cid=image_cid[1:-1]), subtype='html')
        img_url = 'https://gis.openplotter.cloud/{}'.format(str(_user['logbook_img']))
        response = requests.get(img_url, stream=True)
        if response.status_code == 200:
            msg.get_payload()[1].add_related(response.raw.data,
                maintype='image',
                subtype='png',
                cid=image_cid)

    server_smtp = 'localhost'
    if 'app.email_server' in app and app['app.email_server']:
        server_smtp = app['app.email_server']
    #plpy.notice('Sending server [{}] [{}]'.format(server_smtp, app['app.email_server']))

    # Send the message via our own SMTP server.
    try:
        # send your message with credentials specified above
        with smtplib.SMTP(server_smtp, 587) as server:
            if 'app.email_user' in app and app['app.email_user'] \
                and 'app.email_pass' in app and app['app.email_pass']:
                server.starttls()
                server.login(app['app.email_user'], app['app.email_pass'])
            #server.send_message(msg)
            server.sendmail(msg["From"], msg["To"], msg.as_string())
            server.quit()
        # tell the script to report if your message was sent or which errors need to be fixed
        plpy.notice('Sent email successfully to [{}] [{}]'.format(msg["To"], msg["Subject"]))
        return None
    except OSError as error:
        plpy.error('OS Error occurred: ' + str(error))
    except smtplib.SMTPConnectError:
        plpy.error('Failed to connect to the server. Bad connection settings?')
    except smtplib.SMTPServerDisconnected:
        plpy.error('Failed to connect to the server. Wrong user/password?')
    except smtplib.SMTPException as e:
        plpy.error('SMTP error occurred: ' + str(e))
$send_email_py$ TRANSFORM FOR TYPE jsonb LANGUAGE plpython3u;
-- Description
COMMENT ON FUNCTION
    public.send_email_py_fn
    IS 'Send email notification using plpython3u';

-- Add vessel_id key, expose vessel_id
DROP FUNCTION IF EXISTS api.vessel_fn;
CREATE OR REPLACE FUNCTION api.vessel_fn(OUT vessel JSON) RETURNS JSON
AS $vessel$
    DECLARE
    BEGIN
        SELECT
            jsonb_build_object(
                'name', coalesce(m.name, null),
                'mmsi', coalesce(m.mmsi, null),
                'vessel_id', m.vessel_id,
                'created_at', v.created_at,
                'first_contact', coalesce(m.created_at, null),
                'last_contact', coalesce(m.time, null),
                'geojson', coalesce(ST_AsGeoJSON(geojson_t.*)::json, null)
            )::jsonb || api.vessel_details_fn()::jsonb
            INTO vessel
            FROM auth.vessels v, api.metadata m,
                ( select
                    current_setting('vessel.name') as name,
                    time,
                    courseovergroundtrue,
                    speedoverground,
                    anglespeedapparent,
                    longitude,latitude,
                    st_makepoint(longitude,latitude) AS geo_point
                    FROM api.metrics
                    WHERE
                        latitude IS NOT NULL
                        AND longitude IS NOT NULL
                        AND vessel_id = current_setting('vessel.id', false)
                    ORDER BY time DESC LIMIT 1
                ) AS geojson_t
            WHERE
                m.vessel_id = current_setting('vessel.id')
                AND m.vessel_id = v.vessel_id;
        --RAISE notice 'api.vessel_fn %', obj;
    END;
$vessel$ language plpgsql security definer;
-- Description
COMMENT ON FUNCTION
    api.vessel_fn
    IS 'Expose vessel details to API';

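-- Usage sketch (not part of the migration): requires vessel.id and vessel.name
-- to be set for the session (normally done by public.check_jwt).
-- SELECT api.vessel_fn();
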
-- Update pending new logbook from process queue
|
||||
DROP FUNCTION IF EXISTS public.process_post_logbook_fn;
|
||||
CREATE OR REPLACE FUNCTION public.process_post_logbook_fn(IN _id integer) RETURNS void AS $process_post_logbook_queue$
|
||||
DECLARE
|
||||
logbook_rec record;
|
||||
log_settings jsonb;
|
||||
user_settings jsonb;
|
||||
extra_json jsonb;
|
||||
log_img_url text;
|
||||
logs_img_url text;
|
||||
extent_bbox text;
|
||||
BEGIN
|
||||
-- If _id is not NULL
|
||||
IF _id IS NULL OR _id < 1 THEN
|
||||
RAISE WARNING '-> process_post_logbook_fn invalid input %', _id;
|
||||
RETURN;
|
||||
END IF;
|
||||
-- Get the logbook record with all necessary fields exist
|
||||
SELECT * INTO logbook_rec
|
||||
FROM api.logbook
|
||||
WHERE active IS false
|
||||
AND id = _id
|
||||
AND _from_lng IS NOT NULL
|
||||
AND _from_lat IS NOT NULL
|
||||
AND _to_lng IS NOT NULL
|
||||
AND _to_lat IS NOT NULL;
|
||||
-- Ensure the query is successful
|
||||
IF logbook_rec.vessel_id IS NULL THEN
|
||||
RAISE WARNING '-> process_post_logbook_fn invalid logbook %', _id;
|
||||
RETURN;
|
||||
END IF;
|
||||
|
||||
PERFORM set_config('vessel.id', logbook_rec.vessel_id, false);
|
||||
--RAISE WARNING 'public.process_post_logbook_fn() scheduler vessel.id %, user.id', current_setting('vessel.id', false), current_setting('user.id', false);
|
||||
|
||||
-- Generate logbook image map name from QGIS
|
||||
SELECT CONCAT('log_', logbook_rec.vessel_id::TEXT, '_', logbook_rec.id, '.png') INTO log_img_url;
|
||||
SELECT ST_Extent(ST_Transform(logbook_rec.track_geom, 3857))::TEXT AS envelope INTO extent_bbox FROM api.logbook WHERE id = logbook_rec.id;
|
||||
PERFORM public.qgis_getmap_py_fn(logbook_rec.vessel_id::TEXT, logbook_rec.id, extent_bbox::TEXT, False);
|
||||
-- Generate logs image map name from QGIS
|
||||
WITH merged AS (
|
||||
SELECT ST_Union(logbook_rec.track_geom) AS merged_geometry
|
||||
FROM api.logbook WHERE vessel_id = logbook_rec.vessel_id
|
||||
)
|
||||
SELECT ST_Extent(ST_Transform(merged_geometry, 3857))::TEXT AS envelope INTO extent_bbox FROM merged;
|
||||
SELECT CONCAT('logs_', logbook_rec.vessel_id::TEXT, '_', logbook_rec.id, '.png') INTO logs_img_url;
|
||||
PERFORM public.qgis_getmap_py_fn(logbook_rec.vessel_id::TEXT, logbook_rec.id, extent_bbox::TEXT, True);
|
||||
|
||||
-- Prepare notification, gather user settings
|
||||
SELECT json_build_object('logbook_name', logbook_rec.name, 'logbook_link', logbook_rec.id, 'logbook_img', log_img_url) INTO log_settings;
|
||||
user_settings := get_user_settings_from_vesselid_fn(logbook_rec.vessel_id::TEXT);
|
||||
SELECT user_settings::JSONB || log_settings::JSONB into user_settings;
|
||||
RAISE NOTICE '-> debug process_post_logbook_fn get_user_settings_from_vesselid_fn [%]', user_settings;
|
||||
RAISE NOTICE '-> debug process_post_logbook_fn log_settings [%]', log_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('logbook'::TEXT, user_settings::JSONB);
|
||||
-- Process badges
|
||||
RAISE NOTICE '-> debug process_post_logbook_fn user_settings [%]', user_settings->>'email'::TEXT;
|
||||
PERFORM set_config('user.email', user_settings->>'email'::TEXT, false);
|
||||
PERFORM badges_logbook_fn(logbook_rec.id, logbook_rec._to_time::TEXT);
|
||||
PERFORM badges_geom_fn(logbook_rec.id, logbook_rec._to_time::TEXT);
|
||||
END;
|
||||
$process_post_logbook_queue$ LANGUAGE plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.process_post_logbook_fn
|
||||
IS 'Generate QGIS image and notify user for new logbook.';
|
||||
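-- Usage sketch (illustrative, not part of the migration): entries are
-- normally enqueued by the logbook processing chain; a manual equivalent
-- for logbook id 42 would be:
--   INSERT INTO process_queue (channel, payload, stored, ref_id)
--     VALUES ('post_logbook', '42', NOW(), current_setting('vessel.id', true));
-- cron_process_post_logbook_fn below then picks the entry up and calls
-- process_post_logbook_fn(42).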
|
||||
-- Check for new logbook pending notification
|
||||
DROP FUNCTION IF EXISTS public.cron_process_post_logbook_fn;
|
||||
CREATE FUNCTION public.cron_process_post_logbook_fn() RETURNS void AS $$
|
||||
DECLARE
|
||||
process_rec record;
|
||||
BEGIN
|
||||
-- Check for new logbook pending update
|
||||
RAISE NOTICE 'cron_process_post_logbook_fn init loop';
|
||||
FOR process_rec in
|
||||
SELECT * FROM process_queue
|
||||
WHERE channel = 'post_logbook' AND processed IS NULL
|
||||
ORDER BY stored ASC LIMIT 100
|
||||
LOOP
|
||||
RAISE NOTICE 'cron_process_post_logbook_fn processing queue [%] for logbook id [%]', process_rec.id, process_rec.payload;
|
||||
-- update logbook
|
||||
PERFORM process_post_logbook_fn(process_rec.payload::INTEGER);
|
||||
-- update process_queue table, mark entry as processed
|
||||
UPDATE process_queue
|
||||
SET
|
||||
processed = NOW()
|
||||
WHERE id = process_rec.id;
|
||||
RAISE NOTICE 'cron_process_post_logbook_fn processed queue [%] for logbook id [%]', process_rec.id, process_rec.payload;
|
||||
END LOOP;
|
||||
END;
|
||||
$$ language plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.cron_process_post_logbook_fn
|
||||
IS 'init by pg_cron to check for new logbook pending QGIS image generation and notification, after process_new_logbook_fn';
|
||||
|
||||
DROP FUNCTION IF EXISTS public.run_cron_jobs;
|
||||
CREATE FUNCTION public.run_cron_jobs() RETURNS void AS $$
|
||||
BEGIN
|
||||
-- In correct order
|
||||
perform public.cron_process_new_notification_fn();
|
||||
perform public.cron_process_monitor_online_fn();
|
||||
--perform public.cron_process_grafana_fn();
|
||||
perform public.cron_process_pre_logbook_fn();
|
||||
perform public.cron_process_new_logbook_fn();
|
||||
perform public.cron_process_post_logbook_fn();
|
||||
perform public.cron_process_new_stay_fn();
|
||||
--perform public.cron_process_new_moorage_fn();
|
||||
perform public.cron_process_monitor_offline_fn();
|
||||
END
|
||||
$$ language plpgsql;
|
||||
|
||||
DROP VIEW IF EXISTS api.eventlogs_view;
|
||||
CREATE VIEW api.eventlogs_view WITH (security_invoker=true,security_barrier=true) AS
|
||||
SELECT pq.*
|
||||
FROM public.process_queue pq
|
||||
WHERE channel <> 'pre_logbook'
|
||||
AND channel <> 'post_logbook'
|
||||
AND (ref_id = current_setting('user.id', true)
|
||||
OR ref_id = current_setting('vessel.id', true))
|
||||
ORDER BY id ASC;
|
||||
-- Description
|
||||
COMMENT ON VIEW
|
||||
api.eventlogs_view
|
||||
IS 'Event logs view';
|
||||
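-- Usage sketch (illustrative, not part of the migration): with
-- security_invoker the caller's session settings drive the row filter, e.g.:
--   SELECT set_config('user.id', '<user_id>', false);
--   SELECT * FROM api.eventlogs_view;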
|
||||
-- CRON for new video notification
|
||||
DROP FUNCTION IF EXISTS public.cron_process_new_video_fn;
|
||||
CREATE FUNCTION public.cron_process_new_video_fn() RETURNS void AS $$
|
||||
declare
|
||||
process_rec record;
|
||||
metadata_rec record;
|
||||
video_settings jsonb;
|
||||
user_settings jsonb;
|
||||
begin
|
||||
-- Check for new event notification pending update
|
||||
RAISE NOTICE 'cron_process_new_video_fn';
|
||||
FOR process_rec in
|
||||
SELECT * FROM process_queue
|
||||
WHERE channel = 'new_video'
|
||||
AND processed IS NULL
|
||||
ORDER BY stored ASC
|
||||
LOOP
|
||||
RAISE NOTICE '-> cron_process_new_video_fn for [%]', process_rec.payload;
|
||||
SELECT * INTO metadata_rec
|
||||
FROM api.metadata
|
||||
WHERE vessel_id = process_rec.ref_id::TEXT;
|
||||
|
||||
IF metadata_rec.vessel_id IS NULL OR metadata_rec.vessel_id = '' THEN
|
||||
RAISE WARNING '-> cron_process_new_video_fn invalid metadata record vessel_id %', process_rec.ref_id;
|
||||
RAISE EXCEPTION 'Invalid metadata'
|
||||
USING HINT = 'Unknown vessel_id';
|
||||
RETURN;
|
||||
END IF;
|
||||
PERFORM set_config('vessel.id', metadata_rec.vessel_id, false);
|
||||
RAISE DEBUG '-> DEBUG cron_process_new_video_fn vessel_id %', current_setting('vessel.id', false);
|
||||
-- Prepare notification, gather user settings
|
||||
SELECT json_build_object('video_link', CONCAT('https://videos.openplotter.cloud/', process_rec.payload)) into video_settings;
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(metadata_rec.vessel_id::TEXT);
|
||||
SELECT user_settings::JSONB || video_settings::JSONB into user_settings;
|
||||
RAISE DEBUG '-> DEBUG cron_process_new_video_fn get_user_settings_from_vesselid_fn [%]', user_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('video_ready'::TEXT, user_settings::JSONB);
|
||||
-- update process_queue entry as processed
|
||||
UPDATE process_queue
|
||||
SET
|
||||
processed = NOW()
|
||||
WHERE id = process_rec.id;
|
||||
RAISE NOTICE '-> cron_process_new_video_fn updated process_queue table [%]', process_rec.id;
|
||||
END LOOP;
|
||||
END;
|
||||
$$ language plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.cron_process_new_video_fn
|
||||
IS 'init by pg_cron to check for new video event pending notifications, if so perform process_notification_queue_fn';
|
||||
|
||||
-- Add support for video link for maplapse
|
||||
DROP FUNCTION IF EXISTS public.send_pushover_py_fn;
|
||||
CREATE OR REPLACE FUNCTION public.send_pushover_py_fn(IN message_type TEXT, IN _user JSONB, IN app JSONB) RETURNS void
|
||||
AS $send_pushover_py$
|
||||
"""
|
||||
https://pushover.net/api#messages
|
||||
Send a notification to a pushover user
|
||||
"""
|
||||
import requests
|
||||
|
||||
# Use the shared cache to avoid preparing the email metadata
|
||||
if message_type in SD:
|
||||
plan = SD[message_type]
|
||||
# A prepared statement from Python
|
||||
else:
|
||||
plan = plpy.prepare("SELECT * FROM email_templates WHERE name = $1", ["text"])
|
||||
SD[message_type] = plan
|
||||
|
||||
# Execute the statement with the message_type param and limit to 1 result
|
||||
rv = plpy.execute(plan, [message_type], 1)
|
||||
pushover_title = rv[0]['pushover_title']
|
||||
pushover_message = rv[0]['pushover_message']
|
||||
|
||||
# Replace fields using input jsonb obj
|
||||
if 'logbook_name' in _user and _user['logbook_name']:
|
||||
pushover_message = pushover_message.replace('__LOGBOOK_NAME__', _user['logbook_name'])
|
||||
if 'logbook_link' in _user and _user['logbook_link']:
|
||||
pushover_message = pushover_message.replace('__LOGBOOK_LINK__', str(_user['logbook_link']))
|
||||
if 'video_link' in _user and _user['video_link']:
|
||||
pushover_message = pushover_message.replace('__VIDEO_LINK__', str( _user['video_link']))
|
||||
if 'recipient' in _user and _user['recipient']:
|
||||
pushover_message = pushover_message.replace('__RECIPIENT__', _user['recipient'])
|
||||
if 'boat' in _user and _user['boat']:
|
||||
pushover_message = pushover_message.replace('__BOAT__', _user['boat'])
|
||||
if 'badge' in _user and _user['badge']:
|
||||
pushover_message = pushover_message.replace('__BADGE_NAME__', _user['badge'])
|
||||
if 'alert' in _user and _user['alert']:
|
||||
pushover_message = pushover_message.replace('__ALERT__', _user['alert'])
|
||||
|
||||
if 'app.url' in app and app['app.url']:
|
||||
pushover_message = pushover_message.replace('__APP_URL__', app['app.url'])
|
||||
|
||||
pushover_token = None
|
||||
if 'app.pushover_app_token' in app and app['app.pushover_app_token']:
|
||||
pushover_token = app['app.pushover_app_token']
|
||||
else:
|
||||
plpy.error('Error no pushover token defined, check app settings')
|
||||
return None
|
||||
pushover_user = None
|
||||
if 'pushover_user_key' in _user and _user['pushover_user_key']:
|
||||
pushover_user = _user['pushover_user_key']
|
||||
else:
|
||||
plpy.error('Error no pushover user token defined, check user settings')
|
||||
return None
|
||||
|
||||
if message_type == 'logbook' and 'logbook_img' in _user and _user['logbook_img']:
|
||||
# Send notification with gis image logbook as attachment
|
||||
img_url = 'https://gis.openplotter.cloud/{}'.format(str(_user['logbook_img']))
|
||||
response = requests.get(img_url, stream=True)
|
||||
if response.status_code == 200:
|
||||
r = requests.post("https://api.pushover.net/1/messages.json", data = {
|
||||
"token": pushover_token,
|
||||
"user": pushover_user,
|
||||
"title": pushover_title,
|
||||
"message": pushover_message
|
||||
}, files = {
|
||||
"attachment": (str(_user['logbook_img']), response.raw.data, "image/png")
|
||||
})
|
||||
else:
|
||||
r = requests.post("https://api.pushover.net/1/messages.json", data = {
|
||||
"token": pushover_token,
|
||||
"user": pushover_user,
|
||||
"title": pushover_title,
|
||||
"message": pushover_message
|
||||
})
|
||||
|
||||
#print(r.text)
|
||||
# Check the Pushover API response and log the outcome
|
||||
#plpy.notice('Sent pushover successfully to [{}] [{}]'.format(r.text, r.status_code))
|
||||
if r.status_code == 200:
|
||||
plpy.notice('Sent pushover successfully to [{}] [{}] [{}]'.format(pushover_user, pushover_title, r.text))
|
||||
else:
|
||||
plpy.error('Failed to send pushover')
|
||||
return None
|
||||
$send_pushover_py$ TRANSFORM FOR TYPE jsonb LANGUAGE plpython3u;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.send_pushover_py_fn
|
||||
IS 'Send pushover notification using plpython3u';
|
||||
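-- Template lookup sketch (illustrative, not part of the migration): the
-- title and message come from email_templates by name, with placeholders
-- such as __BOAT__ and __LOGBOOK_NAME__ substituted at send time:
--   SELECT pushover_title, pushover_message
--     FROM email_templates WHERE name = 'logbook';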
|
||||
-- Add support for video link for maplapse
|
||||
DROP FUNCTION IF EXISTS public.send_telegram_py_fn;
|
||||
CREATE OR REPLACE FUNCTION public.send_telegram_py_fn(IN message_type TEXT, IN _user JSONB, IN app JSONB) RETURNS void
|
||||
AS $send_telegram_py$
|
||||
"""
|
||||
https://core.telegram.org/bots/api#sendmessage
|
||||
Send a message to a telegram user or group specified on chatId
|
||||
chat_id must be a number!
|
||||
"""
|
||||
import requests
|
||||
import json
|
||||
|
||||
# Use the shared cache to avoid preparing the email metadata
|
||||
if message_type in SD:
|
||||
plan = SD[message_type]
|
||||
# A prepared statement from Python
|
||||
else:
|
||||
plan = plpy.prepare("SELECT * FROM email_templates WHERE name = $1", ["text"])
|
||||
SD[message_type] = plan
|
||||
|
||||
# Execute the statement with the message_type param and limit to 1 result
|
||||
rv = plpy.execute(plan, [message_type], 1)
|
||||
telegram_title = rv[0]['pushover_title']
|
||||
telegram_message = rv[0]['pushover_message']
|
||||
|
||||
# Replace fields using input jsonb obj
|
||||
if 'logbook_name' in _user and _user['logbook_name']:
|
||||
telegram_message = telegram_message.replace('__LOGBOOK_NAME__', _user['logbook_name'])
|
||||
if 'logbook_link' in _user and _user['logbook_link']:
|
||||
telegram_message = telegram_message.replace('__LOGBOOK_LINK__', str(_user['logbook_link']))
|
||||
if 'video_link' in _user and _user['video_link']:
|
||||
telegram_message = telegram_message.replace('__VIDEO_LINK__', str( _user['video_link']))
|
||||
if 'recipient' in _user and _user['recipient']:
|
||||
telegram_message = telegram_message.replace('__RECIPIENT__', _user['recipient'])
|
||||
if 'boat' in _user and _user['boat']:
|
||||
telegram_message = telegram_message.replace('__BOAT__', _user['boat'])
|
||||
if 'badge' in _user and _user['badge']:
|
||||
telegram_message = telegram_message.replace('__BADGE_NAME__', _user['badge'])
|
||||
if 'alert' in _user and _user['alert']:
|
||||
telegram_message = telegram_message.replace('__ALERT__', _user['alert'])
|
||||
|
||||
if 'app.url' in app and app['app.url']:
|
||||
telegram_message = telegram_message.replace('__APP_URL__', app['app.url'])
|
||||
|
||||
telegram_token = None
|
||||
if 'app.telegram_bot_token' in app and app['app.telegram_bot_token']:
|
||||
telegram_token = app['app.telegram_bot_token']
|
||||
else:
|
||||
plpy.error('Error no telegram token defined, check app settings')
|
||||
return None
|
||||
telegram_chat_id = None
|
||||
if 'telegram_chat_id' in _user and _user['telegram_chat_id']:
|
||||
telegram_chat_id = _user['telegram_chat_id']
|
||||
else:
|
||||
plpy.error('Error no telegram user token defined, check user settings')
|
||||
return None
|
||||
|
||||
# sendMessage via requests
|
||||
headers = {'Content-Type': 'application/json',
|
||||
'Proxy-Authorization': 'Basic base64'}
|
||||
data_dict = {'chat_id': telegram_chat_id,
|
||||
'text': telegram_message,
|
||||
'parse_mode': 'HTML',
|
||||
'disable_notification': False}
|
||||
data = json.dumps(data_dict)
|
||||
url = f'https://api.telegram.org/bot{telegram_token}/sendMessage'
|
||||
r = requests.post(url, data=data, headers=headers)
|
||||
if message_type == 'logbook' and 'logbook_img' in _user and _user['logbook_img']:
|
||||
# Send gis image logbook
|
||||
# https://core.telegram.org/bots/api#sendphoto
|
||||
data_dict['photo'] = 'https://gis.openplotter.cloud/{}'.format(str(_user['logbook_img']))
|
||||
del data_dict['text']
|
||||
data = json.dumps(data_dict)
|
||||
url = f'https://api.telegram.org/bot{telegram_token}/sendPhoto'
|
||||
r = requests.post(url, data=data, headers=headers)
|
||||
|
||||
#print(r.text)
|
||||
# Check the Telegram API response and log the outcome
|
||||
#plpy.notice('Sent telegram successfully to [{}] [{}]'.format(r.text, r.status_code))
|
||||
if r.status_code == 200:
|
||||
plpy.notice('Sent telegram successfully to [{}] [{}] [{}]'.format(telegram_chat_id, telegram_title, r.text))
|
||||
else:
|
||||
plpy.error('Failed to send telegram')
|
||||
return None
|
||||
$send_telegram_py$ TRANSFORM FOR TYPE jsonb LANGUAGE plpython3u;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.send_telegram_py_fn
|
||||
IS 'Send a message to a telegram user or group specified on chatId using plpython3u';
|
||||
|
||||
-- Add maplapse video record in queue
|
||||
DROP FUNCTION IF EXISTS api.maplapse_record_fn;
|
||||
CREATE OR REPLACE FUNCTION api.maplapse_record_fn(IN maplapse TEXT) RETURNS BOOLEAN
|
||||
AS $maplapse_record$
|
||||
BEGIN
|
||||
-- payload: 'Bromera,?start_log=8430&end_log=8491&height=100vh'
|
||||
IF maplapse ~ '^(\w+)\,\?(start_log=\d+).*$' then
|
||||
INSERT INTO process_queue (channel, payload, stored, ref_id)
|
||||
VALUES ('maplapse_video', maplapse, NOW(), current_setting('vessel.id', true));
|
||||
RETURN True;
|
||||
ELSE
|
||||
RETURN False;
|
||||
END IF;
|
||||
END;
|
||||
$maplapse_record$ language plpgsql security definer;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
api.maplapse_record_fn
|
||||
IS 'Add maplapse video record in queue';
|
||||
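-- Usage sketch (illustrative, not part of the migration), matching the
-- expected payload format '<name>,?start_log=<id>...':
--   SELECT api.maplapse_record_fn('Bromera,?start_log=8430&end_log=8491&height=100vh');
-- Returns true and queues a 'maplapse_video' entry when the payload matches
-- the regexp, false otherwise.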
|
||||
CREATE ROLE maplapse_role WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOBYPASSRLS NOREPLICATION CONNECTION LIMIT 10 LOGIN PASSWORD 'mysecretpassword';
|
||||
COMMENT ON ROLE maplapse_role IS
|
||||
'Role used by the maplapse external cronjob to connect and look up the process_queue table.';
|
||||
GRANT USAGE ON SCHEMA public TO maplapse_role;
|
||||
GRANT SELECT,UPDATE,INSERT ON TABLE public.process_queue TO maplapse_role;
|
||||
GRANT USAGE, SELECT ON SEQUENCE public.process_queue_id_seq TO maplapse_role;
|
||||
-- Allow maplapse_role to select, update, insert on table process_queue
|
||||
CREATE POLICY public_maplapse_role ON public.process_queue TO maplapse_role
|
||||
USING (true)
|
||||
WITH CHECK (true);
|
||||
|
||||
-- Allow to execute fn for user_role and grafana
|
||||
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO user_role;
|
||||
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO grafana;
|
||||
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO user_role;
|
||||
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO grafana;
|
||||
GRANT SELECT ON TABLE api.eventlogs_view TO user_role;
|
||||
|
||||
-- Update grafana role SQL connection limit to 30
|
||||
ALTER ROLE grafana WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOBYPASSRLS NOREPLICATION CONNECTION LIMIT 30 LOGIN;
|
||||
|
||||
-- Alter moorages table with default duration of 0.
|
||||
ALTER TABLE api.moorages ALTER COLUMN stay_duration SET DEFAULT 'PT0S';
|
||||
-- Update moorage_view to default with duration of 0
|
||||
DROP VIEW IF EXISTS api.moorage_view;
|
||||
CREATE OR REPLACE VIEW api.moorage_view WITH (security_invoker=true,security_barrier=true) AS -- TODO
|
||||
SELECT id,
|
||||
m.name AS Name,
|
||||
sa.description AS Default_Stay,
|
||||
sa.stay_code AS Default_Stay_Id,
|
||||
m.home_flag AS Home,
|
||||
EXTRACT(DAY FROM justify_hours ( COALESCE(m.stay_duration, 'PT0S') )) AS Total_Stay,
|
||||
COALESCE(m.stay_duration, 'PT0S') AS Total_Duration,
|
||||
m.reference_count AS Arrivals_Departures,
|
||||
m.notes
|
||||
-- m.geog
|
||||
FROM api.moorages m, api.stays_at sa
|
||||
-- m.stay_duration is only processed on a stay
|
||||
-- default with duration of 0sec
|
||||
WHERE geog IS NOT NULL
|
||||
AND m.stay_code = sa.stay_code;
|
||||
-- Description
|
||||
COMMENT ON VIEW
|
||||
api.moorage_view
|
||||
IS 'Moorage details web view';
|
||||
|
||||
-- Update version
|
||||
UPDATE public.app_settings
|
||||
SET value='0.7.4'
|
||||
WHERE "name"='app.version';
|
||||
|
||||
\c postgres
|
||||
|
||||
-- Create a cron job every 7 minutes for cron_process_post_logbook_fn
|
||||
SELECT cron.schedule('cron_post_logbook', '*/7 * * * *', 'select public.cron_process_post_logbook_fn()');
|
||||
UPDATE cron.job SET database = 'signalk' where jobname = 'cron_post_logbook';
|
||||
-- Create a cron job every 15 minutes for cron_process_new_video_fn
|
||||
SELECT cron.schedule('cron_new_video', '*/15 * * * *', 'select public.cron_process_new_video_fn()');
|
||||
UPDATE cron.job SET database = 'signalk' where jobname = 'cron_new_video';
|
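-- Verification sketch (illustrative, not part of the migration): confirm
-- both jobs are registered and bound to the signalk database:
--   SELECT jobname, schedule, command, database FROM cron.job
--     WHERE jobname IN ('cron_post_logbook', 'cron_new_video');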
755
initdb/99_migrations_202407.sql
Normal file
@@ -0,0 +1,755 @@
|
||||
---------------------------------------------------------------------------
|
||||
-- Copyright 2021-2024 Francois Lacroix <xbgmsharp@gmail.com>
|
||||
-- This file is part of PostgSail which is released under Apache License, Version 2.0 (the "License").
|
||||
-- See file LICENSE or go to http://www.apache.org/licenses/LICENSE-2.0 for full license details.
|
||||
--
|
||||
-- Migration July 2024
|
||||
--
|
||||
-- List current database
|
||||
select current_database();
|
||||
|
||||
-- connect to the DB
|
||||
\c signalk
|
||||
|
||||
\echo 'Timing mode is enabled'
|
||||
\timing
|
||||
|
||||
\echo 'Force timezone, just in case'
|
||||
set timezone to 'UTC';
|
||||
|
||||
-- Add video error notification message
|
||||
INSERT INTO public.email_templates ("name",email_subject,email_content,pushover_title,pushover_message)
|
||||
VALUES ('video_error','PostgSail Video Error',E'Hey,\nSorry we could not generate your video.\nPlease reach out to debug and solve the issue.','PostgSail Video Error!',E'There has been an error with your video.');
|
||||
|
||||
-- CRON for new and error video notification
|
||||
DROP FUNCTION IF EXISTS public.cron_process_new_video_fn;
|
||||
CREATE FUNCTION public.cron_process_video_fn() RETURNS void AS $cron_process_video$
|
||||
DECLARE
|
||||
process_rec record;
|
||||
metadata_rec record;
|
||||
video_settings jsonb;
|
||||
user_settings jsonb;
|
||||
BEGIN
|
||||
-- Check for new event notification pending update
|
||||
RAISE NOTICE 'cron_process_video_fn';
|
||||
FOR process_rec in
|
||||
SELECT * FROM process_queue
|
||||
WHERE (channel = 'new_video' OR channel = 'error_video')
|
||||
AND processed IS NULL
|
||||
ORDER BY stored ASC
|
||||
LOOP
|
||||
RAISE NOTICE '-> cron_process_video_fn for [%]', process_rec.payload;
|
||||
SELECT * INTO metadata_rec
|
||||
FROM api.metadata
|
||||
WHERE vessel_id = process_rec.ref_id::TEXT;
|
||||
|
||||
IF metadata_rec.vessel_id IS NULL OR metadata_rec.vessel_id = '' THEN
|
||||
RAISE WARNING '-> cron_process_video_fn invalid metadata record vessel_id %', process_rec.ref_id;
|
||||
RAISE EXCEPTION 'Invalid metadata'
|
||||
USING HINT = 'Unknown vessel_id';
|
||||
RETURN;
|
||||
END IF;
|
||||
PERFORM set_config('vessel.id', metadata_rec.vessel_id, false);
|
||||
RAISE DEBUG '-> DEBUG cron_process_video_fn vessel_id %', current_setting('vessel.id', false);
|
||||
-- Prepare notification, gather user settings
|
||||
SELECT json_build_object('video_link', CONCAT('https://videos.openplotter.cloud/', process_rec.payload)) into video_settings;
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(metadata_rec.vessel_id::TEXT);
|
||||
SELECT user_settings::JSONB || video_settings::JSONB into user_settings;
|
||||
RAISE DEBUG '-> DEBUG cron_process_video_fn get_user_settings_from_vesselid_fn [%]', user_settings;
|
||||
-- Send notification
|
||||
IF process_rec.channel = 'new_video' THEN
|
||||
PERFORM send_notification_fn('video_ready'::TEXT, user_settings::JSONB);
|
||||
ELSE
|
||||
PERFORM send_notification_fn('video_error'::TEXT, user_settings::JSONB);
|
||||
END IF;
|
||||
-- update process_queue entry as processed
|
||||
UPDATE process_queue
|
||||
SET
|
||||
processed = NOW()
|
||||
WHERE id = process_rec.id;
|
||||
RAISE NOTICE '-> cron_process_video_fn updated process_queue table [%]', process_rec.id;
|
||||
END LOOP;
|
||||
END;
|
||||
$cron_process_video$ language plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.cron_process_video_fn
|
||||
IS 'init by pg_cron to check for new video event pending notifications, if so perform process_notification_queue_fn';
|
||||
|
||||
-- Fix error when stateOfCharge is null. Make stateOfCharge null value assume to be charge 1.
|
||||
DROP FUNCTION IF EXISTS public.cron_alerts_fn();
|
||||
CREATE OR REPLACE FUNCTION public.cron_alerts_fn() RETURNS void AS $cron_alerts$
|
||||
DECLARE
|
||||
alert_rec record;
|
||||
default_last_metric TIMESTAMPTZ := NOW() - interval '1 day';
|
||||
last_metric TIMESTAMPTZ;
|
||||
metric_rec record;
|
||||
app_settings JSONB;
|
||||
user_settings JSONB;
|
||||
alerting JSONB;
|
||||
_alarms JSONB;
|
||||
alarms TEXT;
|
||||
alert_default JSONB := '{
|
||||
"low_pressure_threshold": 990,
|
||||
"high_wind_speed_threshold": 30,
|
||||
"low_water_depth_threshold": 1,
|
||||
"min_notification_interval": 6,
|
||||
"high_pressure_drop_threshold": 12,
|
||||
"low_battery_charge_threshold": 90,
|
||||
"low_battery_voltage_threshold": 12.5,
|
||||
"low_water_temperature_threshold": 10,
|
||||
"low_indoor_temperature_threshold": 7,
|
||||
"low_outdoor_temperature_threshold": 3
|
||||
}';
|
||||
BEGIN
|
||||
-- Check for new event notification pending update
|
||||
RAISE NOTICE 'cron_alerts_fn';
|
||||
FOR alert_rec in
|
||||
SELECT
|
||||
a.user_id,a.email,v.vessel_id,
|
||||
COALESCE((a.preferences->'alert_last_metric')::TEXT, default_last_metric::TEXT) as last_metric,
|
||||
(alert_default || (a.preferences->'alerting')::JSONB) as alerting,
|
||||
(a.preferences->'alarms')::JSONB as alarms
|
||||
FROM auth.accounts a
|
||||
LEFT JOIN auth.vessels AS v ON v.owner_email = a.email
|
||||
LEFT JOIN api.metadata AS m ON m.vessel_id = v.vessel_id
|
||||
WHERE (a.preferences->'alerting'->'enabled')::boolean = True
|
||||
AND m.active = True
|
||||
LOOP
|
||||
RAISE NOTICE '-> cron_alerts_fn for [%]', alert_rec;
|
||||
PERFORM set_config('vessel.id', alert_rec.vessel_id, false);
|
||||
PERFORM set_config('user.email', alert_rec.email, false);
|
||||
--RAISE WARNING 'public.cron_process_alert_rec_fn() scheduler vessel.id %, user.id', current_setting('vessel.id', false), current_setting('user.id', false);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(alert_rec.vessel_id::TEXT);
|
||||
RAISE NOTICE '-> cron_alerts_fn checking user_settings [%]', user_settings;
|
||||
-- Get all metrics from the last last_metric avg by 5 minutes
|
||||
FOR metric_rec in
|
||||
SELECT time_bucket('5 minutes', m.time) AS time_bucket,
|
||||
avg((m.metrics->'environment.inside.temperature')::numeric) AS intemp,
|
||||
avg((m.metrics->'environment.outside.temperature')::numeric) AS outtemp,
|
||||
avg((m.metrics->'environment.water.temperature')::numeric) AS wattemp,
|
||||
avg((m.metrics->'environment.depth.belowTransducer')::numeric) AS watdepth,
|
||||
avg((m.metrics->'environment.outside.pressure')::numeric) AS pressure,
|
||||
avg((m.metrics->'environment.wind.speedTrue')::numeric) AS wind,
|
||||
avg((m.metrics->'electrical.batteries.House.voltage')::numeric) AS voltage,
|
||||
avg(coalesce((m.metrics->>'electrical.batteries.House.capacity.stateOfCharge')::numeric, 1)) AS charge
|
||||
FROM api.metrics m
|
||||
WHERE vessel_id = alert_rec.vessel_id
|
||||
AND m.time >= alert_rec.last_metric::TIMESTAMPTZ
|
||||
GROUP BY time_bucket
|
||||
ORDER BY time_bucket ASC LIMIT 100
|
||||
LOOP
|
||||
RAISE NOTICE '-> cron_alerts_fn checking metrics [%]', metric_rec;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking alerting [%]', alert_rec.alerting;
|
||||
--RAISE NOTICE '-> cron_alerts_fn checking debug [%] [%]', kelvinToCel(metric_rec.intemp), (alert_rec.alerting->'low_indoor_temperature_threshold');
|
||||
IF kelvinToCel(metric_rec.intemp) < (alert_rec.alerting->'low_indoor_temperature_threshold')::numeric then
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', (alert_rec.alarms->'low_indoor_temperature_threshold'->>'date')::TIMESTAMPTZ;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', metric_rec.time_bucket::TIMESTAMPTZ;
|
||||
-- Get latest alarms
|
||||
SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
|
||||
-- Is alarm in the min_notification_interval time frame
|
||||
IF (
|
||||
((_alarms->'low_indoor_temperature_threshold'->>'date') IS NULL) OR
|
||||
(((_alarms->'low_indoor_temperature_threshold'->>'date')::TIMESTAMPTZ
|
||||
+ ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
|
||||
< metric_rec.time_bucket::TIMESTAMPTZ)
|
||||
) THEN
|
||||
-- Add alarm
|
||||
alarms := '{"low_indoor_temperature_threshold": {"value": '|| kelvinToCel(metric_rec.intemp) ||', "date":"' || metric_rec.time_bucket || '"}}';
|
||||
-- Merge alarms
|
||||
SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
|
||||
-- Update alarms for user
|
||||
PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
|
||||
SELECT user_settings::JSONB || ('{"alert": "low_outdoor_temperature_threshold value:'|| kelvinToCel(metric_rec.intemp) ||'"}'::text)::JSONB into user_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
|
||||
-- DEBUG
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_indoor_temperature_threshold +interval';
|
||||
END IF;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_indoor_temperature_threshold';
|
||||
END IF;
|
||||
IF kelvinToCel(metric_rec.outtemp) < (alert_rec.alerting->'low_outdoor_temperature_threshold')::numeric then
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', (alert_rec.alarms->'low_outdoor_temperature_threshold'->>'date')::TIMESTAMPTZ;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', metric_rec.time_bucket::TIMESTAMPTZ;
|
||||
-- Get latest alarms
|
||||
SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
|
||||
-- Is alarm in the min_notification_interval time frame
|
||||
IF (
|
||||
((_alarms->'low_outdoor_temperature_threshold'->>'date') IS NULL) OR
|
||||
(((_alarms->'low_outdoor_temperature_threshold'->>'date')::TIMESTAMPTZ
|
||||
+ ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
|
||||
< metric_rec.time_bucket::TIMESTAMPTZ)
|
||||
) THEN
|
||||
-- Add alarm
|
||||
alarms := '{"low_outdoor_temperature_threshold": {"value": '|| kelvinToCel(metric_rec.outtemp) ||', "date":"' || metric_rec.time_bucket || '"}}';
|
||||
-- Merge alarms
|
||||
SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
|
||||
-- Update alarms for user
|
||||
PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
|
||||
SELECT user_settings::JSONB || ('{"alert": "low_outdoor_temperature_threshold value:'|| kelvinToCel(metric_rec.outtemp) ||'"}'::text)::JSONB into user_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
|
||||
-- DEBUG
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_outdoor_temperature_threshold +interval';
|
||||
END IF;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_outdoor_temperature_threshold';
|
||||
END IF;
|
||||
IF kelvinToCel(metric_rec.wattemp) < (alert_rec.alerting->'low_water_temperature_threshold')::numeric then
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', (alert_rec.alarms->'low_water_temperature_threshold'->>'date')::TIMESTAMPTZ;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', metric_rec.time_bucket::TIMESTAMPTZ;
|
||||
-- Get latest alarms
|
||||
SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
|
||||
-- Is alarm in the min_notification_interval time frame
|
||||
IF (
|
||||
((_alarms->'low_water_temperature_threshold'->>'date') IS NULL) OR
|
||||
(((_alarms->'low_water_temperature_threshold'->>'date')::TIMESTAMPTZ
|
||||
+ ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
|
||||
< metric_rec.time_bucket::TIMESTAMPTZ)
|
||||
) THEN
|
||||
-- Add alarm
|
||||
alarms := '{"low_water_temperature_threshold": {"value": '|| kelvinToCel(metric_rec.wattemp) ||', "date":"' || metric_rec.time_bucket || '"}}';
|
||||
-- Merge alarms
|
||||
SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
|
||||
-- Update alarms for user
|
||||
PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
|
||||
SELECT user_settings::JSONB || ('{"alert": "low_water_temperature_threshold value:'|| kelvinToCel(metric_rec.wattemp) ||'"}'::text)::JSONB into user_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
|
||||
-- DEBUG
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_water_temperature_threshold +interval';
|
||||
END IF;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_water_temperature_threshold';
|
||||
END IF;
|
||||
IF metric_rec.watdepth < (alert_rec.alerting->'low_water_depth_threshold')::numeric then
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', (alert_rec.alarms->'low_water_depth_threshold'->>'date')::TIMESTAMPTZ;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', metric_rec.time_bucket::TIMESTAMPTZ;
|
||||
-- Get latest alarms
|
||||
SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
|
||||
-- Is alarm in the min_notification_interval time frame
|
||||
IF (
|
||||
((_alarms->'low_water_depth_threshold'->>'date') IS NULL) OR
|
||||
(((_alarms->'low_water_depth_threshold'->>'date')::TIMESTAMPTZ
|
||||
+ ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
|
||||
< metric_rec.time_bucket::TIMESTAMPTZ)
|
||||
) THEN
|
||||
-- Add alarm
|
||||
alarms := '{"low_water_depth_threshold": {"value": '|| metric_rec.watdepth ||', "date":"' || metric_rec.time_bucket || '"}}';
|
||||
-- Merge alarms
|
||||
SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
|
||||
-- Update alarms for user
|
||||
PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
|
||||
SELECT user_settings::JSONB || ('{"alert": "low_water_depth_threshold value:'|| metric_rec.watdepth ||'"}'::text)::JSONB into user_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
|
||||
-- DEBUG
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_water_depth_threshold +interval';
|
||||
END IF;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_water_depth_threshold';
|
||||
END IF;
|
||||
if metric_rec.pressure < (alert_rec.alerting->'high_pressure_drop_threshold')::numeric then
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', (alert_rec.alarms->'high_pressure_drop_threshold'->>'date')::TIMESTAMPTZ;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', metric_rec.time_bucket::TIMESTAMPTZ;
|
||||
-- Get latest alarms
|
||||
SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
|
||||
-- Is alarm in the min_notification_interval time frame
|
||||
IF (
|
||||
((_alarms->'high_pressure_drop_threshold'->>'date') IS NULL) OR
|
||||
(((_alarms->'high_pressure_drop_threshold'->>'date')::TIMESTAMPTZ
|
||||
+ ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
|
||||
< metric_rec.time_bucket::TIMESTAMPTZ)
|
||||
) THEN
|
||||
-- Add alarm
|
||||
alarms := '{"high_pressure_drop_threshold": {"value": '|| metric_rec.pressure ||', "date":"' || metric_rec.time_bucket || '"}}';
|
||||
-- Merge alarms
|
||||
SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
|
||||
-- Update alarms for user
|
||||
PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
|
||||
SELECT user_settings::JSONB || ('{"alert": "high_pressure_drop_threshold value:'|| metric_rec.pressure ||'"}'::text)::JSONB into user_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
|
||||
-- DEBUG
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug high_pressure_drop_threshold +interval';
|
||||
END IF;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug high_pressure_drop_threshold';
|
||||
END IF;
|
||||
IF metric_rec.wind > (alert_rec.alerting->'high_wind_speed_threshold')::numeric then
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', (alert_rec.alarms->'high_wind_speed_threshold'->>'date')::TIMESTAMPTZ;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', metric_rec.time_bucket::TIMESTAMPTZ;
|
||||
-- Get latest alarms
|
||||
SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
|
||||
-- Is alarm in the min_notification_interval time frame
|
||||
IF (
|
||||
((_alarms->'high_wind_speed_threshold'->>'date') IS NULL) OR
|
||||
(((_alarms->'high_wind_speed_threshold'->>'date')::TIMESTAMPTZ
|
||||
+ ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
|
||||
< metric_rec.time_bucket::TIMESTAMPTZ)
|
||||
) THEN
|
||||
-- Add alarm
|
||||
alarms := '{"high_wind_speed_threshold": {"value": '|| metric_rec.wind ||', "date":"' || metric_rec.time_bucket || '"}}';
|
||||
-- Merge alarms
|
||||
SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
|
||||
-- Update alarms for user
|
||||
PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
|
||||
SELECT user_settings::JSONB || ('{"alert": "high_wind_speed_threshold value:'|| metric_rec.wind ||'"}'::text)::JSONB into user_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
|
||||
-- DEBUG
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug high_wind_speed_threshold +interval';
|
||||
END IF;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug high_wind_speed_threshold';
|
||||
END IF;
|
||||
if metric_rec.voltage < (alert_rec.alerting->'low_battery_voltage_threshold')::numeric then
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', (alert_rec.alarms->'low_battery_voltage_threshold'->>'date')::TIMESTAMPTZ;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', metric_rec.time_bucket::TIMESTAMPTZ;
|
||||
-- Get latest alarms
|
||||
SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
|
||||
-- Is alarm in the min_notification_interval time frame
|
||||
IF (
|
||||
((_alarms->'low_battery_voltage_threshold'->>'date') IS NULL) OR
|
||||
(((_alarms->'low_battery_voltage_threshold'->>'date')::TIMESTAMPTZ
|
||||
+ ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
|
||||
< metric_rec.time_bucket::TIMESTAMPTZ)
|
||||
) THEN
|
||||
-- Add alarm
|
||||
alarms := '{"low_battery_voltage_threshold": {"value": '|| metric_rec.voltage ||', "date":"' || metric_rec.time_bucket || '"}}';
|
||||
-- Merge alarms
|
||||
SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
|
||||
-- Update alarms for user
|
||||
PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
|
||||
SELECT user_settings::JSONB || ('{"alert": "low_battery_voltage_threshold value:'|| metric_rec.voltage ||'"}'::text)::JSONB into user_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
|
||||
-- DEBUG
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_battery_voltage_threshold +interval';
|
||||
END IF;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_battery_voltage_threshold';
|
||||
END IF;
|
||||
if (metric_rec.charge*100) < (alert_rec.alerting->'low_battery_charge_threshold')::numeric then
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', (alert_rec.alarms->'low_battery_charge_threshold'->>'date')::TIMESTAMPTZ;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug [%]', metric_rec.time_bucket::TIMESTAMPTZ;
|
||||
-- Get latest alarms
|
||||
SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
|
||||
-- Is alarm in the min_notification_interval time frame
|
||||
IF (
|
||||
((_alarms->'low_battery_charge_threshold'->>'date') IS NULL) OR
|
||||
(((_alarms->'low_battery_charge_threshold'->>'date')::TIMESTAMPTZ
|
||||
+ ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
|
||||
< metric_rec.time_bucket::TIMESTAMPTZ)
|
||||
) THEN
|
||||
-- Add alarm
|
||||
alarms := '{"low_battery_charge_threshold": {"value": '|| (metric_rec.charge*100) ||', "date":"' || metric_rec.time_bucket || '"}}';
|
||||
-- Merge alarms
|
||||
SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
|
||||
-- Update alarms for user
|
||||
PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
|
||||
SELECT user_settings::JSONB || ('{"alert": "low_battery_charge_threshold value:'|| (metric_rec.charge*100) ||'"}'::text)::JSONB into user_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
|
||||
-- DEBUG
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_battery_charge_threshold +interval';
|
||||
END IF;
|
||||
RAISE NOTICE '-> cron_alerts_fn checking debug low_battery_charge_threshold';
|
||||
END IF;
|
||||
-- Record last metrics time
|
||||
SELECT metric_rec.time_bucket INTO last_metric;
|
||||
END LOOP;
|
||||
PERFORM api.update_user_preferences_fn('{alert_last_metric}'::TEXT, last_metric::TEXT);
|
||||
END LOOP;
|
||||
END;
|
||||
$cron_alerts$ language plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.cron_alerts_fn
|
||||
IS 'init by pg_cron to check for alerts';
|
||||
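-- Usage sketch (illustrative, not part of the migration): the loop only
-- considers accounts whose preferences enable alerting; user thresholds are
-- merged over alert_default, e.g. for the current user session:
--   SELECT api.update_user_preferences_fn('{alerting}'::TEXT,
--     '{"enabled": true, "high_wind_speed_threshold": 25}'::TEXT);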
|
||||
-- Fix error: None of these media types are available: text/xml
|
||||
DROP FUNCTION IF EXISTS api.export_logbooks_gpx_fn;
|
||||
CREATE OR REPLACE FUNCTION api.export_logbooks_gpx_fn(
|
||||
IN start_log INTEGER DEFAULT NULL,
|
||||
IN end_log INTEGER DEFAULT NULL) RETURNS "text/xml"
|
||||
AS $export_logbooks_gpx$
|
||||
declare
|
||||
merged_jsonb jsonb;
|
||||
app_settings jsonb;
|
||||
BEGIN
|
||||
-- Merge GIS track_geom of geometry type Point into a jsonb array format
|
||||
IF start_log IS NOT NULL AND public.isnumeric(start_log::text) AND public.isnumeric(end_log::text) THEN
|
||||
SELECT jsonb_agg(
|
||||
jsonb_build_object('coordinates', f->'geometry'->'coordinates', 'time', f->'properties'->>'time')
|
||||
) INTO merged_jsonb
|
||||
FROM (
|
||||
SELECT jsonb_array_elements(track_geojson->'features') AS f
|
||||
FROM api.logbook
|
||||
WHERE id >= start_log
|
||||
AND id <= end_log
|
||||
AND track_geojson IS NOT NULL
|
||||
ORDER BY _from_time ASC
|
||||
) AS sub
|
||||
WHERE (f->'geometry'->>'type') = 'Point';
|
||||
ELSE
|
||||
SELECT jsonb_agg(
|
||||
jsonb_build_object('coordinates', f->'geometry'->'coordinates', 'time', f->'properties'->>'time')
|
||||
) INTO merged_jsonb
|
||||
FROM (
|
||||
SELECT jsonb_array_elements(track_geojson->'features') AS f
|
||||
FROM api.logbook
|
||||
WHERE track_geojson IS NOT NULL
|
||||
ORDER BY _from_time ASC
|
||||
) AS sub
|
||||
WHERE (f->'geometry'->>'type') = 'Point';
|
||||
END IF;
|
||||
--RAISE WARNING '-> export_logbooks_gpx_fn _jsonb %' , _jsonb;
|
||||
-- Gather url from app settings
|
||||
app_settings := get_app_url_fn();
|
||||
--RAISE WARNING '-> export_logbooks_gpx_fn app_settings %', app_settings;
|
||||
-- Generate GPX XML, extract Point features from geojson.
|
||||
RETURN xmlelement(name gpx,
|
||||
xmlattributes( '1.1' as version,
|
||||
'PostgSAIL' as creator,
|
||||
'http://www.topografix.com/GPX/1/1' as xmlns,
|
||||
'http://www.opencpn.org' as "xmlns:opencpn",
|
||||
app_settings->>'app.url' as "xmlns:postgsail"),
|
||||
xmlelement(name metadata,
|
||||
xmlelement(name link, xmlattributes(app_settings->>'app.url' as href),
|
||||
xmlelement(name text, 'PostgSail'))),
|
||||
xmlelement(name trk,
|
||||
xmlelement(name name, 'logbook name'),
|
||||
xmlelement(name trkseg, xmlagg(
|
||||
xmlelement(name trkpt,
|
||||
xmlattributes(features->'coordinates'->1 as lat, features->'coordinates'->0 as lon),
|
||||
xmlelement(name time, features->'properties'->>'time')
|
||||
)))))::pg_catalog.xml
|
||||
FROM jsonb_array_elements(merged_jsonb) AS features;
|
||||
END;
|
||||
$export_logbooks_gpx$ LANGUAGE plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
api.export_logbooks_gpx_fn
|
||||
IS 'Export log entries to GPX XML format';
|
||||
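-- Usage sketch (illustrative, not part of the migration): export a range of
-- logs, or every log when no bounds are given:
--   SELECT api.export_logbooks_gpx_fn(8430, 8491);
--   SELECT api.export_logbooks_gpx_fn();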
|
||||
-- Add export logbooks as png
|
||||
DROP FUNCTION IF EXISTS public.qgis_bbox_trip_py_fn;
|
||||
CREATE OR REPLACE FUNCTION public.qgis_bbox_trip_py_fn(IN _str_to_parse TEXT DEFAULT NULL, OUT bbox TEXT)
|
||||
AS $qgis_bbox_trip_py$
|
||||
plpy.notice('qgis_bbox_trip_py_fn _str_to_parse [{}]'.format(_str_to_parse))
|
||||
vessel_id, log_id, log_end = _str_to_parse.split('_')
|
||||
width = 1080
|
||||
height = 566
|
||||
scaleout = True
|
||||
log_extent = None
|
||||
# If we have a vessel_id then it is full logs image map
|
||||
if vessel_id and log_end is None:
|
||||
# Use the shared cache to avoid preparing the log extent
|
||||
if vessel_id in SD:
|
||||
plan = SD[vessel_id]
|
||||
# A prepared statement from Python
|
||||
else:
|
||||
plan = plpy.prepare("WITH merged AS ( SELECT ST_Union(track_geom) AS merged_geometry FROM api.logbook WHERE vessel_id = $1 ) SELECT ST_Extent(ST_Transform(merged_geometry, 3857))::TEXT FROM merged;", ["text"])
|
||||
SD[vessel_id] = plan
|
||||
# Execute the statement with the log extent param and limit to 1 result
|
||||
rv = plpy.execute(plan, [vessel_id], 1)
|
||||
log_extent = rv[0]['st_extent']
|
||||
# If we have a vessel_id and a log_end then it is subset logs image map
|
||||
elif vessel_id and log_end:
|
||||
# Use the shared cache to avoid preparing the log extent
|
||||
shared_cache = vessel_id + str(log_id) + str(log_end)
|
||||
if shared_cache in SD:
|
||||
plan = SD[shared_cache]
|
||||
# A prepared statement from Python
|
||||
else:
|
||||
plan = plpy.prepare("WITH merged AS ( SELECT ST_Union(track_geom) AS merged_geometry FROM api.logbook WHERE vessel_id = $1 and id >= $2::NUMERIC and id <= $3::NUMERIC) SELECT ST_Extent(ST_Transform(merged_geometry, 3857))::TEXT FROM merged;", ["text","text","text"])
|
||||
SD[shared_cache] = plan
|
||||
# Execute the statement with the log extent param and limit to 1 result
|
||||
rv = plpy.execute(plan, [vessel_id,log_id,log_end], 1)
|
||||
log_extent = rv[0]['st_extent']
|
||||
# Else we have a log_id then it is single log image map
|
||||
else :
|
||||
# Use the shared cache to avoid preparing the log extent
|
||||
if log_id in SD:
|
||||
plan = SD[log_id]
|
||||
# A prepared statement from Python
|
||||
else:
|
||||
plan = plpy.prepare("SELECT ST_Extent(ST_Transform(track_geom, 3857)) FROM api.logbook WHERE id = $1::NUMERIC", ["text"])
|
||||
SD[log_id] = plan
|
||||
# Execute the statement with the log extent param and limit to 1 result
|
||||
rv = plpy.execute(plan, [log_id], 1)
|
||||
log_extent = rv[0]['st_extent']
|
||||
|
||||
# Extract extent
|
||||
def parse_extent_from_db(extent_raw):
|
||||
# Parse the extent_raw to extract coordinates
|
||||
extent = extent_raw.replace('BOX(', '').replace(')', '').split(',')
|
||||
min_x, min_y = map(float, extent[0].split())
|
||||
max_x, max_y = map(float, extent[1].split())
|
||||
return min_x, min_y, max_x, max_y
|
||||
|
||||
# ZoomOut from linestring extent
|
||||
def apply_scale_factor(extent, scale_factor=1.125):
|
||||
min_x, min_y, max_x, max_y = extent
|
||||
center_x = (min_x + max_x) / 2
|
||||
center_y = (min_y + max_y) / 2
|
||||
width = max_x - min_x
|
||||
height = max_y - min_y
|
||||
new_width = width * scale_factor
|
||||
new_height = height * scale_factor
|
||||
scaled_extent = (
|
||||
round(center_x - new_width / 2),
|
||||
round(center_y - new_height / 2),
|
||||
round(center_x + new_width / 2),
|
||||
round(center_y + new_height / 2),
|
||||
)
|
||||
return scaled_extent
|
||||
|
||||
def adjust_bbox_to_fixed_size(scaled_extent, fixed_width, fixed_height):
|
||||
min_x, min_y, max_x, max_y = scaled_extent
|
||||
bbox_width = float(max_x - min_x)
|
||||
bbox_height = float(max_y - min_y)
|
||||
bbox_aspect_ratio = float(bbox_width / bbox_height)
|
||||
image_aspect_ratio = float(fixed_width / fixed_height)
|
||||
|
||||
if bbox_aspect_ratio > image_aspect_ratio:
|
||||
# Adjust height to match aspect ratio
|
||||
new_bbox_height = bbox_width / image_aspect_ratio
|
||||
height_diff = new_bbox_height - bbox_height
|
||||
min_y -= height_diff / 2
|
||||
max_y += height_diff / 2
|
||||
else:
|
||||
# Adjust width to match aspect ratio
|
||||
new_bbox_width = bbox_height * image_aspect_ratio
|
||||
width_diff = new_bbox_width - bbox_width
|
||||
min_x -= width_diff / 2
|
||||
max_x += width_diff / 2
|
||||
|
||||
adjusted_extent = (min_x, min_y, max_x, max_y)
|
||||
return adjusted_extent
|
||||
|
||||
if not log_extent:
|
||||
plpy.warning('Failed to get sql qgis_bbox_trip_py_fn log_id [{}], extent [{}]'.format(log_id, log_extent))
|
||||
#plpy.notice('qgis_bbox_trip_py_fn log_id [{}], extent [{}]'.format(log_id, log_extent))
|
||||
# Parse extent and apply ZoomOut scale factor
|
||||
if scaleout:
|
||||
scaled_extent = apply_scale_factor(parse_extent_from_db(log_extent))
|
||||
else:
|
||||
scaled_extent = parse_extent_from_db(log_extent)
|
||||
#plpy.notice('qgis_bbox_trip_py_fn log_id [{}], scaled_extent [{}]'.format(log_id, scaled_extent))
|
||||
fixed_width = width # default 1080
|
||||
fixed_height = height # default 566
|
||||
adjusted_extent = adjust_bbox_to_fixed_size(scaled_extent, fixed_width, fixed_height)
|
||||
#plpy.notice('qgis_bbox_trip_py_fn log_id [{}], adjusted_extent [{}]'.format(log_id, adjusted_extent))
|
||||
min_x, min_y, max_x, max_y = adjusted_extent
|
||||
return f"{min_x},{min_y},{max_x},{max_y}"
|
||||
$qgis_bbox_trip_py$ LANGUAGE plpython3u;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.qgis_bbox_trip_py_fn
|
||||
IS 'Generate the BBOX based on trip extent and adapt extent to the image size for QGIS Server';
|
||||
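-- Usage sketch (illustrative, not part of the migration): the input is
-- '<vessel_id>_<start_log>_<end_log>' as split on '_' above, so a vessel id
-- containing underscores would break the parse:
--   SELECT public.qgis_bbox_trip_py_fn('abc123_8430_8491');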
|
||||
DROP FUNCTION IF EXISTS public.grafana_py_fn;
|
||||
-- Update grafana provisioning, ERROR: KeyError: 'secureJsonFields'
|
||||
CREATE OR REPLACE FUNCTION public.grafana_py_fn(_v_name text, _v_id text, _u_email text, app jsonb)
|
||||
RETURNS void
|
||||
TRANSFORM FOR TYPE jsonb
|
||||
LANGUAGE plpython3u
|
||||
AS $function$
|
||||
"""
|
||||
https://grafana.com/docs/grafana/latest/developers/http_api/
|
||||
Create organization base on vessel name
|
||||
Create user base on user email
|
||||
Add user to organization
|
||||
Add data_source to organization
|
||||
Add dashboard to organization
|
||||
Update organization preferences
|
||||
"""
|
||||
import requests
|
||||
import json
|
||||
import re
|
||||
|
||||
grafana_uri = None
|
||||
if 'app.grafana_admin_uri' in app and app['app.grafana_admin_uri']:
|
||||
grafana_uri = app['app.grafana_admin_uri']
|
||||
else:
|
||||
plpy.error('Error no grafana_admin_uri defined, check app settings')
|
||||
return None
|
||||
|
||||
b_name = None
|
||||
if not _v_name:
|
||||
b_name = _v_id
|
||||
else:
|
||||
b_name = _v_name
|
||||
|
||||
# add vessel org
|
||||
headers = {'User-Agent': 'PostgSail', 'From': 'xbgmsharp@gmail.com',
|
||||
'Accept': 'application/json', 'Content-Type': 'application/json'}
|
||||
path = 'api/orgs'
|
||||
url = f'{grafana_uri}/{path}'.format(grafana_uri,path)
|
||||
data_dict = {'name':b_name}
|
||||
data = json.dumps(data_dict)
|
||||
r = requests.post(url, data=data, headers=headers)
|
||||
#print(r.text)
|
||||
plpy.notice(r.json())
|
||||
if r.status_code == 200 and "orgId" in r.json():
|
||||
org_id = r.json()['orgId']
|
||||
else:
|
||||
plpy.error('Error grafana add vessel org {req} - {res}'.format(req=data_dict,res=r.json()))
|
||||
return None
|
||||
|
||||
# add user to vessel org
|
||||
path = 'api/admin/users'
|
||||
url = f'{grafana_uri}/{path}'.format(grafana_uri,path)
|
||||
data_dict = {'orgId':org_id, 'email':_u_email, 'password':'asupersecretpassword'}
|
||||
data = json.dumps(data_dict)
|
||||
r = requests.post(url, data=data, headers=headers)
|
||||
#print(r.text)
|
||||
plpy.notice(r.json())
|
||||
if r.status_code == 200 and "id" in r.json():
|
||||
user_id = r.json()['id']
|
||||
else:
|
||||
plpy.error('Error grafana add user to vessel org')
|
||||
return
|
||||
|
||||
# read data_source
|
||||
path = 'api/datasources/1'
|
||||
url = f'{grafana_uri}/{path}'.format(grafana_uri,path)
|
||||
r = requests.get(url, headers=headers)
|
||||
#print(r.text)
|
||||
plpy.notice(r.json())
|
||||
data_source = r.json()
|
||||
data_source['id'] = 0
|
||||
data_source['orgId'] = org_id
|
||||
data_source['uid'] = "ds_" + _v_id
|
||||
data_source['name'] = "ds_" + _v_id
|
||||
data_source['secureJsonData'] = {}
|
||||
data_source['secureJsonData']['password'] = 'mysecretpassword'
|
||||
data_source['readOnly'] = True
|
||||
if "secureJsonFields" in data_source:
|
||||
del data_source['secureJsonFields']
|
||||
|
||||
# add data_source to vessel org
|
||||
path = 'api/datasources'
|
||||
url = f'{grafana_uri}/{path}'.format(grafana_uri,path)
|
||||
data = json.dumps(data_source)
|
||||
headers['X-Grafana-Org-Id'] = str(org_id)
|
||||
r = requests.post(url, data=data, headers=headers)
|
||||
plpy.notice(r.json())
|
||||
del headers['X-Grafana-Org-Id']
|
||||
if r.status_code != 200 and "id" not in r.json():
|
||||
plpy.error('Error grafana add data_source to vessel org')
|
||||
return
|
||||
|
||||
dashboards_tpl = [ 'pgsail_tpl_electrical', 'pgsail_tpl_logbook', 'pgsail_tpl_monitor', 'pgsail_tpl_rpi', 'pgsail_tpl_solar', 'pgsail_tpl_weather', 'pgsail_tpl_home']
|
||||
for dashboard in dashboards_tpl:
|
||||
# read dashboard template by uid
|
||||
path = 'api/dashboards/uid'
|
||||
url = f'{grafana_uri}/{path}/{dashboard}'.format(grafana_uri,path,dashboard)
|
||||
if 'X-Grafana-Org-Id' in headers:
|
||||
del headers['X-Grafana-Org-Id']
|
||||
r = requests.get(url, headers=headers)
|
||||
plpy.notice(r.json())
|
||||
if r.status_code != 200 and "id" not in r.json():
|
||||
plpy.error('Error grafana read dashboard template')
|
||||
return
|
||||
new_dashboard = r.json()
|
||||
del new_dashboard['meta']
|
||||
new_dashboard['dashboard']['version'] = 0
|
||||
new_dashboard['dashboard']['id'] = 0
|
||||
new_uid = re.sub(r'pgsail_tpl_(.*)', r'postgsail_\1', new_dashboard['dashboard']['uid'])
|
||||
new_dashboard['dashboard']['uid'] = f'{new_uid}_{_v_id}'.format(new_uid,_v_id)
|
||||
# add dashboard to vessel org
|
||||
path = 'api/dashboards/db'
|
||||
url = f'{grafana_uri}/{path}'.format(grafana_uri,path)
|
||||
data = json.dumps(new_dashboard)
|
||||
new_data = data.replace('PCC52D03280B7034C', data_source['uid'])
|
||||
headers['X-Grafana-Org-Id'] = str(org_id)
|
||||
r = requests.post(url, data=new_data, headers=headers)
|
||||
plpy.notice(r.json())
|
||||
if r.status_code != 200 and "id" not in r.json():
|
||||
plpy.error('Error grafana add dashboard to vessel org')
|
||||
return
|
||||
|
||||
# Update Org Prefs
|
||||
path = 'api/org/preferences'
|
||||
url = f'{grafana_uri}/{path}'.format(grafana_uri,path)
|
||||
home_dashboard = {}
|
||||
home_dashboard['timezone'] = 'utc'
|
||||
home_dashboard['homeDashboardUID'] = f'postgsail_home_{_v_id}'.format(_v_id)
|
||||
data = json.dumps(home_dashboard)
|
||||
headers['X-Grafana-Org-Id'] = str(org_id)
|
||||
r = requests.patch(url, data=data, headers=headers)
|
||||
plpy.notice(r.json())
|
||||
if r.status_code != 200:
|
||||
plpy.error('Error grafana update org preferences')
|
||||
return
|
||||
|
||||
plpy.notice('Done')
|
||||
$function$
|
||||
;
|
||||
COMMENT ON FUNCTION public.grafana_py_fn(text, text, text, jsonb) IS 'Grafana Organization,User,data_source,dashboards provisioning via HTTP API using plpython3u';
|
||||
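-- Usage sketch (illustrative, not part of the migration): app settings must
-- provide 'app.grafana_admin_uri'; the vessel name falls back to the vessel
-- id when empty:
--   SELECT public.grafana_py_fn('aava', '<vessel_id>', 'demo@example.com',
--     '{"app.grafana_admin_uri": "http://admin:admin@localhost:3001"}'::jsonb);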
|

-- Add missing comment on function cron_process_no_activity_fn
COMMENT ON FUNCTION
    public.cron_process_no_activity_fn
    IS 'init by pg_cron, check for vessel with no activity for more than 230 days then send notification';

-- Update grafana,qgis,api role SQL connection limit to 30
ALTER ROLE grafana WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOBYPASSRLS NOREPLICATION CONNECTION LIMIT 30 LOGIN;
ALTER ROLE api_anonymous WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOBYPASSRLS NOREPLICATION CONNECTION LIMIT 30 LOGIN;
ALTER ROLE qgis_role WITH NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOBYPASSRLS NOREPLICATION CONNECTION LIMIT 30 LOGIN;

-- Create qgis schema for qgis projects
CREATE SCHEMA IF NOT EXISTS qgis;
COMMENT ON SCHEMA qgis IS 'Hold qgis_projects';
GRANT USAGE ON SCHEMA qgis TO qgis_role;
CREATE TABLE qgis.qgis_projects (
    "name" text NOT NULL,
    metadata jsonb NULL,
    "content" bytea NULL,
    CONSTRAINT qgis_projects_pkey PRIMARY KEY (name)
);
-- Description
COMMENT ON TABLE
    qgis.qgis_projects
    IS 'Store qgis projects using QGIS-Server or QGIS-Desktop from https://qgis.org/';
GRANT SELECT,INSERT,UPDATE,DELETE ON TABLE qgis.qgis_projects TO qgis_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO qgis_role;

-- Allow anonymous access to tbl and views
GRANT SELECT ON ALL TABLES IN SCHEMA api TO api_anonymous;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO api_anonymous;
-- Allow EXECUTE on all FUNCTIONS on api and public schema to user_role
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO user_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO user_role;
-- Allow EXECUTE on all FUNCTIONS on public schema to vessel_role
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO vessel_role;

-- Update version
UPDATE public.app_settings
    SET value='0.7.5'
    WHERE "name"='app.version';

\c postgres

-- Update video cronjob
UPDATE cron.job
    SET command='select public.cron_process_video_fn()'
    WHERE jobname = 'cron_new_video';
UPDATE cron.job
    SET jobname='cron_video'
    WHERE command='select public.cron_process_video_fn()';
1370	initdb/99_migrations_202408.sql	Normal file
File diff suppressed because it is too large	Load Diff
693	initdb/99_migrations_202409.sql	Normal file
@@ -0,0 +1,693 @@
---------------------------------------------------------------------------
-- Copyright 2021-2024 Francois Lacroix <xbgmsharp@gmail.com>
-- This file is part of PostgSail which is released under Apache License, Version 2.0 (the "License").
-- See file LICENSE or go to http://www.apache.org/licenses/LICENSE-2.0 for full license details.
--
-- Migration September 2024
--
-- List current database
select current_database();

-- connect to the DB
\c signalk

\echo 'Timing mode is enabled'
\timing

\echo 'Force timezone, just in case'
set timezone to 'UTC';

-- Add new email template account_inactivity
INSERT INTO public.email_templates ("name",email_subject,email_content,pushover_title,pushover_message)
    VALUES ('inactivity','We Haven''t Seen You in a While!','Hi __RECIPIENT__,

You''re busy. We understand.

You haven''t logged into PostgSail for a considerable period. Since we last saw you, we have continued to add new and exciting features to help you explore your navigation journey.

Meanwhile, we have cleaned up your data. If you wish to maintain an up-to-date overview of your sailing journey in PostgSail''s dashboard, kindly log in to your account within the next seven days.

Please note that your account will be permanently deleted if it remains inactive for seven more days.

If you have any questions or concerns, or if you believe this to be an error, please do not hesitate to reach out at info@openplotter.cloud.

Sincerely,
Francois','We Haven''t Seen You in a While!','You haven''t logged into PostgSail for a considerable period. Login to check what''s new!');
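-- Example (illustrative): preview the new template before the notification queue picks it up
-- SELECT email_subject, email_content FROM public.email_templates WHERE "name" = 'inactivity';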
-- Update HTML email for new logbook
DROP FUNCTION IF EXISTS public.send_email_py_fn;
CREATE OR REPLACE FUNCTION public.send_email_py_fn(IN email_type TEXT, IN _user JSONB, IN app JSONB) RETURNS void
AS $send_email_py$
# Import smtplib for the actual sending function
import smtplib
import requests

# Import the email modules we need
from email.message import EmailMessage
from email.utils import formatdate,make_msgid
from email.mime.text import MIMEText

# Use the shared cache to avoid preparing the email metadata
if email_type in SD:
    plan = SD[email_type]
# A prepared statement from Python
else:
    plan = plpy.prepare("SELECT * FROM email_templates WHERE name = $1", ["text"])
    SD[email_type] = plan

# Execute the statement with the email_type param and limit to 1 result
rv = plpy.execute(plan, [email_type], 1)
email_subject = rv[0]['email_subject']
email_content = rv[0]['email_content']

# Replace fields using input jsonb obj
if not _user or not app:
    plpy.notice('send_email_py_fn Parameters [{}] [{}]'.format(_user, app))
    plpy.error('Error missing parameters')
    return None
if 'logbook_name' in _user and _user['logbook_name']:
    email_content = email_content.replace('__LOGBOOK_NAME__', str(_user['logbook_name']))
if 'logbook_link' in _user and _user['logbook_link']:
    email_content = email_content.replace('__LOGBOOK_LINK__', str(_user['logbook_link']))
if 'logbook_img' in _user and _user['logbook_img']:
    email_content = email_content.replace('__LOGBOOK_IMG__', str(_user['logbook_img']))
if 'logbook_stats' in _user and _user['logbook_stats']:
    email_content = email_content.replace('__LOGBOOK_STATS__', str(_user['logbook_stats']))
if 'video_link' in _user and _user['video_link']:
    email_content = email_content.replace('__VIDEO_LINK__', str(_user['video_link']))
if 'recipient' in _user and _user['recipient']:
    email_content = email_content.replace('__RECIPIENT__', _user['recipient'])
if 'boat' in _user and _user['boat']:
    email_content = email_content.replace('__BOAT__', _user['boat'])
if 'badge' in _user and _user['badge']:
    email_content = email_content.replace('__BADGE_NAME__', _user['badge'])
if 'otp_code' in _user and _user['otp_code']:
    email_content = email_content.replace('__OTP_CODE__', _user['otp_code'])
if 'reset_qs' in _user and _user['reset_qs']:
    email_content = email_content.replace('__RESET_QS__', _user['reset_qs'])
if 'alert' in _user and _user['alert']:
    email_content = email_content.replace('__ALERT__', _user['alert'])

if 'app.url' in app and app['app.url']:
    email_content = email_content.replace('__APP_URL__', app['app.url'])

email_from = 'root@localhost'
if 'app.email_from' in app and app['app.email_from']:
    email_from = 'PostgSail <' + app['app.email_from'] + '>'
#plpy.notice('Sending email from [{}] [{}]'.format(email_from, app['app.email_from']))

email_to = 'root@localhost'
if 'email' in _user and _user['email']:
    email_to = _user['email']
    #plpy.notice('Sending email to [{}] [{}]'.format(email_to, _user['email']))
else:
    plpy.error('Error email to')
    return None

if email_type == 'logbook':
    msg = EmailMessage()
    msg.set_content(email_content)
else:
    msg = MIMEText(email_content, 'plain', 'utf-8')
msg["Subject"] = email_subject
msg["From"] = email_from
msg["To"] = email_to
msg["Date"] = formatdate()
msg["Message-ID"] = make_msgid()

if email_type == 'logbook' and 'logbook_img' in _user and _user['logbook_img']:
    # Create a Content-ID for the image
    image_cid = make_msgid()
    # Transform to HTML template, replace text by HTML link
    logbook_link = "{__APP_URL__}/log/{__LOGBOOK_LINK__}".format( __APP_URL__=app['app.url'], __LOGBOOK_LINK__=str(_user['logbook_link']))
    timelapse_link = "{__APP_URL__}/timelapse/{__LOGBOOK_LINK__}".format( __APP_URL__=app['app.url'], __LOGBOOK_LINK__=str(_user['logbook_link']))
    email_content = email_content.replace('\n', '<br/>')
    email_content = email_content.replace(logbook_link, '<a href="{logbook_link}">{logbook_link}</a>'.format(logbook_link=str(logbook_link)))
    email_content = email_content.replace(timelapse_link, '<a href="{timelapse_link}">{timelapse_link}</a>'.format(timelapse_link=str(timelapse_link)))
    email_content = email_content.replace(str(_user['logbook_name']), '<a href="{logbook_link}">{logbook_name}</a>'.format(logbook_link=str(logbook_link), logbook_name=str(_user['logbook_name'])))
    # Set an alternative html body
    msg.add_alternative("""\
<html>
    <body>
        <p>{email_content}</p>
        <img src="cid:{image_cid}">
    </body>
</html>
""".format(email_content=email_content, image_cid=image_cid[1:-1]), subtype='html')
    img_url = 'https://gis.openplotter.cloud/{}'.format(str(_user['logbook_img']))
    response = requests.get(img_url, stream=True)
    if response.status_code == 200:
        msg.get_payload()[1].add_related(response.raw.data,
                                         maintype='image',
                                         subtype='png',
                                         cid=image_cid)

server_smtp = 'localhost'
if 'app.email_server' in app and app['app.email_server']:
    server_smtp = app['app.email_server']
    #plpy.notice('Sending server [{}] [{}]'.format(server_smtp, app['app.email_server']))

# Send the message via our own SMTP server.
try:
    # send your message with credentials specified above
    with smtplib.SMTP(server_smtp, 587) as server:
        if 'app.email_user' in app and app['app.email_user'] \
            and 'app.email_pass' in app and app['app.email_pass']:
            server.starttls()
            server.login(app['app.email_user'], app['app.email_pass'])
        #server.send_message(msg)
        server.sendmail(msg["From"], msg["To"], msg.as_string())
        server.quit()
    # tell the script to report if your message was sent or which errors need to be fixed
    plpy.notice('Sent email successfully to [{}] [{}]'.format(msg["To"], msg["Subject"]))
    return None
except OSError as error:
    plpy.error('OS Error occurred: ' + str(error))
except smtplib.SMTPConnectError:
    plpy.error('Failed to connect to the server. Bad connection settings?')
except smtplib.SMTPServerDisconnected:
    plpy.error('Failed to connect to the server. Wrong user/password?')
except smtplib.SMTPException as e:
    plpy.error('SMTP error occurred: ' + str(e))
$send_email_py$ TRANSFORM FOR TYPE jsonb LANGUAGE plpython3u;
-- Description
COMMENT ON FUNCTION
    public.send_email_py_fn
    IS 'Send email notification using plpython3u';

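-- Example (illustrative, placeholder values): deliver the inactivity notice to one account
-- SELECT public.send_email_py_fn('inactivity',
--     '{"email": "user@example.com", "recipient": "Sam"}'::jsonb,
--     '{"app.url": "https://example.com", "app.email_from": "no-reply@example.com", "app.email_server": "localhost"}'::jsonb);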
-- Update stats_logs_fn, update debug
CREATE OR REPLACE FUNCTION api.stats_logs_fn(start_date text DEFAULT NULL::text, end_date text DEFAULT NULL::text, OUT stats jsonb)
    RETURNS jsonb
    LANGUAGE plpgsql
AS $function$
    DECLARE
        _start_date TIMESTAMPTZ DEFAULT '1970-01-01';
        _end_date TIMESTAMPTZ DEFAULT NOW();
    BEGIN
        IF start_date IS NOT NULL AND public.isdate(start_date::text) AND public.isdate(end_date::text) THEN
            RAISE WARNING '--> stats_logs_fn, filter result stats by date [%]', start_date;
            _start_date := start_date::TIMESTAMPTZ;
            _end_date := end_date::TIMESTAMPTZ;
        END IF;
        --RAISE NOTICE '--> stats_logs_fn, _start_date [%], _end_date [%]', _start_date, _end_date;
        WITH
            meta AS (
                SELECT m.name FROM api.metadata m ),
            logs_view AS (
                SELECT *
                    FROM api.logbook l
                    WHERE _from_time >= _start_date::TIMESTAMPTZ
                        AND _to_time <= _end_date::TIMESTAMPTZ + interval '23 hours 59 minutes'
            ),
            first_date AS (
                SELECT _from_time as first_date from logs_view ORDER BY first_date ASC LIMIT 1
            ),
            last_date AS (
                SELECT _to_time as last_date from logs_view ORDER BY _to_time DESC LIMIT 1
            ),
            max_speed_id AS (
                SELECT id FROM logs_view WHERE max_speed = (SELECT max(max_speed) FROM logs_view) ),
            max_wind_speed_id AS (
                SELECT id FROM logs_view WHERE max_wind_speed = (SELECT max(max_wind_speed) FROM logs_view)),
            max_distance_id AS (
                SELECT id FROM logs_view WHERE distance = (SELECT max(distance) FROM logs_view)),
            max_duration_id AS (
                SELECT id FROM logs_view WHERE duration = (SELECT max(duration) FROM logs_view)),
            logs_stats AS (
                SELECT
                    count(*) AS count,
                    max(max_speed) AS max_speed,
                    max(max_wind_speed) AS max_wind_speed,
                    max(distance) AS max_distance,
                    sum(distance) AS sum_distance,
                    max(duration) AS max_duration,
                    sum(duration) AS sum_duration
                    FROM logs_view l )
        --select * from logbook;
        -- Return a JSON
        SELECT jsonb_build_object(
            'name', meta.name,
            'first_date', first_date.first_date,
            'last_date', last_date.last_date,
            'max_speed_id', max_speed_id.id,
            'max_wind_speed_id', max_wind_speed_id.id,
            'max_duration_id', max_duration_id.id,
            'max_distance_id', max_distance_id.id)::jsonb || to_jsonb(logs_stats.*)::jsonb INTO stats
            FROM max_speed_id, max_wind_speed_id, max_distance_id, max_duration_id,
                logs_stats, meta, logs_view, first_date, last_date;
    END;
$function$
;

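-- Example (illustrative): logbook stats for calendar year 2024
-- SELECT api.stats_logs_fn('2024-01-01', '2024-12-31');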
-- Fix stays and moorage statistics for user by date
CREATE OR REPLACE FUNCTION api.stats_stays_fn(
    IN start_date TEXT DEFAULT NULL,
    IN end_date TEXT DEFAULT NULL,
    OUT stats JSON) RETURNS JSON AS $stats_stays$
    DECLARE
        _start_date TIMESTAMPTZ DEFAULT '1970-01-01';
        _end_date TIMESTAMPTZ DEFAULT NOW();
    BEGIN
        IF start_date IS NOT NULL AND public.isdate(start_date::text) AND public.isdate(end_date::text) THEN
            RAISE NOTICE '--> stats_stays_fn, custom filter result stats by date [%]', start_date;
            _start_date := start_date::TIMESTAMPTZ;
            _end_date := end_date::TIMESTAMPTZ;
        END IF;
        --RAISE NOTICE '--> stats_stays_fn, _start_date [%], _end_date [%]', _start_date, _end_date;
        WITH
            stays as (
                select distinct(moorage_id) as moorage_id, sum(duration) as duration, count(id) as reference_count
                    from api.stays s
                    WHERE arrived >= _start_date::TIMESTAMPTZ
                        AND departed <= _end_date::TIMESTAMPTZ + interval '23 hours 59 minutes'
                    group by moorage_id
                    order by moorage_id
            ),
            moorages AS (
                SELECT m.id, m.home_flag, m.reference_count, m.stay_duration, m.stay_code, m.country, s.duration, s.reference_count
                    from api.moorages m, stays s
                    where s.moorage_id = m.id
                    order by moorage_id
            ),
            home_ports AS (
                select count(*) as home_ports from moorages m where home_flag is true
            ),
            unique_moorages AS (
                select count(*) as unique_moorages from moorages m
            ),
            time_at_home_ports AS (
                select sum(m.stay_duration) as time_at_home_ports from moorages m where home_flag is true
            ),
            sum_stay_duration AS (
                select sum(m.stay_duration) as sum_stay_duration from moorages m where home_flag is false
            ),
            time_spent_away_arr AS (
                select m.stay_code,sum(m.stay_duration) as stay_duration from moorages m where home_flag is false group by m.stay_code order by m.stay_code
            ),
            time_spent_arr as (
                select jsonb_agg(t.*) as time_spent_away_arr from time_spent_away_arr t
            ),
            time_spent_away AS (
                select sum(m.stay_duration) as time_spent_away from moorages m where home_flag is false
            ),
            time_spent as (
                select jsonb_agg(t.*) as time_spent_away from time_spent_away t
            )
        -- Return a JSON
        SELECT jsonb_build_object(
            'home_ports', home_ports.home_ports,
            'unique_moorages', unique_moorages.unique_moorages,
            'time_at_home_ports', time_at_home_ports.time_at_home_ports,
            'sum_stay_duration', sum_stay_duration.sum_stay_duration,
            'time_spent_away', time_spent_away.time_spent_away,
            'time_spent_away_arr', time_spent_arr.time_spent_away_arr) INTO stats
            FROM home_ports, unique_moorages,
                time_at_home_ports, sum_stay_duration, time_spent_away, time_spent_arr;
    END;
$stats_stays$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    api.stats_stays_fn
    IS 'Stays/Moorages stats by date';

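-- Example (illustrative): stays/moorages stats for the same period
-- SELECT api.stats_stays_fn('2024-01-01', '2024-12-31');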
-- Update api.stats_moorages_view, fix time_spent_at_home_port
CREATE OR REPLACE VIEW api.stats_moorages_view WITH (security_invoker=true,security_barrier=true) AS
    WITH
        home_ports AS (
            select count(*) as home_ports from api.moorages m where home_flag is true
        ),
        unique_moorage AS (
            select count(*) as unique_moorage from api.moorages m
        ),
        time_at_home_ports AS (
            select sum(m.stay_duration) as time_at_home_ports from api.moorages m where home_flag is true
        ),
        time_spent_away AS (
            select sum(m.stay_duration) as time_spent_away from api.moorages m where home_flag is false
        )
    SELECT
        home_ports.home_ports as "home_ports",
        unique_moorage.unique_moorage as "unique_moorages",
        time_at_home_ports.time_at_home_ports as "time_spent_at_home_port(s)",
        time_spent_away.time_spent_away as "time_spent_away"
        FROM home_ports, unique_moorage, time_at_home_ports, time_spent_away;

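-- Example (illustrative): the view exposes the same aggregates without a date filter
-- SELECT * FROM api.stats_moorages_view;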
-- Add stats_fn, user statistics by date
DROP FUNCTION IF EXISTS api.stats_fn;
CREATE OR REPLACE FUNCTION api.stats_fn(
    IN start_date TEXT DEFAULT NULL,
    IN end_date TEXT DEFAULT NULL,
    OUT stats JSONB) RETURNS JSONB AS $stats_global$
    DECLARE
        _start_date TIMESTAMPTZ DEFAULT '1970-01-01';
        _end_date TIMESTAMPTZ DEFAULT NOW();
        stats_logs JSONB;
        stats_moorages JSONB;
        stats_logs_topby JSONB;
        stats_moorages_topby JSONB;
    BEGIN
        IF start_date IS NOT NULL AND public.isdate(start_date::text) AND public.isdate(end_date::text) THEN
            RAISE WARNING '--> stats_fn, filter result stats by date [%]', start_date;
            _start_date := start_date::TIMESTAMPTZ;
            _end_date := end_date::TIMESTAMPTZ;
        END IF;
        RAISE NOTICE '--> stats_fn, _start_date [%], _end_date [%]', _start_date, _end_date;
        -- Get global logs statistics
        SELECT api.stats_logs_fn(_start_date::TEXT, _end_date::TEXT) INTO stats_logs;
        -- Get global stays/moorages statistics
        SELECT api.stats_stays_fn(_start_date::TEXT, _end_date::TEXT) INTO stats_moorages;
        -- Get Top 5 trips statistics
        WITH
            logs_view AS (
                SELECT id,avg_speed,max_speed,max_wind_speed,distance,duration
                    FROM api.logbook l
                    WHERE _from_time >= _start_date::TIMESTAMPTZ
                        AND _to_time <= _end_date::TIMESTAMPTZ + interval '23 hours 59 minutes'
            ),
            logs_top_avg_speed AS (
                SELECT id,avg_speed FROM logs_view
                    GROUP BY id,avg_speed
                    ORDER BY avg_speed DESC
                    LIMIT 5),
            logs_top_speed AS (
                SELECT id,max_speed FROM logs_view
                    WHERE max_speed IS NOT NULL
                    GROUP BY id,max_speed
                    ORDER BY max_speed DESC
                    LIMIT 5),
            logs_top_wind_speed AS (
                SELECT id,max_wind_speed FROM logs_view
                    WHERE max_wind_speed IS NOT NULL
                    GROUP BY id,max_wind_speed
                    ORDER BY max_wind_speed DESC
                    LIMIT 5),
            logs_top_distance AS (
                SELECT id FROM logs_view
                    GROUP BY id,distance
                    ORDER BY distance DESC
                    LIMIT 5),
            logs_top_duration AS (
                SELECT id FROM logs_view
                    GROUP BY id,duration
                    ORDER BY duration DESC
                    LIMIT 5)
        -- Stats Top Logs
        SELECT jsonb_build_object(
            'stats_logs', stats_logs,
            'stats_moorages', stats_moorages,
            'logs_top_speed', (SELECT jsonb_agg(logs_top_speed.*) FROM logs_top_speed),
            'logs_top_avg_speed', (SELECT jsonb_agg(logs_top_avg_speed.*) FROM logs_top_avg_speed),
            'logs_top_wind_speed', (SELECT jsonb_agg(logs_top_wind_speed.*) FROM logs_top_wind_speed),
            'logs_top_distance', (SELECT jsonb_agg(logs_top_distance.id) FROM logs_top_distance),
            'logs_top_duration', (SELECT jsonb_agg(logs_top_duration.id) FROM logs_top_duration)
        ) INTO stats;
        -- Stats top 5 moorages statistics
        WITH
            stays as (
                select distinct(moorage_id) as moorage_id, sum(duration) as duration, count(id) as reference_count
                    from api.stays s
                    WHERE s.arrived >= _start_date::TIMESTAMPTZ
                        AND s.departed <= _end_date::TIMESTAMPTZ + interval '23 hours 59 minutes'
                    group by s.moorage_id
                    order by s.moorage_id
            ),
            moorages AS (
                SELECT m.id, m.home_flag, m.reference_count, m.stay_duration, m.stay_code, m.country, s.duration as dur, s.reference_count as ref_count
                    from api.moorages m, stays s
                    where s.moorage_id = m.id
                    order by s.moorage_id
            ),
            moorages_top_arrivals AS (
                SELECT id,ref_count FROM moorages
                    GROUP BY id,ref_count
                    ORDER BY ref_count DESC
                    LIMIT 5),
            moorages_top_duration AS (
                SELECT id,dur FROM moorages
                    GROUP BY id,dur
                    ORDER BY dur DESC
                    LIMIT 5),
            moorages_countries AS (
                SELECT DISTINCT(country) FROM moorages
                    WHERE country IS NOT NULL AND country <> 'unknown'
                    GROUP BY country
                    ORDER BY country DESC
                    LIMIT 5)
        SELECT stats || jsonb_build_object(
            'moorages_top_arrivals', (SELECT jsonb_agg(moorages_top_arrivals) FROM moorages_top_arrivals),
            'moorages_top_duration', (SELECT jsonb_agg(moorages_top_duration) FROM moorages_top_duration),
            'moorages_top_countries', (SELECT jsonb_agg(moorages_countries.country) FROM moorages_countries)
        ) INTO stats;
    END;
$stats_global$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    api.stats_fn
    IS 'Stats logbook and moorages by date';

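-- Example (illustrative): combined logbook and moorages stats; without dates it covers all time
-- SELECT api.stats_fn();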
-- Add mapgl_fn, generate a geojson with all linestring
DROP FUNCTION IF EXISTS api.mapgl_fn;
CREATE OR REPLACE FUNCTION api.mapgl_fn(start_log integer DEFAULT NULL::integer, end_log integer DEFAULT NULL::integer, start_date text DEFAULT NULL::text, end_date text DEFAULT NULL::text, OUT geojson jsonb)
    RETURNS jsonb
AS $mapgl$
    DECLARE
        _geojson jsonb;
    BEGIN
        -- Using sub query to force id order by time
        -- Extract GeoJSON LineString and merge into a new GeoJSON
        --raise WARNING 'input % % %' , start_log, end_log, public.isnumeric(end_log::text);
        IF start_log IS NOT NULL AND end_log IS NULL THEN
            end_log := start_log;
        END IF;
        IF start_date IS NOT NULL AND end_date IS NULL THEN
            end_date := start_date;
        END IF;
        --raise WARNING 'input % % %' , start_log, end_log, public.isnumeric(end_log::text);
        IF start_log IS NOT NULL AND public.isnumeric(start_log::text) AND public.isnumeric(end_log::text) THEN
            SELECT jsonb_agg(
                    jsonb_build_object('type', 'Feature',
                        'properties', f->'properties',
                        'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'LineString'))
                ) INTO _geojson
                FROM (
                    SELECT jsonb_array_elements(track_geojson->'features') AS f
                        FROM api.logbook l
                        WHERE l.id >= start_log
                            AND l.id <= end_log
                            AND l.track_geojson IS NOT NULL
                        ORDER BY l._from_time ASC
                ) AS sub
                WHERE (f->'geometry'->>'type') = 'LineString';
        ELSIF start_date IS NOT NULL AND public.isdate(start_date::text) AND public.isdate(end_date::text) THEN
            SELECT jsonb_agg(
                    jsonb_build_object('type', 'Feature',
                        'properties', f->'properties',
                        'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'LineString'))
                ) INTO _geojson
                FROM (
                    SELECT jsonb_array_elements(track_geojson->'features') AS f
                        FROM api.logbook l
                        WHERE l._from_time >= start_date::TIMESTAMPTZ
                            AND l._to_time <= end_date::TIMESTAMPTZ + interval '23 hours 59 minutes'
                            AND l.track_geojson IS NOT NULL
                        ORDER BY l._from_time ASC
                ) AS sub
                WHERE (f->'geometry'->>'type') = 'LineString';
        ELSE
            SELECT jsonb_agg(
                    jsonb_build_object('type', 'Feature',
                        'properties', f->'properties',
                        'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'LineString'))
                ) INTO _geojson
                FROM (
                    SELECT jsonb_array_elements(track_geojson->'features') AS f
                        FROM api.logbook l
                        WHERE l.track_geojson IS NOT NULL
                        ORDER BY l._from_time ASC
                ) AS sub
                WHERE (f->'geometry'->>'type') = 'LineString';
        END IF;
        -- Generate the GeoJSON with all moorages
        SELECT jsonb_build_object(
            'type', 'FeatureCollection',
            'features', _geojson || ( SELECT
                jsonb_agg(ST_AsGeoJSON(m.*)::JSONB) as moorages_geojson
                FROM
                ( SELECT
                    id,name,stay_code,
                    EXTRACT(DAY FROM justify_hours ( stay_duration )) AS Total_Stay,
                    geog
                    FROM api.moorages
                    WHERE geog IS NOT null
                ) AS m
            ) ) INTO geojson;
    END;
$mapgl$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    api.mapgl_fn
    IS 'Get all logbook LineString along with all moorages into a geojson to be processed by DeckGL';

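-- Example (illustrative): merged geojson for a range of logs, or for a date range
-- SELECT api.mapgl_fn(100, 120);
-- SELECT api.mapgl_fn(start_date => '2024-06-01', end_date => '2024-06-30');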
-- Refresh user_role permissions
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO user_role;

-- Add cron_inactivity_fn, cleanup all data for inactive users and vessels
CREATE OR REPLACE FUNCTION public.cron_inactivity_fn()
    RETURNS void
    LANGUAGE plpgsql
AS $function$
    DECLARE
        no_activity_rec record;
        user_settings jsonb;
        total_metrics INTEGER;
        del_metrics INTEGER;
        out_json JSONB;
    BEGIN
        -- List accounts with vessel inactivity for more than 200 DAYS
        -- List accounts with no vessel created for more than 200 DAYS
        -- List accounts with no vessel metadata for more than 200 DAYS
        -- Check for users and vessels with no activity for more than 200 days
        -- remove data and notify user
        RAISE NOTICE 'cron_inactivity_fn';
        FOR no_activity_rec in
            with accounts as (
                SELECT a.email,a.first,a.last,
                    (a.updated_at < NOW() AT TIME ZONE 'UTC' - INTERVAL '200 DAYS') as no_account_activity,
                    COALESCE((m.time < NOW() AT TIME ZONE 'UTC' - INTERVAL '200 DAYS'),true) as no_metadata_activity,
                    m.vessel_id IS null as no_metadata_vesssel_id,
                    m.time IS null as no_metadata_time,
                    v.vessel_id IS null as no_vessel_vesssel_id,
                    a.preferences->>'ip' as ip,v.name as user_vesssel,
                    m.name as sk_vesssel,v.vessel_id as v_vessel_id,m.vessel_id as m_vessel_id,
                    a.created_at as account_created,m.time as metadata_updated_at,
                    v.created_at as vessel_created,v.updated_at as vessel_updated_at
                    FROM auth.accounts a
                    LEFT JOIN auth.vessels v ON v.owner_email = a.email
                    LEFT JOIN api.metadata m ON v.vessel_id = m.vessel_id
                    order by a.created_at asc
            )
            select * from accounts a where
                (no_account_activity is true
                or no_vessel_vesssel_id is true
                or no_metadata_activity is true
                or no_metadata_vesssel_id is true
                or no_metadata_time is true )
            ORDER BY a.account_created asc
        LOOP
            RAISE NOTICE '-> cron_inactivity_fn for [%]', no_activity_rec;
            SELECT json_build_object('email', no_activity_rec.email, 'recipient', no_activity_rec.first) into user_settings;
            RAISE NOTICE '-> debug cron_inactivity_fn user_settings [%]', user_settings;
            IF no_activity_rec.no_vessel_vesssel_id is true then
                PERFORM send_notification_fn('no_vessel'::TEXT, user_settings::JSONB);
            ELSIF no_activity_rec.no_metadata_vesssel_id is true then
                PERFORM send_notification_fn('no_metadata'::TEXT, user_settings::JSONB);
            ELSIF no_activity_rec.no_metadata_activity is true then
                PERFORM send_notification_fn('no_activity'::TEXT, user_settings::JSONB);
            ELSIF no_activity_rec.no_account_activity is true then
                PERFORM send_notification_fn('no_activity'::TEXT, user_settings::JSONB);
            END IF;
            -- Send notification
            PERFORM send_notification_fn('inactivity'::TEXT, user_settings::JSONB);
            -- Delete vessel metrics
            IF no_activity_rec.v_vessel_id IS NOT NULL THEN
                SELECT count(*) INTO total_metrics from api.metrics where vessel_id = no_activity_rec.v_vessel_id;
                WITH deleted AS (delete from api.metrics m where vessel_id = no_activity_rec.v_vessel_id RETURNING *) SELECT count(*) INTO del_metrics FROM deleted;
                SELECT jsonb_build_object('total_metrics', total_metrics, 'del_metrics', del_metrics) INTO out_json;
                RAISE NOTICE '-> debug cron_inactivity_fn [%]', out_json;
            END IF;
        END LOOP;
    END;
$function$
;

COMMENT ON FUNCTION public.cron_inactivity_fn() IS 'init by pg_cron, check for vessel with no activity for more than 200 days then send notification';

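-- Example (illustrative; job name and schedule are placeholders): run the check daily via pg_cron
-- SELECT cron.schedule('cron_inactivity', '0 1 * * *', 'select public.cron_inactivity_fn()');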
-- Add cron_deactivated_fn, delete all data for inactive users and vessels
CREATE OR REPLACE FUNCTION public.cron_deactivated_fn()
    RETURNS void
    LANGUAGE plpgsql
AS $function$
    DECLARE
        no_activity_rec record;
        user_settings jsonb;
        del_vessel_data JSONB;
        del_meta INTEGER;
        del_vessel INTEGER;
        del_account INTEGER;
        out_json JSONB;
    BEGIN
        RAISE NOTICE 'cron_deactivated_fn';
        -- List accounts with vessel inactivity for more than 230 DAYS
        -- List accounts with no vessel created for more than 230 DAYS
        -- List accounts with no vessel metadata for more than 230 DAYS
        -- Remove data and remove user and notify user
        FOR no_activity_rec in
            with accounts as (
                SELECT a.email,a.first,a.last,
                    (a.updated_at < NOW() AT TIME ZONE 'UTC' - INTERVAL '230 DAYS') as no_account_activity,
                    COALESCE((m.time < NOW() AT TIME ZONE 'UTC' - INTERVAL '230 DAYS'),true) as no_metadata_activity,
                    m.vessel_id IS null as no_metadata_vesssel_id,
                    m.time IS null as no_metadata_time,
                    v.vessel_id IS null as no_vessel_vesssel_id,
                    a.preferences->>'ip' as ip,v.name as user_vesssel,
                    m.name as sk_vesssel,v.vessel_id as v_vessel_id,m.vessel_id as m_vessel_id,
                    a.created_at as account_created,m.time as metadata_updated_at,
                    v.created_at as vessel_created,v.updated_at as vessel_updated_at
                    FROM auth.accounts a
                    LEFT JOIN auth.vessels v ON v.owner_email = a.email
                    LEFT JOIN api.metadata m ON v.vessel_id = m.vessel_id
                    order by a.created_at asc
            )
            select * from accounts a where
                (no_account_activity is true
                or no_vessel_vesssel_id is true
                or no_metadata_activity is true
                or no_metadata_vesssel_id is true
                or no_metadata_time is true )
            ORDER BY a.account_created asc
        LOOP
            RAISE NOTICE '-> cron_deactivated_fn for [%]', no_activity_rec;
            SELECT json_build_object('email', no_activity_rec.email, 'recipient', no_activity_rec.first) into user_settings;
            RAISE NOTICE '-> debug cron_deactivated_fn user_settings [%]', user_settings;
            IF no_activity_rec.no_vessel_vesssel_id is true then
                PERFORM send_notification_fn('no_vessel'::TEXT, user_settings::JSONB);
            ELSIF no_activity_rec.no_metadata_vesssel_id is true then
                PERFORM send_notification_fn('no_metadata'::TEXT, user_settings::JSONB);
            ELSIF no_activity_rec.no_metadata_activity is true then
                PERFORM send_notification_fn('no_activity'::TEXT, user_settings::JSONB);
            ELSIF no_activity_rec.no_account_activity is true then
                PERFORM send_notification_fn('no_activity'::TEXT, user_settings::JSONB);
            END IF;
            -- Send notification
            PERFORM send_notification_fn('deactivated'::TEXT, user_settings::JSONB);
            -- Delete vessel data
            IF no_activity_rec.v_vessel_id IS NOT NULL THEN
                SELECT public.delete_vessel_fn(no_activity_rec.v_vessel_id) INTO del_vessel_data;
                WITH deleted AS (delete from api.metadata where vessel_id = no_activity_rec.v_vessel_id RETURNING *) SELECT count(*) INTO del_meta FROM deleted;
                SELECT jsonb_build_object('del_metadata', del_meta) || del_vessel_data INTO del_vessel_data;
                RAISE NOTICE '-> debug cron_deactivated_fn [%]', del_vessel_data;
            END IF;
            -- Delete account data
            WITH deleted AS (delete from auth.vessels where owner_email = no_activity_rec.email RETURNING *) SELECT count(*) INTO del_vessel FROM deleted;
            WITH deleted AS (delete from auth.accounts where email = no_activity_rec.email RETURNING *) SELECT count(*) INTO del_account FROM deleted;
            SELECT jsonb_build_object('del_account', del_account, 'del_vessel', del_vessel) || del_vessel_data INTO out_json;
            RAISE NOTICE '-> debug cron_deactivated_fn [%]', out_json;
            -- TODO remove keycloak and grafana provisioning
        END LOOP;
    END;
$function$
;

COMMENT ON FUNCTION public.cron_deactivated_fn() IS 'init by pg_cron, check for vessel with no activity for more than 230 days then send notification and delete account and vessel data';

-- Remove unused and duplicate functions
DROP FUNCTION IF EXISTS public.cron_process_no_activity_fn;
DROP FUNCTION IF EXISTS public.cron_process_inactivity_fn;
DROP FUNCTION IF EXISTS public.cron_process_deactivated_fn;

-- Update version
UPDATE public.app_settings
    SET value='0.7.7'
    WHERE "name"='app.version';

\c postgres
253	initdb/99_migrations_202410.sql	Normal file
@@ -0,0 +1,253 @@
---------------------------------------------------------------------------
-- Copyright 2021-2024 Francois Lacroix <xbgmsharp@gmail.com>
-- This file is part of PostgSail which is released under Apache License, Version 2.0 (the "License").
-- See file LICENSE or go to http://www.apache.org/licenses/LICENSE-2.0 for full license details.
--
-- Migration October 2024
--
-- List current database
select current_database();

-- connect to the DB
\c signalk

\echo 'Timing mode is enabled'
\timing

\echo 'Force timezone, just in case'
set timezone to 'UTC';

-- Update moorages map, export more properties (notes,reference_count) from moorages tbl
CREATE OR REPLACE FUNCTION api.export_moorages_geojson_fn(OUT geojson jsonb)
    RETURNS jsonb
    LANGUAGE plpgsql
AS $function$
    DECLARE
    BEGIN
        SELECT jsonb_build_object(
            'type', 'FeatureCollection',
            'features',
                ( SELECT
                    json_agg(ST_AsGeoJSON(m.*)::JSON) as moorages_geojson
                    FROM
                    ( SELECT
                        id,name,stay_code,notes,reference_count,
                        EXTRACT(DAY FROM justify_hours ( stay_duration )) AS Total_Stay,
                        geog
                        FROM api.moorages
                        WHERE geog IS NOT NULL
                    ) AS m
                )
            ) INTO geojson;
    END;
$function$
;

COMMENT ON FUNCTION api.export_moorages_geojson_fn(out jsonb) IS 'Export moorages as geojson';

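-- Example (illustrative): fetch the moorages layer as a FeatureCollection
-- SELECT api.export_moorages_geojson_fn();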
-- Update mapgl_fn, update moorages map sub query to export more properties (notes,reference_count) from moorages tbl
DROP FUNCTION IF EXISTS api.mapgl_fn;
CREATE OR REPLACE FUNCTION api.mapgl_fn(start_log integer DEFAULT NULL::integer, end_log integer DEFAULT NULL::integer, start_date text DEFAULT NULL::text, end_date text DEFAULT NULL::text, OUT geojson jsonb)
    RETURNS jsonb
AS $mapgl$
    DECLARE
        _geojson jsonb;
    BEGIN
        -- Using sub query to force id order by time
        -- Extract GeoJSON LineString and merge into a new GeoJSON
        --raise WARNING 'input % % %' , start_log, end_log, public.isnumeric(end_log::text);
        IF start_log IS NOT NULL AND end_log IS NULL THEN
            end_log := start_log;
        END IF;
        IF start_date IS NOT NULL AND end_date IS NULL THEN
            end_date := start_date;
        END IF;
        --raise WARNING 'input % % %' , start_log, end_log, public.isnumeric(end_log::text);
        IF start_log IS NOT NULL AND public.isnumeric(start_log::text) AND public.isnumeric(end_log::text) THEN
            SELECT jsonb_agg(
                    jsonb_build_object('type', 'Feature',
                        'properties', f->'properties',
                        'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'LineString'))
                ) INTO _geojson
                FROM (
                    SELECT jsonb_array_elements(track_geojson->'features') AS f
                        FROM api.logbook l
                        WHERE l.id >= start_log
                            AND l.id <= end_log
                            AND l.track_geojson IS NOT NULL
                        ORDER BY l._from_time ASC
                ) AS sub
                WHERE (f->'geometry'->>'type') = 'LineString';
        ELSIF start_date IS NOT NULL AND public.isdate(start_date::text) AND public.isdate(end_date::text) THEN
            SELECT jsonb_agg(
                    jsonb_build_object('type', 'Feature',
                        'properties', f->'properties',
                        'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'LineString'))
                ) INTO _geojson
                FROM (
                    SELECT jsonb_array_elements(track_geojson->'features') AS f
                        FROM api.logbook l
                        WHERE l._from_time >= start_date::TIMESTAMPTZ
                            AND l._to_time <= end_date::TIMESTAMPTZ + interval '23 hours 59 minutes'
                            AND l.track_geojson IS NOT NULL
                        ORDER BY l._from_time ASC
                ) AS sub
                WHERE (f->'geometry'->>'type') = 'LineString';
        ELSE
            SELECT jsonb_agg(
                    jsonb_build_object('type', 'Feature',
                        'properties', f->'properties',
                        'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'LineString'))
                ) INTO _geojson
                FROM (
                    SELECT jsonb_array_elements(track_geojson->'features') AS f
                        FROM api.logbook l
                        WHERE l.track_geojson IS NOT NULL
                        ORDER BY l._from_time ASC
                ) AS sub
                WHERE (f->'geometry'->>'type') = 'LineString';
        END IF;
        -- Generate the GeoJSON with all moorages
        SELECT jsonb_build_object(
            'type', 'FeatureCollection',
            'features', _geojson || ( SELECT
                jsonb_agg(ST_AsGeoJSON(m.*)::JSONB) as moorages_geojson
                FROM
                ( SELECT
                    id,name,stay_code,notes,reference_count,
                    EXTRACT(DAY FROM justify_hours ( stay_duration )) AS Total_Stay,
                    geog
                    FROM api.moorages
                    WHERE geog IS NOT null
                ) AS m
            ) ) INTO geojson;
    END;
$mapgl$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    api.mapgl_fn
    IS 'Generate a geojson with all logs as geometry LineString with moorages as geometry Point to be processed by DeckGL';

-- Update logbook_update_geojson_fn, fix corrupt linestring properties
CREATE OR REPLACE FUNCTION public.logbook_update_geojson_fn(_id integer, _start text, _end text, OUT _track_geojson json)
    RETURNS json
    LANGUAGE plpgsql
AS $function$
    declare
        log_geojson jsonb;
        metrics_geojson jsonb;
        _map jsonb;
    begin
        -- GeoJson Feature Logbook linestring
        SELECT
            ST_AsGeoJSON(log.*) into log_geojson
            FROM
            ( SELECT
                id,name,
                distance,
                duration,
                avg_speed,
                max_speed,
                max_wind_speed,
                _from_time,
                _to_time,
                _from_moorage_id,
                _to_moorage_id,
                notes,
                extra['avg_wind_speed'] as avg_wind_speed,
                track_geom
                FROM api.logbook
                WHERE id = _id
            ) AS log;
        -- GeoJson Feature Metrics point
        SELECT
            json_agg(ST_AsGeoJSON(t.*)::json) into metrics_geojson
            FROM (
                ( SELECT
                    time,
                    courseovergroundtrue,
                    speedoverground,
                    windspeedapparent,
                    longitude,latitude,
                    '' AS notes,
                    coalesce(metersToKnots((metrics->'environment.wind.speedTrue')::NUMERIC), null) as truewindspeed,
                    coalesce(radiantToDegrees((metrics->'environment.wind.directionTrue')::NUMERIC), null) as truewinddirection,
                    coalesce(status, null) as status,
                    st_makepoint(longitude,latitude) AS geo_point
                    FROM api.metrics m
                    WHERE m.latitude IS NOT NULL
                        AND m.longitude IS NOT NULL
                        AND time >= _start::TIMESTAMPTZ
                        AND time <= _end::TIMESTAMPTZ
                        AND vessel_id = current_setting('vessel.id', false)
                    ORDER BY m.time ASC
                )
            ) AS t;

        -- Merge jsonb
        SELECT log_geojson::jsonb || metrics_geojson::jsonb into _map;
        -- output
        SELECT
            json_build_object(
                'type', 'FeatureCollection',
                'features', _map
            ) into _track_geojson;
    END;
$function$
;
COMMENT ON FUNCTION public.logbook_update_geojson_fn(in int4, in text, in text, out json) IS 'Update log details with geojson';

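-- Example (illustrative, placeholder id and time range): rebuild the geojson for one log
-- SELECT public.logbook_update_geojson_fn(1, '2024-06-01T10:00:00Z', '2024-06-01T16:00:00Z');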
-- Add trigger to update logbook stats from user edited geojson
DROP FUNCTION IF EXISTS public.update_logbook_with_geojson_trigger_fn;
CREATE OR REPLACE FUNCTION public.update_logbook_with_geojson_trigger_fn() RETURNS TRIGGER AS $$
DECLARE
    geojson JSONB;
    feature JSONB;
BEGIN
    -- Parse the incoming GeoJSON data from the track_geojson column
    geojson := NEW.track_geojson::jsonb;

    -- Extract the first feature (assume it is the LineString)
    feature := geojson->'features'->0;

    IF geojson IS NOT NULL AND feature IS NOT NULL AND (feature->'properties' ? 'x-update') THEN

        -- Get properties from the feature to extract avg_speed and max_speed
        NEW.avg_speed := (feature->'properties'->>'avg_speed')::FLOAT;
        NEW.max_speed := (feature->'properties'->>'max_speed')::FLOAT;
        NEW.max_wind_speed := (feature->'properties'->>'max_wind_speed')::FLOAT;
        NEW.extra := jsonb_set( NEW.extra,
                        '{avg_wind_speed}',
                        to_jsonb((feature->'properties'->>'avg_wind_speed')::FLOAT),
                        true -- this flag means it will create the key if it does not exist
                    );

        -- Calculate the LineString's actual spatial distance
        NEW.track_geom := ST_GeomFromGeoJSON(feature->'geometry'::text);
        NEW.distance := TRUNC (ST_Length(NEW.track_geom,false)::INT * 0.0005399568, 4); -- convert to NM

    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Description
COMMENT ON FUNCTION
    public.update_logbook_with_geojson_trigger_fn
    IS 'Extracts specific properties (avg_speed, max_speed, max_wind_speed, avg_wind_speed) plus the LineString geometry and distance from a GeoJSON FeatureCollection, and updates the corresponding columns of the api.logbook table';

-- Add trigger on logbook update to update metrics from track_geojson
CREATE TRIGGER update_logbook_with_geojson_trigger_fn
    BEFORE UPDATE OF track_geojson ON api.logbook
    FOR EACH ROW
    WHEN (NEW.track_geojson IS DISTINCT FROM OLD.track_geojson)
    EXECUTE FUNCTION public.update_logbook_with_geojson_trigger_fn();

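-- Example (illustrative, placeholder id): a user edit only recomputes the stats when the
-- first feature carries the 'x-update' marker expected by the trigger function
-- UPDATE api.logbook
--     SET track_geojson = jsonb_set(track_geojson, '{features,0,properties,x-update}', 'true'::jsonb)
--     WHERE id = 1;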
-- Refresh user_role permissions
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO user_role;

-- Update version
UPDATE public.app_settings
    SET value='0.7.8'
    WHERE "name"='app.version';

\c postgres
1976	initdb/99_migrations_202411.sql	Normal file
File diff suppressed because it is too large	Load Diff
2336	initdb/99_migrations_202412.sql	Normal file
File diff suppressed because it is too large	Load Diff
219	initdb/99_migrations_202501.sql	Normal file
@@ -0,0 +1,219 @@
---------------------------------------------------------------------------
-- Copyright 2021-2025 Francois Lacroix <xbgmsharp@gmail.com>
-- This file is part of PostgSail which is released under Apache License, Version 2.0 (the "License").
-- See file LICENSE or go to http://www.apache.org/licenses/LICENSE-2.0 for full license details.
--
-- Migration January-March 2025
--
-- List current database
select current_database();

-- connect to the DB
\c signalk

\echo 'Timing mode is enabled'
\timing

\echo 'Force timezone, just in case'
set timezone to 'UTC';

-- Update metadata table, mark client_id as deprecated
COMMENT ON COLUMN api.metadata.client_id IS 'Deprecated client_id to be removed';
-- Update metrics table, mark client_id as deprecated
COMMENT ON COLUMN api.metrics.client_id IS 'Deprecated client_id to be removed';

-- Update metadata table, change configuration column type to jsonb and add a comment
ALTER TABLE api.metadata ALTER COLUMN "configuration" TYPE jsonb USING "configuration"::jsonb;
COMMENT ON COLUMN api.metadata.configuration IS 'Signalk path mapping for metrics';

-- Update metadata table, add new column available_keys and a comment
ALTER TABLE api.metadata ADD available_keys jsonb NULL;
COMMENT ON COLUMN api.metadata.available_keys IS 'Signalk paths with unit for custom mapping';

--DROP FUNCTION public.metadata_upsert_trigger_fn();
-- Update metadata_upsert_trigger_fn on the metadata table to support configuration and available_keys and deprecate client_id
CREATE OR REPLACE FUNCTION public.metadata_upsert_trigger_fn()
    RETURNS trigger
    LANGUAGE plpgsql
AS $function$
    DECLARE
        metadata_id integer;
        metadata_active boolean;
    BEGIN
        -- Require Signalk plugin version 0.4.0
        -- Set client_id to new value to allow RLS
        --PERFORM set_config('vessel.client_id', NEW.client_id, false);
        -- UPSERT - Insert vs Update for Metadata
        --RAISE NOTICE 'metadata_upsert_trigger_fn';
        --PERFORM set_config('vessel.id', NEW.vessel_id, true);
        --RAISE WARNING 'metadata_upsert_trigger_fn [%] [%]', current_setting('vessel.id', true), NEW;
        SELECT m.id,m.active INTO metadata_id, metadata_active
            FROM api.metadata m
            WHERE m.vessel_id IS NOT NULL AND m.vessel_id = current_setting('vessel.id', true);
        --RAISE NOTICE 'metadata_id is [%]', metadata_id;
        IF metadata_id IS NOT NULL THEN
            -- send notification if boat is back online
            IF metadata_active is False THEN
                -- Add monitor online entry to process queue for later notification
                INSERT INTO process_queue (channel, payload, stored, ref_id)
                    VALUES ('monitoring_online', metadata_id, now(), current_setting('vessel.id', true));
            END IF;
            -- Update vessel metadata
            UPDATE api.metadata
                SET
                    name = NEW.name,
                    mmsi = NEW.mmsi,
                    --client_id = NEW.client_id,
                    length = NEW.length,
                    beam = NEW.beam,
                    height = NEW.height,
                    ship_type = NEW.ship_type,
                    plugin_version = NEW.plugin_version,
                    signalk_version = NEW.signalk_version,
                    platform = REGEXP_REPLACE(NEW.platform, '[^a-zA-Z0-9\(\) ]', '', 'g'),
                    -- configuration = NEW.configuration, -- ignore configuration from vessel, it is managed by the user
                    -- time = NEW.time, -- ignore the time sent by the vessel as it is out of sync sometimes
                    time = NOW(), -- overwrite the time sent by the vessel
                    available_keys = NEW.available_keys,
                    active = true
                WHERE id = metadata_id;
            RETURN NULL; -- Ignore insert
        ELSE
            IF NEW.vessel_id IS NULL THEN
                -- set vessel_id from jwt if not present in INSERT query
                NEW.vessel_id := current_setting('vessel.id');
            END IF;
            -- Ignore and overwrite the time sent by the vessel
            NEW.time := NOW();
            RETURN NEW; -- Insert new vessel metadata
        END IF;
    END;
$function$
;
COMMENT ON FUNCTION public.metadata_upsert_trigger_fn() IS 'process metadata from vessel, upsert';

-- Create or replace the function that will be executed by the trigger
-- Add metadata table trigger for update_metadata_configuration
CREATE OR REPLACE FUNCTION api.update_metadata_configuration()
RETURNS TRIGGER AS $$
BEGIN
    -- Require Signalk plugin version 0.4.0
    -- Update the configuration field with current date in ISO format
    -- Using jsonb_set if configuration is already a JSONB field
    IF NEW.configuration IS NOT NULL AND
        jsonb_typeof(NEW.configuration) = 'object' THEN
        NEW.configuration = jsonb_set(
            NEW.configuration,
            '{update_at}',
            to_jsonb(to_char(NOW(), 'YYYY-MM-DD"T"HH24:MI:SS"Z"'))
        );
    END IF;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
COMMENT ON FUNCTION api.update_metadata_configuration() IS 'Update the configuration field with current date in ISO format';

-- Create the trigger
CREATE TRIGGER metadata_update_configuration_trigger
    BEFORE UPDATE ON api.metadata
    FOR EACH ROW
    EXECUTE FUNCTION api.update_metadata_configuration();

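-- Example (illustrative, placeholder mapping key): any configuration change gets stamped with an update_at entry by the trigger
-- UPDATE api.metadata
--     SET configuration = configuration || '{"waterTemperatureKey": "environment.water.temperature"}'::jsonb
--     WHERE vessel_id = current_setting('vessel.id', true);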
-- Update api.export_logbooks_geojson_linestring_trips_fn, add metadata properties
CREATE OR REPLACE FUNCTION api.export_logbooks_geojson_linestring_trips_fn(
    start_log integer DEFAULT NULL::integer,
    end_log integer DEFAULT NULL::integer,
    start_date text DEFAULT NULL::text,
    end_date text DEFAULT NULL::text,
    OUT geojson jsonb
) RETURNS jsonb
    LANGUAGE plpgsql
AS $function$
    DECLARE
        logs_geojson jsonb;
    BEGIN
        -- Normalize start and end values
        IF start_log IS NOT NULL AND end_log IS NULL THEN end_log := start_log; END IF;
        IF start_date IS NOT NULL AND end_date IS NULL THEN end_date := start_date; END IF;

        WITH logbook_data AS (
            -- get the logbook geometry and metadata, an array for each log
            SELECT id, name,
                starttimestamp(trip),
                endtimestamp(trip),
                --speed(trip_sog),
                duration(trip),
                --length(trip) as length, -- Meters
                (length(trip) * 0.0005399568)::numeric as distance, -- NM
                twavg(trip_sog) as avg_sog,
                maxValue(trip_sog) as max_sog,
                maxValue(trip_depth) as max_depth, -- Depth
                maxValue(trip_batt_charge) as max_batt_charge, -- Battery Charge
                maxValue(trip_batt_voltage) as max_batt_voltage, -- Battery Voltage
                maxValue(trip_temp_water) as max_temp_water, -- Temperature water
                maxValue(trip_temp_out) as max_temp_out, -- Temperature outside
                maxValue(trip_pres_out) as max_pres_out, -- Pressure outside
                maxValue(trip_hum_out) as max_hum_out, -- Humidity outside
                twavg(trip_depth) as avg_depth, -- Depth
                twavg(trip_batt_charge) as avg_batt_charge, -- Battery Charge
                twavg(trip_batt_voltage) as avg_batt_voltage, -- Battery Voltage
                twavg(trip_temp_water) as avg_temp_water, -- Temperature water
                twavg(trip_temp_out) as avg_temp_out, -- Temperature outside
                twavg(trip_pres_out) as avg_pres_out, -- Pressure outside
                twavg(trip_hum_out) as avg_hum_out, -- Humidity outside
                trajectory(l.trip)::geometry as track_geog -- extract trip to geography
                FROM api.logbook l
                WHERE (start_log IS NULL OR l.id >= start_log) AND
                      (end_log IS NULL OR l.id <= end_log) AND
                      (start_date IS NULL OR l._from_time >= start_date::TIMESTAMPTZ) AND
                      (end_date IS NULL OR l._to_time <= end_date::TIMESTAMPTZ + interval '23 hours 59 minutes') AND
                      l.trip IS NOT NULL
                ORDER BY l._from_time ASC
        ),
        collect as (
            SELECT ST_Collect(
                ARRAY(
                    SELECT track_geog FROM logbook_data))
        )
        -- Create the GeoJSON response
        SELECT jsonb_build_object(
            'type', 'FeatureCollection',
            'features', json_agg(ST_AsGeoJSON(logs.*)::json)) INTO geojson FROM logbook_data logs;
    END;
$function$;
-- Description
COMMENT ON FUNCTION api.export_logbooks_geojson_linestring_trips_fn IS 'Generate geojson geometry LineString from trip with the corresponding properties';

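-- Example (illustrative): one LineString feature per trip, filtered by log id or by date
-- SELECT api.export_logbooks_geojson_linestring_trips_fn(start_log => 100, end_log => 120);
-- SELECT api.export_logbooks_geojson_linestring_trips_fn(start_date => '2024-06-01', end_date => '2024-06-30');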
-- Add public.get_season, return the season based on the input date for logbook tag
CREATE OR REPLACE FUNCTION public.get_season(input_date TIMESTAMPTZ)
RETURNS TEXT AS $$
BEGIN
    CASE
        WHEN (EXTRACT(MONTH FROM input_date) = 3 AND EXTRACT(DAY FROM input_date) >= 1) OR
             (EXTRACT(MONTH FROM input_date) BETWEEN 4 AND 5) THEN
            RETURN 'Spring';
        WHEN (EXTRACT(MONTH FROM input_date) = 6 AND EXTRACT(DAY FROM input_date) >= 1) OR
             (EXTRACT(MONTH FROM input_date) BETWEEN 7 AND 8) THEN
            RETURN 'Summer';
        WHEN (EXTRACT(MONTH FROM input_date) = 9 AND EXTRACT(DAY FROM input_date) >= 1) OR
             (EXTRACT(MONTH FROM input_date) BETWEEN 10 AND 11) THEN
            RETURN 'Fall';
        ELSE
            RETURN 'Winter';
    END CASE;
END;
$$ LANGUAGE plpgsql IMMUTABLE;

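-- Example (illustrative): tag a northern-hemisphere log with its season
-- SELECT public.get_season('2025-07-14T10:00:00Z'::timestamptz); -- returns 'Summer'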
-- Refresh permissions
GRANT SELECT ON TABLE api.metrics,api.metadata TO scheduler;
GRANT INSERT, UPDATE, SELECT ON TABLE api.logbook,api.moorages,api.stays TO scheduler;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO scheduler;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO scheduler;
GRANT SELECT, UPDATE ON TABLE public.process_queue TO scheduler;

-- Update version
UPDATE public.app_settings
    SET value='0.9.0'
    WHERE "name"='app.version';
1149	initdb/99_migrations_202504.sql	Normal file
File diff suppressed because it is too large	Load Diff
3454	initdb/99_migrations_202505.sql	Normal file
File diff suppressed because it is too large	Load Diff
734	initdb/99_migrations_202507.sql	Normal file
@@ -0,0 +1,734 @@
---------------------------------------------------------------------------
-- Copyright 2021-2025 Francois Lacroix <xbgmsharp@gmail.com>
-- This file is part of PostgSail which is released under Apache License, Version 2.0 (the "License").
-- See file LICENSE or go to http://www.apache.org/licenses/LICENSE-2.0 for full license details.
--
-- Migration June/July 2025
--
-- List current database
select current_database();

-- connect to the DB
\c signalk

\echo 'Timing mode is enabled'
\timing

\echo 'Force timezone, just in case'
set timezone to 'UTC';

-- Update plugin upgrade message
UPDATE public.email_templates
    SET email_content='Hello __RECIPIENT__,
Please upgrade your postgsail signalk plugin. Make sure you restart your Signalk instance after upgrading. Be sure to contact me if you encounter any issues.'
    WHERE "name"='skplugin_upgrade';

-- DROP FUNCTION api.login(text, text);
|
||||
-- Update api.login, update the connected_at field to the current time
|
||||
CREATE OR REPLACE FUNCTION api.login(email text, pass text)
|
||||
RETURNS auth.jwt_token
|
||||
LANGUAGE plpgsql
|
||||
SECURITY DEFINER
|
||||
AS $function$
|
||||
declare
|
||||
_role name;
|
||||
result auth.jwt_token;
|
||||
app_jwt_secret text;
|
||||
_email_valid boolean := false;
|
||||
_email text := email;
|
||||
_user_id text := null;
|
||||
_user_disable boolean := false;
|
||||
headers json := current_setting('request.headers', true)::json;
|
||||
client_ip text := coalesce(headers->>'x-client-ip', NULL);
|
||||
begin
|
||||
-- check email and password
|
||||
select auth.user_role(email, pass) into _role;
|
||||
if _role is null then
|
||||
-- HTTP/403
|
||||
--raise invalid_password using message = 'invalid user or password';
|
||||
-- HTTP/401
|
||||
raise insufficient_privilege using message = 'invalid user or password';
|
||||
end if;
|
||||
|
||||
-- Check if user is disable due to abuse
|
||||
SELECT preferences['disable'],user_id INTO _user_disable,_user_id
|
||||
FROM auth.accounts a
|
||||
WHERE a.email = _email;
|
||||
IF _user_disable is True then
|
||||
-- due to the raise, the insert is never committed.
|
||||
--INSERT INTO process_queue (channel, payload, stored, ref_id)
|
||||
-- VALUES ('account_disable', _email, now(), _user_id);
|
||||
RAISE sqlstate 'PT402' using message = 'Account disable, contact us',
|
||||
detail = 'Quota exceeded',
|
||||
hint = 'Upgrade your plan';
|
||||
END IF;
|
||||
|
||||
-- Check email_valid and generate OTP
|
||||
SELECT preferences['email_valid'],user_id INTO _email_valid,_user_id
|
||||
FROM auth.accounts a
|
||||
WHERE a.email = _email;
|
||||
IF _email_valid is null or _email_valid is False THEN
|
||||
INSERT INTO process_queue (channel, payload, stored, ref_id)
|
||||
VALUES ('email_otp', _email, now(), _user_id);
|
||||
END IF;
|
||||
|
||||
-- Track IP per user to avoid abuse
|
||||
--RAISE WARNING 'api.login debug: [%],[%]', client_ip, login.email;
|
||||
IF client_ip IS NOT NULL THEN
|
||||
UPDATE auth.accounts a SET
|
||||
preferences = jsonb_recursive_merge(a.preferences, jsonb_build_object('ip', client_ip)),
|
||||
connected_at = NOW()
|
||||
WHERE a.email = login.email;
|
||||
END IF;
|
||||
|
||||
-- Get app_jwt_secret
|
||||
SELECT value INTO app_jwt_secret
|
||||
FROM app_settings
|
||||
WHERE name = 'app.jwt_secret';
|
||||
|
||||
--RAISE WARNING 'api.login debug: [%],[%],[%]', app_jwt_secret, _role, login.email;
|
||||
-- Generate jwt
|
||||
select jwt.sign(
|
||||
-- row_to_json(r), ''
|
||||
-- row_to_json(r)::json, current_setting('app.jwt_secret')::text
|
||||
row_to_json(r)::json, app_jwt_secret
|
||||
) as token
|
||||
from (
|
||||
select _role as role, login.email as email, -- TODO replace with user_id
|
||||
-- select _role as role, user_id as uid, -- add support in check_jwt
|
||||
extract(epoch from now())::integer + 60*60 as exp
|
||||
) r
|
||||
into result;
|
||||
return result;
|
||||
end;
|
||||
$function$
|
||||
;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION api.login IS 'Handle user login, returns a JWT token with user role and email.';
|
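
-- Usage sketch (hypothetical credentials): api.login can be exercised directly
-- from psql, or through the PostgREST RPC endpoint that exposes it:
-- SELECT api.login('demo@example.com', 'my-secret-pass');
-- curl -s -XPOST "$PGSAIL_API_URI/rpc/login" \
--      -H 'Content-Type: application/json' \
--      -d '{"email":"demo@example.com","pass":"my-secret-pass"}'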

-- DROP FUNCTION api.monitoring_history_fn(in text, out jsonb);
-- Update monitoring_history_fn to use custom user settings for metrics
CREATE OR REPLACE FUNCTION api.monitoring_history_fn(time_interval text DEFAULT '24'::text, OUT history_metrics jsonb)
  RETURNS jsonb
  LANGUAGE plpgsql
AS $function$
DECLARE
    bucket_interval interval := '5 minutes';
BEGIN
    RAISE NOTICE '-> monitoring_history_fn';
    SELECT CASE time_interval
        WHEN '24' THEN '5 minutes'
        WHEN '48' THEN '2 hours'
        WHEN '72' THEN '4 hours'
        WHEN '168' THEN '7 hours'
        ELSE '5 minutes'
        END bucket INTO bucket_interval;
    RAISE NOTICE '-> monitoring_history_fn % %', time_interval, bucket_interval;
    WITH history_table AS (
        SELECT time_bucket(bucket_interval::INTERVAL, mt.time) AS time_bucket,
            avg(-- Water Temperature
                COALESCE(
                    mt.metrics->'water'->>'temperature',
                    mt.metrics->>(md.configuration->>'waterTemperatureKey'),
                    mt.metrics->>'environment.water.temperature'
                )::FLOAT) AS waterTemperature,
            avg(-- Inside Temperature
                COALESCE(
                    mt.metrics->'temperature'->>'inside',
                    mt.metrics->>(md.configuration->>'insideTemperatureKey'),
                    mt.metrics->>'environment.inside.temperature'
                )::FLOAT) AS insideTemperature,
            avg(-- Outside Temperature
                COALESCE(
                    mt.metrics->'temperature'->>'outside',
                    mt.metrics->>(md.configuration->>'outsideTemperatureKey'),
                    mt.metrics->>'environment.outside.temperature'
                )::FLOAT) AS outsideTemperature,
            avg(-- Wind Speed True
                COALESCE(
                    mt.metrics->'wind'->>'speed',
                    mt.metrics->>(md.configuration->>'windSpeedKey'),
                    mt.metrics->>'environment.wind.speedTrue'
                )::FLOAT) AS windSpeedOverGround,
            avg(-- Inside Humidity
                COALESCE(
                    mt.metrics->'humidity'->>'inside',
                    mt.metrics->>(md.configuration->>'insideHumidityKey'),
                    mt.metrics->>'environment.inside.relativeHumidity',
                    mt.metrics->>'environment.inside.humidity'
                )::FLOAT) AS insideHumidity,
            avg(-- Outside Humidity
                COALESCE(
                    mt.metrics->'humidity'->>'outside',
                    mt.metrics->>(md.configuration->>'outsideHumidityKey'),
                    mt.metrics->>'environment.outside.relativeHumidity',
                    mt.metrics->>'environment.outside.humidity'
                )::FLOAT) AS outsideHumidity,
            avg(-- Outside Pressure
                COALESCE(
                    mt.metrics->'pressure'->>'outside',
                    mt.metrics->>(md.configuration->>'outsidePressureKey'),
                    mt.metrics->>'environment.outside.pressure'
                )::FLOAT) AS outsidePressure,
            avg(-- Inside Pressure
                COALESCE(
                    mt.metrics->'pressure'->>'inside',
                    mt.metrics->>(md.configuration->>'insidePressureKey'),
                    mt.metrics->>'environment.inside.pressure'
                )::FLOAT) AS insidePressure,
            avg(-- Battery Charge (State of Charge)
                COALESCE(
                    mt.metrics->'battery'->>'charge',
                    mt.metrics->>(md.configuration->>'stateOfChargeKey'),
                    mt.metrics->>'electrical.batteries.House.capacity.stateOfCharge'
                )::FLOAT) AS batteryCharge,
            avg(-- Battery Voltage
                COALESCE(
                    mt.metrics->'battery'->>'voltage',
                    mt.metrics->>(md.configuration->>'voltageKey'),
                    mt.metrics->>'electrical.batteries.House.voltage'
                )::FLOAT) AS batteryVoltage,
            avg(-- Water Depth
                COALESCE(
                    mt.metrics->'water'->>'depth',
                    mt.metrics->>(md.configuration->>'depthKey'),
                    mt.metrics->>'environment.depth.belowTransducer'
                )::FLOAT) AS depth
        FROM api.metrics mt
        JOIN api.metadata md ON md.vessel_id = mt.vessel_id
        WHERE mt.time > (NOW() AT TIME ZONE 'UTC' - INTERVAL '1 hours' * time_interval::NUMERIC)
        GROUP BY time_bucket
        ORDER BY time_bucket asc
    )
    SELECT jsonb_agg(history_table) INTO history_metrics FROM history_table;
END
$function$
;
-- Description
COMMENT ON FUNCTION api.monitoring_history_fn(in text, out jsonb) IS 'Export metrics from a time period 24h, 48h, 72h, 7d';
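
-- Usage sketch: fetch 48 hours of history averaged in 2-hour buckets. This
-- assumes the caller's session carries the vessel context enforced elsewhere:
-- SELECT api.monitoring_history_fn('48');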

-- DROP FUNCTION public.cron_alerts_fn();
-- Update cron_alerts_fn to check for alerts; it filters out empty strings ("") so they are not included in the result.
CREATE OR REPLACE FUNCTION public.cron_alerts_fn()
  RETURNS void
  LANGUAGE plpgsql
AS $function$
DECLARE
    alert_rec record;
    default_last_metric TIMESTAMPTZ := NOW() - interval '1 day';
    last_metric TIMESTAMPTZ;
    metric_rec record;
    app_settings JSONB;
    user_settings JSONB;
    alerting JSONB;
    _alarms JSONB;
    alarms TEXT;
    alert_default JSONB := '{
        "low_pressure_threshold": 990,
        "high_wind_speed_threshold": 30,
        "low_water_depth_threshold": 1,
        "min_notification_interval": 6,
        "high_pressure_drop_threshold": 12,
        "low_battery_charge_threshold": 90,
        "low_battery_voltage_threshold": 12.5,
        "low_water_temperature_threshold": 10,
        "low_indoor_temperature_threshold": 7,
        "low_outdoor_temperature_threshold": 3
    }';
BEGIN
    -- Check for new event notification pending update
    RAISE NOTICE 'cron_alerts_fn';
    FOR alert_rec in
        SELECT
            a.user_id,a.email,v.vessel_id,
            COALESCE((a.preferences->'alert_last_metric')::TEXT, default_last_metric::TEXT) as last_metric,
            (alert_default || ( -- Filter out empty strings ("") so they are not included in the result.
                SELECT jsonb_object_agg(key, value)
                    FROM jsonb_each(a.preferences->'alerting')
                    WHERE value <> '""'
            )) as alerting,
            (a.preferences->'alarms')::JSONB as alarms,
            m.configuration as config
        FROM auth.accounts a
            LEFT JOIN auth.vessels AS v ON v.owner_email = a.email
            LEFT JOIN api.metadata AS m ON m.vessel_id = v.vessel_id
        WHERE (a.preferences->'alerting'->'enabled')::boolean = True
            AND m.active = True
    LOOP
        RAISE NOTICE '-> cron_alerts_fn for [%]', alert_rec;
        PERFORM set_config('vessel.id', alert_rec.vessel_id, false);
        PERFORM set_config('user.email', alert_rec.email, false);
        --RAISE WARNING 'public.cron_process_alert_rec_fn() scheduler vessel.id %, user.id', current_setting('vessel.id', false), current_setting('user.id', false);
        -- Gather user settings
        user_settings := get_user_settings_from_vesselid_fn(alert_rec.vessel_id::TEXT);
        RAISE NOTICE '-> cron_alerts_fn checking user_settings [%]', user_settings;
        -- Get all metrics since last_metric, averaged in 5-minute buckets
        FOR metric_rec in
            SELECT time_bucket('5 minutes', m.time) AS time_bucket,
                avg(-- Inside Temperature
                    COALESCE(
                        m.metrics->'temperature'->>'inside',
                        m.metrics->>(alert_rec.config->>'insideTemperatureKey'),
                        m.metrics->>'environment.inside.temperature'
                    )::FLOAT) AS intemp,
                avg(-- Wind Speed True
                    COALESCE(
                        m.metrics->'wind'->>'speed',
                        m.metrics->>(alert_rec.config->>'windSpeedKey'),
                        m.metrics->>'environment.wind.speedTrue'
                    )::FLOAT) AS wind,
                avg(-- Water Depth
                    COALESCE(
                        m.metrics->'water'->>'depth',
                        m.metrics->>(alert_rec.config->>'depthKey'),
                        m.metrics->>'environment.depth.belowTransducer'
                    )::FLOAT) AS watdepth,
                avg(-- Outside Temperature
                    COALESCE(
                        m.metrics->'temperature'->>'outside',
                        m.metrics->>(alert_rec.config->>'outsideTemperatureKey'),
                        m.metrics->>'environment.outside.temperature'
                    )::NUMERIC) AS outtemp,
                avg(-- Water Temperature
                    COALESCE(
                        m.metrics->'water'->>'temperature',
                        m.metrics->>(alert_rec.config->>'waterTemperatureKey'),
                        m.metrics->>'environment.water.temperature'
                    )::NUMERIC) AS wattemp,
                avg(-- Outside Pressure
                    COALESCE(
                        m.metrics->'pressure'->>'outside',
                        m.metrics->>(alert_rec.config->>'outsidePressureKey'),
                        m.metrics->>'environment.outside.pressure'
                    )::NUMERIC) AS pressure,
                avg(-- Battery Voltage
                    COALESCE(
                        m.metrics->'battery'->>'voltage',
                        m.metrics->>(alert_rec.config->>'voltageKey'),
                        m.metrics->>'electrical.batteries.House.voltage'
                    )::NUMERIC) AS voltage,
                avg(-- Battery Charge (State of Charge)
                    COALESCE(
                        m.metrics->'battery'->>'charge',
                        m.metrics->>(alert_rec.config->>'stateOfChargeKey'),
                        m.metrics->>'electrical.batteries.House.capacity.stateOfCharge'
                    )::NUMERIC) AS charge
            FROM api.metrics m
            WHERE vessel_id = alert_rec.vessel_id
                AND m.time >= alert_rec.last_metric::TIMESTAMPTZ
            GROUP BY time_bucket
            ORDER BY time_bucket ASC LIMIT 100
        LOOP
            RAISE NOTICE '-> cron_alerts_fn checking metrics [%]', metric_rec;
            RAISE NOTICE '-> cron_alerts_fn checking alerting [%]', alert_rec.alerting;
            --RAISE NOTICE '-> cron_alerts_fn checking debug [%] [%]', kelvinToCel(metric_rec.intemp), (alert_rec.alerting->'low_indoor_temperature_threshold');
            IF metric_rec.intemp IS NOT NULL AND public.kelvintocel(metric_rec.intemp::NUMERIC) < (alert_rec.alerting->'low_indoor_temperature_threshold')::NUMERIC THEN
                RAISE NOTICE '-> cron_alerts_fn checking debug indoor_temp [%]', (alert_rec.alarms->'low_indoor_temperature_threshold'->>'date')::TIMESTAMPTZ;
                RAISE NOTICE '-> cron_alerts_fn checking debug indoor_temp [%]', metric_rec.time_bucket::TIMESTAMPTZ;
                -- Get latest alarms
                SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
                -- Only re-notify once the last alarm falls outside the min_notification_interval time frame
                IF (
                    ((_alarms->'low_indoor_temperature_threshold'->>'date') IS NULL) OR
                    (((_alarms->'low_indoor_temperature_threshold'->>'date')::TIMESTAMPTZ
                        + ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
                        < metric_rec.time_bucket::TIMESTAMPTZ)
                ) THEN
                    -- Add alarm
                    alarms := '{"low_indoor_temperature_threshold": {"value": '|| kelvinToCel(metric_rec.intemp) ||', "date":"' || metric_rec.time_bucket || '"}}';
                    -- Merge alarms
                    SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
                    -- Update alarms for user
                    PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
                    -- Gather user settings
                    user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
                    SELECT user_settings::JSONB || ('{"alert": "low_indoor_temperature_threshold value:'|| kelvinToCel(metric_rec.intemp) ||' date:'|| metric_rec.time_bucket ||' "}'::text)::JSONB into user_settings;
                    -- Send notification
                    PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
                    -- DEBUG
                    RAISE NOTICE '-> cron_alerts_fn checking debug low_indoor_temperature_threshold +interval';
                END IF;
                RAISE NOTICE '-> cron_alerts_fn checking debug low_indoor_temperature_threshold';
            END IF;
            IF metric_rec.outtemp IS NOT NULL AND public.kelvintocel(metric_rec.outtemp::NUMERIC) < (alert_rec.alerting->>'low_outdoor_temperature_threshold')::NUMERIC THEN
                RAISE NOTICE '-> cron_alerts_fn checking debug outdoor_temp [%]', (alert_rec.alarms->'low_outdoor_temperature_threshold'->>'date')::TIMESTAMPTZ;
                RAISE NOTICE '-> cron_alerts_fn checking debug outdoor_temp [%]', metric_rec.time_bucket::TIMESTAMPTZ;
                -- Get latest alarms
                SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
                -- Only re-notify once the last alarm falls outside the min_notification_interval time frame
                IF (
                    ((_alarms->'low_outdoor_temperature_threshold'->>'date') IS NULL) OR
                    (((_alarms->'low_outdoor_temperature_threshold'->>'date')::TIMESTAMPTZ
                        + ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
                        < metric_rec.time_bucket::TIMESTAMPTZ)
                ) THEN
                    -- Add alarm
                    alarms := '{"low_outdoor_temperature_threshold": {"value": '|| kelvinToCel(metric_rec.outtemp) ||', "date":"' || metric_rec.time_bucket || '"}}';
                    -- Merge alarms
                    SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
                    -- Update alarms for user
                    PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
                    -- Gather user settings
                    user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
                    SELECT user_settings::JSONB || ('{"alert": "low_outdoor_temperature_threshold value:'|| kelvinToCel(metric_rec.outtemp) ||' date:'|| metric_rec.time_bucket ||' "}'::text)::JSONB into user_settings;
                    -- Send notification
                    PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
                    -- DEBUG
                    RAISE NOTICE '-> cron_alerts_fn checking debug low_outdoor_temperature_threshold +interval';
                END IF;
                RAISE NOTICE '-> cron_alerts_fn checking debug low_outdoor_temperature_threshold';
            END IF;
            IF metric_rec.wattemp IS NOT NULL AND public.kelvintocel(metric_rec.wattemp::NUMERIC) < (alert_rec.alerting->>'low_water_temperature_threshold')::NUMERIC THEN
                RAISE NOTICE '-> cron_alerts_fn checking debug water_temp [%]', (alert_rec.alarms->'low_water_temperature_threshold'->>'date')::TIMESTAMPTZ;
                RAISE NOTICE '-> cron_alerts_fn checking debug water_temp [%]', metric_rec.time_bucket::TIMESTAMPTZ;
                -- Get latest alarms
                SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
                -- Only re-notify once the last alarm falls outside the min_notification_interval time frame
                IF (
                    ((_alarms->'low_water_temperature_threshold'->>'date') IS NULL) OR
                    (((_alarms->'low_water_temperature_threshold'->>'date')::TIMESTAMPTZ
                        + ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
                        < metric_rec.time_bucket::TIMESTAMPTZ)
                ) THEN
                    -- Add alarm
                    alarms := '{"low_water_temperature_threshold": {"value": '|| kelvinToCel(metric_rec.wattemp) ||', "date":"' || metric_rec.time_bucket || '"}}';
                    -- Merge alarms
                    SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
                    -- Update alarms for user
                    PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
                    -- Gather user settings
                    user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
                    SELECT user_settings::JSONB || ('{"alert": "low_water_temperature_threshold value:'|| kelvinToCel(metric_rec.wattemp) ||' date:'|| metric_rec.time_bucket ||' "}'::text)::JSONB into user_settings;
                    -- Send notification
                    PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
                    -- DEBUG
                    RAISE NOTICE '-> cron_alerts_fn checking debug low_water_temperature_threshold +interval';
                END IF;
                RAISE NOTICE '-> cron_alerts_fn checking debug low_water_temperature_threshold';
            END IF;
            IF metric_rec.watdepth IS NOT NULL AND metric_rec.watdepth::NUMERIC < (alert_rec.alerting->'low_water_depth_threshold')::NUMERIC THEN
                RAISE NOTICE '-> cron_alerts_fn checking debug water_depth [%]', (alert_rec.alarms->'low_water_depth_threshold'->>'date')::TIMESTAMPTZ;
                RAISE NOTICE '-> cron_alerts_fn checking debug water_depth [%]', metric_rec.time_bucket::TIMESTAMPTZ;
                -- Get latest alarms
                SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
                -- Only re-notify once the last alarm falls outside the min_notification_interval time frame
                IF (
                    ((_alarms->'low_water_depth_threshold'->>'date') IS NULL) OR
                    (((_alarms->'low_water_depth_threshold'->>'date')::TIMESTAMPTZ
                        + ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
                        < metric_rec.time_bucket::TIMESTAMPTZ)
                ) THEN
                    -- Add alarm
                    alarms := '{"low_water_depth_threshold": {"value": '|| metric_rec.watdepth ||', "date":"' || metric_rec.time_bucket || '"}}';
                    -- Merge alarms
                    SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
                    -- Update alarms for user
                    PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
                    -- Gather user settings
                    user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
                    SELECT user_settings::JSONB || ('{"alert": "low_water_depth_threshold value:'|| ROUND(metric_rec.watdepth,2) ||' date:'|| metric_rec.time_bucket ||' "}'::text)::JSONB into user_settings;
                    -- Send notification
                    PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
                    -- DEBUG
                    RAISE NOTICE '-> cron_alerts_fn checking debug low_water_depth_threshold +interval';
                END IF;
                RAISE NOTICE '-> cron_alerts_fn checking debug low_water_depth_threshold';
            END IF;
            IF metric_rec.pressure IS NOT NULL AND metric_rec.pressure::NUMERIC < (alert_rec.alerting->'high_pressure_drop_threshold')::NUMERIC THEN
                RAISE NOTICE '-> cron_alerts_fn checking debug pressure [%]', (alert_rec.alarms->'high_pressure_drop_threshold'->>'date')::TIMESTAMPTZ;
                RAISE NOTICE '-> cron_alerts_fn checking debug pressure [%]', metric_rec.time_bucket::TIMESTAMPTZ;
                -- Get latest alarms
                SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
                -- Only re-notify once the last alarm falls outside the min_notification_interval time frame
                IF (
                    ((_alarms->'high_pressure_drop_threshold'->>'date') IS NULL) OR
                    (((_alarms->'high_pressure_drop_threshold'->>'date')::TIMESTAMPTZ
                        + ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
                        < metric_rec.time_bucket::TIMESTAMPTZ)
                ) THEN
                    -- Add alarm
                    alarms := '{"high_pressure_drop_threshold": {"value": '|| metric_rec.pressure ||', "date":"' || metric_rec.time_bucket || '"}}';
                    -- Merge alarms
                    SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
                    -- Update alarms for user
                    PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
                    -- Gather user settings
                    user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
                    SELECT user_settings::JSONB || ('{"alert": "high_pressure_drop_threshold value:'|| ROUND(metric_rec.pressure,2) ||' date:'|| metric_rec.time_bucket ||' "}'::text)::JSONB into user_settings;
                    -- Send notification
                    PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
                    -- DEBUG
                    RAISE NOTICE '-> cron_alerts_fn checking debug high_pressure_drop_threshold +interval';
                END IF;
                RAISE NOTICE '-> cron_alerts_fn checking debug high_pressure_drop_threshold';
            END IF;
            IF metric_rec.wind IS NOT NULL AND metric_rec.wind::NUMERIC > (alert_rec.alerting->'high_wind_speed_threshold')::NUMERIC THEN
                RAISE NOTICE '-> cron_alerts_fn checking debug wind [%]', (alert_rec.alarms->'high_wind_speed_threshold'->>'date')::TIMESTAMPTZ;
                RAISE NOTICE '-> cron_alerts_fn checking debug wind [%]', metric_rec.time_bucket::TIMESTAMPTZ;
                -- Get latest alarms
                SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
                -- Only re-notify once the last alarm falls outside the min_notification_interval time frame
                IF (
                    ((_alarms->'high_wind_speed_threshold'->>'date') IS NULL) OR
                    (((_alarms->'high_wind_speed_threshold'->>'date')::TIMESTAMPTZ
                        + ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
                        < metric_rec.time_bucket::TIMESTAMPTZ)
                ) THEN
                    -- Add alarm
                    alarms := '{"high_wind_speed_threshold": {"value": '|| metric_rec.wind ||', "date":"' || metric_rec.time_bucket || '"}}';
                    -- Merge alarms
                    SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
                    -- Update alarms for user
                    PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
                    -- Gather user settings
                    user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
                    SELECT user_settings::JSONB || ('{"alert": "high_wind_speed_threshold value:'|| ROUND(metric_rec.wind,2) ||' date:'|| metric_rec.time_bucket ||' "}'::text)::JSONB into user_settings;
                    -- Send notification
                    PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
                    -- DEBUG
                    RAISE NOTICE '-> cron_alerts_fn checking debug high_wind_speed_threshold +interval';
                END IF;
                RAISE NOTICE '-> cron_alerts_fn checking debug high_wind_speed_threshold';
            END IF;
            IF metric_rec.voltage IS NOT NULL AND metric_rec.voltage::NUMERIC < (alert_rec.alerting->'low_battery_voltage_threshold')::NUMERIC THEN
                RAISE NOTICE '-> cron_alerts_fn checking debug voltage [%]', (alert_rec.alarms->'low_battery_voltage_threshold'->>'date')::TIMESTAMPTZ;
                RAISE NOTICE '-> cron_alerts_fn checking debug voltage [%]', metric_rec.time_bucket::TIMESTAMPTZ;
                -- Get latest alarms
                SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
                -- Only re-notify once the last alarm falls outside the min_notification_interval time frame
                IF (
                    ((_alarms->'low_battery_voltage_threshold'->>'date') IS NULL) OR
                    (((_alarms->'low_battery_voltage_threshold'->>'date')::TIMESTAMPTZ
                        + ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
                        < metric_rec.time_bucket::TIMESTAMPTZ)
                ) THEN
                    -- Add alarm
                    alarms := '{"low_battery_voltage_threshold": {"value": '|| metric_rec.voltage ||', "date":"' || metric_rec.time_bucket || '"}}';
                    -- Merge alarms
                    SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
                    -- Update alarms for user
                    PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
                    -- Gather user settings
                    user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
                    SELECT user_settings::JSONB || ('{"alert": "low_battery_voltage_threshold value:'|| ROUND(metric_rec.voltage,2) ||' date:'|| metric_rec.time_bucket ||' "}'::text)::JSONB into user_settings;
                    -- Send notification
                    PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
                    -- DEBUG
                    RAISE NOTICE '-> cron_alerts_fn checking debug low_battery_voltage_threshold +interval';
                END IF;
                RAISE NOTICE '-> cron_alerts_fn checking debug low_battery_voltage_threshold';
            END IF;
            IF metric_rec.charge IS NOT NULL AND (metric_rec.charge::NUMERIC*100) < (alert_rec.alerting->'low_battery_charge_threshold')::NUMERIC THEN
                RAISE NOTICE '-> cron_alerts_fn checking debug [%]', (alert_rec.alarms->'low_battery_charge_threshold'->>'date')::TIMESTAMPTZ;
                RAISE NOTICE '-> cron_alerts_fn checking debug [%]', metric_rec.time_bucket::TIMESTAMPTZ;
                -- Get latest alarms
                SELECT preferences->'alarms' INTO _alarms FROM auth.accounts a WHERE a.email = current_setting('user.email', false);
                -- Only re-notify once the last alarm falls outside the min_notification_interval time frame
                IF (
                    ((_alarms->'low_battery_charge_threshold'->>'date') IS NULL) OR
                    (((_alarms->'low_battery_charge_threshold'->>'date')::TIMESTAMPTZ
                        + ((interval '1 hour') * (alert_rec.alerting->>'min_notification_interval')::NUMERIC))
                        < metric_rec.time_bucket::TIMESTAMPTZ)
                ) THEN
                    -- Add alarm
                    alarms := '{"low_battery_charge_threshold": {"value": '|| (metric_rec.charge*100) ||', "date":"' || metric_rec.time_bucket || '"}}';
                    -- Merge alarms
                    SELECT public.jsonb_recursive_merge(_alarms::jsonb, alarms::jsonb) into _alarms;
                    -- Update alarms for user
                    PERFORM api.update_user_preferences_fn('{alarms}'::TEXT, _alarms::TEXT);
                    -- Gather user settings
                    user_settings := get_user_settings_from_vesselid_fn(current_setting('vessel.id', false));
                    SELECT user_settings::JSONB || ('{"alert": "low_battery_charge_threshold value:'|| ROUND(metric_rec.charge::NUMERIC*100,2) ||' date:'|| metric_rec.time_bucket ||' "}'::text)::JSONB into user_settings;
                    -- Send notification
                    PERFORM send_notification_fn('alert'::TEXT, user_settings::JSONB);
                    -- DEBUG
                    RAISE NOTICE '-> cron_alerts_fn checking debug low_battery_charge_threshold +interval';
                END IF;
                RAISE NOTICE '-> cron_alerts_fn checking debug low_battery_charge_threshold';
            END IF;
            -- Record last metrics time
            SELECT metric_rec.time_bucket INTO last_metric;
        END LOOP;
        PERFORM api.update_user_preferences_fn('{alert_last_metric}'::TEXT, last_metric::TEXT);
    END LOOP;
END;
$function$
;
-- Description
COMMENT ON FUNCTION public.cron_alerts_fn() IS 'init by pg_cron to check for alerts';
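
-- Scheduling sketch (job name and interval are assumptions; the actual pg_cron
-- registration lives in the init scripts):
-- SELECT cron.schedule('cron_alerts', '*/5 * * * *', 'SELECT public.cron_alerts_fn()');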

-- DROP FUNCTION public.process_pre_logbook_fn(int4);
-- Update process_pre_logbook_fn to detect and avoid logbooks with more than 1000NM in less than 15h
CREATE OR REPLACE FUNCTION public.process_pre_logbook_fn(_id integer)
  RETURNS void
  LANGUAGE plpgsql
AS $function$
DECLARE
    logbook_rec record;
    avg_rec record;
    geo_rec record;
    _invalid_time boolean;
    _invalid_interval boolean;
    _invalid_distance boolean;
    _invalid_ratio boolean;
    count_metric numeric;
    previous_stays_id numeric;
    current_stays_departed text;
    current_stays_id numeric;
    current_stays_active boolean;
    timebucket boolean;
BEGIN
    -- Validate input
    IF _id IS NULL OR _id < 1 THEN
        RAISE WARNING '-> process_pre_logbook_fn invalid input %', _id;
        RETURN;
    END IF;
    -- Get the logbook record, ensuring all necessary fields exist
    SELECT * INTO logbook_rec
        FROM api.logbook
        WHERE active IS false
            AND id = _id
            AND _from_lng IS NOT NULL
            AND _from_lat IS NOT NULL
            AND _to_lng IS NOT NULL
            AND _to_lat IS NOT NULL;
    -- Ensure the query is successful
    IF logbook_rec.vessel_id IS NULL THEN
        RAISE WARNING '-> process_pre_logbook_fn invalid logbook %', _id;
        RETURN;
    END IF;

    PERFORM set_config('vessel.id', logbook_rec.vessel_id, false);
    --RAISE WARNING 'public.process_logbook_queue_fn() scheduler vessel.id %, user.id', current_setting('vessel.id', false), current_setting('user.id', false);

    -- Check if all metrics are within 50 meters based on geo location
    count_metric := logbook_metrics_dwithin_fn(logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT, logbook_rec._from_lng::NUMERIC, logbook_rec._from_lat::NUMERIC);
    RAISE NOTICE '-> process_pre_logbook_fn logbook_metrics_dwithin_fn count:[%]', count_metric;

    -- Calculate logbook data average and geo
    -- Update logbook entry with the latest metric data and calculate data
    avg_rec := logbook_update_avg_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
    geo_rec := logbook_update_geom_distance_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);

    -- Avoid/ignore/delete logbook stationary movement or time sync issue
    -- Check time start vs end
    SELECT logbook_rec._to_time::TIMESTAMPTZ < logbook_rec._from_time::TIMESTAMPTZ INTO _invalid_time;
    -- Is the distance less than 0.010 NM?
    SELECT geo_rec._track_distance < 0.010 INTO _invalid_distance;
    -- Is the duration less than 100 sec?
    SELECT (logbook_rec._to_time::TIMESTAMPTZ - logbook_rec._from_time::TIMESTAMPTZ) < (100::text||' secs')::interval INTO _invalid_interval;
    -- If we have more than 800NM in less than 15h
    IF geo_rec._track_distance >= 800 AND (logbook_rec._to_time::TIMESTAMPTZ - logbook_rec._from_time::TIMESTAMPTZ) < (15::text||' hours')::interval THEN
        _invalid_distance := True;
        _invalid_interval := True;
        --RAISE NOTICE '-> process_pre_logbook_fn invalid logbook data id [%], _invalid_distance [%], _invalid_interval [%]', logbook_rec.id, _invalid_distance, _invalid_interval;
    END IF;
    -- If we have fewer than 20 metrics, less than 0.5NM, or an average speed under 0.5kts,
    -- check whether the within-zone metrics represent 60% or more of the total entries
    IF count_metric::NUMERIC <= 20 OR geo_rec._track_distance < 0.5 OR avg_rec.avg_speed < 0.5 THEN
        SELECT (count_metric::NUMERIC / avg_rec.count_metric::NUMERIC) >= 0.60 INTO _invalid_ratio;
    END IF;
    -- If stationary, fix data for metrics, logbook, stays, moorage
    IF _invalid_time IS True OR _invalid_distance IS True
        OR _invalid_interval IS True OR count_metric = avg_rec.count_metric
        OR _invalid_ratio IS True
        OR avg_rec.count_metric <= 3 THEN
        RAISE NOTICE '-> process_pre_logbook_fn invalid logbook data id [%], _invalid_time [%], _invalid_distance [%], _invalid_interval [%], count_metric_in_zone [%], count_metric_log [%], _invalid_ratio [%]',
            logbook_rec.id, _invalid_time, _invalid_distance, _invalid_interval, count_metric, avg_rec.count_metric, _invalid_ratio;
        -- Update metrics status to moored
        UPDATE api.metrics
            SET status = 'moored'
            WHERE time >= logbook_rec._from_time::TIMESTAMPTZ
                AND time <= logbook_rec._to_time::TIMESTAMPTZ
                AND vessel_id = current_setting('vessel.id', false);
        -- Update logbook
        UPDATE api.logbook
            SET notes = 'invalid logbook data, stationary need to fix metrics?'
            WHERE vessel_id = current_setting('vessel.id', false)
                AND id = logbook_rec.id;
        -- Get related stays
        SELECT id,departed,active INTO current_stays_id,current_stays_departed,current_stays_active
            FROM api.stays s
            WHERE s.vessel_id = current_setting('vessel.id', false)
                AND s.arrived = logbook_rec._to_time::TIMESTAMPTZ;
        -- Update related stays
        UPDATE api.stays s
            SET notes = 'invalid stays data, stationary need to fix metrics?'
            WHERE vessel_id = current_setting('vessel.id', false)
                AND arrived = logbook_rec._to_time::TIMESTAMPTZ;
        -- Find previous stays
        SELECT id INTO previous_stays_id
            FROM api.stays s
            WHERE s.vessel_id = current_setting('vessel.id', false)
                AND s.arrived < logbook_rec._to_time::TIMESTAMPTZ
            ORDER BY s.arrived DESC LIMIT 1;
        -- Update previous stays with the departed time from current stays
        -- and set the active state from current stays
        UPDATE api.stays
            SET departed = current_stays_departed::TIMESTAMPTZ,
                active = current_stays_active
            WHERE vessel_id = current_setting('vessel.id', false)
                AND id = previous_stays_id;
        -- Clean up, remove invalid logbook and stay entry
        DELETE FROM api.logbook WHERE id = logbook_rec.id;
        RAISE WARNING '-> process_pre_logbook_fn delete invalid logbook [%]', logbook_rec.id;
        DELETE FROM api.stays WHERE id = current_stays_id;
        RAISE WARNING '-> process_pre_logbook_fn delete invalid stays [%]', current_stays_id;
        RETURN;
    END IF;

    --IF (logbook_rec.notes IS NULL) THEN -- run one time only
    --    -- If duration is over 24h or number of entries is over 400, check for stays and potential multiple logs with stationary location
    --    IF (logbook_rec._to_time::TIMESTAMPTZ - logbook_rec._from_time::TIMESTAMPTZ) > INTERVAL '24 hours'
    --        OR avg_rec.count_metric > 400 THEN
    --        timebucket := public.logbook_metrics_timebucket_fn('15 minutes'::TEXT, logbook_rec.id, logbook_rec._from_time::TIMESTAMPTZ, logbook_rec._to_time::TIMESTAMPTZ);
    --        -- If true, exit the current process as the current logbook needs to be re-processed.
    --        IF timebucket IS True THEN
    --            RETURN;
    --        END IF;
    --    ELSE
    --        timebucket := public.logbook_metrics_timebucket_fn('5 minutes'::TEXT, logbook_rec.id, logbook_rec._from_time::TIMESTAMPTZ, logbook_rec._to_time::TIMESTAMPTZ);
    --        -- If true, exit the current process as the current logbook needs to be re-processed.
    --        IF timebucket IS True THEN
    --            RETURN;
    --        END IF;
    --    END IF;
    --END IF;

    -- Add logbook entry to process queue for later processing
    INSERT INTO process_queue (channel, payload, stored, ref_id)
        VALUES ('new_logbook', logbook_rec.id, NOW(), current_setting('vessel.id', true));

END;
$function$
;

COMMENT ON FUNCTION public.process_pre_logbook_fn(int4) IS 'Detect/Avoid/ignore/delete logbook stationary movement or time sync issue';
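
-- Usage sketch (hypothetical logbook id): normally invoked by the process queue
-- worker for a 'pre_logbook' entry, but it can also be called directly:
-- SELECT public.process_pre_logbook_fn(101);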

-- Revoke security definer
--ALTER FUNCTION api.update_logbook_observations_fn(_id integer, observations text) SECURITY INVOKER;
--ALTER FUNCTION api.delete_logbook_fn(_id integer) SECURITY INVOKER;
ALTER FUNCTION api.merge_logbook_fn(_id integer, _id integer) SECURITY INVOKER;

GRANT DELETE ON TABLE public.process_queue TO user_role;
GRANT SELECT ON ALL TABLES IN SCHEMA api TO user_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO user_role;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO user_role;

GRANT UPDATE (status) ON api.metrics TO user_role;
GRANT UPDATE ON api.logbook TO user_role;

DROP POLICY IF EXISTS api_user_role ON api.metrics;
CREATE POLICY api_user_role ON api.metrics TO user_role
    USING (vessel_id = current_setting('vessel.id', false))
    WITH CHECK (vessel_id = current_setting('vessel.id', false));
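
-- Verification sketch (hypothetical vessel id): with the policy above, a
-- user_role session should only see and write rows for its own vessel:
-- SET ROLE user_role;
-- SELECT set_config('vessel.id', 'hypothetical-vessel-id', false);
-- SELECT count(*) FROM api.metrics; -- counts only rows matching vessel.id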

-- Update version
UPDATE public.app_settings
    SET value='0.9.3'
    WHERE "name"='app.version';
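
-- Quick check: confirm the migration applied as expected:
-- SELECT value FROM public.app_settings WHERE "name" = 'app.version'; -- expect '0.9.3'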

--\c postgres
--UPDATE cron.job SET username = 'scheduler'; -- Update to scheduler
--UPDATE cron.job SET username = current_user WHERE jobname = 'cron_vacuum'; -- Update to superuser for vacuum permissions
--UPDATE cron.job SET username = current_user WHERE jobname = 'job_run_details_cleanup';
@@ -19,6 +19,8 @@ INSERT INTO app_settings (name, value) VALUES
    ('app.telegram_bot_token', '${PGSAIL_TELEGRAM_BOT_TOKEN}'),
    ('app.grafana_admin_uri', '${PGSAIL_GRAFANA_ADMIN_URI}'),
    ('app.keycloak_uri', '${PGSAIL_KEYCLOAK_URI}'),
    ('app.gis_url', '${PGSAIL_QGIS_URL}'),
    ('app.videos_url', '${PGSAIL_VIDEOS_URL}'),
    ('app.url', '${PGSAIL_APP_URL}'),
    ('app.version', '${PGSAIL_VERSION}');
-- Update comment with version
@@ -27,4 +29,8 @@ COMMENT ON DATABASE signalk IS 'PostgSail version ${PGSAIL_VERSION}';
ALTER ROLE authenticator WITH PASSWORD '${PGSAIL_AUTHENTICATOR_PASSWORD}';
ALTER ROLE grafana WITH PASSWORD '${PGSAIL_GRAFANA_PASSWORD}';
ALTER ROLE grafana_auth WITH PASSWORD '${PGSAIL_GRAFANA_AUTH_PASSWORD}';
ALTER ROLE qgis_role WITH PASSWORD '${PGSAIL_GRAFANA_AUTH_PASSWORD}';
ALTER ROLE maplapse_role WITH PASSWORD '${PGSAIL_GRAFANA_AUTH_PASSWORD}';
END

curl -s -XPOST -Hx-pgsail:${PGSAIL_VERSION} https://api.openplotter.cloud/rpc/telemetry_fn

@@ -1 +1 @@
0.7.1
0.9.3

File diff suppressed because one or more lines are too long
@@ -1,5 +1,5 @@
# PostgSail Unit Tests
The Unit Tests allow to automatically validate api workflow.
The Unit Tests allow to automatically validate SQL and API workflow.

## A global overview
Based on `mocha` & `psql`
@@ -13,6 +13,6 @@ $ bash tests.sh

## docker
```bash
$ docker-compose up -d db && sleep 15 && docker-compose up -d api && sleep 5
$ docker-compose -f docker-compose.dev.yml -f docker-compose.yml up tests
$ docker compose up -d db && sleep 15 && docker compose up -d api && sleep 5
$ docker compose -f docker-compose.dev.yml -f docker-compose.yml up tests
```
127  tests/index.js
@@ -27,6 +27,7 @@ const metrics_aava = require('./metrics_sample_aava.json');

const fs = require('fs');

let configtime = new Date().toISOString();

// CNAMEs Array
[
@@ -39,7 +40,7 @@ const fs = require('fs');
    vessel_metadata: {
        name: "kapla",
        mmsi: "123456789",
        client_id: "vessels.urn:mrn:signalk:uuid:5b4f7543-7153-4840-b139-761310b242fd",
        //client_id: "vessels.urn:mrn:signalk:uuid:5b4f7543-7153-4840-b139-761310b242fd",
        length: "12",
        beam: "10",
        height: "24",
@@ -59,8 +60,8 @@ const fs = require('fs');
    user_views: [
        // not processed yet, { url: '/stays_view', res_body_length: 1},
        // not processed yet, { url: '/moorages_view', res_body_length: 1},
        { url: '/logs_view', res_body_length: 0},
        { url: '/log_view', res_body_length: 2},
        { url: '/logs_view', res_body_length: 0}, // not processed yet so empty
        { url: '/log_view', res_body_length: 0}, // not processed yet so empty
        //{ url: '/stats_view', res_body_length: 1},
        { url: '/vessels_view', res_body_length: 1},
    ],
@@ -89,7 +90,7 @@ const fs = require('fs');
        */
    ],
    user_fn: [
        { url: '/rpc/timelapse_fn',
        { url: '/rpc/export_logbooks_geojson_point_trips_fn',
            payload: {
                start_log: 1
            },
@@ -97,7 +98,7 @@ const fs = require('fs');
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_geojson_fn',
        { url: '/rpc/export_logbook_geojson_trip_fn',
            payload: {
                _id: 1
            },
@@ -105,7 +106,15 @@ const fs = require('fs');
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_gpx_fn',
        { url: '/rpc/export_logbook_gpx_trip_fn',
            payload: {
                _id: 1
            },
            res: {
                obj_name: null
            }
        },
        { url: '/rpc/export_logbook_kml_trip_fn',
            payload: {
                _id: 1
            },
@@ -169,6 +178,18 @@ const fs = require('fs');
                obj_name: 'settings'
            }
        }
    ],
    config_fn: [
        { url: '/metadata?select=configuration',
            res: {
                obj_name: 'configuration'
            }
        },
        { url: `/metadata?select=configuration&configuration->>update_at=gt.${configtime}`,
            res: {
                obj_name: 'settings'
            }
        },
    ]
},
{ cname: process.env.PGSAIL_API_URI, name: "PostgSail unit test, aava",
@@ -178,8 +199,8 @@ const fs = require('fs');
    preferences: { key: '{email_notifications}', value: false }, /* Disable email_notifications */
    vessel_metadata: {
        name: "aava",
        mmsi: "787654321",
        client_id: "vessels.urn:mrn:imo:mmsi:787654321",
        mmsi: "n/a",
        //client_id: "vessels.urn:mrn:imo:mmsi:787654321",
        length: "12",
        beam: "10",
        height: "24",
@@ -198,8 +219,8 @@ const fs = require('fs');
    user_views: [
        // not processed yet, { url: '/stays_view', res_body_length: 1},
        // not processed yet, { url: '/moorages_view', res_body_length: 1},
        { url: '/logs_view', res_body_length: 0},
        { url: '/log_view', res_body_length: 1},
        { url: '/logs_view', res_body_length: 0}, // not processed yet so empty
        { url: '/log_view', res_body_length: 0}, // not processed yet so empty
        //{ url: '/stats_view', res_body_length: 1},
        { url: '/vessels_view', res_body_length: 1},
    ],
@@ -228,15 +249,15 @@ const fs = require('fs');
        */
    ],
    user_fn: [
        { url: '/rpc/timelapse_fn',
        { url: '/rpc/export_logbooks_geojson_point_trips_fn',
            payload: {
                start_log: 3
                start_log: 1
            },
            res: {
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_geojson_fn',
        { url: '/rpc/export_logbook_geojson_trip_fn',
            payload: {
                _id: 3
            },
@@ -244,7 +265,15 @@ const fs = require('fs');
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_gpx_fn',
        { url: '/rpc/export_logbook_gpx_trip_fn',
            payload: {
                _id: 3
            },
            res: {
                obj_name: null
            }
        },
        { url: '/rpc/export_logbook_kml_trip_fn',
            payload: {
                _id: 3
            },
@@ -302,6 +331,30 @@ const fs = require('fs');
                obj_name: 'settings'
            }
        },
    ],
    config_fn: [
        { url: '/metadata?select=configuration',
            res: {
                obj_name: 'configuration'
            }
        },
        { url: `/metadata?select=configuration&configuration->>update_at=gt.${configtime}`,
            res: {
                obj_name: 'settings'
            }
        },
    ],
    meta_ext_fn: [
        { url: '/metadata_ext?',
            res: {
                obj_name: 'configuration'
            }
        },
        { url: `/metadata_ext?`,
            res: {
                obj_name: 'image'
            }
        },
    ]
}
].forEach( function(test){
@@ -580,7 +633,7 @@ request.set('User-Agent', 'PostgSail unit tests');
    .set('Authorization', `Bearer ${vessel_jwt}`)
    .set('Accept', 'application/json')
    .set('Content-Type', 'application/json')
    .set('Prefer', 'return=headers-only,resolution=merge-duplicates')
    .set('Prefer', 'missing=default,return=headers-only,resolution=merge-duplicates')
    .end(function(err,res){
        res.status.should.equal(201);
        //console.log(res.header);
@@ -595,14 +648,15 @@ request.set('User-Agent', 'PostgSail unit tests');
describe("Vessel POST metrics, JWT vessel_role", function(){

    let data = [];
    //console.log(vessel_metrics['metrics'][0]);
    //console.log(test.vessel_metrics['metrics'][0]);
    let i;
    for (i = 0; i < test.vessel_metrics['metrics'].length; i++) {
        data[i] = test.vessel_metrics['metrics'][i];
        // Override time, -2h to allow new data later without delay.
        data[i]['time'] = moment.utc().subtract(2, 'hours').add(i, 'minutes').format();
        data[i]['time'] = moment.utc().subtract(1, 'day').add(i, 'minutes').format();
        // Override client_id
        data[i]['client_id'] = test.vessel_metadata.client_id;
        //data[i]['client_id'] = test.vessel_metadata.client_id;
        data[i]['client_id'] = null;
    }
    // The last entries are invalid and should be ignored.
    // - Invalid status
@@ -611,6 +665,11 @@ request.set('User-Agent', 'PostgSail unit tests');
    // Force last valid entry to be back in time from the previous one; it should be ignored silently
    data.at(-1).time = moment.utc(data.at(-3).time).subtract(1, 'minutes').format();
    //console.log(data[0]);
    // Force the -2 entry to be in the future (add 1 year); it should be ignored silently
    data.splice(i-2, 1, data.at(-2))
    data.at(-3).time = moment.utc(data.at(-3).time).add(1, 'year').format();
    //console.log(data.at(-2));
    //console.log(data.at(-1));

    it('/metrics?select=time', function(done) {
        request = supertest.agent(test.cname);
@@ -629,6 +688,7 @@ request.set('User-Agent', 'PostgSail unit tests');
        res.header['content-type'].should.match(new RegExp('json','g'));
        res.header['server'].should.match(new RegExp('postgrest','g'));
        should.exist(res.body);
        //console.log(res.body);
        res.body.length.should.match(test.vessel_metrics['metrics'].length-3);
        done(err);
    });
@@ -821,6 +881,37 @@ request.set('User-Agent', 'PostgSail unit tests');
    }); // Function OTP endpoint
    */

    describe("Function Metadata configuration endpoint, JWT vessel_role", function(){

        let otp = null;
        test.config_fn.forEach(function (subtest) {
            it(`${subtest.url}`, function(done) {
                try {
                    //console.log(`${subtest.url} ${subtest.res}`);
                    // Reset agent so we do not save cookies
                    request = supertest.agent(test.cname);
                    request
                        .get(subtest.url)
                        .set('Authorization', `Bearer ${vessel_jwt}`)
                        .set('Accept', 'application/json')
                        .end(function(err,res){
                            res.status.should.equal(200);
                            should.exist(res.header['content-type']);
                            should.exist(res.header['server']);
                            res.header['content-type'].should.match(new RegExp('json','g'));
                            res.header['server'].should.match(new RegExp('postgrest','g'));
                            console.log(res.body);
                            should.exist(res.body);
                            done(err);
                        });
                }
                catch (error) {
                    done();
                }
            });
        });
    }); // Function metadata configuration endpoint

}); // OpenAPI description

}); // CNAMEs Array

@@ -6,7 +6,7 @@
 *
 * npm install supertest should mocha mochawesome moment
 * alias mocha="./node_modules/mocha/bin/_mocha"
 * mocha index.js --reporter mochawesome --reporter-options reportDir=/mnt/postgsail/,reportFilename=report_api.html
 * mocha index2.js --reporter mochawesome --reporter-options reportDir=/mnt/postgsail/,reportFilename=report_api.html
 *
 */

@@ -28,14 +28,15 @@ const metrics_simulator = require('./metrics_sample_simulator.json');
    vessel_metadata: {
        name: "aava",
        mmsi: "787654321",
        client_id: "vessels.urn:mrn:imo:mmsi:787654321",
        //client_id: "vessels.urn:mrn:imo:mmsi:787654321",
        length: "12",
        beam: "10",
        height: "24",
        ship_type: "37",
        plugin_version: "1.0.2",
        signalk_version: "1.20.0",
        time: moment().subtract(69, 'minutes').format()
        time: moment().subtract(69, 'minutes').format(),
        available_keys: [],
    },
    vessel_metrics: metrics_simulator,
    user_tables: [
@@ -48,7 +49,7 @@ const metrics_simulator = require('./metrics_sample_simulator.json');
        // not processed yet, { url: '/stays_view', res_body_length: 1},
        // not processed yet, { url: '/moorages_view', res_body_length: 1},
        { url: '/logs_view', res_body_length: 1},
        { url: '/log_view', res_body_length: 2},
        { url: '/log_view', res_body_length: 0}, // not processed yet so empty
        //{ url: '/stats_view', res_body_length: 1},
        { url: '/vessels_view', res_body_length: 1},
    ],
@@ -77,7 +78,7 @@ const metrics_simulator = require('./metrics_sample_simulator.json');
        */
    ],
    user_fn: [
        { url: '/rpc/timelapse_fn',
        { url: '/rpc/export_logbooks_geojson_point_trips_fn',
            payload: {
                start_log: 4
            },
@@ -85,7 +86,7 @@ const metrics_simulator = require('./metrics_sample_simulator.json');
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_geojson_fn',
        { url: '/rpc/export_logbook_geojson_trip_fn',
            payload: {
                _id: 4
            },
@@ -93,7 +94,15 @@ const metrics_simulator = require('./metrics_sample_simulator.json');
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_gpx_fn',
        { url: '/rpc/export_logbook_gpx_trip_fn',
            payload: {
                _id: 4
            },
            res: {
                obj_name: null
            }
        },
        { url: '/rpc/export_logbook_kml_trip_fn',
            payload: {
                _id: 4
            },
@@ -404,18 +413,19 @@ request.set('User-Agent', 'PostgSail unit tests');

describe("Vessel POST metadata, JWT vessel_role", function(){

    it('/metadata', function(done) {
    it('/metadata?on_conflict=vessel_id', function(done) {
        request = supertest.agent(test.cname);
        request
            .post('/metadata')
            .post('/metadata?on_conflict=vessel_id')
            .send(test.vessel_metadata)
            .set('Authorization', `Bearer ${vessel_jwt}`)
            .set('Accept', 'application/json')
            .set('Content-Type', 'application/json')
            .set('Prefer', 'return=headers-only')
            .set('Prefer', 'missing=default,return=headers-only,resolution=merge-duplicates')
            .end(function(err,res){
                res.status.should.equal(201);
                //console.log(res.body);
                //console.log(res.header);
                res.status.should.equal(200);
                should.exist(res.header['server']);
                res.header['server'].should.match(new RegExp('postgrest','g'));
                done(err);
@@ -432,11 +442,12 @@ request.set('User-Agent', 'PostgSail unit tests');
    for (i = 0; i < test.vessel_metrics['metrics'].length; i++) {
        data[i] = test.vessel_metrics['metrics'][i];
        // Override time, +1h because the previous sample includes 47 entries.
        data[i]['time'] = moment().add(1, 'hour').add(i, 'minutes').format();
        data[i]['time'] = moment.utc().subtract(2, 'hours').add(i, 'minutes').format();
        // Override client_id
        data[i]['client_id'] = test.vessel_metadata.client_id;
        //data[i]['client_id'] = test.vessel_metadata.client_id;
        data[i]['client_id'] = null;
    }
    console.log(data[0]);
    //console.log(data[0]);

    it('/metrics?select=time', function(done) {
        request = supertest.agent(test.cname);

@@ -6,7 +6,7 @@
 *
 * npm install supertest should mocha mochawesome moment
 * alias mocha="./node_modules/mocha/bin/_mocha"
 * mocha index.js --reporter mochawesome --reporter-options reportDir=/mnt/postgsail/,reportFilename=report_api.html
 * mocha index3.js --reporter mochawesome --reporter-options reportDir=/mnt/postgsail/,reportFilename=report_api.html
 *
 */

@@ -31,7 +31,7 @@ var moment = require('moment');
    vessel_metadata: {
        name: "kapla",
        mmsi: "123456789",
        client_id: "vessels.urn:mrn:imo:mmsi:123456789",
        //client_id: "vessels.urn:mrn:imo:mmsi:123456789",
        length: "12",
        beam: "10",
        height: "24",
@@ -49,7 +49,7 @@ var moment = require('moment');
    ],
    user_views: [
        { url: '/stays_view', res_body_length: 2},
        { url: '/moorages_view', res_body_length: 2},
        { url: '/moorages_view', res_body_length: 3},
        { url: '/logs_view', res_body_length: 2},
        { url: '/log_view', res_body_length: 2},
        //{ url: '/stats_view', res_body_length: 1},
@@ -79,7 +79,8 @@ var moment = require('moment');
        }
    ],
    user_fn: [
        { url: '/rpc/timelapse_fn',
        { //url: '/rpc/timelapse_fn',
            url: '/rpc/export_logbooks_geojson_linestring_trips_fn',
            payload: {
                start_log: 2
            },
@@ -87,7 +88,17 @@ var moment = require('moment');
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_geojson_fn',
        { //url: '/rpc/timelapse_fn',
            url: '/rpc/export_logbooks_geojson_point_trips_fn',
            payload: {
                start_log: 2
            },
            res: {
                obj_name: 'geojson'
            }
        },
        { //url: '/rpc/export_logbook_geojson_fn',
            url: '/rpc/export_logbook_geojson_trip_fn',
            payload: {
                _id: 2
            },
@@ -95,7 +106,8 @@ var moment = require('moment');
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_gpx_fn',
        { //url: '/rpc/export_logbook_gpx_fn',
            url: '/rpc/export_logbook_kml_trip_fn',
            payload: {
                _id: 2
            },
@@ -103,7 +115,8 @@ var moment = require('moment');
                obj_name: null
            }
        },
        { url: '/rpc/export_logbook_kml_fn',
        { //url: '/rpc/export_logbook_kml_fn',
            url: '/rpc/export_logbook_kml_trip_fn',
            payload: {
                _id: 2
            },
@@ -123,6 +136,12 @@ var moment = require('moment');
                obj_name: null
            }
        },
        { url: '/rpc/export_moorages_kml_fn',
            payload: {},
            res: {
                obj_name: null
            }
        },
        { url: '/rpc/find_log_from_moorage_fn',
            payload: {
                _id: 2
@@ -230,7 +249,7 @@ var moment = require('moment');
    vessel_metadata: {
        name: "aava",
        mmsi: "787654321",
        client_id: "vessels.urn:mrn:imo:mmsi:787654321",
        //client_id: "vessels.urn:mrn:imo:mmsi:787654321",
        length: "12",
        beam: "10",
        height: "24",
@@ -247,7 +266,7 @@ var moment = require('moment');
    ],
    user_views: [
        { url: '/stays_view', res_body_length: 2},
        { url: '/moorages_view', res_body_length: 2},
        { url: '/moorages_view', res_body_length: 4},
        { url: '/logs_view', res_body_length: 2},
        { url: '/log_view', res_body_length: 2},
        //{ url: '/stats_view', res_body_length: 1},
@@ -277,7 +296,8 @@ var moment = require('moment');
        }
    ],
    user_fn: [
        { url: '/rpc/timelapse_fn',
        { //url: '/rpc/timelapse_fn',
            url: '/rpc/export_logbooks_geojson_linestring_trips_fn',
            payload: {
                start_log: 4
            },
@@ -285,7 +305,17 @@ var moment = require('moment');
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_geojson_fn',
        { //url: '/rpc/timelapse_fn',
            url: '/rpc/export_logbooks_geojson_point_trips_fn',
            payload: {
                start_log: 4
            },
            res: {
                obj_name: 'geojson'
            }
        },
        { //url: '/rpc/export_logbook_geojson_fn',
            url: '/rpc/export_logbook_geojson_trip_fn',
            payload: {
                _id: 4
            },
@@ -293,7 +323,8 @@ var moment = require('moment');
                obj_name: 'geojson'
            }
        },
        { url: '/rpc/export_logbook_gpx_fn',
        { //url: '/rpc/export_logbook_gpx_fn',
            url: '/rpc/export_logbook_gpx_trip_fn',
            payload: {
                _id: 4
            },
@@ -301,7 +332,8 @@ var moment = require('moment');
                obj_name: null
            }
        },
        { url: '/rpc/export_logbook_kml_fn',
        { //url: '/rpc/export_logbook_kml_fn',
            url: '/rpc/export_logbook_kml_trip_fn',
            payload: {
                _id: 4
            },
@@ -309,7 +341,8 @@ var moment = require('moment');
                obj_name: null
            }
        },
        { url: '/rpc/export_logbooks_gpx_fn',
        { //url: '/rpc/export_logbooks_gpx_fn',
            url: '/rpc/export_logbooks_kml_trips_fn',
            payload: {
                start_log: 3,
                end_log: 4
@@ -318,7 +351,8 @@ var moment = require('moment');
                obj_name: null
            }
        },
        { url: '/rpc/export_logbooks_kml_fn',
        { //url: '/rpc/export_logbooks_kml_fn',
            url: '/rpc/export_logbooks_kml_trips_fn',
            payload: {
                start_log: 3,
                end_log: 4
@@ -339,6 +373,12 @@ var moment = require('moment');
                obj_name: null
            }
        },
        { url: '/rpc/export_moorages_kml_fn',
            payload: {},
            res: {
                obj_name: null
            }
        },
        { url: '/rpc/find_log_from_moorage_fn',
            payload: {
                _id: 4

@@ -163,6 +163,10 @@ var moment = require("moment");
url: "/rpc/update_user_preferences_fn",
payload: { key: "{public_monitoring}", value: true },
},
{
url: "/rpc/update_user_preferences_fn",
payload: { key: "{public_timelapse}", value: true },
},
],
},
{
@@ -684,8 +688,8 @@ var moment = require("moment");
should.exist(res.body);
let event = res.body;
//console.log(event);
// minimum events log for kapla & aava 13 + 4 email_otp = 17
event.length.should.be.aboveOrEqual(13);
// minimum events log per users 6 + 4 logs + OTP one per login
event.length.should.be.aboveOrEqual(11);
done(err);
});
});

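As a reference for the preference updates in the hunk above, here is a minimal sketch of issuing one such call with supertest (it assumes an authenticated agent named request, as elsewhere in these tests; the "{public_timelapse}" key appears to follow jsonb_set()-style path notation):

// Sketch only: mirrors the payloads above; "request" is assumed to be
// an authenticated supertest agent.
request
  .post("/rpc/update_user_preferences_fn")
  .send({ key: "{public_timelapse}", value: true })
  .set("Accept", "application/json")
  .end(function (err, res) {
    res.status.should.equal(200);
  });
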
@@ -45,13 +45,22 @@ var moment = require("moment");
res: {},
},
timelapse: {
url: "/rpc/timelapse_fn",
//url: "/rpc/timelapse_fn",
url: '/rpc/export_logbooks_geojson_linestring_trips_fn',
header: { name: "x-is-public", value: btoa("kapla,public_timelapse,1") },
payload: null,
res: {},
},
timelapse_full: {
url: "/rpc/timelapse_fn",
//url: "/rpc/timelapse_fn",
url: '/rpc/export_logbooks_geojson_linestring_trips_fn',
header: { name: "x-is-public", value: btoa("kapla,public_timelapse,0") },
payload: null,
res: {},
},
replay_full: {
//url: "/rpc/timelapse_fn",
url: '/rpc/export_logbooks_geojson_point_trips_fn',
header: { name: "x-is-public", value: btoa("kapla,public_timelapse,0") },
payload: null,
res: {},
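The x-is-public header in the hunk above carries a base64-encoded "vessel,setting,id" triplet; that layout is inferred from the btoa() calls in these tests. A minimal sketch of building the same value under Node.js (the helper name is hypothetical):

// Hypothetical helper mirroring btoa("kapla,public_timelapse,1") above;
// Buffer stands in for the browser's btoa() when running under Node.js.
function makeIsPublicHeader(vessel, setting, logId) {
  return Buffer.from(`${vessel},${setting},${logId}`).toString("base64");
}
// Usage: request.set("x-is-public", makeIsPublicHeader("kapla", "public_timelapse", 1));
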
@@ -69,7 +78,7 @@ var moment = require("moment");
res: {},
},
export_gpx: {
url: "/rpc/export_logbook_gpx_fn",
url: "/rpc/export_logbook_gpx_trip_fn",
header: { name: "x-is-public", value: btoa("kapla,public_logs,0") },
payload: null,
res: {},
@@ -97,13 +106,21 @@ var moment = require("moment");
res: {},
},
timelapse: {
url: "/rpc/timelapse_fn",
//url: "/rpc/timelapse_fn",
url: '/rpc/export_logbooks_geojson_linestring_trips_fn',
header: { name: "x-is-public", value: btoa("aava,public_timelapse,3") },
payload: null,
res: {},
},
timelapse_full: {
url: "/rpc/timelapse_fn",
//url: "/rpc/timelapse_fn",
url: '/rpc/export_logbooks_geojson_linestring_trips_fn',
header: { name: "x-is-public", value: btoa("aava,public_timelapse,0") },
payload: null,
res: {},
},
replay_full: {
url: '/rpc/export_logbooks_geojson_point_trips_fn',
header: { name: "x-is-public", value: btoa("aava,public_timelapse,0") },
payload: null,
res: {},
@@ -121,7 +138,7 @@ var moment = require("moment");
res: {},
},
export_gpx: {
url: "/rpc/export_logbook_gpx_fn",
url: "/rpc/export_logbook_gpx_trip_fn",
header: { name: "x-is-public", value: btoa("aava,public_logs,0") },
payload: null,
res: {},
@@ -134,7 +151,7 @@ var moment = require("moment");
request.set("User-Agent", "PostgSail unit tests");

describe("With no JWT as api_anonymous", function () {
it("/logs_view, api_anonymous no jwt token", function (done) {
it("/logs_view, api_anonymous no jwt token, x-is-public header", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
@@ -142,7 +159,7 @@ var moment = require("moment");
.set(test.logs.header.name, test.logs.header.value)
.set("Accept", "application/json")
.end(function (err, res) {
res.status.should.equal(404);
res.status.should.equal(200);
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
@@ -150,7 +167,7 @@ var moment = require("moment");
done(err);
});
});
it("/log_view, api_anonymous no jwt token", function (done) {
it("/log_view, api_anonymous no jwt token, x-is-public header", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
@@ -166,7 +183,7 @@ var moment = require("moment");
done(err);
});
});
it("/monitoring_view, api_anonymous no jwt token", function (done) {
it("/monitoring_view, api_anonymous no jwt token, x-is-public header", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
@@ -183,7 +200,7 @@ var moment = require("moment");
done(err);
});
});
it("/rpc/timelapse_fn, api_anonymous no jwt token", function (done) {
it("/rpc/export_logbooks_geojson_linestring_trips_fn, api_anonymous no jwt token, x-is-public header", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
@@ -192,11 +209,44 @@ var moment = require("moment");
.set("Accept", "application/json")
.end(function (err, res) {
console.log(res.text);
res.status.should.equal(404); // return 404 as it is not enable in user settings.
res.status.should.equal(200); // returns 200 once public_timelapse is enabled in user settings.
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
should.exist(res.body.geojson);
/*
if (res.body.geojson.features == null) { // aava
//res.body.geojson.features.should.not.be.ok();
done(err);
}
res.body.geojson.features.length.should.be.equal(4);
*/
done(err);
});
});
it("/rpc/export_logbooks_geojson_point_trips_fn, api_anonymous no jwt token, x-is-public header", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
.post(test.replay_full.url)
.set(test.replay_full.header.name, test.replay_full.header.value)
.set("Accept", "application/json")
.end(function (err, res) {
console.log(res.text);
res.status.should.equal(200); // returns 200 once public_timelapse is enabled in user settings.
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
should.exist(res.body.geojson);
/*
if (res.body.geojson.features == null) { // aava
//res.body.geojson.features.should.not.be.ok();
done(err);
}
res.body.geojson.features.length.should.be.equal(53);
*/
done(err);
});
});

tests/index6.js (new file, 226 lines)
@@ -0,0 +1,226 @@
"use strict";
/*
* Unit test #6
* Public/Anonymous access
*
* process.env.PGSAIL_API_URI = from inside the docker
*
* npm install supertest should mocha mochawesome moment
* alias mocha="./node_modules/mocha/bin/_mocha"
* mocha index6.js --reporter mochawesome --reporter-options reportDir=/mnt/postgsail/,reportFilename=report_api.html
*
*/

const sleep = (ms) => new Promise((r) => setTimeout(r, ms));

const supertest = require("supertest");
// Deprecated
const should = require("should");
//const chai = require("chai");
//const should = chai.should();
let request = null;
var moment = require("moment");

// Users Array
[
{
cname: process.env.PGSAIL_API_URI,
name: "PostgSail unit test anonymous, no x-is-public header",
moorages: {
url: "/moorages_view",
payload: null,
res: {},
},
stays: {
url: "/stays_view",
payload: null,
res: {},
},
logs: {
url: "/logs_view",
payload: null,
res: {},
},
log: {
url: "/log_view?id=eq.1",
payload: null,
res: {},
},
monitoring: {
url: "/monitoring_view",
payload: null,
res: {},
},
timelapse: {
url: "/rpc/export_logbooks_geojson_linestring_trips_fn",
payload: null,
res: {},
},
timelapse_full: {
url: "/rpc/export_logbooks_geojson_linestring_trips_fn",
payload: null,
res: {},
},
replay_full: {
url: "/rpc/export_logbooks_geojson_point_trips_fn",
payload: null,
res: {},
},
stats_logs: {
url: "/rpc/stats_logs_fn",
payload: null,
res: {},
},
stats_stays: {
url: "/rpc/stats_stay_fn",
payload: null,
res: {},
},
export_gpx: {
url: "/rpc/export_logbook_gpx_trip_fn",
payload: null,
res: {},
},
},
].forEach(function (test) {
//console.log(`${test.cname}`);
describe(`${test.name}`, function () {
request = supertest.agent(test.cname);
request.set("User-Agent", "PostgSail unit tests");

describe("With no JWT as api_anonymous, no x-is-public", function () {
it("/stays_view, api_anonymous no jwt token", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
.get(test.stays.url)
.set("Accept", "application/json")
.end(function (err, res) {
res.status.should.equal(200);
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
res.body.length.should.be.equal(0);
done(err);
});
});
it("/moorages_view, api_anonymous no jwt token", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
.get(test.moorages.url)
.set("Accept", "application/json")
.end(function (err, res) {
res.status.should.equal(200);
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
res.body.length.should.be.equal(0);
done(err);
});
});
it("/logs_view, api_anonymous no jwt token", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
.get(test.logs.url)
.set("Accept", "application/json")
.end(function (err, res) {
res.status.should.equal(200);
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
res.body.length.should.be.equal(0);
done(err);
});
});
it("/log_view, api_anonymous no jwt token", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
.get(test.log.url)
.set("Accept", "application/json")
.end(function (err, res) {
res.status.should.equal(200);
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
res.body.length.should.be.equal(0);
done(err);
});
});
it("/monitoring_view, api_anonymous no jwt token", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
.get(test.monitoring.url)
.set("Accept", "application/json")
.end(function (err, res) {
console.log(res.text);
res.status.should.equal(200);
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
res.body.length.should.be.equal(0);
done(err);
});
});
it("/rpc/export_logbooks_geojson_linestring_trips_fn, api_anonymous no jwt token", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
.post(test.timelapse.url)
.set("Accept", "application/json")
.end(function (err, res) {
console.log(res.text);
res.status.should.equal(200);
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
should.exist(res.body.geojson);
done(err);
});
});
it("/rpc/export_logbooks_geojson_point_trips_fn, api_anonymous no jwt token", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
.post(test.replay_full.url)
.set("Accept", "application/json")
.end(function (err, res) {
console.log(res.body);
res.status.should.equal(200);
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
should.exist(res.body.geojson);
done(err);
});
});
it("/rpc/export_logbook_gpx_trip_fn, api_anonymous no jwt token", function (done) {
// Reset agent so we do not save cookies
request = supertest.agent(test.cname);
request
.post(test.export_gpx.url)
.send({_id: 1})
.set("Accept", "application/json")
.end(function (err, res) {
console.log(res.text)
res.status.should.equal(401);
should.exist(res.header["content-type"]);
should.exist(res.header["server"]);
res.header["content-type"].should.match(new RegExp("json", "g"));
res.header["server"].should.match(new RegExp("postgrest", "g"));
done(err);
});
});
}); // With no JWT as api_anonymous, no x-is-public
}); // describe test.name
}); // Users Array
@@ -180,6 +180,18 @@
"status" : "sailing",
"metrics" : {"navigation.log": 17441766, "navigation.trip.log": 80747, "navigation.headingTrue": 3.5972, "navigation.gnss.satellites": 10, "environment.depth.belowKeel": 20.948999999999998, "navigation.magneticVariation": 0.1414, "navigation.speedThroughWater": 3.47, "environment.water.temperature": 313.15, "electrical.batteries.1.current": 192.4, "electrical.batteries.1.voltage": 14.56, "navigation.gnss.antennaAltitude": 0.39, "network.n2k.ngt-1.130356.errorID": 0, "network.n2k.ngt-1.130356.modelID": 14, "environment.depth.belowTransducer": 20.95, "electrical.batteries.1.temperature": 299.82, "environment.depth.transducerToKeel": -0.001, "navigation.gnss.horizontalDilution": 0.8, "network.n2k.ngt-1.130356.ch1.rxLoad": 4, "network.n2k.ngt-1.130356.ch1.txLoad": 0, "network.n2k.ngt-1.130356.ch2.rxLoad": 0, "network.n2k.ngt-1.130356.ch2.txLoad": 64, "network.n2k.ngt-1.130356.ch1.deleted": 0, "network.n2k.ngt-1.130356.ch2.deleted": 0, "network.n2k.ngt-1.130356.ch2Bandwidth": 3, "network.n2k.ngt-1.130356.ch1.bandwidth": 2, "network.n2k.ngt-1.130356.ch1.rxDropped": 0, "network.n2k.ngt-1.130356.ch2.rxDropped": 0, "network.n2k.ngt-1.130356.ch1.rxFiltered": 0, "network.n2k.ngt-1.130356.ch2.rxFiltered": 0, "network.n2k.ngt-1.130356.ch1.rxBandwidth": 4, "network.n2k.ngt-1.130356.ch1.txBandwidth": 0, "network.n2k.ngt-1.130356.ch2.rxBandwidth": 0, "network.n2k.ngt-1.130356.ch2.txBandwidth": 10, "network.n2k.ngt-1.130356.uniChannelCount": 2, "network.n2k.ngt-1.130356.indiChannelCount": 2, "network.n2k.ngt-1.130356.ch1.BufferLoading": 0, "network.n2k.ngt-1.130356.ch2.bufferLoading": 0, "network.n2k.ngt-1.130356.ch1.PointerLoading": 0, "network.n2k.ngt-1.130356.ch2.pointerLoading": 0}
},
{
"time" : "2022-07-31T11:41:28.561Z",
"client_id" : "vessels.urn:mrn:imo:mmsi:987654321",
"latitude" : 59.7163052,
"longitude" : 25.7325741,
"speedoverground" : 9.5,
"courseovergroundtrue" : 198.8,
"windspeedapparent" : 18.0,
"anglespeedapparent" : 41.0,
"status" : "sailing",
"metrics" : {"navigation.log": 17441766, "navigation.trip.log": 80747, "navigation.headingTrue": 3.5972, "navigation.gnss.satellites": 10, "environment.depth.belowKeel": 20.948999999999998, "navigation.magneticVariation": 0.1414, "navigation.speedThroughWater": 3.47, "environment.water.temperature": 313.15, "electrical.batteries.1.current": 192.4, "electrical.batteries.1.voltage": 14.56, "navigation.gnss.antennaAltitude": 0.39, "network.n2k.ngt-1.130356.errorID": 0, "network.n2k.ngt-1.130356.modelID": 14, "environment.depth.belowTransducer": 20.95, "electrical.batteries.1.temperature": 299.82, "environment.depth.transducerToKeel": -0.001, "navigation.gnss.horizontalDilution": 0.8, "network.n2k.ngt-1.130356.ch1.rxLoad": 4, "network.n2k.ngt-1.130356.ch1.txLoad": 0, "network.n2k.ngt-1.130356.ch2.rxLoad": 0, "network.n2k.ngt-1.130356.ch2.txLoad": 64, "network.n2k.ngt-1.130356.ch1.deleted": 0, "network.n2k.ngt-1.130356.ch2.deleted": 0, "network.n2k.ngt-1.130356.ch2Bandwidth": 3, "network.n2k.ngt-1.130356.ch1.bandwidth": 2, "network.n2k.ngt-1.130356.ch1.rxDropped": 0, "network.n2k.ngt-1.130356.ch2.rxDropped": 0, "network.n2k.ngt-1.130356.ch1.rxFiltered": 0, "network.n2k.ngt-1.130356.ch2.rxFiltered": 0, "network.n2k.ngt-1.130356.ch1.rxBandwidth": 4, "network.n2k.ngt-1.130356.ch1.txBandwidth": 0, "network.n2k.ngt-1.130356.ch2.rxBandwidth": 0, "network.n2k.ngt-1.130356.ch2.txBandwidth": 10, "network.n2k.ngt-1.130356.uniChannelCount": 2, "network.n2k.ngt-1.130356.indiChannelCount": 2, "network.n2k.ngt-1.130356.ch1.BufferLoading": 0, "network.n2k.ngt-1.130356.ch2.bufferLoading": 0, "network.n2k.ngt-1.130356.ch1.PointerLoading": 0, "network.n2k.ngt-1.130356.ch2.pointerLoading": 0}
},
{
"time" : "2022-07-31T11:42:28.569Z",
"client_id" : "vessels.urn:mrn:imo:mmsi:987654321",

@@ -18,3 +18,10 @@ SELECT api.ispublic_fn('kapla', 'public_logs', 1);
SELECT api.ispublic_fn('kapla', 'public_logs', 3);
SELECT api.ispublic_fn('kapla', 'public_monitoring');
SELECT api.ispublic_fn('kapla', 'public_timelapse');

SELECT api.ispublic_fn('aava', 'public_test');
SELECT api.ispublic_fn('aava', 'public_logs_list');
SELECT api.ispublic_fn('aava', 'public_logs', 1);
SELECT api.ispublic_fn('aava', 'public_logs', 3);
SELECT api.ispublic_fn('aava', 'public_monitoring');
SELECT api.ispublic_fn('aava', 'public_timelapse');

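The expected output in the next hunk uses psql's expanded display. A minimal sketch of reproducing one of these checks interactively:

-- "\x on" switches psql to expanded display, which prints the
-- "-[ RECORD n ]" separators shown in the expected output below.
\x on
SELECT api.ispublic_fn('kapla', 'public_timelapse');
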
@@ -21,6 +21,24 @@ ispublic_fn | f
-[ RECORD 1 ]--
ispublic_fn | t

-[ RECORD 1 ]--
ispublic_fn | t

-[ RECORD 1 ]--
ispublic_fn | f

-[ RECORD 1 ]--
ispublic_fn | f

-[ RECORD 1 ]--
ispublic_fn | f

-[ RECORD 1 ]--
ispublic_fn | t

-[ RECORD 1 ]--
ispublic_fn | t

-[ RECORD 1 ]--
ispublic_fn | f

@@ -17,14 +17,19 @@ SELECT set_config('vessel.id', :'vessel_id', false) IS NOT NULL as vessel_id;

\echo 'Insert new api.logbook for badges'
INSERT INTO api.logbook
(id, active, "name", "_from", "_from_lat", "_from_lng", "_to", "_to_lat", "_to_lng", track_geom, track_geog, track_geojson, "_from_time", "_to_time", distance, duration, avg_speed, max_speed, max_wind_speed, notes, vessel_id)
(id, active, "name", "_from", "_from_lat", "_from_lng", "_to", "_to_lat", "_to_lng", trip, "_from_time", "_to_time", distance, duration, avg_speed, max_speed, max_wind_speed, notes, vessel_id)
OVERRIDING SYSTEM VALUE VALUES
(nextval('api.logbook_id_seq'), false, 'Tropics Zone', NULL, NULL, NULL, NULL, NULL, NULL, 'SRID=4326;LINESTRING (-63.151124640791096 14.01074681627324, -77.0912026418618 12.870995731013664)'::public.geometry, NULL, NULL, NOW(), NOW(), 123, NULL, NULL, NULL, NULL, NULL, current_setting('vessel.id', false)),
(nextval('api.logbook_id_seq'), false, 'Alaska Zone', NULL, NULL, NULL, NULL, NULL, NULL, 'SRID=4326;LINESTRING (-143.5773697471158 59.4404631255976, -152.35402122385003 56.58243132943173)'::public.geometry, NULL, NULL, NOW(), NOW(), 1234, NULL, NULL, NULL, NULL, NULL, current_setting('vessel.id', false));
(nextval('api.logbook_id_seq'), false, 'Tropics Zone', NULL, NULL, NULL, NULL, NULL, NULL, 'SRID=4326;[Point(-63.151124640791096 14.01074681627324)@2025-01-01, Point(-77.0912026418618 12.870995731013664)@2025-01-02]'::public.tgeogpoint, NOW(), NOW(), 123, NULL, NULL, NULL, NULL, NULL, current_setting('vessel.id', false)),
(nextval('api.logbook_id_seq'), false, 'Alaska Zone', NULL, NULL, NULL, NULL, NULL, NULL, 'SRID=4326;[Point(-143.5773697471158 59.4404631255976)@2025-01-01, Point(-152.35402122385003 56.58243132943173)@2025-01-02]'::public.tgeogpoint, NOW(), NOW(), 1234, NULL, NULL, NULL, NULL, NULL, current_setting('vessel.id', false));

-- Transform static geometry LINESTRING to mobilitydb
-- 'SRID=4326;LINESTRING (-63.151124640791096 14.01074681627324, -77.0912026418618 12.870995731013664)'::public.geometry
-- 'SRID=4326;LINESTRING (-143.5773697471158 59.4404631255976, -152.35402122385003 56.58243132943173)'::public.geometry
--SELECT ST_AsGeoJSON('SRID=4326;LINESTRING (-63.151124640791096 14.01074681627324, -77.0912026418618 12.870995731013664)'::public.geometry);
--SELECT ST_AsGeoJSON(trajectory('SRID=4326;[Point(-63.151124640791096 14.01074681627324)@2025-01-01, Point(-77.0912026418618 12.870995731013664)@2025-01-02]'::public.tgeogpoint));
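The two commented ST_AsGeoJSON calls above compare the legacy LINESTRING with the new MobilityDB temporal literal. A sketch of the same equivalence check in one statement (assuming PostGIS and MobilityDB are loaded, as elsewhere in this test):

-- trajectory() collapses the temporal point back to a plain geography;
-- cast it to geometry and compare against the legacy LINESTRING.
SELECT ST_Equals(
    trajectory('SRID=4326;[Point(-63.151124640791096 14.01074681627324)@2025-01-01, Point(-77.0912026418618 12.870995731013664)@2025-01-02]'::public.tgeogpoint)::geometry,
    'SRID=4326;LINESTRING (-63.151124640791096 14.01074681627324, -77.0912026418618 12.870995731013664)'::public.geometry
);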

\echo 'Set config'
SELECT set_config('user.email', 'demo+kapla@openplotter.cloud', false);
--SELECT set_config('vessel.client_id', 'vessels.urn:mrn:imo:mmsi:123456789', false);

\echo 'Process badge'
SELECT badges_logbook_fn(5,NOW()::TEXT);
@@ -32,11 +37,10 @@ SELECT badges_logbook_fn(6,NOW()::TEXT);
SELECT badges_geom_fn(5,NOW()::TEXT);
SELECT badges_geom_fn(6,NOW()::TEXT);

\echo 'Check badges for user'
\echo 'Check badges for all users'
SELECT jsonb_object_keys ( a.preferences->'badges' ) FROM auth.accounts a;

\echo 'Check details from vessel_id kapla'
--SELECT get_user_settings_from_vesselid_fn('vessels.urn:mrn:imo:mmsi:123456789'::TEXT);
SELECT
json_build_object(
'boat', v.name,
@@ -53,10 +57,10 @@ SELECT

\echo 'Insert new api.moorages for badges'
INSERT INTO api.moorages
(id,"name",country,stay_code,stay_duration,reference_count,latitude,longitude,geog,home_flag,notes,vessel_id)
(id,"name",country,stay_code,latitude,longitude,geog,home_flag,notes,vessel_id)
OVERRIDING SYSTEM VALUE VALUES
(8,'Badge Mooring Pro',NULL,3,'11 days 00:39:56.418',1,NULL,NULL,NULL,false,'Badge Mooring Pro',current_setting('vessel.id', false)),
(9,'Badge Anchormaster',NULL,2,'26 days 00:49:56.418',1,NULL,NULL,NULL,false,'Badge Anchormaster',current_setting('vessel.id', false));
(8,'Badge Mooring Pro',NULL,3,NULL,NULL,NULL,false,'Badge Mooring Pro',current_setting('vessel.id', false)),
(9,'Badge Anchormaster',NULL,2,NULL,NULL,NULL,false,'Badge Anchormaster',current_setting('vessel.id', false));

\echo 'Set config'
SELECT set_config('user.email', 'demo+aava@openplotter.cloud', false);
@@ -68,6 +72,9 @@ SELECT set_config('vessel.id', :'vessel_id', false) IS NOT NULL as vessel_id;
\echo 'Process badge'
SELECT badges_moorages_fn();

\echo 'Check badges for all users'
SELECT jsonb_object_keys ( a.preferences->'badges' ) FROM auth.accounts a;

\echo 'Check details from vessel_id aava'
--SELECT get_user_settings_from_vesselid_fn('vessels.urn:mrn:imo:mmsi:787654321'::TEXT);
SELECT

@@ -27,7 +27,7 @@ badges_geom_fn |
-[ RECORD 1 ]--+-
badges_geom_fn |

Check badges for user
Check badges for all users
-[ RECORD 1 ]-----+------------------
jsonb_object_keys | Helmsman
-[ RECORD 2 ]-----+------------------
@@ -76,6 +76,38 @@ Process badge
-[ RECORD 1 ]------+-
badges_moorages_fn |

Check badges for all users
-[ RECORD 1 ]-----+------------------
jsonb_object_keys | Helmsman
-[ RECORD 2 ]-----+------------------
jsonb_object_keys | Wake Maker
-[ RECORD 3 ]-----+------------------
jsonb_object_keys | Balearic Sea
-[ RECORD 4 ]-----+------------------
jsonb_object_keys | Stormtrooper
-[ RECORD 5 ]-----+------------------
jsonb_object_keys | Gulf of Finland
-[ RECORD 6 ]-----+------------------
jsonb_object_keys | Helmsman
-[ RECORD 7 ]-----+------------------
jsonb_object_keys | Wake Maker
-[ RECORD 8 ]-----+------------------
jsonb_object_keys | Club Alaska
-[ RECORD 9 ]-----+------------------
jsonb_object_keys | Stormtrooper
-[ RECORD 10 ]----+------------------
jsonb_object_keys | Captain Award
-[ RECORD 11 ]----+------------------
jsonb_object_keys | Caribbean Sea
-[ RECORD 12 ]----+------------------
jsonb_object_keys | Gulf of Alaska
-[ RECORD 13 ]----+------------------
jsonb_object_keys | Gulf of Finland
-[ RECORD 14 ]----+------------------
jsonb_object_keys | Navigator Award
-[ RECORD 15 ]----+------------------
jsonb_object_keys | Tropical Traveler

Check details from vessel_id aava
-[ RECORD 1 ]-+--------------------------------------------------------------
user_settings | {"boat" : "aava", "recipient" : "first_aava", "email" : "demo+aava@openplotter.cloud", "pushover_key" : null}

@@ -21,11 +21,15 @@ SELECT v.vessel_id as "vessel_id" FROM auth.vessels v WHERE v.owner_email = 'dem
--\echo :"vessel_id"
SELECT set_config('vessel.id', :'vessel_id', false) IS NOT NULL as vessel_id;

-- user_role
SET ROLE user_role;

-- Test logbook for user
\echo 'logbook'
SELECT count(*) FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);
\echo 'logbook'
SELECT name,_from_time IS NOT NULL AS _from_time,_to_time IS NOT NULL AS _to_time, track_geojson IS NOT NULL AS track_geojson, track_geom, distance,duration,avg_speed,max_speed,max_wind_speed,notes,extra FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);
--SELECT name,_from_time IS NOT NULL AS _from_time,_to_time IS NOT NULL AS _to_time, track_geojson IS NOT NULL AS track_geojson, trajectory(trip)::geometry as track_geom, distance,duration,round(avg_speed::NUMERIC,6),max_speed,max_wind_speed,notes,extra FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);
SELECT name,_from_time IS NOT NULL AS _from_time,_to_time IS NOT NULL AS _to_time, api.export_logbook_geojson_trip_fn(id) IS NOT NULL AS track_geojson, trajectory(trip)::geometry as track_geom, distance,duration,round(avg_speed::NUMERIC,6),max_speed,max_wind_speed,notes,extra FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);

-- Test stays for user
\echo 'stays'
@@ -37,7 +41,7 @@ SELECT active,name IS NOT NULL AS name,geog,stay_code FROM api.stays WHERE vesse
\echo 'eventlogs_view'
SELECT count(*) from api.eventlogs_view;

-- Test event logs view for user
-- Test stats logs view for user
\echo 'stats_logs_fn'
SELECT api.stats_logs_fn(null, null) INTO stats_jsonb;
SELECT stats_logs_fn->'name' AS name,
@@ -64,11 +68,33 @@ SELECT extra FROM api.logbook l WHERE id = 1 AND vessel_id = current_setting('ve
SELECT api.update_logbook_observations_fn(1, '{"observations":{"cloudCoverage":1}}'::TEXT);
SELECT extra FROM api.logbook l WHERE id = 1 AND vessel_id = current_setting('vessel.id', false);

\echo 'add tags to logbook'
SELECT extra FROM api.logbook l WHERE id = 1 AND vessel_id = current_setting('vessel.id', false);
SELECT api.update_logbook_observations_fn(1, '{"tags": ["tag_name"]}'::TEXT);
SELECT extra FROM api.logbook l WHERE id = 1 AND vessel_id = current_setting('vessel.id', false);

\echo 'Check logbook geojson LineString properties'
WITH logbook_tbl AS (
SELECT api.logbook_update_geojson_trip_fn(id) AS geojson
FROM api.logbook WHERE id = 1 AND vessel_id = current_setting('vessel.id', false)
)
SELECT jsonb_object_keys(jsonb_path_query(geojson, '$.features[0].properties'))
FROM logbook_tbl;
\echo 'Check logbook geojson Point properties'
WITH logbook_tbl AS (
SELECT api.logbook_update_geojson_trip_fn(id) AS geojson
FROM api.logbook WHERE id = 1 AND vessel_id = current_setting('vessel.id', false)
)
SELECT jsonb_object_keys(jsonb_path_query(geojson, '$.features[1].properties'))
FROM logbook_tbl;

-- Check export
--\echo 'check logbook export fn'
\echo 'Check logbook export fn'
--SELECT api.export_logbook_geojson_fn(1);
--SELECT api.export_logbook_gpx_fn(1);
--SELECT api.export_logbook_kml_fn(1);
SELECT api.export_logbook_gpx_trip_fn(1) IS NOT NULL AS gpx_trip;
SELECT api.export_logbook_kml_trip_fn(1) IS NOT NULL AS kml_trip;

-- Check history
--\echo 'monitoring history fn'

@@ -11,37 +11,38 @@ user_id | t
-[ RECORD 1 ]
vessel_id | t

SET
logbook
-[ RECORD 1 ]
count | 2

logbook
-[ RECORD 1 ]--+---------------------------------------------------------------
-[ RECORD 1 ]--+---------------------------------------------------------------
name | Pojoviken to Norra hamnen
_from_time | t
_to_time | t
track_geojson | t
track_geom | 0102000020E61000001C000000B0DEBBE0E68737404DA938FBF0094E40B0DEBBE0E68737404DA938FBF0094E4020D26F5F0786374030BB270F0B094E400C6E7ED60F843740AA60545227084E40D60FC48C03823740593CE27D42074E407B39D9F322803740984C158C4A064E4091ED7C3F357E3740898BB63D54054E40A8A1208B477C37404BA3DC9059044E404C5CB4EDA17A3740C4F856115B034E40A9A44E4013793740D8F0F44A59024E40E4839ECDAA773740211FF46C56014E405408D147067637408229F03B73004E40787AA52C43743740F90FE9B7AFFF4D40F8098D4D18723740C217265305FF4D4084E82303537037409A2D464AA0FE4D4022474DCE636F37402912396A72FE4D408351499D806E374088CFB02B40FE4D4076711B0DE06D3740B356C7040FFE4D404EAC66B0BC6E374058A835CD3BFE4D40D7A3703D0A6F3740D3E10EC15EFE4D4087602F277B6E3740A779C7293AFE4D4087602F277B6E3740A779C7293AFE4D402063EE5A426E3740B5A679C729FE4D40381DEE10EC6D37409ECA7C1A0AFE4D40E2C46A06CB6B37400A43F7BF36FD4D4075931804566E3740320BDAD125FD4D409A2D464AA06E37404A5658830AFD4D40029A081B9E6E37404A5658830AFD4D40
track_geom | 0102000020E61000001A000000B0DEBBE0E68737404DA938FBF0094E4020D26F5F0786374030BB270F0B094E400C6E7ED60F843740AA60545227084E40D60FC48C03823740593CE27D42074E407B39D9F322803740984C158C4A064E4091ED7C3F357E3740898BB63D54054E40A8A1208B477C37404BA3DC9059044E404C5CB4EDA17A3740C4F856115B034E40A9A44E4013793740D8F0F44A59024E40E4839ECDAA773740211FF46C56014E405408D147067637408229F03B73004E40787AA52C43743740F90FE9B7AFFF4D40F8098D4D18723740C217265305FF4D4084E82303537037409A2D464AA0FE4D4022474DCE636F37402912396A72FE4D408351499D806E374088CFB02B40FE4D4076711B0DE06D3740B356C7040FFE4D404EAC66B0BC6E374058A835CD3BFE4D40D7A3703D0A6F3740D3E10EC15EFE4D4087602F277B6E3740A779C7293AFE4D402063EE5A426E3740B5A679C729FE4D40381DEE10EC6D37409ECA7C1A0AFE4D40E2C46A06CB6B37400A43F7BF36FD4D4075931804566E3740320BDAD125FD4D409A2D464AA06E37404A5658830AFD4D40029A081B9E6E37404A5658830AFD4D40
distance | 7.6447
duration | PT27M
avg_speed | 3.6357142857142852
round | 3.635714
max_speed | 6.1
max_wind_speed | 22.1
notes |
extra | {"metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": -1}}
-[ RECORD 2 ]--+---------------------------------------------------------------
extra | {"metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": -1}, "avg_wind_speed": 14.549999999999999}
-[ RECORD 2 ]--+---------------------------------------------------------------
name | Norra hamnen to Ekenäs
_from_time | t
_to_time | t
track_geojson | t
track_geom | 0102000020E610000015000000029A081B9E6E37404A5658830AFD4D40029A081B9E6E37404A5658830AFD4D404806A6C0EF6C3740DA1B7C6132FD4D40FE65F7E461693740226C787AA5FC4D407DD3E10EC1663740B29DEFA7C6FB4D40898BB63D5465374068479724BCFA4D409A5271F6E1633740B6847CD0B3F94D40431CEBE236623740E9263108ACF84D402C6519E2585F37407E678EBFC7F74D4096218E75715B374027C5B45C23F74D402AA913D044583740968DE1C46AF64D405AF5B9DA8A5537407BEF829B9FF54D407449C2ABD253374086C954C1A8F44D407D1A0AB278543740F2B0506B9AF34D409D11A5BDC15737406688635DDCF24D4061C3D32B655937402CAF6F3ADCF14D408988888888583740B3319C58CDF04D4021FAC8C0145837408C94405DB7EF4D40B8F9593F105B37403DC0804BEDEE4D40DE4C5FE2A25D3740AE47E17A14EE4D40DE4C5FE2A25D3740AE47E17A14EE4D40
track_geom | 0102000020E610000013000000029A081B9E6E37404A5658830AFD4D404806A6C0EF6C3740DA1B7C6132FD4D40FE65F7E461693740226C787AA5FC4D407DD3E10EC1663740B29DEFA7C6FB4D40898BB63D5465374068479724BCFA4D409A5271F6E1633740B6847CD0B3F94D40431CEBE236623740E9263108ACF84D402C6519E2585F37407E678EBFC7F74D4096218E75715B374027C5B45C23F74D402AA913D044583740968DE1C46AF64D405AF5B9DA8A5537407BEF829B9FF54D407449C2ABD253374086C954C1A8F44D407D1A0AB278543740F2B0506B9AF34D409D11A5BDC15737406688635DDCF24D4061C3D32B655937402CAF6F3ADCF14D408988888888583740B3319C58CDF04D4021FAC8C0145837408C94405DB7EF4D40B8F9593F105B37403DC0804BEDEE4D40DE4C5FE2A25D3740AE47E17A14EE4D40
distance | 8.8968
duration | PT20M
avg_speed | 5.4523809523809526
round | 5.452381
max_speed | 6.5
max_wind_speed | 37.2
notes |
extra | {"metrics": {"propulsion.main.runTime": "PT11S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": -1}}
extra | {"metrics": {"propulsion.main.runTime": "PT11S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": -1}, "avg_wind_speed": 10.476190476190478}

stays
-[ RECORD 1 ]
@@ -70,19 +71,19 @@ count | 12

stats_logs_fn
SELECT 1
-[ RECORD 1 ]+----------
-[ RECORD 1 ]+--------
name | "kapla"
count | 4
max_speed | 7.1
count | 2
max_speed | 6.5
max_distance | 8.8968
max_duration | "PT1H11M"
?column? | 3
?column? | 30.1154
?column? | "PT2H43M"
?column? | 44.2
max_duration | "PT27M"
?column? | 2
?column? | 16.5415
?column? | "PT47M"
?column? | 37.2
?column? | 2
?column? | 1
?column? | 2
?column? | 4
?column? | 4
first_date | t
last_date | t

@@ -91,12 +92,83 @@ DROP TABLE
stats_logs_fn |

update_logbook_observations_fn
-[ RECORD 1 ]----------------------------------------------------------------
extra | {"metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": -1}}
-[ RECORD 1 ]----------------------------------------------------------------
extra | {"metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": -1}, "avg_wind_speed": 14.549999999999999}

-[ RECORD 1 ]------------------+--
update_logbook_observations_fn | t

-[ RECORD 1 ]----------------------------------------------------------------
extra | {"metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": 1}}
-[ RECORD 1 ]----------------------------------------------------------------
extra | {"metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": 1}, "avg_wind_speed": 14.549999999999999}

add tags to logbook
-[ RECORD 1 ]----------------------------------------------------------------
extra | {"metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": 1}, "avg_wind_speed": 14.549999999999999}

-[ RECORD 1 ]------------------+--
update_logbook_observations_fn | t

-[ RECORD 1 ]----------------------------------------------------------------
extra | {"tags": ["tag_name"], "metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": 1}, "avg_wind_speed": 14.549999999999999}

Check logbook geojson LineString properties
-[ RECORD 1 ]-----+-----------------
jsonb_object_keys | id
-[ RECORD 2 ]-----+-----------------
jsonb_object_keys | _to
-[ RECORD 3 ]-----+-----------------
jsonb_object_keys | name
-[ RECORD 4 ]-----+-----------------
jsonb_object_keys | _from
-[ RECORD 5 ]-----+-----------------
jsonb_object_keys | notes
-[ RECORD 6 ]-----+-----------------
jsonb_object_keys | times
-[ RECORD 7 ]-----+-----------------
jsonb_object_keys | _to_time
-[ RECORD 8 ]-----+-----------------
jsonb_object_keys | distance
-[ RECORD 9 ]-----+-----------------
jsonb_object_keys | duration
-[ RECORD 10 ]----+-----------------
jsonb_object_keys | avg_speed
-[ RECORD 11 ]----+-----------------
jsonb_object_keys | max_speed
-[ RECORD 12 ]----+-----------------
jsonb_object_keys | _from_time
-[ RECORD 13 ]----+-----------------
jsonb_object_keys | _to_moorage_id
-[ RECORD 14 ]----+-----------------
jsonb_object_keys | avg_wind_speed
-[ RECORD 15 ]----+-----------------
jsonb_object_keys | max_wind_speed
-[ RECORD 16 ]----+-----------------
jsonb_object_keys | _from_moorage_id

Check logbook geojson Point properties
-[ RECORD 1 ]-----+-------
jsonb_object_keys | cog
-[ RECORD 2 ]-----+-------
jsonb_object_keys | sog
-[ RECORD 3 ]-----+-------
jsonb_object_keys | twa
-[ RECORD 4 ]-----+-------
jsonb_object_keys | twd
-[ RECORD 5 ]-----+-------
jsonb_object_keys | tws
-[ RECORD 6 ]-----+-------
jsonb_object_keys | time
-[ RECORD 7 ]-----+-------
jsonb_object_keys | trip
-[ RECORD 8 ]-----+-------
jsonb_object_keys | notes
-[ RECORD 9 ]-----+-------
jsonb_object_keys | status

Check logbook export fn
-[ RECORD 1 ]
gpx_trip | t

-[ RECORD 1 ]
kml_trip | t

@@ -13,9 +13,14 @@ select current_database();

-- Check the number of process pending
\echo 'Check the number of process pending'
-- Should be 22
-- Should be 24
SELECT count(*) as jobs from public.process_queue pq where pq.processed is null;
--set role scheduler
-- Switch to the scheduler role
--\echo 'Switch to the scheduler role'
--SET ROLE scheduler;
-- Should be 24
SELECT count(*) as jobs from public.process_queue pq where pq.processed is null;
-- Run the cron jobs
SELECT public.run_cron_jobs();
-- Check any pending job
SELECT count(*) as any_pending_jobs from public.process_queue pq where pq.processed is null;

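When the pending count differs from the expected value, a breakdown by job type is more informative than the bare count. A sketch only — the channel column name is an assumption about the process_queue schema:

-- Hypothetical breakdown of pending jobs by type; adjust the column
-- name if the process_queue schema differs.
SELECT pq.channel, count(*) AS pending
  FROM public.process_queue pq
 WHERE pq.processed IS NULL
 GROUP BY pq.channel;
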
@@ -9,6 +9,9 @@ Check the number of process pending
-[ RECORD 1 ]
jobs | 26

-[ RECORD 1 ]
jobs | 26

-[ RECORD 1 ]-+-
run_cron_jobs |

@@ -17,5 +20,5 @@ any_pending_jobs | 2

Check the number of metrics entries
-[ RECORD 1 ]-+----
metrics_count | 172
metrics_count | 173

@@ -15,21 +15,21 @@ select current_database();
-- grafana_auth
SET ROLE grafana_auth;
\echo 'ROLE grafana_auth current_setting'
SELECT current_user, current_setting('user.email', true), current_setting('vessel.client_id', true), current_setting('vessel.id', true);
SELECT current_user, current_setting('user.email', true), current_setting('vessel.id', true);

--SELECT a.pass,v.name,m.client_id FROM auth.accounts a JOIN auth.vessels v ON a.email = 'demo+kapla@openplotter.cloud' AND a.role = 'user_role' AND cast(a.preferences->>'email_valid' as Boolean) = True AND v.owner_email = a.email JOIN api.metadata m ON m.vessel_id = v.vessel_id;
--SELECT a.pass,v.name,m.client_id FROM auth.accounts a JOIN auth.vessels v ON a.email = 'demo+kapla@openplotter.cloud' AND a.role = 'user_role' AND v.owner_email = a.email JOIN api.metadata m ON m.vessel_id = v.vessel_id;
\echo 'link vessel and user based on current_setting'
SELECT v.name,m.client_id FROM auth.accounts a JOIN auth.vessels v ON a.role = 'user_role' AND v.owner_email = a.email JOIN api.metadata m ON m.vessel_id = v.vessel_id;
SELECT v.name, m.vessel_id IS NOT NULL AS vessel_id FROM auth.accounts a JOIN auth.vessels v ON a.role = 'user_role' AND v.owner_email = a.email JOIN api.metadata m ON m.vessel_id = v.vessel_id ORDER BY a.id DESC;

\echo 'auth.accounts details'
SELECT a.user_id IS NOT NULL AS user_id, a.email, a.first, a.last, a.pass IS NOT NULL AS pass, a.role, a.preferences->'telegram'->'chat' AS telegram, a.preferences->'pushover_user_key' AS pushover_user_key FROM auth.accounts AS a;
SELECT a.user_id IS NOT NULL AS user_id, a.email, a.first, a.last, a.pass IS NOT NULL AS pass, a.role, a.preferences->'telegram'->'chat' AS telegram, a.preferences->'pushover_user_key' AS pushover_user_key FROM auth.accounts AS a ORDER BY a.id DESC;
\echo 'auth.vessels details'
--SELECT 'SELECT ' || STRING_AGG('v.' || column_name, ', ') || ' FROM auth.vessels AS v' FROM information_schema.columns WHERE table_name = 'vessels' AND table_schema = 'auth' AND column_name NOT IN ('created_at', 'updated_at');
SELECT v.vessel_id IS NOT NULL AS vessel_id, v.owner_email, v.mmsi, v.name, v.role FROM auth.vessels AS v;
\echo 'api.metadata details'
--
SELECT m.id, m.name, m.mmsi, m.client_id, m.length, m.beam, m.height, m.ship_type, m.plugin_version, m.signalk_version, m.time IS NOT NULL AS time, m.active FROM api.metadata AS m;
SELECT vessel_id IS NOT NULL AS vessel_id_not_null, m.name, m.mmsi, m.length, m.beam, m.height, m.ship_type, m.plugin_version, m.signalk_version, m.time IS NOT NULL AS time, m.active, configuration IS NOT NULL AS configuration_not_null, available_keys FROM api.metadata AS m ORDER BY m.name DESC;

--
-- grafana
@@ -46,16 +46,16 @@ SELECT v.vessel_id as "vessel_id" FROM auth.vessels v WHERE v.owner_email = 'dem
SELECT set_config('vessel.id', :'vessel_id', false) IS NOT NULL as vessel_id;

--SELECT current_user, current_setting('user.email', true), current_setting('vessel.client_id', true), current_setting('vessel.id', true);
SELECT current_user, current_setting('user.email', true), current_setting('vessel.client_id', true);
SELECT current_user, current_setting('user.email', true);

SELECT v.name AS __text, m.client_id AS __value FROM auth.vessels v JOIN api.metadata m ON v.owner_email = 'demo+kapla@openplotter.cloud' and m.vessel_id = v.vessel_id;
SELECT v.name AS __text, m.vessel_id IS NOT NULL AS __value FROM auth.vessels v JOIN api.metadata m ON v.owner_email = 'demo+kapla@openplotter.cloud' and m.vessel_id = v.vessel_id;

\echo 'auth.vessels details'
--SELECT * FROM auth.vessels v;
SELECT v.vessel_id IS NOT NULL AS vessel_id, v.owner_email, v.mmsi, v.name, v.role FROM auth.vessels AS v;
--SELECT * FROM api.metadata m;
\echo 'api.metadata details'
SELECT m.id, m.name, m.mmsi, m.client_id, m.length, m.beam, m.height, m.ship_type, m.plugin_version, m.signalk_version, m.time IS NOT NULL AS time, m.active FROM api.metadata AS m;
SELECT vessel_id IS NOT NULL AS vessel_id_not_null, m.name, m.mmsi, m.length, m.beam, m.height, m.ship_type, m.plugin_version, m.signalk_version, m.time IS NOT NULL AS time, m.active, configuration IS NOT NULL AS configuration_not_null, available_keys FROM api.metadata AS m;

\echo 'api.logs_view'
--SELECT * FROM api.logbook l;
@@ -67,13 +67,18 @@ SELECT l.id, l.name, l.from, l.to, l.distance, l.duration, l._from_moorage_id, l
--SELECT * FROM api.stays s;
SELECT m.id, m.vessel_id IS NOT NULL AS vessel_id, m.moorage_id, m.active, m.name IS NOT NULL AS name, m.latitude, m.longitude, m.geog, m.arrived IS NOT NULL AS arrived, m.departed IS NOT NULL AS departed, m.duration, m.stay_code, m.notes FROM api.stays AS m;

\echo 'stays_view'
\echo 'api.stays_view'
--SELECT * FROM api.stays_view s;
SELECT m.id, m.name IS NOT NULL AS name, m.moorage, m.moorage_id, m.duration, m.stayed_at, m.stayed_at_id, m.arrived IS NOT NULL AS arrived, m.departed IS NOT NULL AS departed, m.notes FROM api.stays_view AS m;

\echo 'api.moorages'
--SELECT * FROM api.moorages m;
SELECT m.id, m.vessel_id IS NOT NULL AS vessel_id, m.name, m.country, m.stay_code, m.stay_duration, m.reference_count, m.latitude, m.longitude, m.geog, m.home_flag, m.notes FROM api.moorages AS m;
--SELECT m.id, m.vessel_id IS NOT NULL AS vessel_id, m.name, m.country, m.stay_code, m.stay_duration, m.reference_count, m.latitude, m.longitude, m.geog, m.home_flag, m.notes FROM api.moorages AS m;
SELECT m.id, m.vessel_id IS NOT NULL AS vessel_id, m.name, m.country, m.stay_code, m.latitude, m.longitude, m.geog, m.home_flag, m.notes FROM api.moorages AS m;

\echo 'api.moorages_view'
SELECT * FROM api.moorages_view s;
SELECT * FROM api.moorages_view;

\echo 'api.moorage_view'
--SELECT * FROM api.moorage_view;
SELECT m.id, m.name, default_stay, m.latitude, m.longitude, m.geog, m.home, m.notes, logs_count, stays_count, stay_first_seen_id, stay_last_seen_id, stay_first_seen IS NOT NULL AS stay_first_seen, stay_last_seen IS NOT NULL AS stay_last_seen FROM api.moorage_view m;

@@ -11,15 +11,14 @@ ROLE grafana_auth current_setting
|
||||
current_user | grafana_auth
|
||||
current_setting |
|
||||
current_setting |
|
||||
current_setting |
|
||||
|
||||
link vessel and user based on current_setting
|
||||
-[ RECORD 1 ]----------------------------------------------------------------
|
||||
-[ RECORD 1 ]----
|
||||
name | aava
|
||||
client_id | vessels.urn:mrn:imo:mmsi:787654321
|
||||
-[ RECORD 2 ]----------------------------------------------------------------
|
||||
vessel_id | t
|
||||
-[ RECORD 2 ]----
|
||||
name | kapla
|
||||
client_id | vessels.urn:mrn:signalk:uuid:5b4f7543-7153-4840-b139-761310b242fd
|
||||
vessel_id | t
|
||||
|
||||
auth.accounts details
|
||||
-[ RECORD 1 ]-----+-----------------------------
|
||||
@@ -56,32 +55,34 @@ name | aava
|
||||
role | vessel_role
|
||||
|
||||
api.metadata details
|
||||
-[ RECORD 1 ]---+------------------------------------------------------------------
|
||||
id | 1
|
||||
name | kapla
|
||||
mmsi | 123456789
|
||||
client_id | vessels.urn:mrn:signalk:uuid:5b4f7543-7153-4840-b139-761310b242fd
|
||||
length | 12
|
||||
beam | 10
|
||||
height | 24
|
||||
ship_type | 36
|
||||
plugin_version | 0.0.1
|
||||
signalk_version | signalk_version
|
||||
time | t
|
||||
active | t
|
||||
-[ RECORD 2 ]---+------------------------------------------------------------------
|
||||
id | 2
|
||||
name | aava
|
||||
mmsi | 787654321
|
||||
client_id | vessels.urn:mrn:imo:mmsi:787654321
|
||||
length | 12
|
||||
beam | 10
|
||||
height | 24
|
||||
ship_type | 37
|
||||
plugin_version | 1.0.2
|
||||
signalk_version | 1.20.0
|
||||
time | t
|
||||
active | t
|
||||
-[ RECORD 1 ]----------+----------------
|
||||
vessel_id_not_null | t
|
||||
name | kapla
|
||||
mmsi | 123456789
|
||||
length | 12
|
||||
beam | 10
|
||||
height | 24
|
||||
ship_type | 36
|
||||
plugin_version | 0.0.1
|
||||
signalk_version | signalk_version
|
||||
time | t
|
||||
active | t
|
||||
configuration_not_null | t
|
||||
available_keys |
|
||||
-[ RECORD 2 ]----------+----------------
|
||||
vessel_id_not_null | t
|
||||
name | aava
|
||||
mmsi | 787654321
|
||||
length | 12
|
||||
beam | 10
|
||||
height | 24
|
||||
ship_type | 37
|
||||
plugin_version | 1.0.2
|
||||
signalk_version | 1.20.0
|
||||
time | t
|
||||
active | t
|
||||
configuration_not_null | t
|
||||
available_keys | []
|
||||
|
||||
SET
|
||||
ROLE grafana current_setting
|
||||
@@ -93,11 +94,10 @@ vessel_id | t
|
||||
-[ RECORD 1 ]---+-----------------------------
|
||||
current_user | grafana
|
||||
current_setting | demo+kapla@openplotter.cloud
|
||||
current_setting |
|
||||
|
||||
-[ RECORD 1 ]--------------------------------------------------------------
|
||||
-[ RECORD 1 ]--
|
||||
__text | kapla
|
||||
__value | vessels.urn:mrn:signalk:uuid:5b4f7543-7153-4840-b139-761310b242fd
|
||||
__value | t
|
||||
|
||||
auth.vessels details
|
||||
-[ RECORD 1 ]-----------------------------
|
||||
@@ -108,19 +108,20 @@ name | kapla
|
||||
role | vessel_role
|
||||
|
||||
api.metadata details
|
||||
-[ RECORD 1 ]---+------------------------------------------------------------------
|
||||
id | 1
|
||||
name | kapla
|
||||
mmsi | 123456789
|
||||
client_id | vessels.urn:mrn:signalk:uuid:5b4f7543-7153-4840-b139-761310b242fd
|
||||
length | 12
|
||||
beam | 10
|
||||
height | 24
|
||||
ship_type | 36
|
||||
plugin_version | 0.0.1
|
||||
signalk_version | signalk_version
|
||||
time | t
|
||||
active | t
|
||||
-[ RECORD 1 ]----------+----------------
|
||||
vessel_id_not_null | t
|
||||
name | kapla
|
||||
mmsi | 123456789
|
||||
length | 12
|
||||
beam | 10
|
||||
height | 24
|
||||
ship_type | 36
|
||||
plugin_version | 0.0.1
|
||||
signalk_version | signalk_version
|
||||
time | t
|
||||
active | t
|
||||
configuration_not_null | t
|
||||
available_keys |
|
||||
|
||||
api.logs_view
-[ RECORD 1 ]----+-----------------------
@@ -186,7 +187,7 @@ duration | PT2M
stay_code | 4
notes |

stays_view
api.stays_view
-[ RECORD 1 ]+---------------------
id | 2
name | t
@@ -211,45 +212,39 @@ departed | t
notes | new stay note 3

api.moorages
-[ RECORD 1 ]---+---------------------------------------------------
id | 1
vessel_id | t
name | patch moorage name 3
country | fi
stay_code | 2
stay_duration | PT1M
reference_count | 1
latitude | 60.0776666666667
longitude | 23.5308666666667
geog | 0101000020E6100000B9DEBBE0E687374052A938FBF0094E40
home_flag | t
notes | new moorage note 3
-[ RECORD 2 ]---+---------------------------------------------------
id | 2
vessel_id | t
name | Norra hamnen
country | fi
stay_code | 4
stay_duration | PT2M
reference_count | 2
latitude | 59.9768833333333
longitude | 23.4321
geog | 0101000020E6100000029A081B9E6E3740455658830AFD4D40
home_flag | f
notes |
-[ RECORD 3 ]---+---------------------------------------------------
id | 3
vessel_id | t
name | Ekenäs
country | fi
stay_code | 1
stay_duration |
reference_count | 1
latitude | 59.86
longitude | 23.3657666666667
geog | 0101000020E6100000E84C5FE2A25D3740AE47E17A14EE4D40
home_flag | f
notes |
-[ RECORD 1 ]-------------------------------------------------
id | 1
vessel_id | t
name | patch moorage name 3
country | fi
stay_code | 2
latitude | 60.0776666666667
longitude | 23.5308666666667
geog | 0101000020E6100000B9DEBBE0E687374052A938FBF0094E40
home_flag | t
notes | new moorage note 3
-[ RECORD 2 ]-------------------------------------------------
id | 2
vessel_id | t
name | Norra hamnen
country | fi
stay_code | 4
latitude | 59.9768833333333
longitude | 23.4321
geog | 0101000020E6100000029A081B9E6E3740455658830AFD4D40
home_flag | f
notes |
-[ RECORD 3 ]-------------------------------------------------
id | 3
vessel_id | t
name | Ekenäs
country | fi
stay_code | 1
latitude | 59.86
longitude | 23.3657666666667
geog | 0101000020E6100000E84C5FE2A25D3740AE47E17A14EE4D40
home_flag | f
notes |

api.moorages_view
-[ RECORD 1 ]-------+---------------------
@@ -257,15 +252,67 @@ id | 2
moorage | Norra hamnen
default_stay | Dock
default_stay_id | 4
total_stay | 0
total_duration | PT2M
arrivals_departures | 2
total_duration | PT2M
-[ RECORD 2 ]-------+---------------------
id | 1
moorage | patch moorage name 3
default_stay | Anchor
default_stay_id | 2
total_stay | 0
total_duration | PT1M
arrivals_departures | 1
total_duration | PT1M
-[ RECORD 3 ]-------+---------------------
id | 3
moorage | Ekenäs
default_stay | Unknown
default_stay_id | 1
arrivals_departures | 1
total_duration | PT0S

api.moorage_view
-[ RECORD 1 ]------+---------------------------------------------------
id | 3
name | Ekenäs
default_stay | Unknown
latitude | 59.86
longitude | 23.3657666666667
geog | 0101000020E6100000E84C5FE2A25D3740AE47E17A14EE4D40
home | f
notes |
logs_count | 1
stays_count | 0
stay_first_seen_id |
stay_last_seen_id |
stay_first_seen | f
stay_last_seen | f
-[ RECORD 2 ]------+---------------------------------------------------
id | 2
name | Norra hamnen
default_stay | Dock
latitude | 59.9768833333333
longitude | 23.4321
geog | 0101000020E6100000029A081B9E6E3740455658830AFD4D40
home | f
notes |
logs_count | 2
stays_count | 1
stay_first_seen_id | 2
stay_last_seen_id | 2
stay_first_seen | t
stay_last_seen | t
-[ RECORD 3 ]------+---------------------------------------------------
id | 1
name | patch moorage name 3
default_stay | Anchor
latitude | 60.0776666666667
longitude | 23.5308666666667
geog | 0101000020E6100000B9DEBBE0E687374052A938FBF0094E40
home | t
notes | new moorage note 3
logs_count | 1
stays_count | 1
stay_first_seen_id | 1
stay_last_seen_id | 1
stay_first_seen | t
stay_last_seen | t

53  tests/sql/logbook.sql  Normal file
@@ -0,0 +1,53 @@
---------------------------------------------------------------------------
-- Listing
--

-- List current database
select current_database();

-- connect to the DB
\c signalk

-- output display format
\x on

\echo 'Validate logbook operation'
-- set user_id
SELECT a.user_id as "user_id" FROM auth.accounts a WHERE a.email = 'demo+kapla@openplotter.cloud' \gset
--\echo :"user_id"
SELECT set_config('user.id', :'user_id', false) IS NOT NULL as user_id;

-- set vessel_id
SELECT v.vessel_id as "vessel_id" FROM auth.vessels v WHERE v.owner_email = 'demo+kapla@openplotter.cloud' \gset
--\echo :"vessel_id"
SELECT set_config('vessel.id', :'vessel_id', false) IS NOT NULL as vessel_id;

-- Count logbook for user
\echo 'logbook'
SELECT count(*) FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);
\echo 'logbook'
-- track_geom and track_geojson are now dynamic from mobilitydb
SELECT name,_from_time IS NOT NULL AS _from_time_not_null, _to_time IS NOT NULL AS _to_time_not_null, trajectory(trip) AS track_geom, distance,duration,avg_speed,max_speed,max_wind_speed,notes,extra FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false) ORDER BY id ASC;

--
-- user_role
SET ROLE user_role;
\echo 'ROLE user_role current_setting'

SELECT set_config('vessel.id', :'vessel_id', false) IS NOT NULL as vessel_id;

-- Count logbook for user
\echo 'logbook'
SELECT count(*) FROM api.logbook;
\echo 'logbook'
-- track_geom and track_geojson are now dynamic from mobilitydb
SELECT name,_from_time IS NOT NULL AS _from_time_not_null, _to_time IS NOT NULL AS _to_time_not_null, trajectory(trip) AS track_geom, distance,duration,avg_speed,max_speed,max_wind_speed,notes,extra FROM api.logbook ORDER BY id ASC;

-- Delete logbook for user
\echo 'Delete logbook for user kapla'
SELECT api.delete_logbook_fn(5); -- delete Tropics Zone
SELECT api.delete_logbook_fn(6); -- delete Alaska Zone

-- Merge logbook for user
\echo 'Merge logbook for user kapla'
SELECT api.merge_logbook_fn(1,2);
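
A note on the pattern this file (and every test file below) relies on: psql's \gset stores the previous single-row result into psql variables, and set_config() republishes such a value as a session setting that the row-level security policies listed at the end of this diff read back with current_setting('vessel.id'). A minimal self-contained sketch of the same mechanism; the table and policy names here are illustrative only, not PostgSail DDL:

-- a table guarded by the same kind of RLS policy the tests depend on
CREATE TABLE demo_logbook (id serial PRIMARY KEY, vessel_id text NOT NULL, note text);
ALTER TABLE demo_logbook ENABLE ROW LEVEL SECURITY;
-- FORCE so the policy also applies to the table owner running this sketch
ALTER TABLE demo_logbook FORCE ROW LEVEL SECURITY;
CREATE POLICY demo_policy ON demo_logbook
    USING (vessel_id = current_setting('vessel.id', true));

-- capture a value into a psql variable, then expose it as a session setting
SELECT 'urn:demo:vessel:1' AS demo_vessel_id \gset
SELECT set_config('vessel.id', :'demo_vessel_id', false);

-- queries now only see rows whose vessel_id matches the session setting
SELECT count(*) FROM demo_logbook;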
138  tests/sql/logbook.sql.output  Normal file
@@ -0,0 +1,138 @@
current_database
------------------
signalk
(1 row)

You are now connected to database "signalk" as user "username".
Expanded display is on.
Validate logbook operation
-[ RECORD 1 ]
user_id | t

-[ RECORD 1 ]
vessel_id | t

logbook
-[ RECORD 1 ]
count | 4

logbook
-[ RECORD 1 ]-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
name | patch log name 3
_from_time_not_null | t
_to_time_not_null | t
track_geom | 0102000020E61000001A000000B0DEBBE0E68737404DA938FBF0094E4020D26F5F0786374030BB270F0B094E400C6E7ED60F843740AA60545227084E40D60FC48C03823740593CE27D42074E407B39D9F322803740984C158C4A064E4091ED7C3F357E3740898BB63D54054E40A8A1208B477C37404BA3DC9059044E404C5CB4EDA17A3740C4F856115B034E40A9A44E4013793740D8F0F44A59024E40E4839ECDAA773740211FF46C56014E405408D147067637408229F03B73004E40787AA52C43743740F90FE9B7AFFF4D40F8098D4D18723740C217265305FF4D4084E82303537037409A2D464AA0FE4D4022474DCE636F37402912396A72FE4D408351499D806E374088CFB02B40FE4D4076711B0DE06D3740B356C7040FFE4D404EAC66B0BC6E374058A835CD3BFE4D40D7A3703D0A6F3740D3E10EC15EFE4D4087602F277B6E3740A779C7293AFE4D402063EE5A426E3740B5A679C729FE4D40381DEE10EC6D37409ECA7C1A0AFE4D40E2C46A06CB6B37400A43F7BF36FD4D4075931804566E3740320BDAD125FD4D409A2D464AA06E37404A5658830AFD4D40029A081B9E6E37404A5658830AFD4D40
distance | 7.6447
duration | PT27M
avg_speed | 3.6357142857142852
max_speed | 6.1
max_wind_speed | 22.1
notes | new log note 3
extra | {"tags": ["tag_name"], "metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": 1}, "avg_wind_speed": 14.549999999999999}
-[ RECORD 2 ]-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
name | Norra hamnen to Ekenäs
_from_time_not_null | t
_to_time_not_null | t
track_geom | 0102000020E610000013000000029A081B9E6E37404A5658830AFD4D404806A6C0EF6C3740DA1B7C6132FD4D40FE65F7E461693740226C787AA5FC4D407DD3E10EC1663740B29DEFA7C6FB4D40898BB63D5465374068479724BCFA4D409A5271F6E1633740B6847CD0B3F94D40431CEBE236623740E9263108ACF84D402C6519E2585F37407E678EBFC7F74D4096218E75715B374027C5B45C23F74D402AA913D044583740968DE1C46AF64D405AF5B9DA8A5537407BEF829B9FF54D407449C2ABD253374086C954C1A8F44D407D1A0AB278543740F2B0506B9AF34D409D11A5BDC15737406688635DDCF24D4061C3D32B655937402CAF6F3ADCF14D408988888888583740B3319C58CDF04D4021FAC8C0145837408C94405DB7EF4D40B8F9593F105B37403DC0804BEDEE4D40DE4C5FE2A25D3740AE47E17A14EE4D40
distance | 8.8968
duration | PT20M
avg_speed | 5.4523809523809526
max_speed | 6.5
max_wind_speed | 37.2
notes |
extra | {"metrics": {"propulsion.main.runTime": "PT11S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": -1}, "avg_wind_speed": 10.476190476190478}
-[ RECORD 3 ]-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
name | Tropics Zone
_from_time_not_null | t
_to_time_not_null | t
track_geom | 0102000020E610000002000000A4E85E0D58934FC000DC509B80052C40BC069B43D64553C090510727F3BD2940
distance | 123
duration |
avg_speed |
max_speed |
max_wind_speed |
notes |
extra |
-[ RECORD 4 ]-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
name | Alaska Zone
_from_time_not_null | t
_to_time_not_null | t
track_geom | 0102000020E610000002000000FDB11ED079F261C090C47F1861B84D40D3505124540B63C09C091C1C8D4A4C40
distance | 1234
duration |
avg_speed |
max_speed |
max_wind_speed |
notes |
extra |

SET
ROLE user_role current_setting
-[ RECORD 1 ]
vessel_id | t

logbook
-[ RECORD 1 ]
count | 4

logbook
-[ RECORD 1 ]-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
name | patch log name 3
_from_time_not_null | t
_to_time_not_null | t
track_geom | 0102000020E61000001A000000B0DEBBE0E68737404DA938FBF0094E4020D26F5F0786374030BB270F0B094E400C6E7ED60F843740AA60545227084E40D60FC48C03823740593CE27D42074E407B39D9F322803740984C158C4A064E4091ED7C3F357E3740898BB63D54054E40A8A1208B477C37404BA3DC9059044E404C5CB4EDA17A3740C4F856115B034E40A9A44E4013793740D8F0F44A59024E40E4839ECDAA773740211FF46C56014E405408D147067637408229F03B73004E40787AA52C43743740F90FE9B7AFFF4D40F8098D4D18723740C217265305FF4D4084E82303537037409A2D464AA0FE4D4022474DCE636F37402912396A72FE4D408351499D806E374088CFB02B40FE4D4076711B0DE06D3740B356C7040FFE4D404EAC66B0BC6E374058A835CD3BFE4D40D7A3703D0A6F3740D3E10EC15EFE4D4087602F277B6E3740A779C7293AFE4D402063EE5A426E3740B5A679C729FE4D40381DEE10EC6D37409ECA7C1A0AFE4D40E2C46A06CB6B37400A43F7BF36FD4D4075931804566E3740320BDAD125FD4D409A2D464AA06E37404A5658830AFD4D40029A081B9E6E37404A5658830AFD4D40
distance | 7.6447
duration | PT27M
avg_speed | 3.6357142857142852
max_speed | 6.1
max_wind_speed | 22.1
notes | new log note 3
extra | {"tags": ["tag_name"], "metrics": {"propulsion.main.runTime": "PT10S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": 1}, "avg_wind_speed": 14.549999999999999}
-[ RECORD 2 ]-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
name | Norra hamnen to Ekenäs
_from_time_not_null | t
_to_time_not_null | t
track_geom | 0102000020E610000013000000029A081B9E6E37404A5658830AFD4D404806A6C0EF6C3740DA1B7C6132FD4D40FE65F7E461693740226C787AA5FC4D407DD3E10EC1663740B29DEFA7C6FB4D40898BB63D5465374068479724BCFA4D409A5271F6E1633740B6847CD0B3F94D40431CEBE236623740E9263108ACF84D402C6519E2585F37407E678EBFC7F74D4096218E75715B374027C5B45C23F74D402AA913D044583740968DE1C46AF64D405AF5B9DA8A5537407BEF829B9FF54D407449C2ABD253374086C954C1A8F44D407D1A0AB278543740F2B0506B9AF34D409D11A5BDC15737406688635DDCF24D4061C3D32B655937402CAF6F3ADCF14D408988888888583740B3319C58CDF04D4021FAC8C0145837408C94405DB7EF4D40B8F9593F105B37403DC0804BEDEE4D40DE4C5FE2A25D3740AE47E17A14EE4D40
distance | 8.8968
duration | PT20M
avg_speed | 5.4523809523809526
max_speed | 6.5
max_wind_speed | 37.2
notes |
extra | {"metrics": {"propulsion.main.runTime": "PT11S"}, "observations": {"seaState": -1, "visibility": -1, "cloudCoverage": -1}, "avg_wind_speed": 10.476190476190478}
-[ RECORD 3 ]-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
name | Tropics Zone
_from_time_not_null | t
_to_time_not_null | t
track_geom | 0102000020E610000002000000A4E85E0D58934FC000DC509B80052C40BC069B43D64553C090510727F3BD2940
distance | 123
duration |
avg_speed |
max_speed |
max_wind_speed |
notes |
extra |
-[ RECORD 4 ]-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
name | Alaska Zone
_from_time_not_null | t
_to_time_not_null | t
track_geom | 0102000020E610000002000000FDB11ED079F261C090C47F1861B84D40D3505124540B63C09C091C1C8D4A4C40
distance | 1234
duration |
avg_speed |
max_speed |
max_wind_speed |
notes |
extra |

Delete logbook for user kapla
-[ RECORD 1 ]-----+--
delete_logbook_fn | t

-[ RECORD 1 ]-----+--
delete_logbook_fn | t

Merge logbook for user kapla
-[ RECORD 1 ]----+-
merge_logbook_fn |

36  tests/sql/maplapse.sql  Normal file
@@ -0,0 +1,36 @@
---------------------------------------------------------------------------
-- Listing
--

-- List current database
select current_database();

-- connect to the DB
\c signalk

-- output display format
\x on

-- Assign vessel_id var
SELECT v.vessel_id as "vessel_id_kapla" FROM auth.vessels v WHERE v.owner_email = 'demo+kapla@openplotter.cloud' \gset
SELECT v.vessel_id as "vessel_id_aava" FROM auth.vessels v WHERE v.owner_email = 'demo+aava@openplotter.cloud' \gset

-- user_role
SET ROLE user_role;
SELECT set_config('vessel.id', :'vessel_id_kapla', false) IS NOT NULL as vessel_id;
-- insert fake request maplapse
\echo 'Insert fake request maplapse'
SELECT api.maplapse_record_fn('Kapla,?start_log=1&end_log=1&height=100vh');

-- maplapse_role
SET ROLE maplapse_role;

\echo 'GET pending maplapse task'
SELECT id as maplapse_id from process_queue where channel = 'maplapse_video' and processed is null order by stored asc limit 1 \gset
SELECT count(id) from process_queue where channel = 'maplapse_video' and processed is null limit 1;

\echo 'Update process on completion'
UPDATE process_queue SET processed = NOW() WHERE id = :'maplapse_id';

\echo 'Insert video availability notification in process queue'
INSERT INTO process_queue ("channel", "payload", "ref_id", "stored") VALUES ('new_video', CONCAT('video_', :'vessel_id_kapla'::TEXT, '_1', '_1.mp4'), :'vessel_id_kapla'::TEXT, NOW());
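
The maplapse flow above is a plain poll-and-ack work queue: the user role enqueues a task, the worker role selects the oldest unprocessed row, and completion is recorded by stamping processed. As a hedged sketch, here is how an external worker could claim a task safely under concurrency; the FOR UPDATE SKIP LOCKED clause is our illustration of standard PostgreSQL queueing, not necessarily what the PostgSail maplapse worker actually does:

BEGIN;
-- claim the oldest pending task; SKIP LOCKED lets several workers poll concurrently
SELECT id AS task_id, payload
FROM process_queue
WHERE channel = 'maplapse_video' AND processed IS NULL
ORDER BY stored ASC
LIMIT 1
FOR UPDATE SKIP LOCKED \gset
-- ... render the video for :payload here, then acknowledge the claimed row:
UPDATE process_queue SET processed = NOW() WHERE id = :'task_id';
COMMIT;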
24  tests/sql/maplapse.sql.output  Normal file
@@ -0,0 +1,24 @@
current_database
------------------
signalk
(1 row)

You are now connected to database "signalk" as user "username".
Expanded display is on.
SET
-[ RECORD 1 ]
vessel_id | t

Insert fake request maplapse
-[ RECORD 1 ]------+--
maplapse_record_fn | t

SET
GET pending maplapse task
-[ RECORD 1 ]
count | 1

Update process on completion
UPDATE 1
Insert video availability notification in process queue
INSERT 0 1
84  tests/sql/metadata.sql  Normal file
@@ -0,0 +1,84 @@
---------------------------------------------------------------------------
-- Listing
--

-- List current database
select current_database();

-- connect to the DB
\c signalk

-- output display format
\x on

SELECT count(*) as count_eq_2 FROM api.metadata m;

SELECT v.vessel_id as "vessel_id" FROM auth.vessels v WHERE v.owner_email = 'demo+kapla@openplotter.cloud' \gset
--\echo :"vessel_id"
SELECT set_config('vessel.id', :'vessel_id', false) IS NOT NULL as vessel_id;

-- user_role
SET ROLE user_role;

\echo 'api.metadata details'
SELECT vessel_id IS NOT NULL AS vessel_id_not_null, m.name, m.mmsi, m.length, m.beam, m.height, m.ship_type, m.plugin_version, m.signalk_version, m.time IS NOT NULL AS time, m.active, configuration, available_keys FROM api.metadata AS m ORDER BY m.name ASC;

\echo 'api.metadata get configuration'
select configuration from api.metadata; --WHERE vessel_id = current_setting('vessel.id', false);

\echo 'api.metadata update configuration'
UPDATE api.metadata SET configuration = '{ "depthKey": "environment.depth.belowTransducer" }'; --WHERE vessel_id = current_setting('vessel.id', false);

\echo 'api.metadata get configuration with new value'
select configuration->'depthKey' AS depthKey, configuration->'update_at' IS NOT NULL AS update_at_not_null from api.metadata; --WHERE vessel_id = current_setting('vessel.id', false);

\echo 'api.metadata get configuration based on update_at value'
select configuration->'depthKey' AS depthKey, configuration->'update_at' IS NOT NULL AS update_at_not_null from api.metadata WHERE vessel_id = current_setting('vessel.id', false) AND configuration->>'update_at' <= to_char(NOW(), 'YYYY-MM-DD"T"HH24:MI:SS"Z"');

-- Upsert make_model on metadata_ext table
\echo 'api.metadata_ext set make_model'
INSERT INTO api.metadata_ext (vessel_id, make_model)
VALUES (current_setting('vessel.id', false), 'my super yacht')
ON CONFLICT (vessel_id) DO UPDATE
SET make_model = EXCLUDED.make_model;

-- Upsert polar on metadata_ext table
\echo 'api.metadata_ext set polar'
INSERT INTO api.metadata_ext (vessel_id, polar)
VALUES (current_setting('vessel.id', false), 'twa/tws;4;6;8;10;12;14;16;20;24\n0;0;0;0;0;0;0;0;0;0')
ON CONFLICT (vessel_id) DO UPDATE
SET polar = EXCLUDED.polar;

-- Upsert image on metadata_ext table
\echo 'api.metadata_ext set image/image_b64'
INSERT INTO api.metadata_ext (vessel_id, image_b64)
VALUES (current_setting('vessel.id', false), 'iVBORw0KGgoAAAANSUhEUgAAAMgAAAAyCAIAAACWMwO2AAABNklEQVR4nO3bwY6CMBiF0XYy7//KzIKk6VBjiMMNk59zVljRIH6WsrBv29bgal93HwA1CYsIYREhLCKERYSwiBAWEcIiQlhECIsIYREhLCKERYSwiBAWEcIiQlhECIsIYREhLCK+7z6A/6j33lq75G8m')
ON CONFLICT (vessel_id) DO UPDATE
SET image_b64 = EXCLUDED.image_b64;

-- Ensure make_model on metadata_ext table is updated
\echo 'api.metadata_ext get make_model'
SELECT make_model FROM api.metadata_ext; --WHERE vessel_id = current_setting('vessel.id', false);

-- Ensure polar_updated_at on metadata_ext table is updated by trigger
\echo 'api.metadata_ext get polar_updated_at'
SELECT polar,polar_updated_at IS NOT NULL AS polar_updated_at_not_null FROM api.metadata_ext; --WHERE vessel_id = current_setting('vessel.id', false);

-- Ensure image_updated_at on metadata_ext table is updated by trigger
\echo 'api.metadata_ext get image_updated_at'
SELECT image_b64 IS NULL AS image_b64_is_null,image IS NOT NULL AS image_not_null,image_updated_at IS NOT NULL AS image_updated_at_not_null FROM api.metadata_ext; --WHERE vessel_id = current_setting('vessel.id', false);

-- vessel_role
SET ROLE vessel_role;

\echo 'api.metadata get configuration with new value as vessel'
select configuration->'depthKey' AS depthKey, configuration->'update_at' IS NOT NULL AS update_at_not_null from api.metadata; -- WHERE vessel_id = current_setting('vessel.id', false);

\echo 'api.metadata get configuration based on update_at value as vessel'
select configuration->'depthKey' AS depthKey, configuration->'update_at' IS NOT NULL AS update_at_not_null from api.metadata WHERE vessel_id = current_setting('vessel.id', false) AND configuration->>'update_at' <= to_char(NOW(), 'YYYY-MM-DD"T"HH24:MI:SS"Z"');

-- api_anonymous
SET ROLE api_anonymous;

\echo 'api_anonymous get vessel image'
SELECT api.vessel_image(current_setting('vessel.id', false)) IS NOT NULL AS vessel_image_not_null;
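
The *_updated_at assertions above depend on triggers stamping a timestamp column on every write, and the extension list later in this diff includes moddatetime, the contrib module built for exactly that. A minimal sketch of such a trigger on an illustrative table (the table and trigger names are ours, not the exact PostgSail DDL):

CREATE EXTENSION IF NOT EXISTS moddatetime;

CREATE TABLE demo_ext (
    vessel_id text PRIMARY KEY,
    polar text,
    polar_updated_at timestamptz
);

-- moddatetime(colname) refreshes the named column to now() on each UPDATE;
-- inserts are untouched, which suits the ON CONFLICT DO UPDATE upserts above
CREATE TRIGGER demo_polar_moddatetime
    BEFORE UPDATE ON demo_ext
    FOR EACH ROW
    EXECUTE FUNCTION moddatetime(polar_updated_at);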
83  tests/sql/metadata.sql.output  Normal file
@@ -0,0 +1,83 @@
current_database
------------------
signalk
(1 row)

You are now connected to database "signalk" as user "username".
Expanded display is on.
-[ RECORD 1 ]-
count_eq_2 | 2

-[ RECORD 1 ]
vessel_id | t

SET
api.metadata details
-[ RECORD 1 ]------+----------------
vessel_id_not_null | t
name | kapla
mmsi | 123456789
length | 12
beam | 10
height | 24
ship_type | 36
plugin_version | 0.0.1
signalk_version | signalk_version
time | t
active | t
configuration |
available_keys |

api.metadata get configuration
-[ RECORD 1 ]-+-
configuration |

api.metadata update configuration
UPDATE 1
api.metadata get configuration with new value
-[ RECORD 1 ]------+------------------------------------
depthkey | "environment.depth.belowTransducer"
update_at_not_null | t

api.metadata get configuration based on update_at value
-[ RECORD 1 ]------+------------------------------------
depthkey | "environment.depth.belowTransducer"
update_at_not_null | t

api.metadata_ext set make_model
INSERT 0 1
api.metadata_ext set polar
INSERT 0 1
api.metadata_ext set image/image_b64
INSERT 0 1
api.metadata_ext get make_model
-[ RECORD 1 ]--------------
make_model | my super yacht

api.metadata_ext get polar_updated_at
-[ RECORD 1 ]-------------+-----------------------------------------------------
polar | twa/tws;4;6;8;10;12;14;16;20;24\n0;0;0;0;0;0;0;0;0;0
polar_updated_at_not_null | t

api.metadata_ext get image_updated_at
-[ RECORD 1 ]-------------+--
image_b64_is_null | f
image_not_null | t
image_updated_at_not_null | t

SET
api.metadata get configuration with new value as vessel
-[ RECORD 1 ]------+------------------------------------
depthkey | "environment.depth.belowTransducer"
update_at_not_null | t

api.metadata get configuration based on update_at value as vessel
-[ RECORD 1 ]------+------------------------------------
depthkey | "environment.depth.belowTransducer"
update_at_not_null | t

SET
api_anonymous get vessel image
-[ RECORD 1 ]---------+--
vessel_image_not_null | t

96  tests/sql/mobilitydb.sql  Normal file
@@ -0,0 +1,96 @@
---------------------------------------------------------------------------
-- Listing
--

-- List current database
select current_database();

-- connect to the DB
\c signalk

-- output display format
\x on

-- Assign vessel_id var
SELECT v.vessel_id as "vessel_id_kapla" FROM auth.vessels v WHERE v.owner_email = 'demo+kapla@openplotter.cloud' \gset
SELECT v.vessel_id as "vessel_id_aava" FROM auth.vessels v WHERE v.owner_email = 'demo+aava@openplotter.cloud' \gset

-- user_role
SET ROLE user_role;
-- Switch user as aava
SELECT set_config('vessel.id', :'vessel_id_aava', false) IS NOT NULL as vessel_id;

-- Update notes
\echo 'Add a note for an entry from a trip'
-- Get original value, should be empty
SELECT numInstants(trip), valueAtTimestamp(trip_notes,timestampN(trip,13)) from api.logbook where id = 3;
-- Create the string
SELECT concat('["fishing"@', timestampN(trip,13),',""@',timestampN(trip,14),']') as to_be_update FROM api.logbook where id = 3 \gset
--\echo :to_be_update
-- Update the notes
SELECT api.update_trip_notes_fn(3, :'to_be_update');
-- Compare with previous value, should include "fishing"
SELECT valueAtTimestamp(trip_notes,timestampN(trip,13)) from api.logbook where id = 3;

-- Delete notes
\echo 'Delete an entry from a trip'
-- Get original value, should be 45
SELECT numInstants(trip), jsonb_array_length(api.export_logbook_geojson_point_trip_fn(id)->'features') from api.logbook where id = 3;
-- Extract the timestamps of the invalid coords
--SELECT timestampN(trip,14) as "to_be_delete" FROM api.logbook where id = 3 \gset
SELECT concat('[', timestampN(trip,14),',',timestampN(trip,15),')') as to_be_delete FROM api.logbook where id = 3 \gset
--\echo :to_be_delete
-- Delete the entry for all trip sequence
SELECT api.delete_trip_entry_fn(3, :'to_be_delete');
-- Compare with previous value, should be 44
SELECT numInstants(trip), jsonb_array_length(api.export_logbook_geojson_point_trip_fn(id)->'features') from api.logbook where id = 3;

-- Export PostGIS geography from a trip
\echo 'Export PostGIS geography from trajectory'
--SELECT ST_IsValid(trajectory(trip)::geometry) IS TRUE FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);
SELECT trajectory(trip)::geometry FROM api.logbook WHERE id = 3;

-- Export GeoJSON from a trip
\echo 'Export GeoJSON with properties from a trip'
SELECT jsonb_array_length(api.export_logbook_geojson_point_trip_fn(3)->'features');

-- Export GPX from a trip
\echo 'Export GPX from a trip'
SELECT api.export_logbook_gpx_trip_fn(3) IS NOT NULL;

-- Export KML from a trip
\echo 'Export KML from a trip'
SELECT api.export_logbook_kml_trip_fn(3) IS NOT NULL;

-- Switch user as kapla
SELECT set_config('vessel.id', :'vessel_id_kapla', false) IS NOT NULL as vessel_id;

-- Export timelapse as Geometry LineString from a trip
\echo 'Export timelapse as Geometry LineString from a trip'
--SELECT api.export_logbooks_geojson_linestring_trips_fn(1,2) FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);
-- Test geometry_type and num_properties
-- properties include endtimestamp and starttimestamp
WITH geojson_output AS (
    SELECT api.export_logbooks_geojson_linestring_trips_fn(1, 2) AS geojson
    FROM api.logbook
    WHERE vessel_id = current_setting('vessel.id', false)
)
SELECT
    --geojson
    geojson->'features'->0->'geometry'->>'type' AS geometry_type,
    --jsonb_array_length(jsonb_object_keys(geojson->'features'->0->'properties')::JSONB),
    --jsonb_array_length(jsonb_object_keys(geojson->'features')) AS num_geometry,
    (SELECT COUNT(*) FROM jsonb_object_keys(geojson->'features'->0->'properties')) AS num_properties
FROM geojson_output;

-- Export timelapse as Geometry Point from a trip
\echo 'Export timelapse as Geometry Point from a trip'
SELECT api.export_logbooks_geojson_point_trips_fn(1,2) IS NOT NULL FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);

-- Export GPX from trips
\echo 'Export GPX from trips'
SELECT api.export_logbooks_gpx_trips_fn(1,2) IS NOT NULL FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);

-- Export KML from trips
\echo 'Export KML from trips'
SELECT api.export_logbooks_kml_trips_fn(1,2) IS NOT NULL FROM api.logbook WHERE vessel_id = current_setting('vessel.id', false);
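
All of the trip manipulation above goes through MobilityDB temporal types: api.logbook.trip behaves as a temporal geometry point, and numInstants(), timestampN(), valueAtTimestamp() and trajectory() are stock MobilityDB operations. A throwaway sketch of the same functions against a hand-built value, no PostgSail tables involved (coordinates are arbitrary):

-- build a two-instant temporal point and inspect it with the functions the tests use
SELECT numInstants(trip) AS instants,
       timestampN(trip, 1) AS first_ts,
       trajectory(trip) AS geom
FROM (
    SELECT tgeompoint '[Point(23.53 60.07)@2020-01-01 00:00:00+00, Point(23.43 59.97)@2020-01-01 00:20:00+00]' AS trip
) AS t;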
70  tests/sql/mobilitydb.sql.output  Normal file
@@ -0,0 +1,70 @@
current_database
------------------
signalk
(1 row)

You are now connected to database "signalk" as user "username".
Expanded display is on.
SET
-[ RECORD 1 ]
vessel_id | t

Add a note for an entry from a trip
-[ RECORD 1 ]----+---
numinstants | 45
valueattimestamp |

-[ RECORD 1 ]--------+-
update_trip_notes_fn |

-[ RECORD 1 ]----+--------
valueattimestamp | fishing

Delete an entry from a trip
-[ RECORD 1 ]------+---
numinstants | 45
jsonb_array_length | 45

-[ RECORD 1 ]--------+-
delete_trip_entry_fn |

-[ RECORD 1 ]------+---
numinstants | 44
jsonb_array_length | 44

Export PostGIS geography from trajectory
-[ RECORD 1 ]----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
trajectory | 0102000020E61000002B0000001CACA4BA25BC3840217B18B556DC4D40569FABADD8BB384012EA33B10ADC4D408A65E9F989BB384018A023A8D0DB4D40B174F4AE30BB38409951E2299ADB4D407C348B06DFBA3840443A973D64DB4D40FE4465C39ABA38409313927131DB4D402C9CA4F963BA384029E6C52EF6DA4D4010559D7A49BA384019E25817B7DA4D401BF0F96184BC3840F868160DBEDC4D4095B7239C16BC38401DDB7C6D47DC4D40559FABADD8BB384012EA33B10ADC4D408A65E9F989BB384018A023A8D0DB4D40B274F4AE30BB38409951E2299ADB4D407C348B06DFBA3840443A973D64DB4D40FE4465C39ABA38409313927131DB4D402C9CA4F963BA384029E6C52EF6DA4D4010559D7A49BA384019E25817B7DA4D401BF0F96184BC3840F868160DBEDC4D4098B1B2C755BC38404D52F41B81DC4D4095B7239C16BC38401DDB7C6D47DC4D407448C55AD7BB38406CABFEAD09DC4D40D8367B5688BB38400D54C6BFCFDB4D40B274F4AE30BB38409951E2299ADB4D401256BEC2DDBA3840EB22E06B63DB4D40FE4465C39ABA38409313927131DB4D402C9CA4F963BA384029E6C52EF6DA4D40407D152A49BA3840CDA66D0DB6DA4D40BDBFE6C182BC3840F9269710BDDC4D4098B1B2C755BC38404D52F41B81DC4D4024308CAA15BC3840B85DC36746DC4D407448C55AD7BB38406CABFEAD09DC4D40D8367B5688BB38400D54C6BFCFDB4D408224A24E2FBB384070BEC74F99DB4D4028B0A5EC99BA3840F90C4D7E30DB4D402C4080B163BA38402FA93528F5DA4D40407D152A49BA3840CDA66D0DB6DA4D40BDBFE6C182BC3840F9269710BDDC4D4099B1B2C755BC38404D52F41B81DC4D4024308CAA15BC3840B85DC36746DC4D407448C55AD7BB38406CABFEAD09DC4D40D8367B5688BB38400D54C6BFCFDB4D408224A24E2FBB384070BEC74F99DB4D401156BEC2DDBA3840EB22E06B63DB4D40

Export GeoJSON with properties from a trip
-[ RECORD 1 ]------+---
jsonb_array_length | 44

Export GPX from a trip
-[ RECORD 1 ]
?column? | t

Export KML from a trip
-[ RECORD 1 ]
?column? | t

-[ RECORD 1 ]
vessel_id | t

Export timelapse as Geometry LineString from a trip
-[ RECORD 1 ]--+-----------
geometry_type | LineString
num_properties | 35

Export timelapse as Geometry Point from a trip
-[ RECORD 1 ]
?column? | t

Export GPX from trips
-[ RECORD 1 ]
?column? | t

Export KML from trips
-[ RECORD 1 ]
?column? | t

@@ -22,15 +22,15 @@ count | 21

Test monitoring_view3 for user
-[ RECORD 1 ]
count | 3736
count | 3775

Test monitoring_voltage for user
-[ RECORD 1 ]
count | 47
count | 48

Test monitoring_temperatures for user
-[ RECORD 1 ]
count | 120
count | 121

Test monitoring_humidity for user
-[ RECORD 1 ]

53  tests/sql/qgis.sql  Normal file
@@ -0,0 +1,53 @@
---------------------------------------------------------------------------
-- Listing
--

-- List current database
select current_database();

-- connect to the DB
\c signalk

-- output display format
\x on

-- Assign vessel_id var
SELECT v.vessel_id as "vessel_id_kapla" FROM auth.vessels v WHERE v.owner_email = 'demo+kapla@openplotter.cloud' \gset
SELECT v.vessel_id as "vessel_id_aava" FROM auth.vessels v WHERE v.owner_email = 'demo+aava@openplotter.cloud' \gset

-- qgis
SET ROLE qgis_role;

-- Get BBOX Extent from SQL query for a log:
-- "^/log_(\w+)_(\d+).png$"
-- "^/log_(\w+)_(\d+)_sat.png$"
-- require a log_id, optional image width and height, scale_out
\echo 'Get BBOX Extent from SQL query for a log: "^/log_(\w+)_(\d+).png$"'
SELECT public.qgis_bbox_py_fn(null, 1);
SELECT public.qgis_bbox_py_fn(null, 3);
-- "^/log_(\w+)_(\d+)_line.png$"
\echo 'Get BBOX Extent from SQL query for a log as line: "^/log_(\w+)_(\d+)_line.png$"'
SELECT public.qgis_bbox_py_fn(null, 1, 333, 216, False);
SELECT public.qgis_bbox_py_fn(null, 3, 333, 216, False);
-- Get BBOX Extent from SQL query for all logs by vessel_id
-- "^/logs_(\w+)_(\d+).png$"
-- require a vessel_id, optional image width and height, scale_out
\echo 'Get BBOX Extent from SQL query for all logs by vessel_id: "^/logs_(\w+)_(\d+).png$"'
SELECT public.qgis_bbox_py_fn(:'vessel_id_kapla'::TEXT);
SELECT public.qgis_bbox_py_fn(:'vessel_id_aava'::TEXT);
-- Get BBOX Extent from SQL query for all logs by vessel_id
-- "^/logs_(\w+)_(\d+).png$"
-- require a vessel_id, optional image width and height, scale_out
\echo 'Get BBOX Extent from SQL query for a trip by vessel_id: "^/trip_(\w+)_(\d+)_(\d+).png$"'
SELECT public.qgis_bbox_py_fn(:'vessel_id_kapla'::TEXT, 1, 2);
SELECT public.qgis_bbox_py_fn(:'vessel_id_aava'::TEXT, 3, 4);
-- require a vessel_id, optional image width and height, scale_out as in Apache
\echo 'Get BBOX Extent from SQL query for a trip by vessel_id: "^/trip_((\w+)_(\d+)_(\d+)).png$"'
SELECT public.qgis_bbox_trip_py_fn(CONCAT(:'vessel_id_kapla'::TEXT, '_', 1, '_',2));
SELECT public.qgis_bbox_trip_py_fn(CONCAT(:'vessel_id_aava'::TEXT, '_', 3, '_', 4));

--SELECT set_config('vessel.id', :'vessel_id_kapla', false) IS NOT NULL as vessel_id;
-- SQL request from QGIS to fetch the necessary data based on vessel_id
--SELECT id, vessel_id, name as logname, ST_Transform(track_geom, 3857) as track_geom, ROUND(distance, 2) as distance, ROUND(EXTRACT(epoch FROM duration)/3600,2) as duration,_from_time,_to_time FROM api.logbook where track_geom is not null and _to_time is not null ORDER BY _from_time DESC;
--SELECT count(*) FROM api.logbook WHERE track_geom IS NOT NULL AND _to_time IS NOT NULL;
SELECT count(*) FROM api.logbook WHERE trip IS NOT NULL AND _to_time IS NOT NULL;
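
The qgis_bbox_py_fn results above are comma-separated extents in the projected (EPSG:3857-style) plane QGIS renders into. Sketched in plain PostGIS, the core of that computation looks roughly like the query below; this is an illustration only, since the real helper is a Python function that also pads the box to the requested width/height ratio:

-- approximate extent, in web-mercator meters, of one trip's trajectory
SELECT ST_Extent(ST_Transform(trajectory(trip)::geometry, 3857)) AS bbox_3857
FROM api.logbook
WHERE id = 1;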
46  tests/sql/qgis.sql.output  Normal file
@@ -0,0 +1,46 @@
current_database
------------------
signalk
(1 row)

You are now connected to database "signalk" as user "username".
Expanded display is on.
SET
Get BBOX Extent from SQL query for a log: "^/log_(w+)_(d+).png$"
-[ RECORD 1 ]---+------------------------------------------------------
qgis_bbox_py_fn | 2556155.0636042403,8365608,2660086.9363957597,8420076

-[ RECORD 1 ]---+----------------------------------------------------
qgis_bbox_py_fn | 2749398.035335689,8334944,2756917.964664311,8338885

Get BBOX Extent from SQL query for a log as line: "^/log_(w+)_(d+)_line.png$"
-[ RECORD 1 ]---+-------------------------------------------------------------------------
qgis_bbox_py_fn | 2570800.6277114027,8368634.173700442,2645441.4677270483,8417049.85371059

-[ RECORD 1 ]---+--------------------------------------------------------------------------
qgis_bbox_py_fn | 2750457.4431765806,8335162.530580978,2755858.0759322727,8338665.643719805

Get BBOX Extent from SQL query for all logs by vessel_id: "^/logs_(w+)_(d+).png$"
-[ RECORD 1 ]---+------------------------------------------------------
qgis_bbox_py_fn | 2556155.0636042403,8365608,2660086.9363957597,8420076

-[ RECORD 1 ]---+------------------------------------------------------
qgis_bbox_py_fn | -2006284.4558303887,4864146,5013530.455830389,8543049

Get BBOX Extent from SQL query for a trip by vessel_id: "^/trip_(w+)_(d+)_(d+).png$"
-[ RECORD 1 ]---+-------------------------------------
qgis_bbox_py_fn | 2595383,4787988.0,2620859,11997696.0

-[ RECORD 1 ]---+---------------------------------------
qgis_bbox_py_fn | 97351,-192283890.5,2909895,205691085.5

Get BBOX Extent from SQL query for a trip by vessel_id: "^/trip_((w+)_(d+)_(d+)).png$"
-[ RECORD 1 ]--------+------------------------------------------------------
qgis_bbox_trip_py_fn | 2556155.0636042403,8365608,2660086.9363957597,8420076

-[ RECORD 1 ]--------+------------------------------------------------------
qgis_bbox_trip_py_fn | -2006284.4558303887,4864146,5013530.455830389,8543049

-[ RECORD 1 ]
count | 3

47  tests/sql/stays_ext.sql  Normal file
@@ -0,0 +1,47 @@
---------------------------------------------------------------------------
-- Listing
--

-- List current database
select current_database();

-- connect to the DB
\c signalk

-- output display format
\x on

SELECT count(*) as count_eq_0 FROM api.stays_ext m;

SELECT v.vessel_id as "vessel_id" FROM auth.vessels v WHERE v.owner_email = 'demo+kapla@openplotter.cloud' \gset
--\echo :"vessel_id"
SELECT set_config('vessel.id', :'vessel_id', false) IS NOT NULL as vessel_id;

-- user_role
SET ROLE user_role;

\echo 'api.stays details'
SELECT vessel_id IS NOT NULL AS vessel_id_not_null, m.name IS NOT NULL AS name_not_null FROM api.stays AS m WHERE active IS False ORDER BY m.name ASC;

-- Upsert image on stays_ext table
\echo 'api.stays_ext set image/image_b64'
INSERT INTO api.stays_ext (vessel_id, stay_id, image_b64)
VALUES (current_setting('vessel.id', false), 1, 'iVBORw0KGgoAAAANSUhEUgAAAMgAAAAyCAIAAACWMwO2AAABNklEQVR4nO3bwY6CMBiF0XYy7//KzIKk6VBjiMMNk59zVljRIH6WsrBv29bgal93HwA1CYsIYREhLCKERYSwiBAWEcIiQlhECIsIYREhLCKERYSwiBAWEcIiQlhECIsIYREhLCK+7z6A/6j33lq75G8m')
ON CONFLICT (stay_id) DO UPDATE
SET image_b64 = EXCLUDED.image_b64;

-- Ensure image_updated_at on metadata_ext table is updated by trigger
\echo 'api.stays_ext get image_updated_at'
SELECT image_b64 IS NULL AS image_b64_is_null,image IS NOT NULL AS image_not_null,image_updated_at IS NOT NULL AS image_updated_at_not_null FROM api.metadata_ext; --WHERE vessel_id = current_setting('vessel.id', false);

-- vessel_role
SET ROLE vessel_role;

\echo 'api.stays_ext'
SELECT vessel_id IS NOT NULL AS vessel_id_not_null, stay_id FROM api.stays_ext;

-- api_anonymous
SET ROLE api_anonymous;

\echo 'api_anonymous get stays image'
SELECT api.stays_image(current_setting('vessel.id', false), 1) IS NOT NULL AS stays_image_not_null;
37  tests/sql/stays_ext.sql.output  Normal file
@@ -0,0 +1,37 @@
current_database
------------------
signalk
(1 row)

You are now connected to database "signalk" as user "username".
Expanded display is on.
-[ RECORD 1 ]-
count_eq_0 | 0

-[ RECORD 1 ]
vessel_id | t

SET
api.stays details
-[ RECORD 1 ]------+--
vessel_id_not_null | t
name_not_null | t
-[ RECORD 2 ]------+--
vessel_id_not_null | t
name_not_null | t

api.stays_ext set image/image_b64
INSERT 0 1
api.stays_ext get image_updated_at
-[ RECORD 1 ]-------------+--
image_b64_is_null | f
image_not_null | t
image_updated_at_not_null | t

SET
api.stays_ext
SET
api_anonymous get stays image
-[ RECORD 1 ]--------+--
stays_image_not_null | t

@@ -5,11 +5,11 @@

You are now connected to database "signalk" as user "username".
Expanded display is on.
-[ RECORD 1 ]--+-------------------------------
server_version | 16.2 (Debian 16.2-1.pgdg110+2)
-[ RECORD 1 ]--+--------------------------------
server_version | 16.10 (Debian 16.10-1.pgdg12+1)

-[ RECORD 1 ]--------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
postgis_full_version | POSTGIS="3.4.2 c19ce56" [EXTENSION] PGSQL="160" GEOS="3.9.0-CAPI-1.16.2" PROJ="7.2.1 NETWORK_ENABLED=OFF URL_ENDPOINT=https://cdn.proj.org USER_WRITABLE_DIRECTORY=/var/lib/postgresql/.local/share/proj DATABASE_PATH=/usr/share/proj/proj.db" LIBXML="2.9.10" LIBJSON="0.15" LIBPROTOBUF="1.3.3" WAGYU="0.5.0 (Internal)"
-[ RECORD 1 ]--------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
postgis_full_version | POSTGIS="3.5.3 aab5f55" [EXTENSION] PGSQL="160" GEOS="3.11.1-CAPI-1.17.1" PROJ="9.1.1 NETWORK_ENABLED=OFF URL_ENDPOINT=https://cdn.proj.org USER_WRITABLE_DIRECTORY=/var/lib/postgresql/.local/share/proj DATABASE_PATH=/usr/share/proj/proj.db" (compiled against PROJ 9.1.1) LIBXML="2.9.14" LIBJSON="0.16" LIBPROTOBUF="1.4.1" WAGYU="0.5.0 (Internal)"
-[ RECORD 1 ]--------------------------------------------------------------------------------------
Name | citext
@@ -22,41 +22,51 @@ Version | 1.0
Schema | public
Description | transform between jsonb and plpython3u
-[ RECORD 3 ]--------------------------------------------------------------------------------------
Name | mobilitydb
Version | 1.2.0
Schema | public
Description | MobilityDB geospatial trajectory data management & analysis platform
-[ RECORD 4 ]--------------------------------------------------------------------------------------
Name | moddatetime
Version | 1.0
Schema | public
Description | functions for tracking last modification time
-[ RECORD 4 ]--------------------------------------------------------------------------------------
-[ RECORD 5 ]--------------------------------------------------------------------------------------
Name | pg_stat_statements
Version | 1.10
Schema | public
Description | track planning and execution statistics of all SQL statements executed
-[ RECORD 5 ]--------------------------------------------------------------------------------------
-[ RECORD 6 ]--------------------------------------------------------------------------------------
Name | pgcrypto
Version | 1.3
Schema | public
Description | cryptographic functions
-[ RECORD 6 ]--------------------------------------------------------------------------------------
-[ RECORD 7 ]--------------------------------------------------------------------------------------
Name | plpgsql
Version | 1.0
Schema | pg_catalog
Description | PL/pgSQL procedural language
-[ RECORD 7 ]--------------------------------------------------------------------------------------
-[ RECORD 8 ]--------------------------------------------------------------------------------------
Name | plpython3u
Version | 1.0
Schema | pg_catalog
Description | PL/Python3U untrusted procedural language
-[ RECORD 8 ]--------------------------------------------------------------------------------------
-[ RECORD 9 ]--------------------------------------------------------------------------------------
Name | postgis
Version | 3.4.2
Version | 3.5.3
Schema | public
Description | PostGIS geometry and geography spatial types and functions
-[ RECORD 9 ]--------------------------------------------------------------------------------------
-[ RECORD 10 ]-------------------------------------------------------------------------------------
Name | timescaledb
Version | 2.14.2
Version | 2.21.3
Schema | public
Description | Enables scalable inserts and complex queries for time-series data (Community Edition)
-[ RECORD 10 ]-------------------------------------------------------------------------------------
-[ RECORD 11 ]-------------------------------------------------------------------------------------
Name | timescaledb_toolkit
Version | 1.21.0
Schema | public
Description | Library of analytical hyperfunctions, time-series pipelining, and other SQL utilities
-[ RECORD 12 ]-------------------------------------------------------------------------------------
|
||||
Name | uuid-ossp
|
||||
Version | 1.1
|
||||
Schema | public
|
||||
@@ -96,24 +106,24 @@ laninline | 0
|
||||
lanvalidator | 2248
|
||||
lanacl |
|
||||
-[ RECORD 4 ]-+-----------
|
||||
oid | 13545
|
||||
oid | 13568
|
||||
lanname | plpgsql
|
||||
lanowner | 10
|
||||
lanispl | t
|
||||
lanpltrusted | t
|
||||
lanplcallfoid | 13542
|
||||
laninline | 13543
|
||||
lanvalidator | 13544
|
||||
lanplcallfoid | 13565
|
||||
laninline | 13566
|
||||
lanvalidator | 13567
|
||||
lanacl |
|
||||
-[ RECORD 5 ]-+-----------
|
||||
oid | 18175
|
||||
oid | 18251
|
||||
lanname | plpython3u
|
||||
lanowner | 10
|
||||
lanispl | t
|
||||
lanpltrusted | t
|
||||
lanplcallfoid | 18172
|
||||
laninline | 18173
|
||||
lanvalidator | 18174
|
||||
lanplcallfoid | 18248
|
||||
laninline | 18249
|
||||
lanvalidator | 18250
|
||||
lanacl |
|
||||
|
||||
-[ RECORD 1 ]+-----------
@@ -180,42 +190,51 @@ Type | table
Owner | username
-[ RECORD 8 ]---------------------------------
Schema | public
Name | ne_10m_geography_marine_polys
Name | mobilitydb_opcache
Type | table
Owner | username
-[ RECORD 9 ]---------------------------------
Schema | public
Name | ne_10m_geography_marine_polys
Type | table
Owner | username
-[ RECORD 10 ]--------------------------------
Schema | public
Name | ne_10m_geography_marine_polys_gid_seq
Type | sequence
Owner | username
-[ RECORD 10 ]--------------------------------
-[ RECORD 11 ]--------------------------------
Schema | public
Name | process_queue
Type | table
Owner | username
-[ RECORD 11 ]--------------------------------
-[ RECORD 12 ]--------------------------------
Schema | public
Name | process_queue_id_seq
Type | sequence
Owner | username
-[ RECORD 12 ]--------------------------------
-[ RECORD 13 ]--------------------------------
Schema | public
Name | spatial_ref_sys
Type | table
Owner | username

-[ RECORD 1 ]--------
-[ RECORD 1 ]------------
schema_api | logbook
-[ RECORD 2 ]--------
-[ RECORD 2 ]------------
schema_api | metadata
-[ RECORD 3 ]--------
-[ RECORD 3 ]------------
schema_api | metadata_ext
-[ RECORD 4 ]------------
schema_api | metrics
-[ RECORD 4 ]--------
-[ RECORD 5 ]------------
schema_api | moorages
-[ RECORD 5 ]--------
-[ RECORD 6 ]------------
schema_api | stays
-[ RECORD 6 ]--------
-[ RECORD 7 ]------------
schema_api | stays_at
-[ RECORD 8 ]------------
schema_api | stays_ext

-[ RECORD 1 ]-+------------------------------
schema_public | aistypes
@@ -232,10 +251,12 @@ schema_public | iso3166
-[ RECORD 7 ]-+------------------------------
schema_public | mid
-[ RECORD 8 ]-+------------------------------
schema_public | ne_10m_geography_marine_polys
schema_public | mobilitydb_opcache
-[ RECORD 9 ]-+------------------------------
schema_public | process_queue
schema_public | ne_10m_geography_marine_polys
-[ RECORD 10 ]+------------------------------
schema_public | process_queue
-[ RECORD 11 ]+------------------------------
schema_public | spatial_ref_sys

-[ RECORD 1 ]---------
@@ -262,31 +283,13 @@ with_check | true
-[ RECORD 2 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata
policyname | api_vessel_role
permissive | PERMISSIVE
roles | {vessel_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | true
-[ RECORD 3 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata
policyname | api_user_role
permissive | PERMISSIVE
roles | {user_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, true))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 4 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata
policyname | api_scheduler_role
permissive | PERMISSIVE
roles | {scheduler}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 5 ]------------------------------------------------------------------------------------------------------------------------------
-[ RECORD 3 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata
policyname | grafana_role
@@ -295,7 +298,7 @@ roles | {grafana}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | false
-[ RECORD 6 ]------------------------------------------------------------------------------------------------------------------------------
-[ RECORD 4 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata
policyname | grafana_proxy_role
@@ -304,52 +307,34 @@ roles | {grafana_auth}
cmd | ALL
qual | true
with_check | false
-[ RECORD 5 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metrics
policyname | admin_all
permissive | PERMISSIVE
roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 6 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | auth
tablename | vessels
policyname | grafana_proxy_role
permissive | PERMISSIVE
roles | {grafana_auth}
cmd | ALL
qual | true
with_check | false
-[ RECORD 7 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metrics
policyname | admin_all
permissive | PERMISSIVE
roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 8 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metrics
policyname | api_vessel_role
permissive | PERMISSIVE
roles | {vessel_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | true
-[ RECORD 9 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | auth
tablename | vessels
policyname | grafana_proxy_role
permissive | PERMISSIVE
roles | {grafana_auth}
cmd | ALL
qual | true
with_check | false
-[ RECORD 10 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metrics
policyname | api_user_role
permissive | PERMISSIVE
roles | {user_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, true))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 11 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metrics
policyname | api_scheduler_role
permissive | PERMISSIVE
roles | {scheduler}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 12 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 8 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metrics
policyname | grafana_role
@@ -358,7 +343,7 @@ roles | {grafana}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | false
-[ RECORD 13 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 9 ]------------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metrics
policyname | api_anonymous_role
@@ -367,7 +352,7 @@ roles | {api_anonymous}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | false
-[ RECORD 14 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 10 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | logbook
policyname | admin_all
@@ -376,7 +361,7 @@ roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 15 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 11 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | logbook
policyname | api_vessel_role
@@ -385,7 +370,7 @@ roles | {vessel_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | true
-[ RECORD 16 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 12 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | auth
tablename | accounts
policyname | admin_all
@@ -394,7 +379,7 @@ roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 17 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 13 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | logbook
policyname | api_user_role
@@ -403,7 +388,7 @@ roles | {user_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, true))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 18 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 14 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | logbook
policyname | api_scheduler_role
@@ -412,7 +397,7 @@ roles | {scheduler}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 19 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 15 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | logbook
policyname | grafana_role
@@ -421,7 +406,7 @@ roles | {grafana}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | false
-[ RECORD 20 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 16 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | logbook
policyname | api_anonymous_role
@@ -430,7 +415,7 @@ roles | {api_anonymous}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | false
-[ RECORD 21 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 17 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays
policyname | admin_all
@@ -439,7 +424,7 @@ roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 22 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 18 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays
policyname | api_vessel_role
@@ -448,7 +433,25 @@ roles | {vessel_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | true
-[ RECORD 23 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 19 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | logbook
policyname | logbook_qgis_role
permissive | PERMISSIVE
roles | {qgis_role}
cmd | ALL
qual | true
with_check | false
-[ RECORD 20 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | public
tablename | process_queue
policyname | public_maplapse_role
permissive | PERMISSIVE
roles | {maplapse_role}
cmd | ALL
qual | true
with_check | true
-[ RECORD 21 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays
policyname | api_user_role
@@ -457,7 +460,7 @@ roles | {user_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, true))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 24 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 22 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays
policyname | api_scheduler_role
@@ -466,7 +469,7 @@ roles | {scheduler}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 25 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 23 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays
policyname | grafana_role
@@ -475,7 +478,7 @@ roles | {grafana}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | false
-[ RECORD 26 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 24 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays
policyname | api_anonymous_role
@@ -484,7 +487,7 @@ roles | {api_anonymous}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | false
-[ RECORD 27 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 25 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | moorages
policyname | admin_all
@@ -493,25 +496,34 @@ roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 26 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | moorages
policyname | api_vessel_role
permissive | PERMISSIVE
roles | {vessel_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | true
-[ RECORD 27 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays_ext
policyname | admin_all
permissive | PERMISSIVE
roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 28 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | moorages
policyname | api_vessel_role
permissive | PERMISSIVE
roles | {vessel_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | true
-[ RECORD 29 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | moorages
policyname | api_user_role
permissive | PERMISSIVE
roles | {user_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, true))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 30 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 29 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | moorages
policyname | api_scheduler_role
@@ -520,7 +532,7 @@ roles | {scheduler}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 31 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 30 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | moorages
policyname | grafana_role
@@ -529,7 +541,7 @@ roles | {grafana}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | false
-[ RECORD 32 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 31 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | moorages
policyname | api_anonymous_role
@@ -538,7 +550,7 @@ roles | {api_anonymous}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | false
-[ RECORD 33 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 32 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | auth
tablename | vessels
policyname | admin_all
@@ -547,7 +559,7 @@ roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 34 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 33 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | auth
tablename | vessels
policyname | api_user_role
@@ -556,7 +568,7 @@ roles | {user_role}
cmd | ALL
qual | ((vessel_id = current_setting('vessel.id'::text, true)) AND ((owner_email)::text = current_setting('user.email'::text, true)))
with_check | ((vessel_id = current_setting('vessel.id'::text, true)) AND ((owner_email)::text = current_setting('user.email'::text, true)))
-[ RECORD 35 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 34 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | auth
tablename | vessels
policyname | grafana_role
@@ -565,7 +577,7 @@ roles | {grafana}
cmd | ALL
qual | ((owner_email)::text = current_setting('user.email'::text, true))
with_check | false
-[ RECORD 36 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 35 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | auth
tablename | accounts
policyname | api_user_role
@@ -574,7 +586,7 @@ roles | {user_role}
cmd | ALL
qual | ((email)::text = current_setting('user.email'::text, true))
with_check | ((email)::text = current_setting('user.email'::text, true))
-[ RECORD 37 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 36 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | auth
tablename | accounts
policyname | api_scheduler_role
@@ -583,7 +595,7 @@ roles | {scheduler}
cmd | ALL
qual | ((email)::text = current_setting('user.email'::text, true))
with_check | ((email)::text = current_setting('user.email'::text, true))
-[ RECORD 38 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 37 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | auth
tablename | accounts
policyname | grafana_proxy_role
@@ -592,7 +604,7 @@ roles | {grafana_auth}
cmd | ALL
qual | true
with_check | false
-[ RECORD 39 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 38 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | public
tablename | process_queue
policyname | admin_all
@@ -601,7 +613,7 @@ roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 40 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 39 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | public
tablename | process_queue
policyname | api_vessel_role
@@ -610,7 +622,7 @@ roles | {vessel_role}
cmd | ALL
qual | ((ref_id = current_setting('user.id'::text, true)) OR (ref_id = current_setting('vessel.id'::text, true)))
with_check | true
-[ RECORD 41 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 40 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | public
tablename | process_queue
policyname | api_user_role
@@ -619,7 +631,7 @@ roles | {user_role}
cmd | ALL
qual | ((ref_id = current_setting('user.id'::text, true)) OR (ref_id = current_setting('vessel.id'::text, true)))
with_check | ((ref_id = current_setting('user.id'::text, true)) OR (ref_id = current_setting('vessel.id'::text, true)))
-[ RECORD 42 ]-----------------------------------------------------------------------------------------------------------------------------
-[ RECORD 41 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | public
tablename | process_queue
policyname | api_scheduler_role
@@ -628,6 +640,105 @@ roles | {scheduler}
cmd | ALL
qual | true
with_check | false
-[ RECORD 42 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays_ext
policyname | api_user_role
permissive | PERMISSIVE
roles | {user_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 43 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays_ext
policyname | api_anonymous_role
permissive | PERMISSIVE
roles | {api_anonymous}
cmd | ALL
qual | true
with_check | false
-[ RECORD 44 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | stays_ext
policyname | api_vessel_role
permissive | PERMISSIVE
roles | {vessel_role}
cmd | ALL
qual | false
with_check | false
-[ RECORD 45 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata_ext
policyname | admin_all
permissive | PERMISSIVE
roles | {username}
cmd | ALL
qual | true
with_check | true
-[ RECORD 46 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata_ext
policyname | api_user_role
permissive | PERMISSIVE
roles | {user_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 47 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata_ext
policyname | api_anonymous_role
permissive | PERMISSIVE
roles | {api_anonymous}
cmd | ALL
qual | true
with_check | false
-[ RECORD 48 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata_ext
policyname | api_vessel_role
permissive | PERMISSIVE
roles | {vessel_role}
cmd | ALL
qual | false
with_check | false
-[ RECORD 49 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metrics
policyname | api_vessel_role
permissive | PERMISSIVE
roles | {vessel_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 50 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata
policyname | api_vessel_role
permissive | PERMISSIVE
roles | {vessel_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 51 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metadata
policyname | api_user_role
permissive | PERMISSIVE
roles | {user_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))
-[ RECORD 52 ]-----------------------------------------------------------------------------------------------------------------------------
schemaname | api
tablename | metrics
policyname | api_user_role
permissive | PERMISSIVE
roles | {user_role}
cmd | ALL
qual | (vessel_id = current_setting('vessel.id'::text, false))
with_check | (vessel_id = current_setting('vessel.id'::text, false))

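Each record above is one row of pg_policies, i.e. one CREATE POLICY statement. As a sketch, RECORD 29 (api_user_role on api.moorages) corresponds to DDL along these lines; the exact statement in the schema may differ in spelling:

CREATE POLICY api_user_role ON api.moorages
    AS PERMISSIVE FOR ALL TO user_role
    USING (vessel_id = current_setting('vessel.id', true))
    WITH CHECK (vessel_id = current_setting('vessel.id', false));

The second argument of current_setting() is missing_ok: with true, an unset vessel.id yields NULL and the row is simply filtered out; with false, the same read raises an error, which is why the stricter form appears in the WITH CHECK expressions.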
Test nominatim reverse_geocode_py_fn
-[ RECORD 1 ]---------+----------------------------------------
@@ -635,22 +746,22 @@ reverse_geocode_py_fn | {"name": "Spain", "country_code": "es"}

Test geoip reverse_geoip_py_fn
Test opverpass API overpass_py_fn
-[ RECORD 1 ]--+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
overpass_py_fn | {"fee": "yes", "vhf": "09", "name": "Port Olímpic", "phone": "+34 933561016", "leisure": "marina", "website": "https://portolimpic.barcelona/", "wikidata": "Q171204", "wikipedia": "ca:Port Olímpic de Barcelona", "addr:street": "Moll de Xaloc", "power_supply": "yes", "seamark:type": "harbour", "addr:postcode": "08005", "internet_access": "wlan", "wikimedia_commons": "Category:Port Olímpic (Barcelona)", "sanitary_dump_station": "yes", "seamark:harbour:category": "marina"}
-[ RECORD 1 ]--+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
overpass_py_fn | {"fee": "yes", "vhf": "09", "name": "Port Olímpic", "phone": "+34 933561016", "leisure": "marina", "website": "https://portolimpic.barcelona/", "wikidata": "Q171204", "panoramax": "b637d864-4a5f-4d56-a68e-6c81d2c70292", "wikipedia": "ca:Port Olímpic de Barcelona", "check_date": "2024-09-16", "addr:street": "Moll de Xaloc", "power_supply": "yes", "seamark:type": "harbour", "addr:postcode": "08005", "internet_access": "wlan", "wikimedia_commons": "Category:Port Olímpic (Barcelona)", "sanitary_dump_station": "yes", "seamark:harbour:category": "marina"}

-[ RECORD 1 ]--+----------------------------------------------------------------------------------------------------------------------------------------------------------------------
overpass_py_fn | {"name": "Port de la Ginesta", "type": "multipolygon", "leisure": "marina", "name:ca": "Port de la Ginesta", "wikidata": "Q16621038", "wikipedia": "ca:Port Ginesta"}
-[ RECORD 1 ]--+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
overpass_py_fn | {"name": "Port de la Ginesta", "type": "multipolygon", "leisure": "marina", "name:ca": "Port de la Ginesta", "wikidata": "Q16621038", "wikipedia": "ca:Port Ginesta", "check_date": "2024-08-23"}

-[ RECORD 1 ]--+----------------------------------------------
overpass_py_fn | {"name": "Norra hamnen", "leisure": "marina"}
-[ RECORD 1 ]--+---------------------------------------------------------------
overpass_py_fn | {"name": "Norra hamnen", "leisure": "marina", "seamark:type": "harbour", "seamark:harbour:category": "marina"}

-[ RECORD 1 ]----------------------------------------------------------------------------------------------------------------------------------------------
versions_fn | {"api_version" : "0.7.1", "sys_version" : "PostgreSQL 16.2", "timescaledb" : "2.14.2", "postgis" : "3.4.2", "postgrest" : "PostgREST 12.0.2"}
-[ RECORD 1 ]-----------------------------------------------------------------------------------------------------------------------------------------------------------------------
versions_fn | {"api_version" : "0.9.3", "sys_version" : "PostgreSQL 16.10", "mobilitydb" : "1.2.0", "timescaledb" : "2.21.3", "postgis" : "3.5.3", "postgrest" : "PostgREST 13.0.5"}

-[ RECORD 1 ]-----------------
api_version | 0.7.1
sys_version | PostgreSQL 16.2
timescaledb | 2.14.2
postgis | 3.4.2
postgrest | PostgREST 12.0.2
api_version | 0.9.3
sys_version | PostgreSQL 16.10
timescaledb | 2.21.3
postgis | 3.5.3
postgrest | PostgREST 13.0.5

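The version summary above is produced by the versions_fn SQL function; a minimal sketch of the call (the api schema qualification is an assumption based on the PostgREST setup, and the expanded record with api_version, sys_version and friends appears to come from a companion variant returning the same fields as columns):

\x on
SELECT api.versions_fn();  -- returns the single JSON document shown in the first record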
tests/tests.sh
@@ -1,4 +1,4 @@
# PostgSail Unit test
# PostgSail Unit test

if [[ -z "${PGSAIL_DB_URI}" ]]; then
echo "PGSAIL_DB_URI is undefined"
@@ -14,14 +14,6 @@ if [[ ! -x "/usr/bin/psql" ]]; then
apt update && apt -y install postgresql-client
fi

# go install
if [[ ! -x "/usr/bin/go" || ! -x "/root/go/bin/mermerd" ]]; then
#wget -q https://go.dev/dl/go1.21.4.linux-arm64.tar.gz && \
#rm -rf /usr/local/go && tar -C /usr/local -xzf go1.21.4.linux-arm64.tar.gz && \
apt update && apt -y install golang && \
go install github.com/KarnerTh/mermerd@latest
fi

# pnpm install
if [[ ! -x "/usr/local/bin/pnpm" ]]; then
npm install -g pnpm
@@ -48,6 +40,19 @@ else
exit 1
fi

# metadata and vessel configuration unit tests
psql ${PGSAIL_DB_URI} < sql/metadata.sql > output/metadata.sql.output
diff sql/metadata.sql.output output/metadata.sql.output > /dev/null
#diff -u sql/metadata.sql.output output/metadata.sql.output | wc -l
#echo 0
if [ $? -eq 0 ]; then
echo OK
else
echo SQL metadata.sql FAILED
diff -u sql/metadata.sql.output output/metadata.sql.output
exit 1
fi

# https://www.postgresql.org/docs/current/app-psql.html
# run cron jobs
#psql -U ${POSTGRES_USER} -h 172.30.0.1 signalk < sql/cron_run_jobs.sql > output/cron_run_jobs.sql.output
@@ -125,6 +130,19 @@ else
exit
fi

# Stays extended unit tests
psql ${PGSAIL_DB_URI} < sql/stays_ext.sql > output/stays_ext.sql.output
diff sql/stays_ext.sql.output output/stays_ext.sql.output > /dev/null
#diff -u sql/stays_ext.sql.output output/stays_ext.sql.output | wc -l
#echo 0
if [ $? -eq 0 ]; then
echo OK
else
echo SQL stays_ext.sql FAILED
diff -u sql/stays_ext.sql.output output/stays_ext.sql.output
exit 1
fi

# Summary unit tests
psql ${PGSAIL_DB_URI} < sql/summary.sql > output/summary.sql.output
diff sql/summary.sql.output output/summary.sql.output > /dev/null
@@ -168,6 +186,14 @@ else
echo mocha index5.js
exit 1
fi
# Anonymous API unit tests
$mymocha index6.js --reporter ./node_modules/mochawesome --reporter-options reportDir=output/,reportFilename=report6.html
if [ $? -eq 0 ]; then
echo OK
else
echo mocha index6.js
exit 1
fi

# Anonymous SQL unit tests
psql ${PGSAIL_DB_URI} < sql/anonymous.sql > output/anonymous.sql.output
@@ -182,6 +208,71 @@ else
exit 1
fi

# logbook SQL unit tests
psql ${PGSAIL_DB_URI} < sql/logbook.sql > output/logbook.sql.output
diff sql/logbook.sql.output output/logbook.sql.output > /dev/null
#diff -u sql/logbook.sql.output output/logbook.sql.output | wc -l
#echo 0
if [ $? -eq 0 ]; then
echo SQL logbook.sql OK
else
echo SQL logbook.sql FAILED
diff -u sql/logbook.sql.output output/logbook.sql.output
exit 1
fi

# Stats SQL unit tests
psql ${PGSAIL_DB_URI} < sql/stats.sql > output/stats.sql.output
diff sql/stats.sql.output output/stats.sql.output > /dev/null
#diff -u sql/stats.sql.output output/stats.sql.output | wc -l
#echo 0
if [ $? -eq 0 ]; then
echo SQL stats.sql OK
else
echo SQL stats.sql FAILED
diff -u sql/stats.sql.output output/stats.sql.output
exit 1
fi

# MobilityDB SQL unit tests
psql ${PGSAIL_DB_URI} < sql/mobilitydb.sql > output/mobilitydb.sql.output
diff sql/mobilitydb.sql.output output/mobilitydb.sql.output > /dev/null
#diff -u sql/mobilitydb.sql.output output/mobilitydb.sql.output | wc -l
#echo 0
if [ $? -eq 0 ]; then
echo SQL mobilitydb.sql OK
else
echo SQL mobilitydb.sql FAILED
diff -u sql/mobilitydb.sql.output output/mobilitydb.sql.output
exit 1
fi

# qgis SQL unit tests
psql ${PGSAIL_DB_URI} < sql/qgis.sql > output/qgis.sql.output
diff sql/qgis.sql.output output/qgis.sql.output > /dev/null
#diff -u sql/qgis.sql.output output/qgis.sql.output | wc -l
#echo 0
if [ $? -eq 0 ]; then
echo SQL qgis.sql OK
else
echo SQL qgis.sql FAILED
diff -u sql/qgis.sql.output output/qgis.sql.output
exit 1
fi

# maplapse SQL unit tests
psql ${PGSAIL_DB_URI} < sql/maplapse.sql > output/maplapse.sql.output
diff sql/maplapse.sql.output output/maplapse.sql.output > /dev/null
#diff -u sql/maplapse.sql.output output/maplapse.sql.output | wc -l
#echo 0
if [ $? -eq 0 ]; then
echo SQL maplapse.sql OK
else
echo SQL maplapse.sql FAILED
diff -u sql/maplapse.sql.output output/maplapse.sql.output
exit 1
fi

# Download and update openapi documentation
wget ${PGSAIL_API_URI} -O openapi.json
#echo 0
@@ -192,17 +283,3 @@ else
echo openapi.json FAILED
exit 1
fi

# Generate and update mermaid schema documentation
/root/go/bin/mermerd --runConfig ../docs/ERD/mermerdConfig.yaml
#echo $?
echo 0 # not working in github-actions
if [ $? -eq 0 ]; then
cp postgsail.md ../docs/ERD/postgsail.md
echo postgsail.md OK
else
echo postgsail.md FAILED
exit 1
fi

#npm i -D schemalint && npx schemalint
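All of the SQL suites added in this diff follow the same run-then-diff pattern; a compact bash helper capturing that pattern could look like the sketch below (illustration only, not part of tests.sh):

run_sql_test() {
    local name=$1
    # run the suite and capture actual output
    psql "${PGSAIL_DB_URI}" < "sql/${name}.sql" > "output/${name}.sql.output"
    # compare against the committed expected output
    if diff "sql/${name}.sql.output" "output/${name}.sql.output" > /dev/null; then
        echo "SQL ${name}.sql OK"
    else
        echo "SQL ${name}.sql FAILED"
        diff -u "sql/${name}.sql.output" "output/${name}.sql.output"
        exit 1
    fi
}

# e.g. run_sql_test logbook; run_sql_test stats; run_sql_test mobilitydb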