Mirror of https://github.com/xbgmsharp/postgsail.git, synced 2025-09-17 19:27:49 +00:00.

Compare commits (35 commits):

670aa2e43a, 1a8790f2a0, 6d97bb1e32, 779cee21ec, e48192e609, f2beead3a7, 5df3e1dbd3, ed555c1f32, 02dd68f2d8, d9329705ba, 54af136682, 6f96a070b8, e79086c4a2, ed417a4c5d, 546274ce29, d5b6072273, e8addd2e9c, f843a6a1f3, 478bbf5529, c9523e2f6f, 5fa85821de, 7ccef80904, 04cc7de245, a75ba105df, 7eddeefa47, 314cdc71c7, d508ac1662, 6f9956ee46, 0b854374ff, 9b647c9a49, 800f0c83e3, 7424fbbe49, e183530435, 4444f73919, 241c70fcb5
@@ -6,17 +6,23 @@ POSTGRES_DB=postgres
PGSAIL_AUTHENTICATOR_PASSWORD=password
PGSAIL_GRAFANA_PASSWORD=password
PGSAIL_GRAFANA_AUTH_PASSWORD=password
# SMTP server settings
PGSAIL_EMAIL_FROM=root@localhost
PGSAIL_EMAIL_SERVER=localhost
#PGSAIL_EMAIL_USER= Comment if not use
#PGSAIL_EMAIL_PASS= Comment if not use
# Pushover settings
#PGSAIL_PUSHOVER_APP_TOKEN= Comment if not use
#PGSAIL_PUSHOVER_APP_URL= Comment if not use
# TELEGRAM BOT, ask BotFather
#PGSAIL_TELEGRAM_BOT_TOKEN= Comment if not use
# webapp entrypoint, typically the public DNS or IP
PGSAIL_APP_URL=http://localhost:8080
# API entrypoint from the webapp, typically the public DNS or IP
PGSAIL_API_URL=http://localhost:3000
# POSTGREST ENV Settings
PGRST_DB_URI=postgres://authenticator:${PGSAIL_AUTHENTICATOR_PASSWORD}@db:5432/signalk
# % cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 42 | head -n 1
PGRST_JWT_SECRET=_at_least_32__char__long__random
# Grafana ENV Settings
GF_SECURITY_ADMIN_PASSWORD=password
.github/FUNDING.yml (new file, vendored, 14 lines)

@@ -0,0 +1,14 @@
# These are supported funding model platforms

github: [xbgmsharp]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
polar: # Replace with a single Polar username
buy_me_a_coffee: # Replace with a single Buy Me a Coffee username
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
CHANGELOG.md (new file, 1 line)

@@ -0,0 +1 @@
## Please see [Releases](https://github.com/xbgmsharp/postgsail/releases) for the release notes.
CODE_OF_CONDUCT.md (new file, 45 lines)

@@ -0,0 +1,45 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies within all project spaces, and it also applies when an individual is representing the project or its community in public spaces. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project maintainer using any of the [private contact addresses](https://github.com/dec0dOS/amazing-github-template#support). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4, available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>

For answers to common questions about this code of conduct, see <https://www.contributor-covenant.org/faq>
CONTRIBUTING.md (new file, 3 lines)

@@ -0,0 +1,3 @@
Styleguides

Ensure your code passes lint formatting.
README.md (256 changed lines)

@@ -1,11 +1,37 @@
# PostgSail
<br/>
<p align="center">
<a href="https://github.com/xbgmsharp/postgsail">
<img src="https://iot.openplotter.cloud/android-chrome-192x192.png" alt="Logo" width="80" height="80">
</a>

An effortless cloud-based solution for storing and sharing your SignalK data. It lets you log your sails and monitor your boat with historical data.

<h3 align="center">PostgSail</h3>

<p align="center">
PostgSail is an open-source alternative to traditional vessel data management!
<br/>
<br/>
<a href="https://github.com/xbgmsharp/postgsail/blob/main/docs/README.md"><strong>Explore the docs »</strong></a>
<br/>
<br/>
<a href="#about-the-project">View Demo</a>
.
<a href="https://github.com/xbgmsharp/postgsail/issues">Report Bug</a>
.
<a href="https://github.com/xbgmsharp/postgsail/issues">Request Feature</a>
.
<a href="https://xbgmsharp.github.io/postgsail/">Website</a>
.
<a href="https://github.com/sponsors/xbgmsharp">Sponsors</a>
.
<a href="https://discord.gg/uuZrwz4dCS">Discord</a>
</p>
</p>

[](https://github.com/xbgmsharp/postgsail/releases/latest)
[](#license)
[](https://github.com/xbgmsharp/postgsail/issues)
[](http://makeapullrequest.com)

[](https://github.com/xbgmsharp/postgsail/actions/workflows/db-test.yml)
[](https://github.com/xbgmsharp/postgsail/actions/workflows/frontend-test.yml)

@@ -22,6 +48,35 @@ postgsail-telegram-bot:

[](https://www.bestpractices.dev/projects/8124)

## Table Of Contents

- [Table Of Contents](#table-of-contents)
- [About The Project](#about-the-project)
- [Features](#features)
- [Cloud-hosted PostgSail](#cloud-hosted-postgsail)
- [On-Premise (for free)](#on-premise-for-free)
- [Roadmap](#roadmap)
- [Contributing](#contributing)
- [Creating A Pull Request](#creating-a-pull-request)
- [License](#license)
- [Acknowledgements](#acknowledgements)

## About The Project

https://github.com/xbgmsharp/signalk-postgsail/assets/1498985/b2669c39-11ad-4a50-9f91-9397f9057ee8

An effortless cloud-based solution for storing and sharing your SignalK data. It lets you log your sails and monitor your boat with historical data.

Here's how:

It is all about SQL, object-relational, time-series, spatial databases with a bit of python.

PostgSail is an open-source alternative to traditional vessel data management.
It is based on a well-known open-source technology stack: SignalK, PostgreSQL, TimescaleDB, PostGIS and PostgREST. It integrates seamlessly with standard monitoring stacks like Grafana.

To understand the why and how, you might want to read [Why.md](https://github.com/xbgmsharp/postgsail/blob/main/Why.md)

## Features

- Automatically log your voyages without manually starting or stopping a trip.
@@ -44,178 +99,41 @@ postgsail-telegram-bot:
- Polar performance.
- Anything missing? Just ask!

## Context
## Cloud-hosted PostgSail

It is all about SQL, object-relational, time-series, spatial databases with a bit of python.
Remove the hassle of running PostgSail yourself. Skip the technical setup, the maintenance work and the server costs by getting PostgSail on our reliable and secure PostgSail Cloud. Register and try for free at [iot.openplotter.cloud](https://iot.openplotter.cloud/).

PostgSail is an open-source alternative to traditional vessel data management.
It is based on a well-known open-source technology stack: SignalK, PostgreSQL, TimescaleDB, PostGIS and PostgREST. It integrates seamlessly with standard monitoring stacks like Grafana.
## On-Premise (for free)

To understand the why and how, you might want to read [Why.md](https://github.com/xbgmsharp/postgsail/tree/main/Why.md)
Self-host PostgSail where you want and how you want. There are no restrictions, you're in full control. [Install Guide](https://github.com/xbgmsharp/postgsail/blob/main/docs/README.md)

## Architecture
A simple scalable architecture:
## Roadmap

For more clarity and visibility the complete [Architecture overview](https://github.com/xbgmsharp/postgsail/blob/main/docs/README.md).
See the [open issues](https://github.com/xbgmsharp/postgsail/issues) for a list of proposed features (and known issues).

For more clarity and visibility the complete [Entity-Relationship Diagram (ERD)](https://github.com/xbgmsharp/postgsail/blob/main/docs/ERD/README.md) is exported as Mermaid, PNG and SVG files.
Join the community, get support and exchange on [Discord](https://discord.gg/uuZrwz4dCS). Missing a feature? Just ask!

## Cloud
## Contributing

If you prefer not to install or administer your instance of PostgSail, hosted versions of PostgSail are available in the cloud of your choice.
Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
* If you have suggestions for features, feel free to [open an issue](https://github.com/xbgmsharp/postgsail/issues/new) to discuss it, or directly create a pull request with the necessary changes.
* Please make sure you check your spelling and grammar.
* Create an individual PR for each suggestion.
* Please also read through the [Code Of Conduct](https://github.com/xbgmsharp/postgsail/blob/main/CODE_OF_CONDUCT.md) before posting your first idea as well.

### The cloud advantage.
### Creating A Pull Request

Hosted and fully-managed options for PostgSail, designed for all your deployment and business needs. Register and try for free at https://iot.openplotter.cloud/.
1. Fork the Project
2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
4. Push to the Branch (`git push origin feature/AmazingFeature`)
5. Open a Pull Request

## Using PostgSail
## License

A full-featured development environment.
Distributed under the Apache License Version 2.0. See [LICENSE](https://github.com/xbgmsharp/postgsail/blob/main/LICENSE) for more information.

#### With CodeSandbox

- Develop on [](https://codesandbox.io/p/github/xbgmsharp/postgsail/main)
- or via [direct link](https://codesandbox.io/p/github/xbgmsharp/postgsail/main)

#### With DevPod

- [](https://devpod.sh/open#https://github.com/xbgmsharp/postgsail/&workspace=postgsail&provider=docker&ide=openvscode)
- or via [direct link](https://devpod.sh/open#https://github.com/xbgmsharp/postgsail&workspace=postgsail&provider=docker&ide=openvscode)

#### With Docker Dev Environments
- [Open in Docker dev-envs!](https://open.docker.com/dashboard/dev-envs?url=https://github.com/xbgmsharp/postgsail/)

### pre-deploy configuration

To get these running, copy `.env.example` to `.env`, then set the values accordingly.

```bash
# cp .env.example .env
```

Note that `PGRST_JWT_SECRET` must be at least 32 characters long.

`$ head /dev/urandom | tr -dc A-Za-z0-9 | head -c 42 ; echo ''`

```bash
# nano .env
```
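
If you prefer to script this step, the sketch below (an illustration, not part of the repository) generates a 42-character secret and writes it into `.env`; it assumes GNU `sed` and that a `PGRST_JWT_SECRET=` line already exists in the file.

```bash
# Generate a 42-character alphanumeric secret
SECRET=$(head /dev/urandom | tr -dc 'A-Za-z0-9' | head -c 42)

# Replace the placeholder value in .env (GNU sed in-place edit)
sed -i "s|^PGRST_JWT_SECRET=.*|PGRST_JWT_SECRET=${SECRET}|" .env

# Verify the result
grep '^PGRST_JWT_SECRET=' .env
```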

### Deploy

By default no network is set and all data is stored in a Docker volume.
You can update the default settings by editing `docker-compose.yml` and `docker-compose.dev.yml` to your needs.

First let's initialize the database.

#### Step 1. Initialize database

First let's import the SQL schema, execute:

```bash
$ docker compose up db
```

#### Step 2. Start backend (db, api)

Then launch the full backend stack (db, api), execute:

```bash
$ docker compose up db api
```

The API should be accessible via port HTTP/3000.
The database should be accessible via port TCP/5432.

You can connect to the database via a web GUI like [pgadmin](https://www.pgadmin.org/) or with a client like [dbeaver](https://dbeaver.io/).

#### Step 3. Start frontend (web)

Then launch the web frontend, execute:

```bash
$ docker compose up web
```
The frontend should be accessible via port HTTP/8080.

### SQL Configuration

Check and update your postgsail settings via SQL in the table `app_settings`:

```sql
SELECT * FROM app_settings;
```

```sql
UPDATE app_settings
SET
value = 'new_value'
WHERE name = 'app.email_server';
```
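
If you do not have a SQL client at hand, these statements can also be run through the `db` container. A minimal sketch, assuming the stack is up and a `postgres` superuser (adjust the user and database names to your setup):

```bash
# List the current settings (signalk is the application database)
docker compose exec db psql -U postgres -d signalk -c "SELECT * FROM app_settings;"

# Update a single setting, e.g. the outgoing mail server
docker compose exec db psql -U postgres -d signalk \
  -c "UPDATE app_settings SET value = 'smtp.example.com' WHERE name = 'app.email_server';"
```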

### Ingest data

Next, to ingest data from signalk, you need to install the [signalk-postgsail](https://github.com/xbgmsharp/signalk-postgsail) plugin on your signalk server instance.

Also, if you like, you can import saillogger data using the postgsail helpers, [postgsail-helpers](https://github.com/xbgmsharp/postgsail-helpers).

You might want to import your InfluxDB 1.x data as well with [outflux](https://github.com/timescale/outflux).
For InfluxDB 2.x and 3.x, you will need to enable the 1.x APIs to use it. Consult the InfluxDB documentation for more details.

Last, if you like, you can import the sample data from SignalK NMEA Plaka by running the tests.
If everything goes well, all tests pass and you should receive a few notifications by email, PushOver or Telegram.
[End-to-End (E2E) Testing.](https://github.com/xbgmsharp/postgsail/blob/main/tests/)

```
$ docker-compose up tests
```

### API Documentation

The OpenAPI description output depends on the permissions of the role that is contained in the JWT role claim.

Other applications can also use the [PostgSAIL API](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/xbgmsharp/postgsail/main/openapi.json).

API anonymous:

```
$ curl http://localhost:3000/
```

API user_role:

```
$ curl http://localhost:3000/ -H 'Authorization: Bearer my_token_from_login_or_signup_fn'
```

API vessel_role:

```
$ curl http://localhost:3000/ -H 'Authorization: Bearer my_token_from_register_vessel_fn'
```
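
With a user token, any table or view exposed through PostgREST can be queried the same way. A sketch, assuming the `logs_view` view referenced elsewhere in this changeset is exposed to your role, and reusing the placeholder token above:

```bash
$ curl http://localhost:3000/logs_view \
    -H 'Authorization: Bearer my_token_from_login_or_signup_fn' \
    -H 'Accept: application/json'
```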

#### API main workflow

Check the [End-to-End (E2E) test sample](https://github.com/xbgmsharp/postgsail/blob/main/tests/).

### Docker dependencies

`docker-compose` is used to start environment dependencies. Dependencies consist of 3 containers:

- `timescaledb-postgis` alias `db`, PostgreSQL with the TimescaleDB extension along with the PostGIS extension.
- `postgrest` alias `api`, a standalone web server that turns your PostgreSQL database directly into a RESTful API.
- `grafana` alias `app`, to visualize and monitor your data.

### Optional docker images

- [pgAdmin](https://hub.docker.com/r/dpage/pgadmin4), web UI to monitor and manage multiple PostgreSQL instances
- [Swagger](https://hub.docker.com/r/swaggerapi/swagger-ui), web UI to visualize documentation from PostgREST

```
docker-compose -f docker-compose-optional.yml up
```

### Software reference
## Acknowledgements

An out of the box IoT platform using Docker (could be extended to K3s or K8s) with the following software:

@@ -224,18 +142,4 @@ An out of the box IoT platform using Docker (could be extend to K3 or K8) with t

- [TimescaleDB, Time-series data extends PostgreSQL](https://www.timescale.com)
- [PostGIS, a spatial database extender for PostgreSQL object-relational database.](https://postgis.net/)
- [Grafana, open observability platform | Grafana Labs](https://grafana.com)

### Support

To get support, please create a new [issue](https://github.com/xbgmsharp/postgsail/issues).

There are most likely security flaws and bugs.

### Contribution

I'm happy to accept Pull Requests!
Feel free to contribute.

### License

This is free software, under the Apache License Version 2.0.
- And many more
@@ -18,7 +18,7 @@ services:
ports:
- "5432:5432"
volumes:
- ./db-data:/var/lib/postgresql/data
- postgres-data:/var/lib/postgresql/data
- ./initdb:/docker-entrypoint-initdb.d
logging:
options:
@@ -71,8 +71,8 @@ services:
links:
- "db:database"
volumes:
- data:/var/lib/grafana
- data:/var/log/grafana
- grafana-data:/var/lib/grafana
- grafana-data:/var/log/grafana
- ./grafana:/etc/grafana
ports:
- "3001:3000"
@@ -145,4 +145,5 @@ services:
max-size: 10m

volumes:
data: {}
grafana-data: {}
postgres-data: {}
@@ -7,7 +7,7 @@ Auto generated Mermaid diagram using [mermerd](https://github.com/KarnerTh/merme
[PostgSail SQL Schema](https://github.com/xbgmsharp/postgsail/tree/main/docs/ERD/postgsail.md "PostgSail SQL Schema")

## Further
There are 3 main schemas:
There are 3 main schemas in the signalk database:
- API Schema:
- tables
- metrics
@@ -72,4 +72,37 @@ flowchart TD
B --> O{Update account,vessel,otp}
F --> P{Update metadata}
G --> P
```
Cron jobs are not processed by default: if you don't have the correct settings (SMTP, PushOver, Telegram), they might enter an error loop and you could be blocked or banned by the external services.

Therefore, by default there are no active jobs, as they require external configuration settings (SMTP, PushOver, Telegram).
To activate all cron jobs, run the following SQL command:
```sql
UPDATE cron.job SET active = True;
```
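
Before enabling them, you can check which jobs are scheduled and how recent runs went; a sketch using pg_cron's standard tables:

```sql
-- List the scheduled jobs and whether they are active
SELECT jobid, jobname, schedule, active FROM cron.job;

-- Review the outcome of recent runs
SELECT jobid, status, return_message, start_time
FROM cron.job_run_details
ORDER BY start_time DESC
LIMIT 20;
```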

Be sure to review your postgsail settings via SQL in the table `app_settings`:
```sql
SELECT * FROM app_settings;
```

### How to bypass OTP for a local install?

You can skip the OTP by adding or updating the following JSON key/value in the account preferences, as shown in the sketch below:
"email_valid": true

The OTP is created and sent by email using a cron entry in postgres/cron/job.

Accounts are stored in the table signalk/auth/accounts.

You should see a history in the table signalk/public/process_queue:
```sql
select * from public.process_queue;
```

### How to turn off signups

If you just want to use this as a standalone application and don't want people to be able to sign up for an account, revoke execute on the signup function:

```SQL
revoke execute on function api.signup(text,text,text,text) from api_anonymous;
```
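
To re-enable signups later, grant execute back; this simply mirrors the statement above:

```SQL
grant execute on function api.signup(text,text,text,text) to api_anonymous;
```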
docs/README.md (198 changed lines)

@@ -1,4 +1,200 @@

Simple and scalable architecture.

## Architecture

Efficient, simple and scalable architecture.

(architecture diagram)

For more clarity and visibility the complete [Entity-Relationship Diagram (ERD)](https://github.com/xbgmsharp/postgsail/blob/main/docs/ERD/README.md) is exported as Mermaid, PNG and SVG files.

## Using PostgSail
### Development

A full-featured development environment.

#### With CodeSandbox

- Develop on [](https://codesandbox.io/p/github/xbgmsharp/postgsail/main)
- or via [direct link](https://codesandbox.io/p/github/xbgmsharp/postgsail/main)

#### With DevPod

- [](https://devpod.sh/open#https://github.com/xbgmsharp/postgsail/&workspace=postgsail&provider=docker&ide=openvscode)
- or via [direct link](https://devpod.sh/open#https://github.com/xbgmsharp/postgsail&workspace=postgsail&provider=docker&ide=openvscode)

#### With Docker Dev Environments
- [Open in Docker dev-envs!](https://open.docker.com/dashboard/dev-envs?url=https://github.com/xbgmsharp/postgsail/)

### On-premise (self-hosted)

#### pre-deploy configuration

To get these running, copy `.env.example` to `.env`, then set the values accordingly.

```bash
# cp .env.example .env
```

```bash
# nano .env
```

Note that `PGRST_JWT_SECRET` must be at least 32 characters long.

`$ cat /dev/urandom | LC_ALL=C tr -dc 'a-zA-Z0-9' | fold -w 42 | head -n 1`

`PGSAIL_APP_URL` is the URL you connect to from your browser.

`PGSAIL_API_URL` is the URL that `PGSAIL_APP_URL` connects to.

`PGRST_DB_URI` is the URI that `PGSAIL_API_URL` connects to.

To summarize:
```mermaid
flowchart LR
subgraph frontend
direction TB
A(PGSAIL_APP_URL)
B(PGSAIL_API_URL)
end
subgraph backend
direction TB
B(PGSAIL_API_URL) -- SQL --> C(PGRST_DB_URI)
end
%% ^ These subgraphs are identical, except for the links to them:

%% Link *to* subgraph1: subgraph1 direction is maintained

User -- HTTP --> A
User -- HTTP --> B
%% Link *within* subgraph2:
%% subgraph2 inherits the direction of the top-level graph (LR)

Boat -- HTTP --> B
```
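
As an illustration only (the domain names below are hypothetical, not defaults), a public deployment might therefore set values along these lines in `.env`:

```bash
# webapp entrypoint, what you type in the browser
PGSAIL_APP_URL=https://boat.example.com
# API entrypoint, what the webapp and the boat connect to
PGSAIL_API_URL=https://api.example.com
# internal connection from PostgREST to the database container
PGRST_DB_URI=postgres://authenticator:${PGSAIL_AUTHENTICATOR_PASSWORD}@db:5432/signalk
```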

### Deploy

There are two compose files used. You can update the default settings by editing `docker-compose.yml` and `docker-compose.dev.yml` to your needs.

Now let's initialize the database.

#### Step 1. Initialize database

First let's import the SQL schema, execute:

```bash
$ docker compose up db
```

#### Step 2. Start backend (db, api)

Then launch the full backend stack (db, api), execute:

```bash
$ docker compose up db api
```

The API should be accessible via port HTTP/3000.
The database should be accessible via port TCP/5432.

You can connect to the database via a web GUI like [pgadmin](https://www.pgadmin.org/) or with a client like [dbeaver](https://dbeaver.io/).
```bash
$ docker compose -f docker-compose.yml -f docker-compose.dev.yml up pgadmin
```
Then connect to the web UI on port HTTP/5050.
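
Alternatively, any SQL client can connect directly from the host. A sketch with `psql`, reusing the `authenticator` role and its password from `.env` (for full administrative access, connect with your PostgreSQL superuser instead):

```bash
$ psql "postgres://authenticator:${PGSAIL_AUTHENTICATOR_PASSWORD}@localhost:5432/signalk"
```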

#### Step 3. Start frontend (web)

Then launch the web frontend, execute:

```bash
$ docker compose up web
```
This step can take some time as it will first do a build to generate the static website based on your settings.

The frontend should be accessible via port HTTP/8080.

Users are collaborating on an installation guide, [Self-hosted-installation-guide](https://github.com/xbgmsharp/postgsail/wiki/Self-hosted-installation-guide)

### SQL Configuration

Check and update your postgsail settings via SQL in the table `app_settings`:

```sql
SELECT * FROM app_settings;
```

```sql
UPDATE app_settings
SET
value = 'new_value'
WHERE name = 'app.email_server';
```

As it is all about SQL, [read more](https://github.com/xbgmsharp/postgsail/blob/main/docs/ERD/README.md) about the database and explore your data.

### Ingest data

Next, to ingest data from signalk, you need to install the [signalk-postgsail](https://github.com/xbgmsharp/signalk-postgsail) plugin on your signalk server instance.

Also, if you like, you can import saillogger data using the postgsail helpers, [postgsail-helpers](https://github.com/xbgmsharp/postgsail-helpers).

You might want to import your InfluxDB 1.x data as well with [outflux](https://github.com/timescale/outflux).
For InfluxDB 2.x and 3.x, you will need to enable the 1.x APIs to use it. Consult the InfluxDB documentation for more details.

Last, if you like, you can import the sample data from SignalK NMEA Plaka by running the tests.
If everything goes well, all tests pass and you should receive a few notifications by email, PushOver or Telegram.
[End-to-End (E2E) Testing.](https://github.com/xbgmsharp/postgsail/blob/main/tests/)

```
$ docker-compose up tests
```

### API Documentation

The OpenAPI description output depends on the permissions of the role that is contained in the JWT role claim.

Other applications can also use the [PostgSAIL API](https://petstore.swagger.io/?url=https://raw.githubusercontent.com/xbgmsharp/postgsail/main/openapi.json).

API anonymous:

```
$ curl http://localhost:3000/
```

API user_role:

```
$ curl http://localhost:3000/ -H 'Authorization: Bearer my_token_from_login_or_signup_fn'
```

API vessel_role:

```
$ curl http://localhost:3000/ -H 'Authorization: Bearer my_token_from_register_vessel_fn'
```

#### API main workflow

Check the [End-to-End (E2E) test sample](https://github.com/xbgmsharp/postgsail/blob/main/tests/).

### Docker dependencies

`docker-compose` is used to start environment dependencies. Dependencies consist of 3 containers:

- `timescaledb-postgis` alias `db`, PostgreSQL with the TimescaleDB extension along with the PostGIS extension.
- `postgrest` alias `api`, a standalone web server that turns your PostgreSQL database directly into a RESTful API.
- `grafana` alias `app`, to visualize and monitor your data.

### Optional docker images

- [pgAdmin](https://hub.docker.com/r/dpage/pgadmin4), web UI to monitor and manage multiple PostgreSQL instances
- [Swagger](https://hub.docker.com/r/swaggerapi/swagger-ui), web UI to visualize documentation from PostgREST

```
docker-compose -f docker-compose-optional.yml up
```
frontend (submodule) updated: 31579e219d...6195e98c6b
initdb/99_migrations_202404.sql (new file, 625 lines)

@@ -0,0 +1,625 @@
|
||||
---------------------------------------------------------------------------
|
||||
-- Copyright 2021-2024 Francois Lacroix <xbgmsharp@gmail.com>
|
||||
-- This file is part of PostgSail which is released under Apache License, Version 2.0 (the "License").
|
||||
-- See file LICENSE or go to http://www.apache.org/licenses/LICENSE-2.0 for full license details.
|
||||
--
|
||||
-- Migration April 2024
|
||||
--
|
||||
-- List current database
|
||||
select current_database();
|
||||
|
||||
-- connect to the DB
|
||||
\c signalk
|
||||
|
||||
\echo 'Timing mode is enabled'
|
||||
\timing
|
||||
|
||||
\echo 'Force timezone, just in case'
|
||||
set timezone to 'UTC';
|
||||
|
||||
UPDATE public.email_templates
|
||||
SET email_content='Hello __RECIPIENT__,
|
||||
Sorry! We could not convert your boat into a Windy Personal Weather Station due to missing data (temperature, wind or pressure).
Windy Personal Weather Station is now disabled.'
|
||||
WHERE "name"='windy_error';
|
||||
|
||||
CREATE OR REPLACE FUNCTION public.cron_windy_fn() RETURNS void AS $$
|
||||
DECLARE
|
||||
windy_rec record;
|
||||
default_last_metric TIMESTAMPTZ := NOW() - interval '1 day';
|
||||
last_metric TIMESTAMPTZ := NOW();
|
||||
metric_rec record;
|
||||
windy_metric jsonb;
|
||||
app_settings jsonb;
|
||||
user_settings jsonb;
|
||||
windy_pws jsonb;
|
||||
BEGIN
|
||||
-- Check for new observations pending update
|
||||
RAISE NOTICE 'cron_process_windy_fn';
|
||||
-- Gather url from app settings
|
||||
app_settings := get_app_settings_fn();
|
||||
-- Find users with Windy active and with an active vessel
|
||||
-- Map account id to Windy Station ID
|
||||
FOR windy_rec in
|
||||
SELECT
|
||||
a.id,a.email,v.vessel_id,v.name,
|
||||
COALESCE((a.preferences->'windy_last_metric')::TEXT, default_last_metric::TEXT) as last_metric
|
||||
FROM auth.accounts a
|
||||
LEFT JOIN auth.vessels AS v ON v.owner_email = a.email
|
||||
LEFT JOIN api.metadata AS m ON m.vessel_id = v.vessel_id
|
||||
WHERE (a.preferences->'public_windy')::boolean = True
|
||||
AND m.active = True
|
||||
LOOP
|
||||
RAISE NOTICE '-> cron_process_windy_fn for [%]', windy_rec;
|
||||
PERFORM set_config('vessel.id', windy_rec.vessel_id, false);
|
||||
--RAISE WARNING 'public.cron_process_windy_rec_fn() scheduler vessel.id %, user.id', current_setting('vessel.id', false), current_setting('user.id', false);
|
||||
-- Gather user settings
|
||||
user_settings := get_user_settings_from_vesselid_fn(windy_rec.vessel_id::TEXT);
|
||||
RAISE NOTICE '-> cron_process_windy_fn checking user_settings [%]', user_settings;
|
||||
-- Get all metrics from the last windy_last_metric avg by 5 minutes
|
||||
-- TODO json_agg to send all data in once, but issue with py jsonb transformation decimal.
|
||||
FOR metric_rec in
|
||||
SELECT time_bucket('5 minutes', m.time) AS time_bucket,
|
||||
avg((m.metrics->'environment.outside.temperature')::numeric) AS temperature,
|
||||
avg((m.metrics->'environment.outside.pressure')::numeric) AS pressure,
|
||||
avg((m.metrics->'environment.outside.relativeHumidity')::numeric) AS rh,
|
||||
avg((m.metrics->'environment.wind.directionTrue')::numeric) AS winddir,
|
||||
avg((m.metrics->'environment.wind.speedTrue')::numeric) AS wind,
|
||||
max((m.metrics->'environment.wind.speedTrue')::numeric) AS gust,
|
||||
last(latitude, time) AS lat,
|
||||
last(longitude, time) AS lng
|
||||
FROM api.metrics m
|
||||
WHERE vessel_id = windy_rec.vessel_id
|
||||
AND m.time >= windy_rec.last_metric::TIMESTAMPTZ
|
||||
GROUP BY time_bucket
|
||||
ORDER BY time_bucket ASC LIMIT 100
|
||||
LOOP
|
||||
RAISE NOTICE '-> cron_process_windy_fn checking metrics [%]', metric_rec;
|
||||
if metric_rec.wind is null or metric_rec.temperature is null
|
||||
or metric_rec.pressure is null or metric_rec.rh is null then
|
||||
-- Ignore when there is no metrics.
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('windy_error'::TEXT, user_settings::JSONB);
|
||||
-- Disable windy
|
||||
PERFORM api.update_user_preferences_fn('{public_windy}'::TEXT, 'false'::TEXT);
|
||||
RETURN;
|
||||
end if;
|
||||
-- https://community.windy.com/topic/8168/report-your-weather-station-data-to-windy
|
||||
-- temp from kelvin to Celsius
|
||||
-- winddir from radiant to degrees
|
||||
-- rh from ratio to percentage
|
||||
SELECT jsonb_build_object(
|
||||
'dateutc', metric_rec.time_bucket,
|
||||
'station', windy_rec.id,
|
||||
'name', windy_rec.name,
|
||||
'lat', metric_rec.lat,
|
||||
'lon', metric_rec.lng,
|
||||
'wind', metric_rec.wind,
|
||||
'gust', metric_rec.gust,
|
||||
'pressure', metric_rec.pressure,
|
||||
'winddir', radiantToDegrees(metric_rec.winddir::numeric),
|
||||
'temp', kelvinToCel(metric_rec.temperature::numeric),
|
||||
'rh', valToPercent(metric_rec.rh::numeric)
|
||||
) INTO windy_metric;
|
||||
RAISE NOTICE '-> cron_process_windy_fn checking windy_metrics [%]', windy_metric;
|
||||
SELECT windy_pws_py_fn(windy_metric, user_settings, app_settings) into windy_pws;
|
||||
RAISE NOTICE '-> cron_process_windy_fn Windy PWS [%]', ((windy_pws->'header')::JSONB ? 'id');
|
||||
IF NOT((user_settings->'settings')::JSONB ? 'windy') and ((windy_pws->'header')::JSONB ? 'id') then
|
||||
RAISE NOTICE '-> cron_process_windy_fn new Windy PWS [%]', (windy_pws->'header')::JSONB->>'id';
|
||||
-- Send metrics to Windy
|
||||
PERFORM api.update_user_preferences_fn('{windy}'::TEXT, ((windy_pws->'header')::JSONB->>'id')::TEXT);
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('windy'::TEXT, user_settings::JSONB);
|
||||
-- Refresh user settings after first success
|
||||
user_settings := get_user_settings_from_vesselid_fn(windy_rec.vessel_id::TEXT);
|
||||
END IF;
|
||||
-- Record last metrics time
|
||||
SELECT metric_rec.time_bucket INTO last_metric;
|
||||
END LOOP;
|
||||
PERFORM api.update_user_preferences_fn('{windy_last_metric}'::TEXT, last_metric::TEXT);
|
||||
END LOOP;
|
||||
END;
|
||||
$$ language plpgsql;
|
||||
|
||||
-- Add security definer, run this function as admin to avoid weird bug
|
||||
-- ERROR: variable not found in subplan target list
|
||||
CREATE OR REPLACE FUNCTION api.delete_logbook_fn(IN _id integer) RETURNS BOOLEAN AS $delete_logbook$
|
||||
DECLARE
|
||||
logbook_rec record;
|
||||
previous_stays_id numeric;
|
||||
current_stays_departed text;
|
||||
current_stays_id numeric;
|
||||
current_stays_active boolean;
|
||||
BEGIN
|
||||
-- If _id is not NULL
|
||||
IF _id IS NULL OR _id < 1 THEN
|
||||
RAISE WARNING '-> delete_logbook_fn invalid input %', _id;
|
||||
RETURN FALSE;
|
||||
END IF;
|
||||
-- Get the logbook record with all necessary fields exist
|
||||
SELECT * INTO logbook_rec
|
||||
FROM api.logbook
|
||||
WHERE id = _id;
|
||||
-- Ensure the query is successful
|
||||
IF logbook_rec.vessel_id IS NULL THEN
|
||||
RAISE WARNING '-> delete_logbook_fn invalid logbook %', _id;
|
||||
RETURN FALSE;
|
||||
END IF;
|
||||
-- Update logbook
|
||||
UPDATE api.logbook l
|
||||
SET notes = 'mark for deletion'
|
||||
WHERE l.vessel_id = current_setting('vessel.id', false)
|
||||
AND id = logbook_rec.id;
|
||||
-- Update metrics status to moored
|
||||
-- This generate an error when run as user_role "variable not found in subplan target list"
|
||||
UPDATE api.metrics
|
||||
SET status = 'moored'
|
||||
WHERE time >= logbook_rec._from_time
|
||||
AND time <= logbook_rec._to_time
|
||||
AND vessel_id = current_setting('vessel.id', false);
|
||||
-- Get related stays
|
||||
SELECT id,departed,active INTO current_stays_id,current_stays_departed,current_stays_active
|
||||
FROM api.stays s
|
||||
WHERE s.vessel_id = current_setting('vessel.id', false)
|
||||
AND s.arrived = logbook_rec._to_time;
|
||||
-- Update related stays
|
||||
UPDATE api.stays s
|
||||
SET notes = 'mark for deletion'
|
||||
WHERE s.vessel_id = current_setting('vessel.id', false)
|
||||
AND s.arrived = logbook_rec._to_time;
|
||||
-- Find previous stays
|
||||
SELECT id INTO previous_stays_id
|
||||
FROM api.stays s
|
||||
WHERE s.vessel_id = current_setting('vessel.id', false)
|
||||
AND s.arrived < logbook_rec._to_time
|
||||
ORDER BY s.arrived DESC LIMIT 1;
|
||||
-- Update previous stays with the departed time from current stays
|
||||
-- and set the active state from current stays
|
||||
UPDATE api.stays
|
||||
SET departed = current_stays_departed::TIMESTAMPTZ,
|
||||
active = current_stays_active
|
||||
WHERE vessel_id = current_setting('vessel.id', false)
|
||||
AND id = previous_stays_id;
|
||||
-- Clean up, remove invalid logbook and stay entry
|
||||
DELETE FROM api.logbook WHERE id = logbook_rec.id;
|
||||
RAISE WARNING '-> delete_logbook_fn delete logbook [%]', logbook_rec.id;
|
||||
DELETE FROM api.stays WHERE id = current_stays_id;
|
||||
RAISE WARNING '-> delete_logbook_fn delete stays [%]', current_stays_id;
|
||||
-- Clean up, Subtract (-1) moorages ref count
|
||||
UPDATE api.moorages
|
||||
SET reference_count = reference_count - 1
|
||||
WHERE vessel_id = current_setting('vessel.id', false)
|
||||
AND id = previous_stays_id;
|
||||
RETURN TRUE;
|
||||
END;
|
||||
$delete_logbook$ LANGUAGE plpgsql security definer;
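-- Usage sketch (illustration, not part of the migration): delete a logbook entry by id,
-- e.g. SELECT api.delete_logbook_fn(123);  -- where 123 is a hypothetical logbook id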
|
||||
|
||||
-- Allow users to update certain columns on specific TABLES on API schema add reference_count, when deleting a log
|
||||
GRANT UPDATE (name, notes, stay_code, home_flag, reference_count) ON api.moorages TO user_role;
|
||||
|
||||
-- Allow users to update certain columns on specific TABLES on API schema add track_geojson
|
||||
GRANT UPDATE (name, _from, _to, notes, track_geojson) ON api.logbook TO user_role;
|
||||
|
||||
DROP FUNCTION IF EXISTS api.timelapse2_fn;
|
||||
CREATE OR REPLACE FUNCTION api.timelapse2_fn(
|
||||
IN start_log INTEGER DEFAULT NULL,
|
||||
IN end_log INTEGER DEFAULT NULL,
|
||||
IN start_date TEXT DEFAULT NULL,
|
||||
IN end_date TEXT DEFAULT NULL,
|
||||
OUT geojson JSONB) RETURNS JSONB AS $timelapse2$
|
||||
DECLARE
|
||||
_geojson jsonb;
|
||||
BEGIN
|
||||
-- Using sub query to force id order by time
|
||||
-- User can now directly edit the json to add comment or remove track point
|
||||
-- Merge json track_geojson with Geometry Point into a single GeoJSON Points
|
||||
--raise WARNING 'input % % %' , start_log, end_log, public.isnumeric(end_log::text);
|
||||
IF start_log IS NOT NULL AND end_log IS NULL THEN
|
||||
end_log := start_log;
|
||||
END IF;
|
||||
IF start_date IS NOT NULL AND end_date IS NULL THEN
|
||||
end_date := start_date;
|
||||
END IF;
|
||||
--raise WARNING 'input % % %' , start_log, end_log, public.isnumeric(end_log::text);
|
||||
IF start_log IS NOT NULL AND public.isnumeric(start_log::text) AND public.isnumeric(end_log::text) THEN
|
||||
SELECT jsonb_agg(
|
||||
jsonb_build_object('type', 'Feature',
|
||||
'properties', f->'properties',
|
||||
'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'Point'))
|
||||
) INTO _geojson
|
||||
FROM (
|
||||
SELECT jsonb_array_elements(track_geojson->'features') AS f
|
||||
FROM api.logbook l
|
||||
WHERE l.id >= start_log
|
||||
AND l.id <= end_log
|
||||
AND l.track_geojson IS NOT NULL
|
||||
ORDER BY l._from_time ASC
|
||||
) AS sub
|
||||
WHERE (f->'geometry'->>'type') = 'Point';
|
||||
ELSIF start_date IS NOT NULL AND public.isdate(start_date::text) AND public.isdate(end_date::text) THEN
|
||||
SELECT jsonb_agg(
|
||||
jsonb_build_object('type', 'Feature',
|
||||
'properties', f->'properties',
|
||||
'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'Point'))
|
||||
) INTO _geojson
|
||||
FROM (
|
||||
SELECT jsonb_array_elements(track_geojson->'features') AS f
|
||||
FROM api.logbook l
|
||||
WHERE l._from_time >= start_date::TIMESTAMPTZ
|
||||
AND l._to_time <= end_date::TIMESTAMPTZ + interval '23 hours 59 minutes'
|
||||
AND l.track_geojson IS NOT NULL
|
||||
ORDER BY l._from_time ASC
|
||||
) AS sub
|
||||
WHERE (f->'geometry'->>'type') = 'Point';
|
||||
ELSE
|
||||
SELECT jsonb_agg(
|
||||
jsonb_build_object('type', 'Feature',
|
||||
'properties', f->'properties',
|
||||
'geometry', jsonb_build_object( 'coordinates', f->'geometry'->'coordinates', 'type', 'Point'))
|
||||
) INTO _geojson
|
||||
FROM (
|
||||
SELECT jsonb_array_elements(track_geojson->'features') AS f
|
||||
FROM api.logbook l
|
||||
WHERE l.track_geojson IS NOT NULL
|
||||
ORDER BY l._from_time ASC
|
||||
) AS sub
|
||||
WHERE (f->'geometry'->>'type') = 'Point';
|
||||
END IF;
|
||||
-- Return a GeoJSON MultiLineString
|
||||
-- result _geojson [null, null]
|
||||
--RAISE WARNING 'result _geojson %' , _geojson;
|
||||
SELECT jsonb_build_object(
|
||||
'type', 'FeatureCollection',
|
||||
'features', _geojson ) INTO geojson;
|
||||
END;
|
||||
$timelapse2$ LANGUAGE plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
api.timelapse2_fn
|
||||
IS 'Export all selected logs geojson `track_geojson` to a geojson as points including properties';
|
||||
|
||||
-- Allow timelapse2_fn execution for user_role and api_anonymous (public replay)
|
||||
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO user_role;
|
||||
GRANT EXECUTE ON FUNCTION api.timelapse2_fn TO api_anonymous;
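-- Usage sketch (illustration): export the track points of logs 1 through 10 as a GeoJSON FeatureCollection,
-- e.g. SELECT geojson FROM api.timelapse2_fn(start_log => 1, end_log => 10);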
|
||||
|
||||
DROP FUNCTION IF EXISTS public.process_logbook_queue_fn;
|
||||
CREATE OR REPLACE FUNCTION public.process_logbook_queue_fn(IN _id integer) RETURNS void AS $process_logbook_queue$
|
||||
DECLARE
|
||||
logbook_rec record;
|
||||
from_name text;
|
||||
to_name text;
|
||||
log_name text;
|
||||
from_moorage record;
|
||||
to_moorage record;
|
||||
avg_rec record;
|
||||
geo_rec record;
|
||||
log_settings jsonb;
|
||||
user_settings jsonb;
|
||||
geojson jsonb;
|
||||
extra_json jsonb;
|
||||
trip_note jsonb;
|
||||
from_moorage_note jsonb;
|
||||
to_moorage_note jsonb;
|
||||
BEGIN
|
||||
-- If _id is not NULL
|
||||
IF _id IS NULL OR _id < 1 THEN
|
||||
RAISE WARNING '-> process_logbook_queue_fn invalid input %', _id;
|
||||
RETURN;
|
||||
END IF;
|
||||
-- Get the logbook record with all necessary fields exist
|
||||
SELECT * INTO logbook_rec
|
||||
FROM api.logbook
|
||||
WHERE active IS false
|
||||
AND id = _id
|
||||
AND _from_lng IS NOT NULL
|
||||
AND _from_lat IS NOT NULL
|
||||
AND _to_lng IS NOT NULL
|
||||
AND _to_lat IS NOT NULL;
|
||||
-- Ensure the query is successful
|
||||
IF logbook_rec.vessel_id IS NULL THEN
|
||||
RAISE WARNING '-> process_logbook_queue_fn invalid logbook %', _id;
|
||||
RETURN;
|
||||
END IF;
|
||||
|
||||
PERFORM set_config('vessel.id', logbook_rec.vessel_id, false);
|
||||
--RAISE WARNING 'public.process_logbook_queue_fn() scheduler vessel.id %, user.id', current_setting('vessel.id', false), current_setting('user.id', false);
|
||||
|
||||
-- Calculate logbook data average and geo
|
||||
-- Update logbook entry with the latest metric data and calculate data
|
||||
avg_rec := logbook_update_avg_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
|
||||
geo_rec := logbook_update_geom_distance_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
|
||||
|
||||
-- Do we have an existing moorage within 300m of the new log
|
||||
-- generate logbook name, concat _from_location and _to_location from moorage name
|
||||
from_moorage := process_lat_lon_fn(logbook_rec._from_lng::NUMERIC, logbook_rec._from_lat::NUMERIC);
|
||||
to_moorage := process_lat_lon_fn(logbook_rec._to_lng::NUMERIC, logbook_rec._to_lat::NUMERIC);
|
||||
SELECT CONCAT(from_moorage.moorage_name, ' to ' , to_moorage.moorage_name) INTO log_name;
|
||||
|
||||
-- Process `propulsion.*.runTime` and `navigation.log`
|
||||
-- Calculate extra json
|
||||
extra_json := logbook_update_extra_json_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
|
||||
|
||||
RAISE NOTICE 'Updating valid logbook entry logbook id:[%] start:[%] end:[%]', logbook_rec.id, logbook_rec._from_time, logbook_rec._to_time;
|
||||
UPDATE api.logbook
|
||||
SET
|
||||
duration = (logbook_rec._to_time::TIMESTAMPTZ - logbook_rec._from_time::TIMESTAMPTZ),
|
||||
avg_speed = avg_rec.avg_speed,
|
||||
max_speed = avg_rec.max_speed,
|
||||
max_wind_speed = avg_rec.max_wind_speed,
|
||||
_from = from_moorage.moorage_name,
|
||||
_from_moorage_id = from_moorage.moorage_id,
|
||||
_to_moorage_id = to_moorage.moorage_id,
|
||||
_to = to_moorage.moorage_name,
|
||||
name = log_name,
|
||||
track_geom = geo_rec._track_geom,
|
||||
distance = geo_rec._track_distance,
|
||||
extra = extra_json,
|
||||
notes = NULL -- reset pre_log process
|
||||
WHERE id = logbook_rec.id;
|
||||
|
||||
-- GeoJSON require track_geom field
|
||||
geojson := logbook_update_geojson_fn(logbook_rec.id, logbook_rec._from_time::TEXT, logbook_rec._to_time::TEXT);
|
||||
UPDATE api.logbook
|
||||
SET
|
||||
track_geojson = geojson
|
||||
WHERE id = logbook_rec.id;
|
||||
|
||||
-- Add trip details name as note for the first geometry point entry from the GeoJSON
|
||||
SELECT format('{"trip": { "name": "%s", "duration": "%s", "distance": "%s" }}', logbook_rec.name, logbook_rec.duration, logbook_rec.distance) into trip_note;
|
||||
-- Update the properties of the first feature
|
||||
UPDATE api.logbook
|
||||
SET track_geojson = jsonb_set(
|
||||
track_geojson,
|
||||
'{features, 1, properties}',
|
||||
(track_geojson -> 'features' -> 1 -> 'properties' || trip_note)::jsonb
|
||||
)
|
||||
WHERE id = logbook_rec.id
|
||||
and track_geojson -> 'features' -> 1 -> 'geometry' ->> 'type' = 'Point';
|
||||
|
||||
-- Add moorage name as note for the third and last entry of the GeoJSON
|
||||
SELECT format('{"notes": "%s"}', from_moorage.moorage_name) into from_moorage_note;
|
||||
-- Update the properties of the third feature, the second with geometry point
|
||||
UPDATE api.logbook
|
||||
SET track_geojson = jsonb_set(
|
||||
track_geojson,
|
||||
'{features, 2, properties}',
|
||||
(track_geojson -> 'features' -> 2 -> 'properties' || from_moorage_note)::jsonb
|
||||
)
|
||||
WHERE id = logbook_rec.id
|
||||
AND track_geojson -> 'features' -> 2 -> 'geometry' ->> 'type' = 'Point';
|
||||
|
||||
-- Update the note properties of the last feature with geometry point
|
||||
SELECT format('{"notes": "%s"}', to_moorage.moorage_name) into to_moorage_note;
|
||||
UPDATE api.logbook
|
||||
SET track_geojson = jsonb_set(
|
||||
track_geojson,
|
||||
'{features, -1, properties}',
|
||||
CASE
|
||||
WHEN COALESCE((track_geojson -> 'features' -> -1 -> 'properties' ->> 'notes'), '') = '' THEN
|
||||
(track_geojson -> 'features' -> -1 -> 'properties' || to_moorage_note)::jsonb
|
||||
ELSE
|
||||
track_geojson -> 'features' -> -1 -> 'properties'
|
||||
END
|
||||
)
|
||||
WHERE id = logbook_rec.id
|
||||
AND track_geojson -> 'features' -> -1 -> 'geometry' ->> 'type' = 'Point';
|
||||
|
||||
-- Prepare notification, gather user settings
|
||||
SELECT json_build_object('logbook_name', log_name, 'logbook_link', logbook_rec.id) into log_settings;
|
||||
user_settings := get_user_settings_from_vesselid_fn(logbook_rec.vessel_id::TEXT);
|
||||
SELECT user_settings::JSONB || log_settings::JSONB into user_settings;
|
||||
RAISE NOTICE '-> debug process_logbook_queue_fn get_user_settings_from_vesselid_fn [%]', user_settings;
|
||||
RAISE NOTICE '-> debug process_logbook_queue_fn log_settings [%]', log_settings;
|
||||
-- Send notification
|
||||
PERFORM send_notification_fn('logbook'::TEXT, user_settings::JSONB);
|
||||
-- Process badges
|
||||
RAISE NOTICE '-> debug process_logbook_queue_fn user_settings [%]', user_settings->>'email'::TEXT;
|
||||
PERFORM set_config('user.email', user_settings->>'email'::TEXT, false);
|
||||
PERFORM badges_logbook_fn(logbook_rec.id, logbook_rec._to_time::TEXT);
|
||||
PERFORM badges_geom_fn(logbook_rec.id, logbook_rec._to_time::TEXT);
|
||||
END;
|
||||
$process_logbook_queue$ LANGUAGE plpgsql;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.process_logbook_queue_fn
|
||||
IS 'Update logbook details when completed, logbook_update_avg_fn, logbook_update_geom_distance_fn, reverse_geocode_py_fn';
|
||||
|
||||
-- Update the pre.check for the new timelapse function
|
||||
CREATE OR REPLACE FUNCTION public.check_jwt() RETURNS void AS $$
|
||||
-- Prevent unregister user or unregister vessel access
|
||||
-- Allow anonymous access
|
||||
-- Needs to be refactored and simplified, especially the anonymous part.
|
||||
DECLARE
|
||||
_role name;
|
||||
_email text;
|
||||
anonymous record;
|
||||
_path name;
|
||||
_vid text;
|
||||
_vname text;
|
||||
boat TEXT;
|
||||
_pid INTEGER := 0; -- public_id
|
||||
_pvessel TEXT := NULL; -- public_type
|
||||
_ptype TEXT := NULL; -- public_type
|
||||
_ppath BOOLEAN := False; -- public_path
|
||||
_pvalid BOOLEAN := False; -- public_valid
|
||||
_pheader text := NULL; -- public_header
|
||||
valid_public_type BOOLEAN := False;
|
||||
account_rec record;
|
||||
vessel_rec record;
|
||||
BEGIN
|
||||
-- Extract email and role from jwt token
|
||||
--RAISE WARNING 'check_jwt jwt %', current_setting('request.jwt.claims', true);
|
||||
SELECT current_setting('request.jwt.claims', true)::json->>'email' INTO _email;
|
||||
PERFORM set_config('user.email', _email, false);
|
||||
SELECT current_setting('request.jwt.claims', true)::json->>'role' INTO _role;
|
||||
--RAISE WARNING 'jwt email %', current_setting('request.jwt.claims', true)::json->>'email';
|
||||
--RAISE WARNING 'jwt role %', current_setting('request.jwt.claims', true)::json->>'role';
|
||||
--RAISE WARNING 'cur_user %', current_user;
|
||||
|
||||
--TODO SELECT current_setting('request.jwt.uid', true)::json->>'uid' INTO _user_id;
|
||||
--TODO RAISE WARNING 'jwt user_id %', current_setting('request.jwt.uid', true)::json->>'uid';
|
||||
--TODO SELECT current_setting('request.jwt.vid', true)::json->>'vid' INTO _vessel_id;
|
||||
--TODO RAISE WARNING 'jwt vessel_id %', current_setting('request.jwt.vid', true)::json->>'vid';
|
||||
IF _role = 'user_role' THEN
|
||||
-- Check the user exist in the accounts table
|
||||
SELECT * INTO account_rec
|
||||
FROM auth.accounts
|
||||
WHERE auth.accounts.email = _email;
|
||||
IF account_rec.email IS NULL THEN
|
||||
RAISE EXCEPTION 'Invalid user'
|
||||
USING HINT = 'Unknown user or password';
|
||||
END IF;
|
||||
-- Set session variables
|
||||
PERFORM set_config('user.id', account_rec.user_id, false);
|
||||
SELECT current_setting('request.path', true) into _path;
|
||||
--RAISE WARNING 'req path %', current_setting('request.path', true);
|
||||
-- Function allow without defined vessel like for anonymous role
|
||||
IF _path ~ '^\/rpc\/(login|signup|recover|reset)$' THEN
|
||||
RETURN;
|
||||
END IF;
|
||||
-- Function allow without defined vessel as user role
|
||||
-- openapi doc, user settings, otp code and vessel registration
|
||||
IF _path = '/rpc/settings_fn'
|
||||
OR _path = '/rpc/register_vessel'
|
||||
OR _path = '/rpc/update_user_preferences_fn'
|
||||
OR _path = '/rpc/versions_fn'
|
||||
OR _path = '/rpc/email_fn'
|
||||
OR _path = '/' THEN
|
||||
RETURN;
|
||||
END IF;
|
||||
-- Check a vessel and user exist
|
||||
SELECT auth.vessels.* INTO vessel_rec
|
||||
FROM auth.vessels, auth.accounts
|
||||
WHERE auth.vessels.owner_email = auth.accounts.email
|
||||
AND auth.accounts.email = _email;
|
||||
-- check if boat exist yet?
|
||||
IF vessel_rec.owner_email IS NULL THEN
|
||||
-- Return http status code 551 with message
|
||||
RAISE sqlstate 'PT551' using
|
||||
message = 'Vessel Required',
|
||||
detail = 'Invalid vessel',
|
||||
hint = 'Unknown vessel';
|
||||
--RETURN; -- ignore if not exist
|
||||
END IF;
|
||||
-- Redundant?
|
||||
IF vessel_rec.vessel_id IS NULL THEN
|
||||
RAISE EXCEPTION 'Invalid vessel'
|
||||
USING HINT = 'Unknown vessel id';
|
||||
END IF;
|
||||
-- Set session variables
|
||||
PERFORM set_config('vessel.id', vessel_rec.vessel_id, false);
|
||||
PERFORM set_config('vessel.name', vessel_rec.name, false);
|
||||
--RAISE WARNING 'public.check_jwt() user_role vessel.id [%]', current_setting('vessel.id', false);
|
||||
--RAISE WARNING 'public.check_jwt() user_role vessel.name [%]', current_setting('vessel.name', false);
|
||||
ELSIF _role = 'vessel_role' THEN
|
||||
SELECT current_setting('request.path', true) into _path;
|
||||
--RAISE WARNING 'req path %', current_setting('request.path', true);
|
||||
-- Function allow without defined vessel like for anonymous role
|
||||
IF _path ~ '^\/rpc\/(oauth_\w+)$' THEN
|
||||
RETURN;
|
||||
END IF;
|
||||
-- Extract vessel_id from jwt token
|
||||
SELECT current_setting('request.jwt.claims', true)::json->>'vid' INTO _vid;
|
||||
-- Check the vessel and user exist
|
||||
SELECT auth.vessels.* INTO vessel_rec
|
||||
FROM auth.vessels, auth.accounts
|
||||
WHERE auth.vessels.owner_email = auth.accounts.email
|
||||
AND auth.accounts.email = _email
|
||||
AND auth.vessels.vessel_id = _vid;
|
||||
IF vessel_rec.owner_email IS NULL THEN
|
||||
RAISE EXCEPTION 'Invalid vessel'
|
||||
USING HINT = 'Unknown vessel owner_email';
|
||||
END IF;
|
||||
PERFORM set_config('vessel.id', vessel_rec.vessel_id, false);
|
||||
PERFORM set_config('vessel.name', vessel_rec.name, false);
|
||||
--RAISE WARNING 'public.check_jwt() user_role vessel.name %', current_setting('vessel.name', false);
|
||||
--RAISE WARNING 'public.check_jwt() user_role vessel.id %', current_setting('vessel.id', false);
|
||||
ELSIF _role = 'api_anonymous' THEN
|
||||
--RAISE WARNING 'public.check_jwt() api_anonymous';
|
||||
-- Check if path is a valid allow anonymous path
|
||||
SELECT current_setting('request.path', true) ~ '^/(logs_view|log_view|rpc/timelapse_fn|rpc/timelapse2_fn|monitoring_view|stats_logs_view|stats_moorages_view|rpc/stats_logs_fn)$' INTO _ppath;
|
||||
if _ppath is True then
|
||||
-- Check if the custom header is present and valid
|
||||
SELECT current_setting('request.headers', true)::json->>'x-is-public' into _pheader;
|
||||
RAISE WARNING 'public.check_jwt() api_anonymous _pheader [%]', _pheader;
|
||||
if _pheader is null then
|
||||
RAISE EXCEPTION 'Invalid public_header'
|
||||
USING HINT = 'Stop being so evil and maybe you can log in';
|
||||
end if;
|
||||
SELECT convert_from(decode(_pheader, 'base64'), 'utf-8')
|
||||
~ '\w+,public_(logs|logs_list|stats|timelapse|monitoring),\d+$' into _pvalid;
|
||||
RAISE WARNING 'public.check_jwt() api_anonymous _pvalid [%]', _pvalid;
|
||||
if _pvalid is null or _pvalid is False then
|
||||
RAISE EXCEPTION 'Invalid public_valid'
|
||||
USING HINT = 'Stop being so evil and maybe you can log in';
|
||||
end if;
|
||||
WITH regex AS (
|
||||
SELECT regexp_match(
|
||||
convert_from(
|
||||
decode(_pheader, 'base64'), 'utf-8'),
|
||||
'(\w+),(public_(logs|logs_list|stats|timelapse|monitoring)),(\d+)$') AS match
|
||||
)
|
||||
SELECT match[1], match[2], match[4] into _pvessel, _ptype, _pid
|
||||
FROM regex;
|
||||
RAISE WARNING 'public.check_jwt() api_anonymous [%] [%] [%]', _pvessel, _ptype, _pid;
|
||||
if _pvessel is not null and _ptype is not null then
|
||||
-- Everything seems fine, get the vessel_id based on the vessel name.
|
||||
SELECT _ptype::name = any(enum_range(null::public_type)::name[]) INTO valid_public_type;
|
||||
IF valid_public_type IS False THEN
|
||||
-- Ignore entry if type is invalid
|
||||
RAISE EXCEPTION 'Invalid public_type'
|
||||
USING HINT = 'Stop being so evil and maybe you can log in';
|
||||
END IF;
|
||||
-- Check if boat name match public_vessel name
|
||||
boat := '^' || _pvessel || '$';
|
||||
IF _ptype ~ '^public_(logs|timelapse)$' AND _pid > 0 THEN
|
||||
WITH log as (
|
||||
SELECT vessel_id from api.logbook l where l.id = _pid
|
||||
)
|
||||
SELECT v.vessel_id, v.name into anonymous
|
||||
FROM auth.accounts a, auth.vessels v, jsonb_each_text(a.preferences) as prefs, log l
|
||||
WHERE v.vessel_id = l.vessel_id
|
||||
AND a.email = v.owner_email
|
||||
AND a.preferences->>'public_vessel'::text ~* boat
|
||||
AND prefs.key = _ptype::TEXT
|
||||
AND prefs.value::BOOLEAN = true;
|
||||
RAISE WARNING '-> ispublic_fn public_logs output boat:[%], type:[%], result:[%]', _pvessel, _ptype, anonymous;
|
||||
IF anonymous.vessel_id IS NOT NULL THEN
|
||||
PERFORM set_config('vessel.id', anonymous.vessel_id, false);
|
||||
PERFORM set_config('vessel.name', anonymous.name, false);
|
||||
RETURN;
|
||||
END IF;
|
||||
ELSE
|
||||
SELECT v.vessel_id, v.name into anonymous
|
||||
FROM auth.accounts a, auth.vessels v, jsonb_each_text(a.preferences) as prefs
|
||||
WHERE a.email = v.owner_email
|
||||
AND a.preferences->>'public_vessel'::text ~* boat
|
||||
AND prefs.key = _ptype::TEXT
|
||||
AND prefs.value::BOOLEAN = true;
|
||||
RAISE WARNING '-> ispublic_fn output boat:[%], type:[%], result:[%]', _pvessel, _ptype, anonymous;
|
||||
IF anonymous.vessel_id IS NOT NULL THEN
|
||||
PERFORM set_config('vessel.id', anonymous.vessel_id, false);
|
||||
PERFORM set_config('vessel.name', anonymous.name, false);
|
||||
RETURN;
|
||||
END IF;
|
||||
END IF;
|
||||
RAISE sqlstate 'PT404' using message = 'unknown resource';
|
||||
END IF; -- end anonymous path
|
||||
END IF;
|
||||
ELSIF _role <> 'api_anonymous' THEN
|
||||
RAISE EXCEPTION 'Invalid role'
|
||||
USING HINT = 'Stop being so evil and maybe you can log in';
|
||||
END IF;
|
||||
END
|
||||
$$ language plpgsql security definer;
|
||||
-- Description
|
||||
COMMENT ON FUNCTION
|
||||
public.check_jwt
|
||||
IS 'PostgREST API db-pre-request check, set_config according to role (api_anonymous,vessel_role,user_role)';
|
||||
|
||||
GRANT EXECUTE ON FUNCTION public.check_jwt() TO api_anonymous;
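-- Note (illustration): for anonymous access the x-is-public header is the base64 encoding of
-- "<public_vessel name>,<public_type>,<id>", where public_type is one of
-- public_logs, public_logs_list, public_stats, public_timelapse or public_monitoring.
-- A hypothetical example from a shell: echo -n 'my_boat,public_logs,1' | base64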
|
||||
|
||||
-- Update version
|
||||
UPDATE public.app_settings
|
||||
SET value='0.7.2'
|
||||
WHERE "name"='app.version';
|
@@ -28,3 +28,5 @@ ALTER ROLE authenticator WITH PASSWORD '${PGSAIL_AUTHENTICATOR_PASSWORD}';
ALTER ROLE grafana WITH PASSWORD '${PGSAIL_GRAFANA_PASSWORD}';
ALTER ROLE grafana_auth WITH PASSWORD '${PGSAIL_GRAFANA_AUTH_PASSWORD}';
END

curl -s -XPOST -Hx-pgsail:${PGSAIL_VERSION} https://api.openplotter.cloud/rpc/telemetry_fn
File diff suppressed because one or more lines are too long
@@ -645,10 +645,10 @@ overpass_py_fn | {"name": "Port de la Ginesta", "type": "multipolygon", "leisure
overpass_py_fn | {"name": "Norra hamnen", "leisure": "marina"}

-[ RECORD 1 ]----------------------------------------------------------------------------------------------------------------------------------------------
versions_fn | {"api_version" : "0.7.1", "sys_version" : "PostgreSQL 16.2", "timescaledb" : "2.14.2", "postgis" : "3.4.2", "postgrest" : "PostgREST 12.0.2"}
versions_fn | {"api_version" : "0.7.2", "sys_version" : "PostgreSQL 16.2", "timescaledb" : "2.14.2", "postgis" : "3.4.2", "postgrest" : "PostgREST 12.0.2"}

-[ RECORD 1 ]-----------------
api_version | 0.7.1
api_version | 0.7.2
sys_version | PostgreSQL 16.2
timescaledb | 2.14.2
postgis | 3.4.2