Skip to content
Snippets Groups Projects
Commit 2daf92a1 authored by Corné Lukken's avatar Corné Lukken
Browse files

Merge branch 'L2SS-1279-trim-timescaledb-from-repo' into 'master'

L2SS-1279: remove timescaledb related code

Closes L2SS-1279

See merge request !561
parents 05693290 73148843
Branches
Tags v0.13.0
1 merge request!561L2SS-1279: remove timescaledb related code
Showing
with 6 additions and 1632 deletions
...@@ -139,10 +139,6 @@ docker_build_image_all: ...@@ -139,10 +139,6 @@ docker_build_image_all:
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-bst latest - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-bst latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-sst latest - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-sst latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-xst latest - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-xst latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh archiver-timescale latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh hdbpp latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh hdbppts-cm latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh hdbppts-es latest
# Build and push custom images on merge request if relevant files changed # Build and push custom images on merge request if relevant files changed
docker_build_image_lofar_device_base: docker_build_image_lofar_device_base:
...@@ -530,50 +526,6 @@ docker_build_image_device_temperature_manager: ...@@ -530,50 +526,6 @@ docker_build_image_device_temperature_manager:
script: script:
# Do not remove 'bash' or statement will be ignored by primitive docker shell # Do not remove 'bash' or statement will be ignored by primitive docker shell
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-temperature-manager $tag - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-temperature-manager $tag
docker_build_image_archiver_timescale:
extends: .base_docker_images_except
only:
refs:
- merge_requests
changes:
- docker-compose/archiver-timescale.yml
- docker-compose/timescaledb/*
script:
# Do not remove 'bash' or statement will be ignored by primitive docker shell
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh archiver-timescale $tag
docker_build_image_hdbpp:
extends: .base_docker_images_except
only:
refs:
- merge_requests
changes:
- docker-compose/archiver-timescale.yml
- docker-compose/hdbpp/*
script:
# Do not remove 'bash' or statement will be ignored by primitive docker shell
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh hdbpp $tag
docker_build_image_hdbppts_cm:
extends: .base_docker_images_except
only:
refs:
- merge_requests
changes:
- docker-compose/archiver-timescale.yml
- docker-compose/hdbppts-cm/*
script:
# Do not remove 'bash' or statement will be ignored by primitive docker shell
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh hdbppts-cm $tag
docker_build_image_hdbppts_es:
extends: .base_docker_images_except
only:
refs:
- merge_requests
changes:
- docker-compose/archiver-timescale.yml
- docker-compose/hdbppts-es/*
script:
# Do not remove 'bash' or statement will be ignored by primitive docker shell
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh hdbppts-es $tag
newline_at_eof: newline_at_eof:
stage: linting stage: linting
before_script: before_script:
...@@ -699,7 +651,6 @@ integration_test_docker: ...@@ -699,7 +651,6 @@ integration_test_docker:
echo "Saving log for container $container" echo "Saving log for container $container"
docker logs "${container}" >& "log/${container}.log" docker logs "${container}" >& "log/${container}.log"
done done
PGPASSWORD=password pg_dump --host=docker --username=postgres hdb 2>log/archiver-timescale-dump.log | gzip > log/archiver-timescale-dump.txt.gz
artifacts: artifacts:
when: always when: always
paths: paths:
......
...@@ -12,7 +12,6 @@ The following files are provided: ...@@ -12,7 +12,6 @@ The following files are provided:
| File | Description | Usage | | File | Description | Usage |
|--------------------------------------------|-------------------------------------------------------------|---------------------| |--------------------------------------------|-------------------------------------------------------------|---------------------|
| `LOFAR_ConfigDb.json` | Generic base configuration, registering all of the devices. | Always | | `LOFAR_ConfigDb.json` | Generic base configuration, registering all of the devices. | Always |
| `tango-archiver-data/archiver-devices.json`| Archiver configuration for TimescaleDB | Always |
| `test_environment_ConfigDb.json` | Base delta for the unit- and integration test suites. | Tests & development | | `test_environment_ConfigDb.json` | Base delta for the unit- and integration test suites. | Tests & development |
| `stations/simulators_ConfigDb.json` | A "station" configuration that points to our simulators. | Tests & development | | `stations/simulators_ConfigDb.json` | A "station" configuration that points to our simulators. | Tests & development |
| `stations/dummy_positions_ConfigDb.json` | An antenna configuration, just to have one (it's CS001). | Tests & development | | `stations/dummy_positions_ConfigDb.json` | An antenna configuration, just to have one (it's CS001). | Tests & development |
......
{
"servers": {
"hdbppes-srv": {
"01": {
"HdbEventSubscriber": {
"archiving/hdbppts/eventsubscriber01": {
"attribute_properties": {},
"properties": {
"CheckPeriodicTimeoutDelay": ["5"],
"PollingThreadPeriod": ["3"],
"LibConfiguration": ["connect_string= user=postgres password=password host=archiver-timescale port=5432 dbname=hdb","host=archiver-timescale","libname=libhdb++timescale.so","dbname=hdb","port=5432", "user=postgres", "password=password"],
"polled_attr": []
}
}
}
}
},
"hdbppcm-srv": {
"01": {
"HdbConfigurationManager": {
"archiving/hdbppts/confmanager01": {
"attribute_properties": {},
"properties": {
"ArchiverList": ["archiving/hdbppts/eventsubscriber01"],
"MaxSearchSize": ["1000"],
"LibConfiguration": ["connect_string= user=postgres password=password host=archiver-timescale port=5432 dbname=hdb","host=archiver-timescale","libname=libhdb++timescale.so","dbname=hdb","port=5432", "user=postgres", "password=password"],
"polled_attr": []
}
}
}
}
}
}
}
...@@ -16,7 +16,6 @@ Station Control software related to Tango devices. \ ...@@ -16,7 +16,6 @@ Station Control software related to Tango devices. \
* [Bootstrap](#bootstrap) * [Bootstrap](#bootstrap)
* [User documentation (ReadTheDocs (Sphinx / ReStructuredText))](tangostationcontrol/docs/README.md) * [User documentation (ReadTheDocs (Sphinx / ReStructuredText))](tangostationcontrol/docs/README.md)
* [Docker compose & station services documentation](docker-compose/README.md) * [Docker compose & station services documentation](docker-compose/README.md)
* [Timescaledb](docker-compose/timescaledb/README.md)
* [Jupyter startup files](docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/README.md) * [Jupyter startup files](docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/README.md)
* [Tango Prometheus exporter](https://git.astron.nl/lofar2.0/ska-tango-grafana-exporter) * [Tango Prometheus exporter](https://git.astron.nl/lofar2.0/ska-tango-grafana-exporter)
* [Developer Documentation](#development) * [Developer Documentation](#development)
...@@ -24,7 +23,6 @@ Station Control software related to Tango devices. \ ...@@ -24,7 +23,6 @@ Station Control software related to Tango devices. \
* [Versioning](#versioning) * [Versioning](#versioning)
* Source code documentation * Source code documentation
* [Attribute wrapper documentation](tangostationcontrol/tangostationcontrol/clients/README.md) * [Attribute wrapper documentation](tangostationcontrol/tangostationcontrol/clients/README.md)
* [Archiver documentation](tangostationcontrol/tangostationcontrol/toolkit/README.md)
* [Adding a new tango device](tangostationcontrol/tangostationcontrol/devices/README.md) * [Adding a new tango device](tangostationcontrol/tangostationcontrol/devices/README.md)
* [HDF5 statistics](tangostationcontrol/tangostationcontrol/statistics/README.md) * [HDF5 statistics](tangostationcontrol/tangostationcontrol/statistics/README.md)
* [Unit tests](tangostationcontrol/tangostationcontrol/test/README.md) * [Unit tests](tangostationcontrol/tangostationcontrol/test/README.md)
...@@ -117,6 +115,7 @@ Next change the version in the following places: ...@@ -117,6 +115,7 @@ Next change the version in the following places:
# Release Notes # Release Notes
* 0.13.0 Remove all `archiver-timescale`, `hdbppts-cm`, `hdbppts-es` functionalities
* 0.12.1 Add `AbstractHierarchy` and `AbstractHierarchyDevice` classes and * 0.12.1 Add `AbstractHierarchy` and `AbstractHierarchyDevice` classes and
functionality functionality
* 0.12.0 Add `Calibration_SDP_Subband_Weights_<XXX>MHz_R` attributes to implement HDF5 calibration tables * 0.12.0 Add `Calibration_SDP_Subband_Weights_<XXX>MHz_R` attributes to implement HDF5 calibration tables
......
...@@ -15,9 +15,6 @@ TANGO_POGO_VERSION=9.6.35 ...@@ -15,9 +15,6 @@ TANGO_POGO_VERSION=9.6.35
TANGO_REST_VERSION=1.14.7 TANGO_REST_VERSION=1.14.7
TANGO_STARTER_VERSION=2021-05-28 TANGO_STARTER_VERSION=2021-05-28
PG_TIMESCALEDB_VERSION=latest-pg12
PG_SUPERUSER_PASSWORD=password
PG_HDB_PASSWORD=hdbpp
MYSQL_ROOT_PASSWORD=secret MYSQL_ROOT_PASSWORD=secret
MYSQL_PASSWORD=tango MYSQL_PASSWORD=tango
......
...@@ -206,7 +206,6 @@ bootstrap: pull build # first start, initialise from scratch ...@@ -206,7 +206,6 @@ bootstrap: pull build # first start, initialise from scratch
$(MAKE) start dsconfig # boot up containers to load configurations $(MAKE) start dsconfig # boot up containers to load configurations
sleep 5 # wait for dsconfig container to come up sleep 5 # wait for dsconfig container to come up
../sbin/update_ConfigDb.sh ../CDB/LOFAR_ConfigDb.json # load default configuration ../sbin/update_ConfigDb.sh ../CDB/LOFAR_ConfigDb.json # load default configuration
../sbin/update_ConfigDb.sh ../CDB/tango-archiver-data/archiver-devices.json # load default archive configuration
../sbin/update_ConfigDb.sh ../CDB/stations/simulators_ConfigDb.json # by default, use simulators ../sbin/update_ConfigDb.sh ../CDB/stations/simulators_ConfigDb.json # by default, use simulators
start: up ## start a service (usage: make start <servicename>) start: up ## start a service (usage: make start <servicename>)
......
...@@ -38,11 +38,6 @@ are used in production. ...@@ -38,11 +38,6 @@ are used in production.
- Services - Services
- databases - databases
- dsconfig - dsconfig
- [timescaledb](timescaledb/README.md)
- archiver-timescale
- hbdpp
- hbdpp-cm (ConfigurationManager)
- hbdpp-es (EventSubscriber)
- prometheus - prometheus
- webservers / user interfaces - webservers / user interfaces
- jupyterlab - jupyterlab
...@@ -78,31 +73,6 @@ registry and uploaded to our own using matching tags. ...@@ -78,31 +73,6 @@ registry and uploaded to our own using matching tags.
Services, same mechanism as devices. Services, same mechanism as devices.
### HDB++ image updates
The hdbpp Docker image is used as a base image for the `hdbppts-cm`
(ConfigurationManager) and `hdbppts-es` (EventSubscriber)
images. If one is developing on a branch and any updates is made
in hdbpp/Dockerfile, those won't be automatically picked up from `hdbppts-cm`
and `hdbppts-es`, because the argument `SOURCE_IMAGE` in the docker-compose
yml file always refers to the remote `hdbpp` image in the repository.
A temporary workaround for locally testing on these archiving containers
is the following:
- Stop and remove any running `hdbpp*` container
- In the archiver-timescale.yml file, replace the `hdbppts-cm` and `hdbppts-es`
`SOURCE_IMAGE` tag 'latest' with the branch name
(e.g. `SOURCE_IMAGE: ${LOCAL_DOCKER_REGISTRY_HOST}/${LOCAL_DOCKER_REGISTRY_USER}/hdbpp:l2ss-new-branch`)
- Rebuild all the hdbpp* container (`make build hdbpp hdbppts-cm hdbppts-es`),
and then start them (`make start hdbpp hdbppts-cm hdbppts-es`)
- Test the new features
After the branch has been correctly developed, tested, the merge has been
approved, and the new images have been built on the repository:
- Put back 'latest' tag on the `archiver-timescale.yml` file, replacing the branch name
- Merge on master
## Gitlab CI/CD ## Gitlab CI/CD
1. [Image tagging and change detection](#image-tagging-and-change-detection) 1. [Image tagging and change detection](#image-tagging-and-change-detection)
......
# Copyright (C) 2022 ASTRON (Netherlands Institute for Radio Astronomy)
# SPDX-License-Identifier: Apache-2.0
version: '2.1'
volumes:
archiver-timescale-data: { }
services:
archiver-timescale:
image: timescaledb
build:
context: timescaledb
args:
SOURCE_IMAGE: timescale/timescaledb:${PG_TIMESCALEDB_VERSION}
container_name: archiver-timescale
networks:
- control
ports:
- "5432:5432/tcp"
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
- archiver-timescale-data:/var/lib/postgresql/data
depends_on:
- databaseds
environment:
- POSTGRES_PASSWORD=${PG_SUPERUSER_PASSWORD}
- PG_HDB_PASSWORD=${PG_HDB_PASSWORD}
- TANGO_HOST=${TANGO_HOST}
healthcheck:
test: nc -z -v localhost 5432
interval: 1m
timeout: 30s
retries: 3
start_period: 30s
logging:
driver: syslog
options:
syslog-address: udp://${LOG_HOSTNAME}:1514
syslog-format: rfc3164
tag: "{{.Name}}"
restart: unless-stopped
hdbpp:
image: hdbpp
build:
context: hdbpp
args:
SOURCE_IMAGE: ${DOCKER_REGISTRY_HOST}/${DOCKER_REGISTRY_USER}-tango-cpp:${TANGO_CPP_VERSION}
container_name: hdbpp
networks:
- control
depends_on:
- databaseds
- dsconfig
- archiver-timescale
extra_hosts:
- "host.docker.internal:host-gateway"
environment:
- TANGO_HOST=${TANGO_HOST}
hdbppts-cm:
image: hdbppts-cm
build:
context: hdbppts-cm
args:
SOURCE_IMAGE: ${LOCAL_DOCKER_REGISTRY_HOST}/${LOCAL_DOCKER_REGISTRY_USER}/hdbpp:latest
container_name: hdbppts-cm
networks:
- control
depends_on:
- databaseds
- dsconfig
- archiver-timescale
extra_hosts:
- "host.docker.internal:host-gateway"
environment:
- TANGO_HOST=${TANGO_HOST}
- HdbManager=archiving/hdbppts/confmanager01
command: >
/bin/bash -c "
wait-for-it.sh archiver-timescale:5432 --timeout=30 --strict --
wait-for-it.sh ${TANGO_HOST} --timeout=30 --strict --
hdbppcm-srv 01"
logging:
driver: syslog
options:
syslog-address: udp://${LOG_HOSTNAME}:1514
syslog-format: rfc3164
tag: "{{.Name}}"
hdbppts-es:
image: hdbppts-es
build:
context: hdbppts-es
args:
SOURCE_IMAGE: ${LOCAL_DOCKER_REGISTRY_HOST}/${LOCAL_DOCKER_REGISTRY_USER}/hdbpp:latest
container_name: hdbppts-es
networks:
- control
depends_on:
- hdbppts-cm
- databaseds
- dsconfig
- archiver-timescale
extra_hosts:
- "host.docker.internal:host-gateway"
environment:
- TANGO_HOST=${TANGO_HOST}
- HdbManager=archiving/hdbppts/confmanager01
command: >
/bin/bash -c "
wait-for-it.sh archiver-timescale:5432 --timeout=30 --strict --
wait-for-it.sh ${TANGO_HOST} --timeout=30 --strict --
hdbppes-srv 01"
logging:
driver: syslog
options:
syslog-address: udp://${LOG_HOSTNAME}:1514
syslog-format: rfc3164
tag: "{{.Name}}"
restart: unless-stopped
apiVersion: 1
datasources:
# <string, required> name of the datasource. Required
- name: TimescaleDB
# <string, required> datasource type. Required
type: postgres
# <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
access: proxy
# <int> org id. will default to orgId 1 if not specified
orgId: 1
# <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically
uid: timescaledb
# <string> url
url: archiver-timescale
# <string> Deprecated, use secureJsonData.password
password:
# <string> database user, if used
user: postgres
# <string> database name, if used
database: hdb
# <bool> enable/disable basic auth
basicAuth: false
# <string> basic auth username
basicAuthUser:
# <string> Deprecated, use secureJsonData.basicAuthPassword
basicAuthPassword:
# <bool> enable/disable with credentials headers
withCredentials:
# <bool> mark as default datasource. Max one per org
isDefault: false
# <map> fields that will be converted to json and stored in jsonData
jsonData:
# <string> determines whether or with what priority a secure TLS/SSL TCP/IP connection will be negotiated with the server.
sslmode: "disable"
# <bool> enable TimescaleDB
timescaledb: true
# <string> json object of data that will be encrypted.
secureJsonData:
# <string> database password, if used
password: password
version: 1
# <bool> allow users to edit datasources from the UI.
editable: false
ARG SOURCE_IMAGE
FROM ${SOURCE_IMAGE}
USER root
RUN apt-get update && \
apt-get install -y ca-certificates
RUN echo "deb http://deb.debian.org/debian buster-backports main contrib non-free" >> /etc/apt/sources.list && \
more /etc/apt/sources.list && \
apt-get update && \
apt-get install -y \
checkinstall \
git \
cmake \
make \
g++ \
libomniorb4-dev \
libzmq3-dev \
libcos4-dev \
mariadb-server \
libmariadb-dev-compat libmariadb-dev \
libmariadbclient-dev \
postgresql \
postgresql-contrib \
libpq5 \
libpqxx-6.2 \
libpq-dev \
libpqxx-dev
# ----------- LIBHDB++ -----------------------------
RUN git clone https://gitlab.com/tango-controls/hdbpp/libhdbpp.git
RUN cd libhdbpp \
&& mkdir build \
&& cd build \
&& cmake .. -DCMAKE_INCLUDE_PATH=/usr/local/include/tango \
&& make -j4
RUN cd libhdbpp/build \
&& checkinstall \
--install=yes \
--fstrans=no \
--showinstall=no \
--backup=no \
--type=debian \
--pkgsource="https://gitlab.com/tango-controls/hdbpp/libhdbpp" \
--pkglicense="LGPLv3" \
--deldesc=no \
--nodoc \
--strip \
--stripso \
--maintainer="tango" \
--pkgarch=$(dpkg --print-architecture) \
--pkgversion="2.0.0" \
--pkgrelease="SNAPSHOT" \
--pkgname="libhdbpp" \
--requires="libzmq5,libomniorb4-2,libcos4-2,libomnithread4" \
make install
# ----------- LIBHDB++ TIMESCALE ---------------
#RUN git clone -b image_support --recurse-submodules https://github.com/tango-controls-hdbpp/libhdbpp-timescale.git
RUN git clone -b image_support_lofar_fixes --recurse-submodules https://git.astron.nl/lofar2.0/libhdbpp-timescale.git
RUN cd libhdbpp-timescale \
&& mkdir -p build \
&& cd build \
&& cmake .. -DCMAKE_PREFIX_PATH=/usr/local/include/tango -DPostgreSQL_TYPE_INCLUDE_DIR=/usr/local/include/postgresql \
&& make -j4
RUN cd libhdbpp-timescale/build \
&& checkinstall \
--install=yes \
--fstrans=no \
--showinstall=no \
--backup=no \
--type=debian \
--pkgsource="https://github.com/tango-controls-hdbpp/libhdbpp-timescale" \
--pkglicense="LGPLv3" \
--deldesc=no \
--nodoc \
--strip \
--stripso \
--maintainer="tango" \
--pkgarch=$(dpkg --print-architecture) \
--pkgversion="2.0.0" \
--pkgrelease="SNAPSHOT" \
--pkgname="libhdbpp-timescale" \
--requires="libpq5" \
make install
RUN apt-get update && \
apt-get install -y \
build-essential && \
apt-get clean
RUN dpkg -i /libhdbpp/build/libhdbpp_2.0.0-SNAPSHOT_amd64.deb
RUN dpkg -i /libhdbpp-timescale/build/libhdbpp-timescale_2.0.0-SNAPSHOT_amd64.deb
RUN ldconfig
ARG SOURCE_IMAGE
FROM ${SOURCE_IMAGE}
# ----------- HDB++ CONFIGURATION MANAGER ---------------
RUN git clone https://gitlab.com/tango-controls/hdbpp/hdbpp-cm.git
RUN cd hdbpp-cm \
&& mkdir -p build \
&& cd build \
&& cmake .. -DCMAKE_PREFIX_PATH=/usr/local/include/tango \
&& make -j4
RUN cd hdbpp-cm/build \
&& checkinstall \
--install=yes \
--fstrans=no \
--showinstall=no \
--backup=no \
--type=debian \
--pkgsource="https://gitlab.com/tango-controls/hdbpp/hdbpp-cm" \
--pkglicense="GPLv3" \
--deldesc=no \
--nodoc \
--strip \
--stripso \
--maintainer="tango" \
--pkgarch=$(dpkg --print-architecture) \
--pkgversion="2.0.0" \
--pkgrelease="SNAPSHOT" \
--pkgname="hdbpp-cm" \
--requires="libzmq5,libomniorb4-2,libcos4-2,libomnithread4" \
make install
RUN apt-get update && \
apt-get install -y \
build-essential && \
apt-get clean
RUN dpkg -i /libhdbpp/build/libhdbpp_2.0.0-SNAPSHOT_amd64.deb
RUN dpkg -i /libhdbpp-timescale/build/libhdbpp-timescale_2.0.0-SNAPSHOT_amd64.deb
RUN dpkg -i /hdbpp-cm/build/hdbpp-cm_2.0.0-SNAPSHOT_amd64.deb
RUN ldconfig
RUN mv /usr/local/bin/hdb++cm-srv /usr/local/bin/hdbppcm-srv
ARG SOURCE_IMAGE
FROM ${SOURCE_IMAGE}
# ----------- HDB++ EVENT SUBSCRIBER ---------------
RUN git clone https://gitlab.com/tango-controls/hdbpp/hdbpp-es.git
RUN cd hdbpp-es \
&& mkdir -p build \
&& cd build \
&& cmake .. -DCMAKE_PREFIX_PATH=/usr/local/include/tango -DFETCH_LIBHDBPP=OFF -DLIBHDBPP_BACKEND=timescale -DPostgreSQL_TYPE_INCLUDE_DIR=/usr/local/include/postgresql \
&& make -j4
RUN cd hdbpp-es/build \
&& checkinstall \
--install=yes \
--fstrans=no \
--showinstall=no \
--backup=no \
--type=debian \
--pkgsource="https://gitlab.com/tango-controls/hdbpp/hdbpp-es" \
--pkglicense="GPLv3" \
--deldesc=no \
--nodoc \
--strip \
--stripso \
--maintainer="tango" \
--pkgarch=$(dpkg --print-architecture) \
--pkgversion="2.0.0" \
--pkgrelease="SNAPSHOT" \
--pkgname="hdbpp-es" \
--requires="libzmq5,libomniorb4-2,libcos4-2,libomnithread4" \
make install
RUN apt-get update && \
apt-get install -y \
build-essential && \
apt-get clean
RUN dpkg -i /libhdbpp/build/libhdbpp_2.0.0-SNAPSHOT_amd64.deb
RUN dpkg -i /libhdbpp-timescale/build/libhdbpp-timescale_2.0.0-SNAPSHOT_amd64.deb
RUN dpkg -i /hdbpp-es/build/hdbpp-es_2.0.0-SNAPSHOT_amd64.deb
RUN ldconfig
RUN mv /usr/local/bin/hdb++es-srv /usr/local/bin/hdbppes-srv
...@@ -11,7 +11,6 @@ notebook-as-pdf ...@@ -11,7 +11,6 @@ notebook-as-pdf
python-logstash-async python-logstash-async
PyMySQL[rsa] PyMySQL[rsa]
psycopg2-binary >= 2.9.2 #LGPL psycopg2-binary >= 2.9.2 #LGPL
sqlalchemy
pyvisa pyvisa
pyvisa-py pyvisa-py
opcua opcua
......
...@@ -97,16 +97,16 @@ filter { ...@@ -97,16 +97,16 @@ filter {
} }
filter { filter {
# mark all our mariadb instances # mark our tangodb instances
grok { grok {
match => { match => {
"program" => [ "archiver-maria-db", "tangodb" ] "program" => ["tangodb" ]
} }
add_tag => [ "mariadb" ] add_tag => [ "tangodb" ]
} }
# parse mariadb output # parse tangodb output
if "mariadb" in [tags] { if "tangodb" in [tags] {
grok { grok {
match => { match => {
"message" => [ "message" => [
......
ARG SOURCE_IMAGE
FROM ${SOURCE_IMAGE}
# Set admin role to perform DB creation
COPY resources/01_admin.sh docker-entrypoint-initdb.d/002_admin.sh
# Create DB schema (tables, indexes, etc.)
COPY resources/02_hdb_schema.sql docker-entrypoint-initdb.d/003_hdb_schema.sql
COPY resources/03_hdb_images.sql docker-entrypoint-initdb.d/004_hdb_images.sql
# Create DB roles
COPY resources/04_hdb_roles.sql docker-entrypoint-initdb.d/005_hdb_roles.sql
# Create further roles
COPY resources/05_hdb_ext_users.sql docker-entrypoint-initdb.d/006_hdb_ext_users.sql
# Add further functions
COPY resources/06_hdb_ext_import.sql docker-entrypoint-initdb.d/007_hdb_ext_import.sql
# Create timescaledb aggregates
COPY resources/07_hdb_ext_aggregates.sql docker-entrypoint-initdb.d/008_hdb_ext_aggregates.sql
COPY resources/08_hdb_ext_arrays_aggregates_helper.sql docker-entrypoint-initdb.d/009_hdb_ext_arrays_aggregates_helper.sql
COPY resources/09_hdb_ext_arrays_aggregates.sql docker-entrypoint-initdb.d/010_hdb_ext_arrays_aggregates.sql
# Add compress policy
COPY resources/10_hdb_ext_compress_policy.sql docker-entrypoint-initdb.d/011_hdb_ext_compress_policy.sql
# Add reorder policy
COPY resources/11_hdb_ext_reorder_policy.sql docker-entrypoint-initdb.d/012_hdb_ext_reorder_policy.sql
# Add LOFAR functions and views
COPY resources/12_lofar_func.sh docker-entrypoint-initdb.d/013_lofar_func.sh
COPY resources/13_lofar_views.sql docker-entrypoint-initdb.d/014_lofar_views.sql
# Cleanup admin role
COPY resources/14_cleanup.sql docker-entrypoint-initdb.d/015_cleanup.sql
# TimescaleDB Docker Image
The Dockerfile in this directory allows to create a container with a
PostrgreSQL-Timescale DBMS (https://www.timescale.com/), and then initialise
it with the DB schema required by the Tango Archiving framework.
The main image is pulled from the official PostgreSQL repository in the
Docker Hub (https://hub.docker.com/_/postgres). This image offers several
features to customize and extend itself.
## Initialization scripts
If you would like to do additional initialization in an image derived from
the Postgres official one, add one or more *.sql, *.sql.gz, or *.sh scripts
under /docker-entrypoint-initdb.d (creating the directory if necessary).
After the entrypoint calls initdb to create the default postgres user and
database, it will run any *.sql files, run any executable *.sh scripts, and
source any non-executable *.sh scripts found in that directory to do further
initialization before starting the service.
The script files in the directory /docker-entrypoint-initdb.d are sequentially
executed following their preempted number in the filename. Hence, the first
ones (000_install_timescaledb.sh and 001_timescaledb_tune.sh) are provided by
default.
The next ones have been pulled from the official Tango repository in order to
create the desired DB schema. These files are in the 'resources' directory
and they have been pulled from Tango-Hdbpp_Timescale_Project
(https://github.com/tango-controls-hdbpp/hdbpp-timescale-project/tree/master/resources/schema):
- admin.sql creates the admin user that will create the tables
- hdb_schema.sql creates the standard Tango Archiving DB (This is the only
MANDATORY script)
- hdb_roles.sql creates additional roles
- hdb_ext_aggregates.sql creates the continuous aggregate views
(https://docs.timescale.com/timescaledb/latest/how-to-guides/continuous-aggregates/)
- cleanup.sql strips the SUPERUSER trait from hdb_admin
Last updates on these scripts are dated to August 2021 (more info can be found
at https://github.com/tango-controls-hdbpp/hdbpp-timescale-project/blob/master/doc/db-schema-config.md)
#!/bin/bash
psql << EOF
CREATE ROLE hdb_admin WITH LOGIN PASSWORD '${PG_HDB_PASSWORD}';
ALTER USER hdb_admin CREATEDB;
ALTER USER hdb_admin CREATEROLE;
ALTER USER hdb_admin SUPERUSER;
EOF
This diff is collapsed.
-- -----------------------------------------------------------------------------
-- This file is part of the hdbpp-timescale-project
--
-- Copyright (C) : 2014-2019
-- European Synchrotron Radiation Facility
-- BP 220, Grenoble 38043, FRANCE
--
-- libhdb++timescale is free software: you can redistribute it and/or modify
-- it under the terms of the Lesser GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- libhdb++timescale is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser
-- GNU General Public License for more details.
--
-- You should have received a copy of the Lesser GNU General Public License
-- along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>.
-- -----------------------------------------------------------------------------
\c hdb
CREATE TABLE IF NOT EXISTS att_image_devboolean (
att_conf_id integer NOT NULL,
data_time timestamp WITH TIME ZONE NOT NULL,
value_r boolean[][],
value_w boolean[][],
quality smallint,
att_error_desc_id integer,
details json,
PRIMARY KEY (att_conf_id, data_time),
FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devboolean IS 'Array Boolean Values Table';
CREATE INDEX IF NOT EXISTS att_image_devboolean_att_conf_id_idx ON att_array_devboolean (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devboolean_att_conf_id_data_time_idx ON att_array_devboolean (att_conf_id,data_time DESC);
SELECT create_hypertable('att_image_devboolean', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
CREATE TABLE IF NOT EXISTS att_image_devuchar (
att_conf_id integer NOT NULL,
data_time timestamp WITH TIME ZONE NOT NULL,
value_r uchar[][],
value_w uchar[][],
quality smallint,
details json,
att_error_desc_id integer,
PRIMARY KEY (att_conf_id, data_time),
FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devuchar IS 'Array UChar Values Table';
CREATE INDEX IF NOT EXISTS att_image_devuchar_att_conf_id_idx ON att_array_devuchar (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devuchar_att_conf_id_data_time_idx ON att_array_devuchar (att_conf_id,data_time DESC);
SELECT create_hypertable('att_image_devuchar', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
CREATE TABLE IF NOT EXISTS att_image_devshort (
att_conf_id integer NOT NULL,
data_time timestamp WITH TIME ZONE NOT NULL,
value_r smallint[][],
value_w smallint[][],
quality smallint,
att_error_desc_id integer,
details json,
PRIMARY KEY (att_conf_id, data_time),
FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devshort IS 'Array Short Values Table';
CREATE INDEX IF NOT EXISTS att_image_devshort_att_conf_id_idx ON att_array_devshort (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devshort_att_conf_id_data_time_idx ON att_array_devshort (att_conf_id,data_time DESC);
SELECT create_hypertable('att_image_devshort', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
CREATE TABLE IF NOT EXISTS att_image_devushort (
att_conf_id integer NOT NULL,
data_time timestamp WITH TIME ZONE NOT NULL,
value_r ushort[][],
value_w ushort[][],
quality smallint,
att_error_desc_id integer,
details json,
PRIMARY KEY (att_conf_id, data_time),
FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devushort IS 'Array UShort Values Table';
CREATE INDEX IF NOT EXISTS att_image_devushort_att_conf_id_idx ON att_array_devushort (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devushort_att_conf_id_data_time_idx ON att_array_devushort (att_conf_id,data_time DESC);
SELECT create_hypertable('att_image_devushort', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
CREATE TABLE IF NOT EXISTS att_image_devlong (
att_conf_id integer NOT NULL,
data_time timestamp WITH TIME ZONE NOT NULL,
value_r integer[][],
value_w integer[][],
quality smallint,
att_error_desc_id integer,
details json,
PRIMARY KEY (att_conf_id, data_time),
FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devlong IS 'Array Long Values Table';
CREATE INDEX IF NOT EXISTS att_image_devlong_att_conf_id_idx ON att_array_devlong (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devlong_att_conf_id_data_time_idx ON att_array_devlong (att_conf_id,data_time DESC);
SELECT create_hypertable('att_image_devlong', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
CREATE TABLE IF NOT EXISTS att_image_devulong (
att_conf_id integer NOT NULL,
data_time timestamp WITH TIME ZONE NOT NULL,
value_r ulong[][],
value_w ulong[][],
quality smallint,
att_error_desc_id integer,
details json,
PRIMARY KEY (att_conf_id, data_time),
FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devulong IS 'Array ULong Values Table';
CREATE INDEX IF NOT EXISTS att_image_devulong_att_conf_id_idx ON att_array_devulong (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devulong_att_conf_id_data_time_idx ON att_array_devulong (att_conf_id,data_time DESC);
SELECT create_hypertable('att_image_devulong', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
-- Storage for image (2-D array) attributes of Tango type DevLong64.
CREATE TABLE IF NOT EXISTS att_image_devlong64 (
	att_conf_id integer NOT NULL,
	data_time timestamp WITH TIME ZONE NOT NULL,
	value_r bigint[][],
	value_w bigint[][],
	quality smallint,
	att_error_desc_id integer,
	details json,
	PRIMARY KEY (att_conf_id, data_time),
	FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
	FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devlong64 IS 'Array Long64 Values Table';
-- FIX: the original indexes were created ON att_array_devlong64 by mistake,
-- leaving att_image_devlong64 unindexed (the hypertable adds no default indexes).
CREATE INDEX IF NOT EXISTS att_image_devlong64_att_conf_id_idx ON att_image_devlong64 (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devlong64_att_conf_id_data_time_idx ON att_image_devlong64 (att_conf_id,data_time DESC);
-- Convert to a TimescaleDB hypertable chunked on data_time in 28-day intervals.
SELECT create_hypertable('att_image_devlong64', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
-- Storage for image (2-D array) attributes of Tango type DevULong64.
-- NOTE: 'ulong64' is a custom domain presumably defined earlier in this schema
-- (unsigned 64-bit range) — not a built-in PostgreSQL type.
CREATE TABLE IF NOT EXISTS att_image_devulong64 (
	att_conf_id integer NOT NULL,
	data_time timestamp WITH TIME ZONE NOT NULL,
	value_r ulong64[][],
	value_w ulong64[][],
	quality smallint,
	att_error_desc_id integer,
	details json,
	PRIMARY KEY (att_conf_id, data_time),
	FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
	FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devulong64 IS 'Array ULong64 Values Table';
-- FIX: the original indexes were created ON att_array_devulong64 by mistake,
-- leaving att_image_devulong64 unindexed (the hypertable adds no default indexes).
CREATE INDEX IF NOT EXISTS att_image_devulong64_att_conf_id_idx ON att_image_devulong64 (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devulong64_att_conf_id_data_time_idx ON att_image_devulong64 (att_conf_id,data_time DESC);
-- Convert to a TimescaleDB hypertable chunked on data_time in 28-day intervals.
SELECT create_hypertable('att_image_devulong64', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
-- Storage for image (2-D array) attributes of Tango type DevFloat.
CREATE TABLE IF NOT EXISTS att_image_devfloat (
	att_conf_id integer NOT NULL,
	data_time timestamp WITH TIME ZONE NOT NULL,
	value_r real[][],
	value_w real[][],
	quality smallint,
	att_error_desc_id integer,
	details json,
	PRIMARY KEY (att_conf_id, data_time),
	FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
	FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devfloat IS 'Array Float Values Table';
-- FIX: the original indexes were created ON att_array_devfloat by mistake,
-- leaving att_image_devfloat unindexed (the hypertable adds no default indexes).
CREATE INDEX IF NOT EXISTS att_image_devfloat_att_conf_id_idx ON att_image_devfloat (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devfloat_att_conf_id_data_time_idx ON att_image_devfloat (att_conf_id,data_time DESC);
-- Convert to a TimescaleDB hypertable chunked on data_time in 28-day intervals.
SELECT create_hypertable('att_image_devfloat', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
-- Storage for image (2-D array) attributes of Tango type DevDouble.
CREATE TABLE IF NOT EXISTS att_image_devdouble (
	att_conf_id integer NOT NULL,
	data_time timestamp WITH TIME ZONE NOT NULL,
	value_r double precision[][],
	value_w double precision[][],
	quality smallint,
	att_error_desc_id integer,
	details json,
	PRIMARY KEY (att_conf_id, data_time),
	FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
	FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devdouble IS 'Array Double Values Table';
-- FIX: the original indexes were created ON att_array_devdouble by mistake,
-- leaving att_image_devdouble unindexed (the hypertable adds no default indexes).
CREATE INDEX IF NOT EXISTS att_image_devdouble_att_conf_id_idx ON att_image_devdouble (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devdouble_att_conf_id_data_time_idx ON att_image_devdouble (att_conf_id,data_time DESC);
-- Convert to a TimescaleDB hypertable chunked on data_time in 28-day intervals.
SELECT create_hypertable('att_image_devdouble', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
-- Storage for image (2-D array) attributes of Tango type DevString.
CREATE TABLE IF NOT EXISTS att_image_devstring (
	att_conf_id integer NOT NULL,
	data_time timestamp WITH TIME ZONE NOT NULL,
	value_r text[][],
	value_w text[][],
	quality smallint,
	att_error_desc_id integer,
	details json,
	PRIMARY KEY (att_conf_id, data_time),
	FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
	FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devstring IS 'Array String Values Table';
-- FIX: the original indexes were created ON att_array_devstring by mistake,
-- leaving att_image_devstring unindexed (the hypertable adds no default indexes).
CREATE INDEX IF NOT EXISTS att_image_devstring_att_conf_id_idx ON att_image_devstring (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devstring_att_conf_id_data_time_idx ON att_image_devstring (att_conf_id,data_time DESC);
-- Convert to a TimescaleDB hypertable chunked on data_time in 28-day intervals.
SELECT create_hypertable('att_image_devstring', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
-- Storage for image (2-D array) attributes of Tango type DevState.
-- State values are stored as their integer codes.
CREATE TABLE IF NOT EXISTS att_image_devstate (
	att_conf_id integer NOT NULL,
	data_time timestamp WITH TIME ZONE NOT NULL,
	value_r integer[][],
	value_w integer[][],
	quality smallint,
	att_error_desc_id integer,
	details json,
	PRIMARY KEY (att_conf_id, data_time),
	FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
	FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devstate IS 'Array State Values Table';
-- FIX: the original indexes were created ON att_array_devstate by mistake,
-- leaving att_image_devstate unindexed (the hypertable adds no default indexes).
CREATE INDEX IF NOT EXISTS att_image_devstate_att_conf_id_idx ON att_image_devstate (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devstate_att_conf_id_data_time_idx ON att_image_devstate (att_conf_id,data_time DESC);
-- Convert to a TimescaleDB hypertable chunked on data_time in 28-day intervals.
SELECT create_hypertable('att_image_devstate', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
-- Storage for image (2-D array) attributes of Tango type DevEncoded.
-- Encoded payloads are stored as raw byte arrays.
CREATE TABLE IF NOT EXISTS att_image_devencoded (
	att_conf_id integer NOT NULL,
	data_time timestamp WITH TIME ZONE NOT NULL,
	value_r bytea[][],
	value_w bytea[][],
	quality smallint,
	att_error_desc_id integer,
	details json,
	PRIMARY KEY (att_conf_id, data_time),
	FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
	FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devencoded IS 'Array DevEncoded Values Table';
-- FIX: the original indexes were created ON att_array_devencoded by mistake,
-- leaving att_image_devencoded unindexed (the hypertable adds no default indexes).
CREATE INDEX IF NOT EXISTS att_image_devencoded_att_conf_id_idx ON att_image_devencoded (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devencoded_att_conf_id_data_time_idx ON att_image_devencoded (att_conf_id,data_time DESC);
-- Convert to a TimescaleDB hypertable chunked on data_time in 28-day intervals.
SELECT create_hypertable('att_image_devencoded', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
-- Storage for image (2-D array) attributes of Tango type DevEnum.
-- Both the raw smallint enum values and their resolved text labels are kept;
-- the *_label columns are filled by the enum_label_trigger defined below.
CREATE TABLE IF NOT EXISTS att_image_devenum (
	att_conf_id integer NOT NULL,
	data_time timestamp WITH TIME ZONE NOT NULL,
	value_r_label text[][],
	value_r smallint[][],
	value_w_label text[][],
	value_w smallint[][],
	quality smallint,
	att_error_desc_id integer,
	details json,
	PRIMARY KEY (att_conf_id, data_time),
	FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id),
	FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id)
);
COMMENT ON TABLE att_image_devenum IS 'Array Enum Values Table';
-- FIX: the original indexes were created ON att_array_devenum by mistake,
-- leaving att_image_devenum unindexed (the hypertable adds no default indexes).
CREATE INDEX IF NOT EXISTS att_image_devenum_att_conf_id_idx ON att_image_devenum (att_conf_id);
CREATE INDEX IF NOT EXISTS att_image_devenum_att_conf_id_data_time_idx ON att_image_devenum (att_conf_id,data_time DESC);
-- Convert to a TimescaleDB hypertable chunked on data_time in 28-day intervals.
SELECT create_hypertable('att_image_devenum', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE);
-- Trigger to set the enum_labels
-- BEFORE INSERT trigger function: resolves the numeric enum values in
-- NEW.value_r / NEW.value_w into their text labels, writing the results to
-- NEW.value_r_label / NEW.value_w_label. Labels come from the most recent
-- att_parameter row for this attribute (latest recv_time).
-- Enum values are 0-based while PostgreSQL arrays are 1-based, hence the +1.
-- NOTE(review): the outer SELECT aggregates the subquery alias 'res' itself;
-- verify the resulting value_r_label/value_w_label array shape matches the
-- 2-D text[][] columns — TODO confirm against a live hdb database.
CREATE OR REPLACE FUNCTION set_enum_label_array() RETURNS TRIGGER AS $$
DECLARE
BEGIN
    IF NEW.value_r IS NOT NULL THEN
        WITH enum_labels AS (
            SELECT enum_labels FROM att_parameter WHERE att_conf_id=NEW.att_conf_id ORDER BY recv_time DESC limit 1
        )
        SELECT array_agg(res) FROM (SELECT enum_labels[UNNEST(NEW.value_r)+ 1] FROM enum_labels) as res INTO NEW.value_r_label;
    END IF;
    IF NEW.value_w IS NOT NULL THEN
        -- Same lookup as above, applied to the write-side values.
        WITH enum_labels AS (
            SELECT enum_labels FROM att_parameter WHERE att_conf_id=NEW.att_conf_id ORDER BY recv_time DESC limit 1
        )
        SELECT array_agg(res) FROM (SELECT enum_labels[UNNEST(NEW.value_w)+ 1] FROM enum_labels) as res INTO NEW.value_w_label;
    END IF;
    RETURN NEW;
END
$$ LANGUAGE plpgsql;
-- Attach the label-resolution function to every insert on att_image_devenum.
CREATE TRIGGER enum_label_trigger BEFORE INSERT ON att_image_devenum FOR EACH ROW EXECUTE PROCEDURE set_enum_label_array();
-- -----------------------------------------------------------------------------
-- This file is part of the hdbpp-timescale-project
--
-- Copyright (C) : 2014-2019
-- European Synchrotron Radiation Facility
-- BP 220, Grenoble 38043, FRANCE
--
-- libhdb++timescale is free software: you can redistribute it and/or modify
-- it under the terms of the Lesser GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- libhdb++timescale is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser
-- GNU General Public License for more details.
--
-- You should have received a copy of the Lesser GNU General Public License
-- along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>.
-- -----------------------------------------------------------------------------
-- Setup roles to access the hdb database
-- Two group roles: 'readonly' for consumers/dashboards, 'readwrite' for the
-- archiver processes that insert data. Login users are expected to be granted
-- membership in one of these roles elsewhere.
CREATE ROLE readonly;
CREATE ROLE readwrite;
-- Permissions - readonly
GRANT CONNECT ON DATABASE hdb TO readonly;
GRANT USAGE ON SCHEMA public TO readonly;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO readonly;
-- Ensure tables created after this script runs are also readable.
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO readonly;
-- Permissions - readwrite
GRANT CONNECT ON DATABASE hdb TO readwrite;
GRANT USAGE ON SCHEMA public TO readwrite;
GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO readwrite;
-- Ensure tables created after this script runs are also writable.
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO readwrite;
-- Sequence access is needed for serial/identity primary keys (e.g. att_conf_id).
GRANT USAGE ON ALL SEQUENCES IN SCHEMA public TO readwrite;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE ON SEQUENCES TO readwrite;
-- NOTE(review): the ALL PRIVILEGES grants below subsume the narrower
-- SELECT/INSERT/UPDATE/DELETE grants above — presumably intentional
-- broadening; verify readwrite really needs TRUNCATE/TRIGGER/REFERENCES.
GRANT ALL ON SCHEMA public TO readwrite;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO readwrite;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO readwrite;
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment