diff --git a/CHANGELOG.md b/CHANGELOG.md index 2405288016839fff12ad564e61fe5c6e2252d7f9..09db218cc59830e81f4d1b6e1534389acfa64e2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,18 +7,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. ## [Unreleased] -### Changed - -- Moved some system documentation to hdbpp-timescale-project (the consolidated project). -- Consolidated remaining build/install instructions into README -- Modified build system to use fetch libhdbpp and include it when requested. This is an aid to development. -- Supported LIBHDBPP_PROJECT_BUILD flag, that is injected into the build from hdbpp-timescale-project -- Made compatible with new libhdbpp (namespace, function and path changes) - -### Removed - -- Removed the embedded version of libhdbpp (the build can now source it at build time) - ## [0.11.2] - 2020-01-23 ### Fixed diff --git a/CMakeLists.txt b/CMakeLists.txt index 47082ba0870e643886060e0d852180ca3cd984de..41dba68c3424acc6ad6eae74a3ca76bd0c6a3ab4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -9,7 +9,7 @@ if ( ${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR} ) endif() # Start Build Config ----------------------------------- -cmake_minimum_required(VERSION 3.11) +cmake_minimum_required(VERSION 3.6) set(CMAKE_SKIP_RPATH true) set(CMAKE_VERBOSE_MAKEFILE ON) set(CMAKE_COLOR_MAKEFILE ON) @@ -18,9 +18,9 @@ set(CMAKE_COLOR_MAKEFILE ON) set(LIBHDBPP_TIMESCALE_NAME "libhdb++timescale") # Versioning -set(VERSION_MAJOR "1") -set(VERSION_MINOR "0") -set(VERSION_PATCH "0") +set(VERSION_MAJOR "0") +set(VERSION_MINOR "11") +set(VERSION_PATCH "2") set(VERSION_METADATA "") set(VERSION_STRING ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}) @@ -38,9 +38,6 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) # Build options -set(FETCH_LIBHDBPP_TAG "exp-refactor" CACHE STRING "Libhdbpp branch/tag to clone 'master'") -option(FETCH_LIBHDBPP "Download and build using a local copy of libhdb++" ON) -option(FETCH_LIBHDBPP_TAG "When FETCH_LIBHDBPP is enabled, this is the tag fetch ('master')") option(BUILD_UNIT_TESTS "Build unit tests" OFF) option(BUILD_BENCHMARK_TESTS "Build benchmarking tests (Forces RELEASE build)" OFF) option(ENABLE_CLANG "Enable clang code and layout analysis" OFF) @@ -69,6 +66,12 @@ list(APPEND CMAKE_PREFIX_PATH "/usr") # Find Dependencies --------------------- include(cmake/FindLibraries.cmake) +# Attempt to find the various libraries the project is dependent on +if(TDB_LIBRARIES) + find_libraries(LIBRARIES ${TDB_LIBRARIES} SEARCH_PATHS ${LIBRARY_PATHS}) + set(TDB_FOUND_LIBRARIES ${FOUND_LIBRARIES}) +endif(TDB_LIBRARIES) + # First find tango if it has not already been found. 
Returns an interface library # called TangoInterfaceLibrary set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/cmake") @@ -77,12 +80,6 @@ find_package(Tango) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) -# Attempt to find the various libraries the project is dependent on -if(TDB_LIBRARIES) - find_libraries(LIBRARIES ${TDB_LIBRARIES} SEARCH_PATHS ${LIBRARY_PATHS}) - set(TDB_FOUND_LIBRARIES ${FOUND_LIBRARIES}) -endif(TDB_LIBRARIES) - # Thirdparty Integration ----------------------------------- # build google benchmark (target: benchmark) @@ -94,6 +91,7 @@ add_subdirectory(thirdparty/google/benchmark EXCLUDE_FROM_ALL) add_subdirectory(thirdparty/google/googletest/googletest EXCLUDE_FROM_ALL) # Include the thirdparty projects +add_subdirectory(thirdparty/libhdbpp EXCLUDE_FROM_ALL) add_subdirectory(thirdparty/libpqxx EXCLUDE_FROM_ALL) add_subdirectory(thirdparty/spdlog EXCLUDE_FROM_ALL) add_subdirectory(thirdparty/Catch2 EXCLUDE_FROM_ALL) @@ -139,7 +137,7 @@ add_subdirectory(src) add_library(libhdbpp_timescale_shared_library SHARED ${SRC_FILES}) target_link_libraries(libhdbpp_timescale_shared_library - PUBLIC ${TDB_FOUND_LIBRARIES} pqxx_static spdlog::spdlog_header_only Threads::Threads + PUBLIC ${TDB_FOUND_LIBRARIES} pqxx_static libhdbpp_headers spdlog::spdlog_header_only Threads::Threads PRIVATE TangoInterfaceLibrary) target_include_directories(libhdbpp_timescale_shared_library @@ -148,7 +146,6 @@ target_include_directories(libhdbpp_timescale_shared_library $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include> PRIVATE $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/src> - ${INCLUDE_PATHS} "${PROJECT_BINARY_DIR}") set_target_properties(libhdbpp_timescale_shared_library @@ -173,7 +170,7 @@ target_compile_options(libhdbpp_timescale_shared_library add_library(libhdbpp_timescale_static_library STATIC EXCLUDE_FROM_ALL ${SRC_FILES}) target_link_libraries(libhdbpp_timescale_static_library - PUBLIC ${TDB_FOUND_LIBRARIES} pqxx_static spdlog Threads::Threads + PUBLIC ${TDB_FOUND_LIBRARIES} pqxx_static libhdbpp_headers spdlog Threads::Threads PRIVATE TangoInterfaceLibrary) target_include_directories(libhdbpp_timescale_static_library diff --git a/README.md b/README.md index bbcd545dc14cf358943952247eeaa1c8406bcf65..7e451472eec5c9c6071592cb19bdbb3f9fb4d63b 100644 --- a/README.md +++ b/README.md @@ -3,29 +3,13 @@ [](http://www.tango-controls.org) [](https://www.gnu.org/licenses/gpl-3.0) [](https://github.com/tango-controls-hdbpp/libhdbpp-timescale/releases) - [libhdbpp-timescale](#libhdbpp-timescale) - - [v0.9.0 To v0.10.0 Update](#v090-to-v0100-update) - - [Cloning](#cloning) - - [Bug Reports + Feature Requests](#bug-reports--feature-requests) - - [Documentation](#documentation) - - [Building](#building) - - [Dependencies](#dependencies) - - [Toolchain Dependencies](#toolchain-dependencies) - - [Build Dependencies](#build-dependencies) - - [Building Process](#building-process) - - [Ubuntu](#ubuntu) - - [Build Flags](#build-flags) - - [Standard CMake Flags](#standard-cmake-flags) - - [Project Flags](#project-flags) - - [Running Tests](#running-tests) - - [Unit Tests](#unit-tests) - - [Benchmark Tests](#benchmark-tests) - - [Installing](#installing) - - [System Dependencies](#system-dependencies) - - [Installation](#installation) - - [Configuration](#configuration) - - [Library Configuration Parameters](#library-configuration-parameters) - - [Configuration Example](#configuration-example) - - [License](#license) + - [v0.9.0 To v0.10.0 Update](#v090-To-v0100-Update) + - 
[Cloning](#Cloning) + - [Bug Reports + Feature Requests](#Bug-Reports--Feature-Requests) + - [Documentation](#Documentation) + - [Building](#Building) + - [Installing](#Installing) + - [License](#License) HDB++ backend library for the TimescaleDb extenstion to Postgresql. This library is loaded by libhdbpp to archive events from a Tango Controls system. Currently in a pre v1 release phase. @@ -62,215 +46,11 @@ Please file the bug reports and feature requests in the issue tracker ## Building -To build the shared library please read the following. - -### Dependencies - -The project has two types of dependencies, those required by the toolchain, and those to do the actual build. Other dependencies are integrated directly into the project as submodules. The following thirdparty modules exists: - -* libpqxx - Modern C++ Postgresql library (submodule) -* spdlog - Logging system (submodule) -* Catch2 - Unit test subsystem (submodule) -* libhdbpp - Configuration can now fetch [original](https://github.com/tango-controls-hdbpp/libhdbpp) to aid development. See build flags. - -#### Toolchain Dependencies - -If wishing to build the project, ensure the following dependencies are met: - -* CMake 3.11 or higher (for FetchContent) -* C++14 compatible compiler (code base is using c++14) - -#### Build Dependencies - -Ensure the development version of the dependencies are installed. These are as follows: - -* Tango Controls 9 or higher development headers and libraries -* omniORB release 4 or higher development headers and libraries -* libzmq3-dev or libzmq5-dev -* libpq-dev - Postgres C development library - -### Building Process - -To compile this library, first ensure it has been recursively cloned so all submodules are present in /thirdparty. The build system uses pkg-config to find some dependencies, for example Tango. If Tango is not installed to a standard location, set PKG_CONFIG_PATH, i.e. - -```bash -export PKG_CONFIG_PATH=/non/standard/tango/install/location -``` - -Then to build just the library: - -```bash -mkdir -p build -cd build -cmake .. -make -``` - -The pkg-config path can also be set with the cmake argument CMAKE_PREFIX_PATH. This can be set on the command line at configuration time, i.e.: - -```bash -... -cmake -DCMAKE_PREFIX_PATH=/non/standard/tango/install/location .. -... -``` - -#### Ubuntu - -When using Postgres from the Ubuntu repositoris, it appears to install its development libraries in a slightly different location. Some info on this issue [here](https://gitlab.kitware.com/cmake/cmake/issues/17223). In this case, we set the PostgreSQL_TYPE_INCLUDE_DIR variable directly when calling cmake: - -``` -cmake -DPostgreSQL_TYPE_INCLUDE_DIR=/usr/include/postgresql/ .. -``` - -This should replace the call to cmake in the previous section. - -### Build Flags - -The following build flags are available - -#### Standard CMake Flags - -The following is a list of common useful CMake flags and their use: - -| Flag | Setting | Description | -|------|-----|-----| -| CMAKE_INSTALL_PREFIX | PATH | Standard CMake flag to modify the install prefix. | -| CMAKE_INCLUDE_PATH | PATH[S] | Standard CMake flag to add include paths to the search path. 
| -| CMAKE_LIBRARY_PATH | PATH[S] | Standard CMake flag to add paths to the library search path | -| CMAKE_BUILD_TYPE | Debug/Release | Build type to produce | - -#### Project Flags - -| Flag | Setting | Default | Description | -|------|-----|-----|-----| -| BUILD_UNIT_TESTS | ON/OFF | OFF | Build unit tests | -| BUILD_BENCHMARK_TESTS | ON/OFF | OFF | Build benchmark tests (Forces a Release build) | -| ENABLE_CLANG | ON/OFF | OFF | Clang code static analysis, readability, and cppcore guideline enforcement | -| FETCH_LIBHDBPP | ON/OFF | OFF | Enable to have the build fetch and use a local version of libhdbpp | -| FETCH_LIBHDBPP_TAG | | master | When FETCH_LIBHDBPP is enabled, this is the git tag to fetch | - -### Running Tests - -#### Unit Tests - -The project has extensive unit tests to ensure its functioning as expect. Build the project with testing enabled: - -```bash -mkdir -p build -cd build -cmake -DBUILD_UNIT_TESTS=ON .. -make -``` - -To run all unit tests, a postgresql database node is required with the project schema loaded up. There is a default connection string inside test/TestHelpers.hpp: - -``` -user=postgres host=localhost port=5432 dbname=hdb password=password -``` - -If you run the hdb timescale docker image associated with this project locally then this will connect automatically. If you wish to use a different database, edit the string in test/TestHelpers.hpp. - -To run all tests: - -```bash -./test/unit-tests -``` - -To look at the available tests and tags, should you wish to run a subset of the test suite (for example, you do not have a postgresql node to test against), then tests and be listed: - -```bash -./bin/unit-tests --list-tests -``` - -Or: - -```bash -./bin/unit-tests --list-tags -``` - -To see more options for the unit-test command line binary: - -```bash -./bin/unit-tests --help -``` - -#### Benchmark Tests - -These are a work in progress to explore future optimisation point. If built, they can be run as follows: - -```bash -mkdir -p build -cd build -cmake -DBUILD_BENCHMARK_TESTS=ON .. -make -``` - -```bash -./benchmark/benchmark-tests -``` +See [build.md](doc/build.md) in the doc folder ## Installing -All submodules are combined into the final library for ease of deployment. This means just the libhdbpp-timescale.so binary needs deploying to the target system. - -### System Dependencies - -The running system requires libpq5 installed to support the calls Postgresql. On Debian/Ubuntu this can be deployed as follows: - -```bash -sudo apt-get install libpq5 -``` - -### Installation - -After the build has completed, simply run: - -``` -sudo make install -``` - -The shared library will be installed to /usr/local/lib on Debian/Ubuntu systems. - -## Configuration - -### Library Configuration Parameters - -Configuration parameters are as follows: - -| Parameter | Mandatory | Default | Description | -|------|-----|-----|-----| -| libname | true | None | Must be "libhdb++timescale.so" | -| connect_string | true | None | Postgres connection string, eg user=postgres host=localhost port=5432 dbname=hdb password=password | -| logging_level | false | error | Logging level. See table below | -| log_file | false | false | Enable logging to file | -| log_console | false | false | Enable logging to the console | -| log_syslog | false | false | Enable logging to syslog | -| log_file_name | false | None | When logging to file, this is the path and name of file to use. Ensure the path exists otherwise this is an error conditions. 
| - -The logging_level parameter is case insensitive. Logging levels are as follows: - -| Level | Description | -|------|-----| -| error | Log only error level events (recommended unless debugging) | -| warning | Log only warning level events | -| info | Log only warning level events | -| debug | Log only warning level events. Good for early install debugging | -| trace | Trace level logging. Excessive level of debug, good for involved debugging | -| disabled | Disable logging subsystem | - -### Configuration Example - -Short example LibConfiguration property value on an EventSubscriber or ConfigManager. You will HAVE to change the various parts to match your system: - -```bash -connect_string=user=hdb-user password=password host=hdb-database port=5432 dbname=hdb -logging_level=debug -log_file=true -log_syslog=false -log_console=false -libname=libhdb++timescale.so -log_file_name=/tmp/hdb/es-name.log -``` +See [install.md](doc/install.md) in the doc folder ## License diff --git a/db-schema/cluster.sql b/db-schema/cluster.sql new file mode 100644 index 0000000000000000000000000000000000000000..baee7e7e396ee4cb344b0abf44f96e6218cbf59a --- /dev/null +++ b/db-schema/cluster.sql @@ -0,0 +1,57 @@ +ALTER TABLE att_scalar_devboolean CLUSTER ON att_scalar_devboolean_att_conf_id_data_time_idx; +ALTER TABLE att_array_devboolean CLUSTER ON att_array_devboolean_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devuchar CLUSTER ON att_scalar_devuchar_att_conf_id_data_time_idx; +ALTER TABLE att_array_devuchar CLUSTER ON att_array_devuchar_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devshort CLUSTER ON att_scalar_devshort_att_conf_id_data_time_idx; +ALTER TABLE att_array_devshort CLUSTER ON att_array_devshort_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devushort CLUSTER ON att_scalar_devushort_att_conf_id_data_time_idx; +ALTER TABLE att_array_devushort CLUSTER ON att_array_devushort_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devlong CLUSTER ON att_scalar_devlong_att_conf_id_data_time_idx; +ALTER TABLE att_array_devlong CLUSTER ON att_array_devlong_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devulong CLUSTER ON att_scalar_devulong_att_conf_id_data_time_idx; +ALTER TABLE att_array_devulong CLUSTER ON att_array_devulong_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devlong64 CLUSTER ON att_scalar_devlong64_att_conf_id_data_time_idx; +ALTER TABLE att_array_devlong64 CLUSTER ON att_array_devlong64_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devulong64 CLUSTER ON att_scalar_devulong64_att_conf_id_data_time_idx; +ALTER TABLE att_array_devulong64 CLUSTER ON att_array_devulong64_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devfloat CLUSTER ON att_scalar_devfloat_att_conf_id_data_time_idx; +ALTER TABLE att_array_devfloat CLUSTER ON att_array_devfloat_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devdouble CLUSTER ON att_scalar_devdouble_att_conf_id_data_time_idx; +ALTER TABLE att_array_devdouble CLUSTER ON att_array_devdouble_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devstring CLUSTER ON att_scalar_devstring_att_conf_id_data_time_idx; +ALTER TABLE att_array_devstring CLUSTER ON att_array_devstring_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devstate CLUSTER ON att_scalar_devstate_att_conf_id_data_time_idx; +ALTER TABLE att_array_devstate CLUSTER ON att_array_devstate_att_conf_id_data_time_idx; +ALTER TABLE att_scalar_devencoded CLUSTER ON att_scalar_devencoded_att_conf_id_data_time_idx; +ALTER TABLE att_array_devencoded CLUSTER ON 
att_array_devencoded_att_conf_id_data_time_idx;
+ALTER TABLE att_scalar_devenum CLUSTER ON att_scalar_devenum_att_conf_id_data_time_idx;
+ALTER TABLE att_array_devenum CLUSTER ON att_array_devenum_att_conf_id_data_time_idx;
+
+CLUSTER att_scalar_devboolean;
+CLUSTER att_array_devboolean;
+CLUSTER att_scalar_devuchar;
+CLUSTER att_array_devuchar;
+CLUSTER att_scalar_devshort;
+CLUSTER att_array_devshort;
+CLUSTER att_scalar_devushort;
+CLUSTER att_array_devushort;
+CLUSTER att_scalar_devlong;
+CLUSTER att_array_devlong;
+CLUSTER att_scalar_devulong;
+CLUSTER att_array_devulong;
+CLUSTER att_scalar_devlong64;
+CLUSTER att_array_devlong64;
+CLUSTER att_scalar_devulong64;
+CLUSTER att_array_devulong64;
+CLUSTER att_scalar_devfloat;
+CLUSTER att_array_devfloat;
+CLUSTER att_scalar_devdouble;
+CLUSTER att_array_devdouble;
+CLUSTER att_scalar_devstring;
+CLUSTER att_array_devstring;
+CLUSTER att_scalar_devstate;
+CLUSTER att_array_devstate;
+CLUSTER att_scalar_devencoded;
+CLUSTER att_array_devencoded;
+CLUSTER att_scalar_devenum;
+CLUSTER att_array_devenum;
\ No newline at end of file
diff --git a/db-schema/schema.sql b/db-schema/schema.sql
new file mode 100755
index 0000000000000000000000000000000000000000..8cab5645dc6e456b38f1f7065490b12efd7fef59
--- /dev/null
+++ b/db-schema/schema.sql
@@ -0,0 +1,666 @@
+DROP DATABASE IF EXISTS hdb;
+
+-- Create the hdb database and use it
+CREATE DATABASE hdb;
+\c hdb
+
+-- Add the timescaledb extension (Important)
+CREATE EXTENSION IF NOT EXISTS timescaledb CASCADE;
+
+-------------------------------------------------------------------------------
+CREATE DOMAIN uchar AS numeric(3) -- ALT smallint
+    CHECK(VALUE >= 0 AND VALUE <= 255);
+
+CREATE DOMAIN ushort AS numeric(5) -- ALT integer
+    CHECK(VALUE >= 0 AND VALUE <= 65535);
+
+CREATE DOMAIN ulong AS numeric(10) -- ALT bigint
+    CHECK(VALUE >= 0 AND VALUE <= 4294967295);
+
+CREATE DOMAIN ulong64 AS numeric(20)
+    CHECK(VALUE >= 0 AND VALUE <= 18446744073709551615);
+
+-------------------------------------------------------------------------------
+DROP TABLE IF EXISTS att_conf_type;
+
+-- Mappings for the Tango Data Type (used in att_conf)
+CREATE TABLE att_conf_type (
+    att_conf_type_id serial NOT NULL,
+    type text NOT NULL,
+    type_num smallint NOT NULL,
+    PRIMARY KEY (att_conf_type_id)
+);
+
+COMMENT ON TABLE att_conf_type is 'Attribute data type';
+
+INSERT INTO att_conf_type (type, type_num) VALUES
+('DEV_BOOLEAN', 1),('DEV_SHORT', 2),('DEV_LONG', 3),('DEV_FLOAT', 4),
+('DEV_DOUBLE', 5),('DEV_USHORT', 6),('DEV_ULONG', 7),('DEV_STRING', 8),
+('DEV_STATE', 19),('DEV_UCHAR',22),('DEV_LONG64', 23),('DEV_ULONG64', 24),
+('DEV_ENCODED', 28),('DEV_ENUM', 29);
+
+DROP TABLE IF EXISTS att_conf_format;
+
+-- Mappings for the Tango Data Format Type (used in att_conf)
+CREATE TABLE att_conf_format (
+    att_conf_format_id serial NOT NULL,
+    format text NOT NULL,
+    format_num smallint NOT NULL,
+    PRIMARY KEY (att_conf_format_id)
+);
+
+COMMENT ON TABLE att_conf_format is 'Attribute format type';
+
+INSERT INTO att_conf_format (format, format_num) VALUES
+('SCALAR', 0),('SPECTRUM', 1),('IMAGE', 2);
+
+DROP TABLE IF EXISTS att_conf_write;
+
+-- Mappings for the Tango Data Write Type (used in att_conf)
+CREATE TABLE att_conf_write (
+    att_conf_write_id serial NOT NULL,
+    write text NOT NULL,
+    write_num smallint NOT NULL,
+    PRIMARY KEY (att_conf_write_id)
+);
+
+COMMENT ON TABLE att_conf_write is 'Attribute write type';
+
+INSERT INTO att_conf_write (write, write_num) VALUES
+('READ', 0),('READ_WITH_WRITE', 1),('WRITE',
2),('READ_WRITE', 3); + +-- The att_conf table contains the primary key for all data tables, the +-- att_conf_id. Expanded on the normal hdb++ tables since we add information +-- about the type. +CREATE TABLE IF NOT EXISTS att_conf ( + att_conf_id serial NOT NULL, + att_name text NOT NULL, + att_conf_type_id smallint NOT NULL, + att_conf_format_id smallint NOT NULL, + att_conf_write_id smallint NOT NULL, + table_name text NOT NULL, + cs_name text NOT NULL DEFAULT '', + domain text NOT NULL DEFAULT '', + family text NOT NULL DEFAULT '', + member text NOT NULL DEFAULT '', + name text NOT NULL DEFAULT '', + ttl int, + hide boolean DEFAULT false, + PRIMARY KEY (att_conf_id), + FOREIGN KEY (att_conf_type_id) REFERENCES att_conf_type (att_conf_type_id), + FOREIGN KEY (att_conf_format_id) REFERENCES att_conf_format (att_conf_format_id), + FOREIGN KEY (att_conf_write_id) REFERENCES att_conf_write (att_conf_write_id), + UNIQUE (att_name) +); + +COMMENT ON TABLE att_conf is 'Attribute Configuration Table'; +CREATE INDEX IF NOT EXISTS att_conf_att_conf_id_idx ON att_conf (att_conf_id); +CREATE INDEX IF NOT EXISTS att_conf_att_conf_type_id_idx ON att_conf (att_conf_type_id); + +------------------------------------------------------------------------------- +DROP TABLE IF EXISTS att_history_event; + +CREATE TABLE att_history_event ( + att_history_event_id serial NOT NULL, + event text NOT NULL, + PRIMARY KEY (att_history_event_id) +); + +COMMENT ON TABLE att_history_event IS 'Attribute history events description'; +CREATE INDEX IF NOT EXISTS att_history_att_history_event_id_idx ON att_history_event (att_history_event_id); + +CREATE TABLE IF NOT EXISTS att_history ( + att_conf_id integer NOT NULL, + att_history_event_id integer NOT NULL, + event_time timestamp WITH TIME ZONE, + details json, + PRIMARY KEY (att_conf_id, event_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_history_event_id) REFERENCES att_history_event (att_history_event_id) +); + +COMMENT ON TABLE att_history is 'Attribute Configuration Events History Table'; +CREATE INDEX IF NOT EXISTS att_history_att_conf_id_inx ON att_history (att_conf_id); + +------------------------------------------------------------------------------- +CREATE TABLE IF NOT EXISTS att_parameter ( + att_conf_id integer NOT NULL, + recv_time timestamp WITH TIME ZONE NOT NULL, + label text NOT NULL DEFAULT '', + unit text NOT NULL DEFAULT '', + standard_unit text NOT NULL DEFAULT '', + display_unit text NOT NULL DEFAULT '', + format text NOT NULL DEFAULT '', + archive_rel_change text NOT NULL DEFAULT '', + archive_abs_change text NOT NULL DEFAULT '', + archive_period text NOT NULL DEFAULT '', + description text NOT NULL DEFAULT '', + details json, + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id) +); + +COMMENT ON TABLE att_parameter IS 'Attribute configuration parameters'; +CREATE INDEX IF NOT EXISTS att_parameter_recv_time_idx ON att_parameter (recv_time); +CREATE INDEX IF NOT EXISTS att_parameter_att_conf_id_idx ON att_parameter (att_conf_id); +SELECT create_hypertable('att_parameter', 'recv_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +------------------------------------------------------------------------------- +CREATE TABLE IF NOT EXISTS att_error_desc ( + att_error_desc_id serial NOT NULL, + error_desc text NOT NULL, + PRIMARY KEY (att_error_desc_id), + UNIQUE (error_desc) +); + +COMMENT ON TABLE att_error_desc IS 'Error Description Table'; +CREATE INDEX IF NOT EXISTS 
att_error_desc_att_error_desc_id_idx ON att_error_desc (att_error_desc_id); + +------------------------------------------------------------------------------- +CREATE TABLE IF NOT EXISTS att_scalar_devboolean ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r boolean, + value_w boolean, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devboolean IS 'Scalar Boolean Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devboolean_att_conf_id_idx ON att_scalar_devboolean (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devboolean_att_conf_id_data_time_idx ON att_scalar_devboolean (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devboolean', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devboolean ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r boolean[], + value_w boolean[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devboolean IS 'Array Boolean Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devboolean_att_conf_id_idx ON att_array_devboolean (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devboolean_att_conf_id_data_time_idx ON att_array_devboolean (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devboolean', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devuchar ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r uchar, + value_w uchar, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devuchar IS 'Scalar UChar Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devuchar_att_conf_id_idx ON att_scalar_devuchar (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devuchar_att_conf_id_data_time_idx ON att_scalar_devuchar (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devuchar', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devuchar ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r uchar[], + value_w uchar[], + quality smallint, + details json, + att_error_desc_id integer, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devuchar IS 'Array UChar Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devuchar_att_conf_id_idx ON att_array_devuchar (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devuchar_att_conf_id_data_time_idx ON att_array_devuchar (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devuchar', 'data_time', 
chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devshort ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r smallint, + value_w smallint, + quality smallint, + details json, + att_error_desc_id integer, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devshort IS 'Scalar Short Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devshort_att_conf_id_idx ON att_scalar_devshort (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devshort_att_conf_id_data_time_idx ON att_scalar_devshort (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devshort', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devshort ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r smallint[], + value_w smallint[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devshort IS 'Array Short Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devshort_att_conf_id_idx ON att_array_devshort (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devshort_att_conf_id_data_time_idx ON att_array_devshort (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devshort', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devushort ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r ushort, + value_w ushort, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devushort IS 'Scalar UShort Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devushort_att_conf_id_idx ON att_scalar_devushort (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devushort_att_conf_id_data_time_idx ON att_scalar_devushort (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devushort', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devushort ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r ushort[], + value_w ushort[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devushort IS 'Array UShort Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devushort_att_conf_id_idx ON att_array_devushort (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devushort_att_conf_id_data_time_idx ON att_array_devushort (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devushort', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS 
att_scalar_devlong ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r integer, + value_w integer, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devlong IS 'Scalar Long Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devlong_att_conf_id_idx ON att_scalar_devlong (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devlong_att_conf_id_data_time_idx ON att_scalar_devlong (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devlong', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devlong ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r integer[], + value_w integer[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devlong IS 'Array Long Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devlong_att_conf_id_idx ON att_array_devlong (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devlong_att_conf_id_data_time_idx ON att_array_devlong (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devlong', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devulong ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r ulong, + value_w ulong, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devulong IS 'Scalar ULong Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devulong_att_conf_id_idx ON att_scalar_devulong (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devulong_att_conf_id_data_time_idx ON att_scalar_devulong (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devulong', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devulong ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r ulong[], + value_w ulong[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devulong IS 'Array ULong Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devulong_att_conf_id_idx ON att_array_devulong (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devulong_att_conf_id_data_time_idx ON att_array_devulong (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devulong', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devlong64 ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r bigint, + value_w bigint, + 
quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devlong64 IS 'Scalar Long64 Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devlong64_att_conf_id_idx ON att_scalar_devlong64 (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devlong64_att_conf_id_data_time_idx ON att_scalar_devlong64 (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devlong64', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devlong64 ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r bigint[], + value_w bigint[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devlong64 IS 'Array Long64 Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devlong64_att_conf_id_idx ON att_array_devlong64 (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devlong64_att_conf_id_data_time_idx ON att_array_devlong64 (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devlong64', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devulong64 ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r ulong64, + value_w ulong64, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devulong64 IS 'Scalar ULong64 Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devulong64_att_conf_id_idx ON att_scalar_devulong64 (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devulong64_att_conf_id_data_time_idx ON att_scalar_devulong64 (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devulong64', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devulong64 ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r ulong64[], + value_w ulong64[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devulong64 IS 'Array ULong64 Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devulong64_att_conf_id_idx ON att_array_devulong64 (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devulong64_att_conf_id_data_time_idx ON att_array_devulong64 (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devulong64', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devfloat ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r real, + value_w real, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY 
(att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devfloat IS 'Scalar Float Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devfloat_att_conf_id_idx ON att_scalar_devfloat (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devfloat_att_conf_id_data_time_idx ON att_scalar_devfloat (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devfloat', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devfloat ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r real[], + value_w real[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devfloat IS 'Array Float Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devfloat_att_conf_id_idx ON att_array_devfloat (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devfloat_att_conf_id_data_time_idx ON att_array_devfloat (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devfloat', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devdouble ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r double precision, + value_w double precision, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devdouble IS 'Scalar Double Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devdouble_att_conf_id_idx ON att_scalar_devdouble (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devdouble_att_conf_id_data_time_idx ON att_scalar_devdouble (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devdouble', 'data_time', chunk_time_interval => interval '14 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devdouble ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r double precision[], + value_w double precision[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devdouble IS 'Array Double Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devdouble_att_conf_id_idx ON att_array_devdouble (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devdouble_att_conf_id_data_time_idx ON att_array_devdouble (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devdouble', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devstring ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r text, + value_w text, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf 
(att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devstring IS 'Scalar String Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devstring_att_conf_id_idx ON att_scalar_devstring (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devstring_att_conf_id_data_time_idx ON att_scalar_devstring (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devstring', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devstring ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r text[], + value_w text[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devstring IS 'Array String Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devstring_att_conf_id_idx ON att_array_devstring (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devstring_att_conf_id_data_time_idx ON att_array_devstring (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devstring', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devstate ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r integer, + value_w integer, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devstate IS 'Scalar State Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devstate_att_conf_id_idx ON att_scalar_devstate (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devstate_att_conf_id_data_time_idx ON att_scalar_devstate (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devstate', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devstate ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r integer[], + value_w integer[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devstate IS 'Array State Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devstate_att_conf_id_idx ON att_array_devstate (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devstate_att_conf_id_data_time_idx ON att_array_devstate (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devstate', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_scalar_devencoded ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r bytea, + value_w bytea, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); +COMMENT ON 
TABLE att_scalar_devencoded IS 'Scalar DevEncoded Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devencoded_att_conf_id_idx ON att_scalar_devencoded (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devencoded_att_conf_id_data_time_idx ON att_scalar_devencoded (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devencoded', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devencoded ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r bytea[], + value_w bytea[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); +COMMENT ON TABLE att_array_devencoded IS 'Array DevEncoded Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devencoded_att_conf_id_idx ON att_array_devencoded (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devencoded_att_conf_id_data_time_idx ON att_array_devencoded (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devencoded', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +-- The Enum tables are unique in that they store a value and text label for +-- each data point +CREATE TABLE IF NOT EXISTS att_scalar_devenum ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r_label text, + value_r smallint, + value_w_label text, + value_w smallint, + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_scalar_devenum IS 'Scalar Enum Values Table'; +CREATE INDEX IF NOT EXISTS att_scalar_devenum_att_conf_id_idx ON att_scalar_devenum (att_conf_id); +CREATE INDEX IF NOT EXISTS att_scalar_devenum_att_conf_id_data_time_idx ON att_scalar_devenum (att_conf_id,data_time DESC); +SELECT create_hypertable('att_scalar_devenum', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + +CREATE TABLE IF NOT EXISTS att_array_devenum ( + att_conf_id integer NOT NULL, + data_time timestamp WITH TIME ZONE NOT NULL, + value_r_label text[], + value_r smallint[], + value_w_label text[], + value_w smallint[], + quality smallint, + att_error_desc_id integer, + details json, + PRIMARY KEY (att_conf_id, data_time), + FOREIGN KEY (att_conf_id) REFERENCES att_conf (att_conf_id), + FOREIGN KEY (att_error_desc_id) REFERENCES att_error_desc (att_error_desc_id) +); + +COMMENT ON TABLE att_array_devenum IS 'Array Enum Values Table'; +CREATE INDEX IF NOT EXISTS att_array_devenum_att_conf_id_idx ON att_array_devenum (att_conf_id); +CREATE INDEX IF NOT EXISTS att_array_devenum_att_conf_id_data_time_idx ON att_array_devenum (att_conf_id,data_time DESC); +SELECT create_hypertable('att_array_devenum', 'data_time', chunk_time_interval => interval '28 day', create_default_indexes => FALSE); + diff --git a/db-schema/users.sql b/db-schema/users.sql new file mode 100644 index 0000000000000000000000000000000000000000..2949ef8bc3cbd8245ca2584aca10b84ff4122b9e --- /dev/null +++ b/db-schema/users.sql @@ -0,0 +1,30 @@ +-- Roles +CREATE ROLE readonly; +CREATE ROLE readwrite; + +-- Permissions - readonly +GRANT CONNECT ON DATABASE hdb TO 
readonly;
+GRANT USAGE ON SCHEMA public TO readonly;
+GRANT SELECT ON ALL TABLES IN SCHEMA public TO readonly;
+ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO readonly;
+
+-- Permissions - readwrite
+GRANT CONNECT ON DATABASE hdb TO readwrite;
+GRANT USAGE ON SCHEMA public TO readwrite;
+GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO readwrite;
+ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO readwrite;
+GRANT USAGE ON ALL SEQUENCES IN SCHEMA public TO readwrite;
+ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE ON SEQUENCES TO readwrite;
+GRANT ALL ON SCHEMA public TO readwrite;
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO readwrite;
+GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO readwrite;
+
+-- Users
+CREATE ROLE hdb_cfg_man WITH LOGIN PASSWORD 'hdbpp';
+GRANT readwrite TO hdb_cfg_man;
+
+CREATE ROLE hdb_event_sub WITH LOGIN PASSWORD 'hdbpp';
+GRANT readwrite TO hdb_event_sub;
+
+CREATE ROLE hdb_java_reporter WITH LOGIN PASSWORD 'hdbpp';
+GRANT readonly TO hdb_java_reporter;
\ No newline at end of file
diff --git a/doc/README.md b/doc/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d4368d95bd5686da5bac2e83a218df8b0af3e554
--- /dev/null
+++ b/doc/README.md
@@ -0,0 +1,26 @@
+# Table of Contents
+
+The documentation is purely about getting the shared library running on a correctly configured database. Setup of the TimescaleDb cluster and its stack is left to the user.
+
+- [Table of Contents](#Table-of-Contents)
+  - [About](#About)
+  - [Building and Installation](#Building-and-Installation)
+  - [DB Schema](#DB-Schema)
+  - [Configuration](#Configuration)
+
+## About
+
+The overview is in the main project [README](../README.md).
+
+## Building and Installation
+
+* [Build](build.md) instructions.
+* [Installation](install.md) guidelines.
+
+## DB Schema
+
+* [Schema](db-schema-config.md) guidelines and setup.
+
+## Configuration
+
+* [Configuration](configuration.md) parameter details.
\ No newline at end of file
diff --git a/doc/build.md b/doc/build.md
new file mode 100644
index 0000000000000000000000000000000000000000..3f6496096e3f2c56538eeb00d623189177452306
--- /dev/null
+++ b/doc/build.md
@@ -0,0 +1,136 @@
+# Build Instructions
+
+To build the shared library, please read the following.
+
+## Dependencies
+
+The project has two types of dependencies: those required by the toolchain, and those required to do the actual build. Other dependencies are integrated directly into the project as submodules. The following thirdparty modules exist:
+
+* libpqxx - Modern C++ Postgresql library (Submodule)
+* spdlog - Logging system (Submodule)
+* Catch2 - Unit test subsystem (Submodule)
+* libhdbpp - Part of the hdb++ library loading chain (Modified version of the [original](https://github.com/tango-controls-hdbpp/libhdbpp) project. This will be pushed back to the original repository in time)
+
+### Toolchain Dependencies
+
+If wishing to build the project, ensure the following dependencies are met:
+
+* CMake 3.6 or higher
+* C++14 compatible compiler (code base is using c++14)
+
+### Build Dependencies
+
+Ensure the development versions of the dependencies are installed. These are as follows:
+
+* Tango Controls 9 or higher development headers and libraries
+* omniORB release 4 or higher development headers and libraries
+* libzmq3-dev or libzmq5-dev
+* libpq-dev - Postgres C development library
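+
+On Debian/Ubuntu, most of these can be installed from the distribution repositories. A sketch only (package names are indicative and vary by release; the Tango Controls development headers may come from a vendor or project repository rather than the distribution):
+
+```bash
+# Illustrative only: toolchain plus the common build dependencies
+sudo apt-get install build-essential cmake libomniorb4-dev libzmq3-dev libpq-dev
+```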
+
+## Building and Installation
+
+To compile this library, first ensure it has been recursively cloned so all submodules are present in /thirdparty. The build system uses pkg-config to find some dependencies, for example Tango. If Tango is not installed to a standard location, set PKG_CONFIG_PATH, e.g.:
+
+```bash
+export PKG_CONFIG_PATH=/non/standard/tango/install/location
+```
+
+Then to build just the library:
+
+```bash
+mkdir -p build
+cd build
+cmake ..
+make
+```
+
+The pkg-config path can also be set with the cmake argument CMAKE_PREFIX_PATH. This can be set on the command line at configuration time, e.g.:
+
+```bash
+...
+cmake -DCMAKE_PREFIX_PATH=/non/standard/tango/install/location ..
+...
+```
+
+## Build Flags
+
+The following build flags are available.
+
+### Standard CMake Flags
+
+The following is a list of common useful CMake flags and their use:
+
+| Flag | Setting | Description |
+|------|-----|-----|
+| CMAKE_INSTALL_PREFIX | PATH | Standard CMake flag to modify the install prefix. |
+| CMAKE_INCLUDE_PATH | PATH[S] | Standard CMake flag to add include paths to the search path. |
+| CMAKE_LIBRARY_PATH | PATH[S] | Standard CMake flag to add paths to the library search path. |
+| CMAKE_BUILD_TYPE | Debug/Release | Build type to produce. |
+
+### Project Flags
+
+| Flag | Setting | Default | Description |
+|------|-----|-----|-----|
+| BUILD_UNIT_TESTS | ON/OFF | OFF | Build unit tests |
+| BUILD_BENCHMARK_TESTS | ON/OFF | OFF | Build benchmark tests (Forces a Release build) |
+| ENABLE_CLANG | ON/OFF | OFF | Clang code static analysis, readability, and cppcore guideline enforcement |
+
+## Running Tests
+
+### Unit Tests
+
+The project has extensive unit tests to ensure it is functioning as expected. Build the project with testing enabled:
+
+```bash
+mkdir -p build
+cd build
+cmake -DBUILD_UNIT_TESTS=ON ..
+make
+```
+
+To run all unit tests, a postgresql database node is required with the project schema loaded up. There is a default connection string inside test/TestHelpers.hpp:
+
+```
+user=postgres host=localhost port=5432 dbname=hdb password=password
+```
+
+If you run the hdb timescale docker image associated with this project locally then this will connect automatically. If you wish to use a different database, edit the string in test/TestHelpers.hpp.
+
+To run all tests:
+
+```bash
+./test/unit-tests
+```
+
+To look at the available tests and tags, should you wish to run a subset of the test suite (for example, you do not have a postgresql node to test against), the tests can be listed:
+
+```bash
+./bin/unit-tests --list-tests
+```
+
+Or:
+
+```bash
+./bin/unit-tests --list-tags
+```
+
+To see more options for the unit-test command line binary:
+
+```bash
+./bin/unit-tests --help
+```
+
+### Benchmark Tests
+
+These are a work in progress to explore future optimisation points. If built, they can be run as follows:
+
+```bash
+mkdir -p build
+cd build
+cmake -DBUILD_BENCHMARK_TESTS=ON ..
+make
+```
+
+```bash
+./benchmark/benchmark-tests
+```
\ No newline at end of file
diff --git a/doc/configuration.md b/doc/configuration.md
new file mode 100644
index 0000000000000000000000000000000000000000..a35efb0fc95be6dcd5b7c25bc99b5ef0d2434ad7
--- /dev/null
+++ b/doc/configuration.md
@@ -0,0 +1,40 @@
+# Configuration
+
+## Library Configuration Parameters
+
+Configuration parameters are as follows:
+
+| Parameter | Mandatory | Default | Description |
+|------|-----|-----|-----|
+| libname | true | None | Must be "libhdb++timescale.so" |
+| connect_string | true | None | Postgres connection string, e.g. user=postgres host=localhost port=5432 dbname=hdb password=password |
+| logging_level | false | error | Logging level. See table below |
+| log_file | false | false | Enable logging to file |
+| log_console | false | false | Enable logging to the console |
+| log_syslog | false | false | Enable logging to syslog |
+| log_file_name | false | None | When logging to file, this is the path and name of the file to use. Ensure the path exists, otherwise this is an error condition. |
+
+The logging_level parameter is case insensitive. Logging levels are as follows:
+
+| Level | Description |
+|------|-----|
+| error | Log only error level events (recommended unless debugging) |
+| warning | Log only warning level events |
+| info | Log only info level events |
+| debug | Log only debug level events. Good for early install debugging |
+| trace | Trace level logging. Excessive level of debug, good for involved debugging |
+| disabled | Disable logging subsystem |
+
+## Configuration Example
+
+A short example LibConfiguration property value on an EventSubscriber or ConfigManager. You will HAVE to change the various parts to match your system:
+
+```
+connect_string=user=hdb-user password=password host=hdb-database port=5432 dbname=hdb
+logging_level=debug
+log_file=true
+log_syslog=false
+log_console=false
+libname=libhdb++timescale.so
+log_file_name=/tmp/hdb/es-name.log
+```
\ No newline at end of file
diff --git a/doc/db-schema-config.md b/doc/db-schema-config.md
new file mode 100644
index 0000000000000000000000000000000000000000..8c0e6ea16fb90fa02b8894ae24dc1ea3aa8314c6
--- /dev/null
+++ b/doc/db-schema-config.md
@@ -0,0 +1,114 @@
+# Database Schema Configuration
+
+Schema setup and management is a very important aspect of running the HDB++ system with TimescaleDb. The following presents guidelines and a setup plan, but it is not exhaustive and additional information is welcome.
+
+Some of the information assumes familiarity with TimescaleDb terms and technologies. Please refer to the TimescaleDb [documentation](www.timescaledb.com) for more information.
+
+- [Database Schema Configuration](#Database-Schema-Configuration)
+  - [Hyperchunk Sizes](#Hyperchunk-Sizes)
+  - [Schema Import](#Schema-Import)
+    - [Admin User](#Admin-User)
+    - [Table Creation](#Table-Creation)
+    - [Users](#Users)
+  - [Clean-up](#Clean-up)
+  - [Clustering](#Clustering)
+
+## Hyperchunk Sizes
+
+The [schema](../db-schema/schema.sql) file has default values set for all hypertable chunk sizes. It is assumed the initial deployment data load will be smaller than the final fully operational system, so chunk sizes are as follows:
+
+- 28 days for all data tables, except:
+- 14 days for att_scalar_devdouble, since this appears to be used more often than other tables.
+
+These values can, and should be, adjusted to the deployment situation. Please see the TimescaleDb [documentation](www.timescaledb.com) for information on choosing chunk sizes.
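+
+For example, a chunk interval can be adjusted with TimescaleDb's set_chunk_time_interval function. A sketch only: the 7 day value is illustrative, and the new interval applies to newly created chunks, not existing ones:
+
+```sql
+-- Illustrative: shrink the chunk interval of the busiest table
+SELECT set_chunk_time_interval('att_scalar_devdouble', INTERVAL '7 days');
+```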
Please see the TimescaleDb [documentation](https://www.timescaledb.com) for information on choosing chunk sizes.
+
+Important: these are initial values; the expectation is that the database will be monitored and the values adjusted as it takes on its full load.
+
+## Schema Import
+
+General setup steps.
+
+### Admin User
+
+Rather than create and manage the tables via a superuser, we create an admin user and have them create the tables:
+
+```sql
+CREATE ROLE hdb_admin WITH LOGIN PASSWORD 'hdbpp';
+ALTER USER hdb_admin CREATEDB;
+ALTER USER hdb_admin CREATEROLE;
+ALTER USER hdb_admin SUPERUSER;
+```
+
+Note: the SUPERUSER trait will be stripped after the tables are set up.
+
+### Table Creation
+
+Now import the schema.sql as the hdb_admin user. From psql:
+
+```bash
+psql -U hdb_admin -h HOST -p PORT -f schema.sql -d template1
+```
+
+Note: we use database template1 since hdb_admin currently has no database to connect to.
+
+We should now have an hdb database owned by hdb_admin.
+
+### Users
+
+Next we need to set up the users (this may require some improvements, pull requests welcome). Connect as a superuser and create two roles, a readonly and a readwrite role:
+
+```sql
+-- Roles
+CREATE ROLE readonly;
+CREATE ROLE readwrite;
+
+-- Permissions - readonly
+GRANT CONNECT ON DATABASE hdb TO readonly;
+GRANT USAGE ON SCHEMA public TO readonly;
+GRANT SELECT ON ALL TABLES IN SCHEMA public TO readonly;
+ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO readonly;
+
+-- Permissions - readwrite
+GRANT CONNECT ON DATABASE hdb TO readwrite;
+GRANT USAGE ON SCHEMA public TO readwrite;
+GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO readwrite;
+ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO readwrite;
+GRANT USAGE ON ALL SEQUENCES IN SCHEMA public TO readwrite;
+ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE ON SEQUENCES TO readwrite;
+GRANT ALL ON SCHEMA public TO readwrite;
+GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO readwrite;
+GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO readwrite;
+
+-- Users
+CREATE ROLE hdb_cfg_man WITH LOGIN PASSWORD 'hdbpp';
+GRANT readwrite TO hdb_cfg_man;
+
+CREATE ROLE hdb_event_sub WITH LOGIN PASSWORD 'hdbpp';
+GRANT readwrite TO hdb_event_sub;
+
+CREATE ROLE hdb_java_reporter WITH LOGIN PASSWORD 'hdbpp';
+GRANT readonly TO hdb_java_reporter;
+```
+
+Here we created three users that external applications will use to connect to the database. You may create as many users, in whatever roles, as you want.
+
+## Clean-up
+
+Finally, strip the SUPERUSER trait from hdb_admin:
+
+```sql
+ALTER USER hdb_admin NOSUPERUSER;
+```
+
+## Clustering
+
+To get the levels of performance required to make the solution viable, we MUST cluster on the composite index of each data table. The file [cluster.sql](../db-schema/cluster.sql) contains the commands that must be run after the database has been set up.
+
+Without this step, select performance will degrade on large tables.
+
+As data is added, the tables will require the new data to be clustered on the index. You may choose the period and time at which to do this; note that the process locks the tables. Options (a cron sketch is given below):
+
+- Manually
+- Cron job
+
+TimescaleDb supports a more fine-grained clustering process. A tool is being developed to utilise this and run as a process that clusters on the index at regular intervals.
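+
+As a minimal sketch of the cron option (assuming the hdb_admin user owns the tables and supplies credentials via ~/.pgpass, both hypothetical choices), a weekly recluster could look like this; a bare CLUSTER reclusters every table that has been clustered before:
+
+```bash
+# Hypothetical crontab entry: recluster all previously clustered tables
+# every Sunday at 02:00. CLUSTER locks each table while it runs.
+0 2 * * 0 psql -U hdb_admin -d hdb -c 'CLUSTER;'
+```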
diff --git a/doc/install.md b/doc/install.md
new file mode 100644
index 0000000000000000000000000000000000000000..be187200a9d7a8fa8070d34a7324a3957bae871f
--- /dev/null
+++ b/doc/install.md
@@ -0,0 +1,21 @@
+# Installation Instructions
+
+All submodules are combined into the final library for ease of deployment. This means just the libhdb++timescale.so binary needs to be deployed to the target system.
+
+## System Dependencies
+
+The running system requires libpq5 installed to support the calls to PostgreSQL. On Debian/Ubuntu this can be deployed as follows:
+
+```bash
+sudo apt-get install libpq5
+```
+
+## Installation
+
+After the build has completed, simply run:
+
+```bash
+sudo make install
+```
+
+The shared library will be installed to /usr/local/lib on Debian/Ubuntu systems.
\ No newline at end of file
diff --git a/include/hdb++/AbstractDB.h b/include/hdb++/AbstractDB.h
deleted file mode 100644
index a90b4aae9d407c81737762e6665ae852074a06ba..0000000000000000000000000000000000000000
--- a/include/hdb++/AbstractDB.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* Copyright (C) 2014-2017
-   Elettra - Sincrotrone Trieste S.C.p.A.
-   Strada Statale 14 - km 163,5 in AREA Science Park
-   34149 Basovizza, Trieste, Italy.
-
-   This file is part of libhdb++.
-
-   libhdb++ is free software: you can redistribute it and/or modify
-   it under the terms of the Lesser GNU General Public License as published by
-   the Free Software Foundation, either version 3 of the License, or
-   (at your option) any later version.
-
-   libhdb++ is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser
-   GNU General Public License for more details.
-
-   You should have received a copy of the Lesser GNU General Public License
-   along with libhdb++. If not, see <http://www.gnu.org/licenses/>. */
-
-#ifndef _HDBPP_ABSTRACTDB_H
-#define _HDBPP_ABSTRACTDB_H
-
-#include <tango.h>
-#include <tuple>
-#include <vector>
-
-namespace hdbpp
-{
-#define DB_INSERT 0
-#define DB_START 1
-#define DB_STOP 2
-#define DB_REMOVE 3
-#define DB_INSERT_PARAM 4
-#define DB_PAUSE 5
-#define DB_UPDATETTL 6
-#define DB_ADD 7
-
-// Data struct used to pass information to the backend
-typedef struct HdbEventDataType_
-{
-    std::string attr_name;
-    int max_dim_x;
-    int max_dim_y;
-    int data_type;
-    Tango::AttrDataFormat data_format;
-    int write_type;
-
-} HdbEventDataType;
-
-enum class HdbppFeatures
-{
-    // Time to live feature. Attributes can be timed out by the database based
-    // on the configured ttl value
-    TTL,
-
-    // Backend supports passing of multiple events and batching them into
-    // the database. This is a performance improvement.
-    BATCH_INSERTS,
-};
-
-// Abstract base class that backends are required to implement when offering
-// a storage backend to the hdb++ system
-class AbstractDB
-{
-public:
-    virtual ~AbstractDB() {}
-
-    // Inserts an attribute archive event for the EventData into the database. If the attribute
-    // does not exist in the database, then an exception will be raised. If the attr_value
-    // field of the data parameter if empty, then the attribute is in an error state
-    // and the error message will be archived.
-    virtual void insert_event(Tango::EventData *event, const HdbEventDataType &data_type) = 0;
-
-    // Insert multiple attribute archive events. Any attributes that do not exist will
-    // cause an exception.
On failure the fall back is to insert events individually - virtual void insert_events(std::vector<std::tuple<Tango::EventData *, HdbEventDataType>> events) = 0; - - // Inserts the attribute configuration data (Tango Attribute Configuration event data) - // into the database. The attribute must be configured to be stored in HDB++, - // otherwise an exception will be thrown. - virtual void insert_param_event(Tango::AttrConfEventData *param_event, const HdbEventDataType &data_type) = 0; - - // Add an attribute to the database. Trying to add an attribute that already exists will - // cause an exception - virtual void add_attribute(const std::string &name, int type, int format, int write_type) = 0; - - // Update the attribute ttl. The attribute must have been configured to be stored in - // HDB++, otherwise an exception is raised - virtual void update_ttl(const std::string &name, unsigned int ttl) = 0; - - // Inserts a history event for the attribute name passed to the function. The attribute - // must have been configured to be stored in HDB++, otherwise an exception is raised. - virtual void insert_history_event(const std::string &name, unsigned char event) = 0; - - // Check what hdbpp features this library supports. - virtual bool supported(HdbppFeatures feature) = 0; -}; - -// Abstract factory class that backend must implement to help create an instance -// of the storage class deriving from AbstractDB -class DBFactory -{ -public: - // Create a backend database object, and return it as a pointer - virtual AbstractDB *create_db(const string &id, const std::vector<std::string> &configuration) = 0; - virtual ~DBFactory() {}; -}; - -} // namespace hdbpp - -extern "C" -{ - typedef hdbpp::DBFactory *getDBFactory_t(); - hdbpp::DBFactory *getDBFactory(); -} - -#endif // _HDBPP_ABSTRACTDB_H diff --git a/include/hdb++/HdbClient.h b/include/hdb++/HdbClient.h deleted file mode 100644 index 78445f8953f2294733fd7cc40b015a6921d64f21..0000000000000000000000000000000000000000 --- a/include/hdb++/HdbClient.h +++ /dev/null @@ -1,72 +0,0 @@ -/* Copyright (C) : 2014-2019 - European Synchrotron Radiation Facility - BP 220, Grenoble 38043, FRANCE - - This file is part of libhdb++timescale. - - libhdb++timescale is free software: you can redistribute it and/or modify - it under the terms of the Lesser GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - libhdb++timescale is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser - GNU General Public License for more details. - - You should have received a copy of the Lesser GNU General Public License - along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>. */ - -#ifndef _HDBPP_TIMESCALE_CLIENT_HPP -#define _HDBPP_TIMESCALE_CLIENT_HPP - -#include "hdb++/AbstractDB.h" - -#include <memory> -#include <string> -#include <vector> - -namespace hdbpp -{ -class HdbClient : public AbstractDB -{ -public: - HdbClient(const string &id, const std::vector<std::string> &configuration); - virtual ~HdbClient() {} - - // Inserts an attribute archive event for the EventData into the database. If the attribute - // does not exist in the database, then an exception will be raised. If the attr_value - // field of the data parameter if empty, then the attribute is in an error state - // and the error message will be archived. 
- void insert_event(Tango::EventData *event, const HdbEventDataType &data_type) override; - - // Insert multiple attribute archive events. Any attributes that do not exist will - // cause an exception. On failure the fall back is to insert events individually - void insert_events(std::vector<std::tuple<Tango::EventData *, HdbEventDataType>> events) override; - - // Inserts the attribute configuration data (Tango Attribute Configuration event data) - // into the database. The attribute must be configured to be stored in HDB++, - // otherwise an exception will be thrown. - void insert_param_event(Tango::AttrConfEventData *data, const HdbEventDataType &data_type) override; - - // Add an attribute to the database. Trying to add an attribute that already exists will - // cause an exception - void add_attribute(const std::string &name, int type, int format, int write_type) override; - - // Update the attribute ttl. The attribute must have been configured to be stored in - // HDB++, otherwise an exception is raised - void update_ttl(const std::string &name, unsigned int ttl) override; - - // Inserts a history event for the attribute name passed to the function. The attribute - // must have been configured to be stored in HDB++, otherwise an exception is raised. - void insert_history_event(const std::string &name, unsigned char event) override; - - // Check what hdbpp features this library supports. - bool supported(HdbppFeatures feature) override; - -private: - std::unique_ptr<AbstractDB> _db; -}; - -} // namespace hdbpp -#endif // _HDBPP_TIMESCALE_CLIENT_HPP diff --git a/include/hdb++/HdbppTimescaleDb.hpp b/include/hdb++/HdbppTimescaleDb.hpp index 057d79d48998174f6d49919e49106898f16d80ab..e9d03900c35709b9009fc9d0eb093fbe0de91776 100644 --- a/include/hdb++/HdbppTimescaleDb.hpp +++ b/include/hdb++/HdbppTimescaleDb.hpp @@ -20,17 +20,106 @@ #ifndef _HDBPP_TIMESCALE_HPP #define _HDBPP_TIMESCALE_HPP -#include <hdb++/AbstractDB.h> +#include <libhdb++/LibHdb++.h> #include <string> +#include <tango.h> #include <vector> namespace hdbpp { +class HdbppTimescaleDb : public AbstractDB +{ +public: + /** + * @brief HdbppTimescaleDb constructor + * + * The configuration parameters must contain the following strings: + * + * @param configuration A list of configuration parameters to start the driver with. + */ + HdbppTimescaleDb(const std::vector<std::string> &configuration); + + /** + * @brief Destroy the HdbppTimescaleDb library object + */ + virtual ~HdbppTimescaleDb(); + + /** + * @brief Insert an attribute archive event into the database + * + * Inserts an attribute archive event for the EventData into the database. If the attribute + * does not exist in the database, then an exception will be raised. If the attr_value + * field of the data parameter if empty, then the attribute is in an error state + * and the error message will be archived. + * + * @param event_data Tango event data about the attribute. + * @param event_data_type HDB event data for the attribute. + * @throw Tango::DevFailed + */ + virtual void insert_Attr(Tango::EventData *event_data, HdbEventDataType event_data_type); + + /** + * @brief Inserts the attribute configuration data. + * + * Inserts the attribute configuration data (Tango Attribute Configuration event data) + * into the database. The attribute must be configured to be stored in HDB++, + * otherwise an exception will be thrown. + * + * @param conf_event_data Tango event data about the attribute. + * @param event_data_type HDB event data for the attribute. 
+ * @throw Tango::DevFailed + */ + virtual void insert_param_Attr(Tango::AttrConfEventData *conf_event_data, HdbEventDataType /* event_data_type */); + + /** + * @brief Add and configure an attribute in the database. + * + * Trying to reconfigure an existing attribute will result in an exception, and if an + * attribute already exists with the same configuration then the ttl will be updated if + * different. + * + * @param fqdn_attr_name Fully qualified attribute name + * @param type The type of the attribute. + * @param format The format of the attribute. + * @param write_type The read/write access of the type. + * @param ttl The time to live in hour, 0 for infinity + * @throw Tango::DevFailed + */ + virtual void configure_Attr( + std::string fqdn_attr_name, int type, int format, int write_type, unsigned int ttl); + + /** + * @brief Update the ttl value for an attribute. + * + * The attribute must have been configured to be stored in HDB++, otherwise an exception + * is raised + * + * @param fqdn_attr_name Fully qualified attribute nam + * @param ttl The time to live in hours, 0 for infinity + * @throw Tango::DevFailed + */ + virtual void updateTTL_Attr(std::string fqdn_attr_name, unsigned int ttl); + + /** + * @brief Record a start, Stop, Pause or Remove history event for an attribute. + * + * Inserts a history event for the attribute name passed to the function. The attribute + * must have been configured to be stored in HDB++, otherwise an exception is raised. + * This function will also insert an additional CRASH history event before the START + * history event if the given event parameter is DB_START and if the last history event + * stored was also a START event. + * + * @param fqdn_attr_name Fully qualified attribute name + * @param event + * @throw Tango::DevFailed + */ + virtual void event_Attr(std::string fqdn_attr_name, unsigned char event); +}; + class HdbppTimescaleDbFactory : public DBFactory { public: - // return a new HdbppTimescaleDb object - virtual AbstractDB *create_db(const string &id, const std::vector<std::string> &configuration); + virtual AbstractDB *create_db(std::vector<std::string> configuration); }; } // namespace hdbpp diff --git a/src/AttributeName.cpp b/src/AttributeName.cpp index 80293f0c70e72daa30053bf47f9870df54014c26..61b276f69c0ce45c0645a13aff8e28712bacb887 100644 --- a/src/AttributeName.cpp +++ b/src/AttributeName.cpp @@ -59,14 +59,14 @@ void AttributeName::clear() noexcept //============================================================================= //============================================================================= -auto AttributeName::tangoHost() -> const std::string & +const string &AttributeName::tangoHost() { validate(); if (_tango_host_cache.empty()) { - // if tango:// exists on the std::string, strip it off by moving the start in 8 characters - auto start = _fqdn_attr_name.find("tango://") == std::string::npos ? 0 : 8; + // if tango:// exists on the string, strip it off by moving the start in 8 characters + auto start = _fqdn_attr_name.find("tango://") == string::npos ? 
0 : 8; auto end = _fqdn_attr_name.find('/', start); _tango_host_cache = _fqdn_attr_name.substr(start, end - start); } @@ -76,17 +76,17 @@ auto AttributeName::tangoHost() -> const std::string & //============================================================================= //============================================================================= -auto AttributeName::tangoHostWithDomain() -> const std::string & +const string &AttributeName::tangoHostWithDomain() { validate(); if (_tango_host_with_domain_cache.empty()) { - std::string tango_host = tangoHost(); + string tango_host = tangoHost(); - if (tango_host.find('.') == std::string::npos) + if (tango_host.find('.') == string::npos) { - std::string server_name_with_domain; + string server_name_with_domain; auto server_name = tango_host.substr(0, tango_host.find(':', 0)); struct addrinfo hints = {}; @@ -104,27 +104,21 @@ auto AttributeName::tangoHostWithDomain() -> const std::string & return tangoHost(); } - + if (result == nullptr) { - spdlog::error("Error: Unable to add domain to tango host {}: getaddrinfo didn't return the canonical " - "name (result == nullptr)", - tango_host); - + spdlog::error("Error: Unable to add domain to tango host {}: getaddrinfo didn't return the canonical name (result == nullptr)", tango_host); return tangoHost(); } - + if (result->ai_canonname == nullptr) { - spdlog::error("Error: Unable to add domain to tango host {}: getaddrinfo didn't return the canonical " - "name (result->ai_canonname == nullptr)", - tango_host); - + spdlog::error("Error: Unable to add domain to tango host {}: getaddrinfo didn't return the canonical name (result->ai_canonname == nullptr)", tango_host); freeaddrinfo(result); return tangoHost(); } - - server_name_with_domain = std::string(result->ai_canonname) + tango_host.substr(tango_host.find(':', 0)); + + server_name_with_domain = string(result->ai_canonname) + tango_host.substr(tango_host.find(':', 0)); freeaddrinfo(result); // all done with this structure _tango_host_with_domain_cache = server_name_with_domain; @@ -140,14 +134,14 @@ auto AttributeName::tangoHostWithDomain() -> const std::string & //============================================================================= //============================================================================= -auto AttributeName::fullAttributeName() -> const std::string & +const string &AttributeName::fullAttributeName() { validate(); if (_full_attribute_name_cache.empty()) { - // if tango:// exists on the std::string, strip it off by moving the start in 8 characters - auto start = _fqdn_attr_name.find("tango://") == std::string::npos ? 0 : 8; + // if tango:// exists on the string, strip it off by moving the start in 8 characters + auto start = _fqdn_attr_name.find("tango://") == string::npos ? 
0 : 8; start = _fqdn_attr_name.find('/', start); start++; _full_attribute_name_cache = _fqdn_attr_name.substr(start); @@ -158,7 +152,7 @@ auto AttributeName::fullAttributeName() -> const std::string & //============================================================================= //============================================================================= -auto AttributeName::domain() -> const std::string & +const std::string &AttributeName::domain() { validate(); @@ -170,7 +164,7 @@ auto AttributeName::domain() -> const std::string & //============================================================================= //============================================================================= -auto AttributeName::family() -> const std::string & +const std::string &AttributeName::family() { validate(); @@ -182,7 +176,7 @@ auto AttributeName::family() -> const std::string & //============================================================================= //============================================================================= -auto AttributeName::member() -> const std::string & +const std::string &AttributeName::member() { validate(); @@ -194,7 +188,7 @@ auto AttributeName::member() -> const std::string & //============================================================================= //============================================================================= -auto AttributeName::name() -> const std::string & +const std::string &AttributeName::name() { validate(); @@ -206,31 +200,31 @@ auto AttributeName::name() -> const std::string & //============================================================================= //============================================================================= -void AttributeName::setDomainFamilyMemberName(const std::string &full_attr_name) +void AttributeName::setDomainFamilyMemberName(const string &full_attr_name) { auto first_slash = full_attr_name.find('/'); - if (first_slash == std::string::npos) + if (first_slash == string::npos) { - std::string msg {"Invalid attribute name: " + full_attr_name + ". There is no slash in attribute name"}; + string msg {"Invalid attribute name: " + full_attr_name + ". There is no slash in attribute name"}; spdlog::error("Error: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); } auto second_slash = full_attr_name.find('/', first_slash + 1); - if (second_slash == std::string::npos) + if (second_slash == string::npos) { - std::string msg {"Invalid attribute name: " + full_attr_name + ". There is only one slash in attribute name"}; + string msg {"Invalid attribute name: " + full_attr_name + ". There is only one slash in attribute name"}; spdlog::error("Error: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); } auto third_slash = full_attr_name.find('/', second_slash + 1); - if (third_slash == std::string::npos) + if (third_slash == string::npos) { - std::string msg {"Invalid attribute name: " + full_attr_name + ". There are only two slashes in attribute name"}; + string msg {"Invalid attribute name: " + full_attr_name + ". There are only two slashes in attribute name"}; spdlog::error("Error: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); } @@ -239,35 +233,35 @@ void AttributeName::setDomainFamilyMemberName(const std::string &full_attr_name) if (last_slash != third_slash) { - std::string msg {"Invalid attribute name: " + full_attr_name + ". 
Too many slashes provided in attribute name"}; + string msg {"Invalid attribute name: " + full_attr_name + ". Too many slashes provided in attribute name"}; spdlog::error("Error: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); } if (first_slash == 0) { - std::string msg {"Invalid attribute name: " + full_attr_name + ". Empty domain"}; + string msg {"Invalid attribute name: " + full_attr_name + ". Empty domain"}; spdlog::error("Error: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); } if (second_slash - first_slash - 1 == 0) { - std::string msg {"Invalid attribute name: " + full_attr_name + ". Empty family"}; + string msg {"Invalid attribute name: " + full_attr_name + ". Empty family"}; spdlog::error("Error: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); } if (third_slash - second_slash - 1 == 0) { - std::string msg {"Invalid attribute name: " + full_attr_name + ". Empty member"}; + string msg {"Invalid attribute name: " + full_attr_name + ". Empty member"}; spdlog::error("Error: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); } if (third_slash + 1 == full_attr_name.length()) { - std::string msg {"Invalid attribute name: " + full_attr_name + ". Empty name"}; + string msg {"Invalid attribute name: " + full_attr_name + ". Empty name"}; spdlog::error("Error: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); } @@ -286,7 +280,7 @@ void AttributeName::validate() // it means we just tried to execute a complex operation if (empty()) { - std::string msg {"AttributeName is empty."}; + string msg {"AttributeName is empty."}; spdlog::error("Failed validation for attribute: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); } @@ -301,7 +295,7 @@ void AttributeName::print(ostream &os) const //============================================================================= //============================================================================= -auto AttributeName::operator=(const AttributeName &other) -> AttributeName & +AttributeName &AttributeName::operator=(const AttributeName &other) { // clear the cache clear(); @@ -313,7 +307,7 @@ auto AttributeName::operator=(const AttributeName &other) -> AttributeName & //============================================================================= //============================================================================= -auto AttributeName::operator=(AttributeName &&other) noexcept -> AttributeName & +AttributeName &AttributeName::operator=(AttributeName &&other) noexcept { // clear the cache clear(); diff --git a/src/AttributeName.hpp b/src/AttributeName.hpp index f01aaf96121fc6796995c952ec32165e1fb14f9a..809400a51691095d52a3c3a353a7286884222cc1 100644 --- a/src/AttributeName.hpp +++ b/src/AttributeName.hpp @@ -45,49 +45,49 @@ public: AttributeName(const AttributeName &attr_name) { *this = attr_name; } AttributeName(const std::string &fqdn_attr_name); - auto fqdnAttributeName() const noexcept -> const std::string & { return _fqdn_attr_name; } - auto fullAttributeName() -> const std::string &; + const std::string &fqdnAttributeName() const noexcept { return _fqdn_attr_name; } + const std::string &fullAttributeName(); // tango host info - auto tangoHost() -> const std::string &; - auto tangoHostWithDomain() -> const std::string &; + const std::string &tangoHost(); + const std::string &tangoHostWithDomain(); // attribute name elements - auto domain() 
-> const std::string &; - auto family() -> const std::string &; - auto member() -> const std::string &; - auto name() -> const std::string &; + const std::string &domain(); + const std::string &family(); + const std::string &member(); + const std::string &name(); // utility functions void set(const std::string &fqdn_attr_name); void clear() noexcept; - auto empty() const noexcept -> bool { return _fqdn_attr_name.empty(); } + bool empty() const noexcept { return _fqdn_attr_name.empty(); } void print(std::ostream &os) const; - auto operator==(const AttributeName &other) -> bool { return _fqdn_attr_name == other._fqdn_attr_name; } - auto operator!=(const AttributeName &other) -> bool { return !(_fqdn_attr_name == other._fqdn_attr_name); } - auto operator=(const AttributeName &other) -> AttributeName &; - auto operator=(AttributeName &&other) noexcept -> AttributeName &; + bool operator==(const AttributeName &other) const { return _fqdn_attr_name == other._fqdn_attr_name; } + bool operator!=(const AttributeName &other) const { return !(_fqdn_attr_name == other._fqdn_attr_name); } + AttributeName &operator=(const AttributeName &other); + AttributeName &operator=(AttributeName &&other) noexcept; private: // extract the full attribute name, i.e. domain/family/member/name - auto getFullAttributeName(const std::string &fqdn_attr_name) -> std::string; + std::string getFullAttributeName(const std::string &fqdn_attr_name); // takes the fqdn and breaks out the various component parts, such // as domain, family etc void setDomainFamilyMemberName(const std::string &full_attr_name); - // combine the local domain and tango host as a std::string - auto addDomainToTangoHost(const std::string &tango_host) -> std::string; + // combine the local domain and tango host as a string + std::string addDomainToTangoHost(const std::string &tango_host); // check if the AttributeName is empty before executing a complex // operation, such as returning the tango host void validate(); - // the fully qualified domain name std::string + // the fully qualified domain name string std::string _fqdn_attr_name; - // each std::string is a cache, and generated only once to save + // each string is a cache, and generated only once to save // on performance std::string _full_attribute_name_cache; std::string _tango_host_cache; diff --git a/src/AttributeTraits.cpp b/src/AttributeTraits.cpp index 61188e53f8172fc35b89894a9702ea8216750b4e..5d7ed552deeac23aedc9f695638f5906bb930821 100644 --- a/src/AttributeTraits.cpp +++ b/src/AttributeTraits.cpp @@ -25,7 +25,7 @@ namespace hdbpp_internal { //============================================================================= //============================================================================= -auto AttributeTraits::isValid() const noexcept -> bool +bool AttributeTraits::isValid() const noexcept { // ensure all the type information is valid return _attr_write_type != Tango::WT_UNKNOWN && _attr_format != Tango::FMT_UNKNOWN && diff --git a/src/AttributeTraits.hpp b/src/AttributeTraits.hpp index b5aa19e8c43896a1bd50b82224ae9951f6b4d096..55e6b70ed1815e1495aabd5a28dde8af4df09fc4 100644 --- a/src/AttributeTraits.hpp +++ b/src/AttributeTraits.hpp @@ -48,42 +48,44 @@ public: ~AttributeTraits() = default; AttributeTraits(Tango::AttrWriteType write_type, Tango::AttrDataFormat format, Tango::CmdArgType data_type) : - _attr_write_type(write_type), _attr_format(format), _attr_type(data_type) + _attr_write_type(write_type), + _attr_format(format), + _attr_type(data_type) {} // general validation - 
auto isValid() const noexcept -> bool; - auto isInvalid() const noexcept -> bool { return !isValid(); } + bool isValid() const noexcept; + bool isInvalid() const noexcept { return !isValid(); } // format type information - auto isArray() const noexcept -> bool { return _attr_format == Tango::SPECTRUM; } - auto isScalar() const noexcept -> bool { return _attr_format == Tango::SCALAR; } - auto isImage() const noexcept -> bool { return _attr_format == Tango::IMAGE; } + bool isArray() const noexcept { return _attr_format == Tango::SPECTRUM; } + bool isScalar() const noexcept { return _attr_format == Tango::SCALAR; } + bool isImage() const noexcept { return _attr_format == Tango::IMAGE; } // write type information - auto isReadOnly() const noexcept -> bool { return _attr_write_type == Tango::READ; } - auto isWriteOnly() const noexcept -> bool { return _attr_write_type == Tango::WRITE; } - auto isReadWrite() const noexcept -> bool { return _attr_write_type == Tango::READ_WRITE; } - auto isReadWithWrite() const noexcept -> bool { return _attr_write_type == Tango::READ_WITH_WRITE; } - auto hasReadData() const noexcept -> bool { return isReadOnly() || isReadWrite() || isReadWithWrite(); } - auto hasWriteData() const noexcept -> bool { return isWriteOnly() || isReadWrite() || isReadWithWrite(); } + bool isReadOnly() const noexcept { return _attr_write_type == Tango::READ; } + bool isWriteOnly() const noexcept { return _attr_write_type == Tango::WRITE; } + bool isReadWrite() const noexcept { return _attr_write_type == Tango::READ_WRITE; } + bool isReadWithWrite() const noexcept { return _attr_write_type == Tango::READ_WITH_WRITE; } + bool hasReadData() const noexcept { return isReadOnly() || isReadWrite() || isReadWithWrite(); } + bool hasWriteData() const noexcept { return isWriteOnly() || isReadWrite() || isReadWithWrite(); } // type access - auto type() const noexcept -> Tango::CmdArgType { return _attr_type; } - auto writeType() const noexcept -> Tango::AttrWriteType { return _attr_write_type; } - auto formatType() const noexcept -> Tango::AttrDataFormat { return _attr_format; } + Tango::CmdArgType type() const noexcept { return _attr_type; } + Tango::AttrWriteType writeType() const noexcept { return _attr_write_type; } + Tango::AttrDataFormat formatType() const noexcept { return _attr_format; } // various utilities - auto operator=(const AttributeTraits &) -> AttributeTraits & = default; - auto operator=(AttributeTraits &&) -> AttributeTraits & = default; + AttributeTraits &operator=(const AttributeTraits &) = default; + AttributeTraits &operator=(AttributeTraits &&) = default; - auto operator==(const AttributeTraits &other) const -> bool + bool operator==(const AttributeTraits &other) const { return _attr_write_type == other.writeType() && _attr_format == other.formatType() && _attr_type == other.type(); } - auto operator!=(const AttributeTraits &other) const -> bool { return !(*this == other); } + bool operator!=(const AttributeTraits &other) const { return !(*this == other); } void print(std::ostream &os) const noexcept; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index ba862873111265df81514d0ac46648b078c7d487..41698efbb491e8274a8f0735694283cf472e9991 100755 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,27 +1,13 @@ cmake_minimum_required(VERSION 3.6) # source files -set(LOCAL_SRC_FILES +set(SRC_FILES ${SRC_FILES} ${CMAKE_CURRENT_SOURCE_DIR}/AttributeName.cpp ${CMAKE_CURRENT_SOURCE_DIR}/AttributeName.hpp ${CMAKE_CURRENT_SOURCE_DIR}/AttributeTraits.cpp - 
${CMAKE_CURRENT_SOURCE_DIR}/HdbppTimescaleDbApi.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/HdbppTimescaleDb.cpp ${CMAKE_CURRENT_SOURCE_DIR}/LibUtils.cpp ${CMAKE_CURRENT_SOURCE_DIR}/DbConnection.cpp ${CMAKE_CURRENT_SOURCE_DIR}/QueryBuilder.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/PqxxExtension.cpp) - -if(NOT BYPASS_LIBHDBPP) - set(LOCAL_SRC_FILES ${LOCAL_SRC_FILES} - ${CMAKE_CURRENT_SOURCE_DIR}/HdbppTimescaleDb.cpp) -endif() - -if(BYPASS_LIBHDBPP) - set(LOCAL_SRC_FILES ${LOCAL_SRC_FILES} - ${CMAKE_CURRENT_SOURCE_DIR}/HdbClient.cpp) -endif() - -set(SRC_FILES - ${SRC_FILES} - ${LOCAL_SRC_FILES} - PARENT_SCOPE) + ${CMAKE_CURRENT_SOURCE_DIR}/PqxxExtension.cpp + PARENT_SCOPE) \ No newline at end of file diff --git a/src/ColumnCache.hpp b/src/ColumnCache.hpp index ad37e6f98f8c9520f55b3b24b4ad876441d4f7f1..97d83cc4778b03a0714a3371581f0ad933a7c13f 100644 --- a/src/ColumnCache.hpp +++ b/src/ColumnCache.hpp @@ -44,13 +44,13 @@ namespace pqxx_conn // query if the reference has a value, if its not cached it will be // loaded from the database - auto valueExists(const TRef &reference) -> bool; + bool valueExists(const TRef &reference); // get the value associated with the reference, throws and exception if it does not // exist either in the cache or database. The caller can check valueExists() // before calling this function to know if its valid to attempt to return // the value - auto value(const TRef &reference) -> TValue; + TValue value(const TRef &reference); // cache a value in the internal maps void cacheValue(const TValue &value, const TRef &reference); @@ -60,7 +60,7 @@ namespace pqxx_conn // utility functions void clear() noexcept { _values.clear(); } - auto size() const noexcept -> int { return _values.size(); } + int size() const noexcept { return _values.size(); } void print(std::ostream &os) const noexcept; private: @@ -157,7 +157,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= template<typename TValue, typename TRef> - auto ColumnCache<TValue, TRef>::valueExists(const TRef &reference) -> bool + bool ColumnCache<TValue, TRef>::valueExists(const TRef &reference) { assert(_conn != nullptr); @@ -231,7 +231,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= template<typename TValue, typename TRef> - auto ColumnCache<TValue, TRef>::value(const TRef &reference) -> TValue + TValue ColumnCache<TValue, TRef>::value(const TRef &reference) { assert(_conn != nullptr); diff --git a/src/ConnectionBase.hpp b/src/ConnectionBase.hpp index 24ae4fac412f7f69447d297bf53e930eace6b16c..58d3783ecb65dbe5cda636c3a58ac6c7872438be 100644 --- a/src/ConnectionBase.hpp +++ b/src/ConnectionBase.hpp @@ -35,8 +35,8 @@ public: // connection API virtual void connect(const std::string &connect_string) = 0; virtual void disconnect() = 0; - virtual auto isOpen() const noexcept -> bool = 0; - virtual auto isClosed() const noexcept -> bool = 0; + virtual bool isOpen() const noexcept = 0; + virtual bool isClosed() const noexcept = 0; }; }; // namespace hdbpp_internal diff --git a/src/DbConnection.cpp b/src/DbConnection.cpp index 6f910d13b8a45cbaea59c149791e0964bc73756a..a2463ae7b733fc2e4ca393b63e01cb38f2f158f8 100644 --- a/src/DbConnection.cpp +++ b/src/DbConnection.cpp @@ -253,7 +253,6 @@ namespace pqxx_conn void DbConnection::storeParameterEvent(const string &full_attr_name, 
double event_time, const string &label, - const vector<string> &enum_labels, const string &unit, const string &standard_unit, const string &display_unit, @@ -277,7 +276,6 @@ namespace pqxx_conn }; check_parameter("label", label); - check_parameter("enum_labels", enum_labels); check_parameter("unit", unit); check_parameter("standard_unit", standard_unit); check_parameter("display_unit", display_unit); @@ -285,12 +283,11 @@ namespace pqxx_conn check_parameter("archive_abs_change", archive_abs_change); check_parameter("archive_period", archive_period); check_parameter("description", description); -/* - spdlog::trace("Parameter event data: event_time {}, label {}, enum_labels {}, unit {}, standard_unit {}, display_unit {}, " + + spdlog::trace("Parmater event data: event_time {}, label {}, unit {}, standard_unit {}, display_unit {}, " "format {}, archive_rel_change {}, archive_abs_change {}, archive_period {}, description {}", event_time, label, - enum_labels, unit, standard_unit, display_unit, @@ -299,7 +296,7 @@ namespace pqxx_conn archive_abs_change, archive_period, description); -*/ + checkConnection(LOCATION_INFO); checkAttributeExists(full_attr_name, LOCATION_INFO); @@ -309,57 +306,27 @@ namespace pqxx_conn pqxx::perform([&, this]() { pqxx::work tx {(*_conn), StoreParameterEvent}; - if (_db_store_method == DbStoreMethod::InsertString) + if (!tx.prepared(StoreParameterEvent).exists()) { - auto query = hdbpp_internal::pqxx_conn::QueryBuilder::storeParameterEventString( - pqxx::to_string(_conf_id_cache->value(full_attr_name)), - pqxx::to_string(event_time), - pqxx::to_string(label), - enum_labels, - pqxx::to_string(unit), - pqxx::to_string(standard_unit), - pqxx::to_string(display_unit), - pqxx::to_string(format), - pqxx::to_string(archive_rel_change), - pqxx::to_string(archive_abs_change), - pqxx::to_string(archive_period), - pqxx::to_string(description) - ); - - tx.exec0(query); - } - else - { - if (!tx.prepared(StoreParameterEvent).exists()) - { - tx.conn().prepare(StoreParameterEvent, QueryBuilder::storeParameterEventStatement()); - spdlog::trace("Created prepared statement for: {}", StoreParameterEvent); - } - - // a string needs quoting to be stored via this method, so it does not cause - // an error in the prepared statement - vector<string> enum_labels_escaped; - enum_labels_escaped.reserve(enum_labels.size()); - for(const auto &label : enum_labels) - enum_labels_escaped.push_back(tx.esc(label)); - - // no result expected - tx.exec_prepared0(StoreParameterEvent, - _conf_id_cache->value(full_attr_name), - event_time, - label, - enum_labels_escaped, - unit, - standard_unit, - display_unit, - format, - archive_rel_change, - archive_abs_change, - archive_period, - description); - - tx.commit(); + tx.conn().prepare(StoreParameterEvent, QueryBuilder::storeParameterEventStatement()); + spdlog::trace("Created prepared statement for: {}", StoreParameterEvent); } + + // no result expected + tx.exec_prepared0(StoreParameterEvent, + _conf_id_cache->value(full_attr_name), + event_time, + label, + unit, + standard_unit, + display_unit, + format, + archive_rel_change, + archive_abs_change, + archive_period, + description); + + tx.commit(); }); spdlog::debug("Stored parameter event and for attribute {}", full_attr_name); @@ -490,7 +457,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto DbConnection::fetchLastHistoryEvent(const string &full_attr_name) -> string 
+ string DbConnection::fetchLastHistoryEvent(const string &full_attr_name) { assert(!full_attr_name.empty()); assert(_conn != nullptr); @@ -541,7 +508,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto DbConnection::fetchAttributeArchived(const std::string &full_attr_name) -> bool + bool DbConnection::fetchAttributeArchived(const std::string &full_attr_name) { assert(!full_attr_name.empty()); assert(_conn != nullptr); @@ -561,7 +528,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto DbConnection::fetchAttributeTraits(const std::string &full_attr_name) -> AttributeTraits + AttributeTraits DbConnection::fetchAttributeTraits(const std::string &full_attr_name) { assert(!full_attr_name.empty()); assert(_conn != nullptr); diff --git a/src/DbConnection.hpp b/src/DbConnection.hpp index 9e5c268a0dd33cddd3a50720a881794c140a7f1d..2a1b0351f5964f720856d6e328773da9f24e5994 100644 --- a/src/DbConnection.hpp +++ b/src/DbConnection.hpp @@ -61,8 +61,8 @@ namespace pqxx_conn // connection API void connect(const string &connect_string) override; void disconnect() override; - auto isOpen() const noexcept -> bool override { return _connected; } - auto isClosed() const noexcept -> bool override { return !isOpen(); } + bool isOpen() const noexcept override { return _connected; } + bool isClosed() const noexcept override { return !isOpen(); } // storage API @@ -83,7 +83,6 @@ namespace pqxx_conn void storeParameterEvent(const std::string &full_attr_name, double event_time, const std::string &label, - const std::vector<std::string> &enum_labels, const std::string &unit, const std::string &standard_unit, const std::string &display_unit, @@ -117,13 +116,13 @@ namespace pqxx_conn // fetch API // get the last history event for the given attribute - auto fetchLastHistoryEvent(const std::string &full_attr_name) -> std::string; + std::string fetchLastHistoryEvent(const std::string &full_attr_name); // check if the given attribute is stored in the database - auto fetchAttributeArchived(const std::string &full_attr_name) -> bool; + bool fetchAttributeArchived(const std::string &full_attr_name); // get the AttributeTraits of an attribute in the database - auto fetchAttributeTraits(const std::string &full_attr_name) -> AttributeTraits; + AttributeTraits fetchAttributeTraits(const std::string &full_attr_name); private: void storeEvent(const std::string &full_attr_name, const std::string &event); diff --git a/src/DbConnection.tpp b/src/DbConnection.tpp index 587d51733df444831ac3db1a5a5dd5c25b048ef8..00d0e8657b1b7f984968eead312e985355e1cb12 100644 --- a/src/DbConnection.tpp +++ b/src/DbConnection.tpp @@ -159,7 +159,7 @@ namespace pqxx_conn // element and the other an array. 
Further, the unique_ptr may be // empty and signify a null should be stored in the column instead auto store_value = [&tx, &traits, &inv](auto &value) { - if (value && !value->empty()) + if (value && value->size() > 0) { store_data_utils::Store<T>::run(value, traits, inv, tx); } diff --git a/src/HdbClient.cpp b/src/HdbClient.cpp deleted file mode 100644 index 23455698c582be4db9a16c785640f14f03653f74..0000000000000000000000000000000000000000 --- a/src/HdbClient.cpp +++ /dev/null @@ -1,85 +0,0 @@ -/* Copyright (C) : 2014-2019 - European Synchrotron Radiation Facility - BP 220, Grenoble 38043, FRANCE - - This file is part of libhdb++timescale. - - libhdb++timescale is free software: you can redistribute it and/or modify - it under the terms of the Lesser GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - libhdb++timescale is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser - GNU General Public License for more details. - - You should have received a copy of the Lesser GNU General Public License - along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>. */ - -#include "HdbppTimescaleDbApi.hpp" - -#include <hdb++/HdbClient.h> - -using namespace std; - -namespace hdbpp -{ -//============================================================================= -//============================================================================= -HdbClient::HdbClient(const string &id, const vector<string> &configuration) -{ - auto db = new HdbppTimescaleDbApi(id, configuration); - _db = unique_ptr<AbstractDB>(db); -} - -//============================================================================= -//============================================================================= -void HdbClient::insert_event(Tango::EventData *event, const HdbEventDataType &data_type) -{ - _db->insert_event(event, data_type); -} - -//============================================================================= -//============================================================================= -void HdbClient::insert_events(vector<tuple<Tango::EventData *, HdbEventDataType>> events) -{ - _db->insert_events(events); -} - -//============================================================================= -//============================================================================= -void HdbClient::insert_param_event(Tango::AttrConfEventData *data, const HdbEventDataType &data_type) -{ - _db->insert_param_event(data, data_type); -} - -//============================================================================= -//============================================================================= -void HdbClient::add_attribute(const string &name, int type, int format, int write_type) -{ - _db->add_attribute(name, type, format, write_type); -} - -//============================================================================= -//============================================================================= -void HdbClient::update_ttl(const string &name, unsigned int ttl) -{ - _db->update_ttl(name, ttl); -} - -//============================================================================= -//============================================================================= -void HdbClient::insert_history_event(const string &name, unsigned char event) -{ - _db->insert_history_event(move(name), event); 
-} - -//============================================================================= -//============================================================================= -bool HdbClient::supported(HdbppFeatures feature) -{ - return _db->supported(feature); -} - -} // namespace hdbpp diff --git a/src/HdbppTimescaleDb.cpp b/src/HdbppTimescaleDb.cpp index ec383e4f30df4ebea19f3993290b3312e638baac..a4ad29a41f286c0aa48675d7c3df8daa8cab4e72 100644 --- a/src/HdbppTimescaleDb.cpp +++ b/src/HdbppTimescaleDb.cpp @@ -19,22 +19,271 @@ #include "hdb++/HdbppTimescaleDb.hpp" -#include "HdbppTimescaleDbApi.hpp" +#include "DbConnection.hpp" +#include "HdbppTxDataEvent.hpp" +#include "HdbppTxDataEventError.hpp" +#include "HdbppTxHistoryEvent.hpp" +#include "HdbppTxNewAttribute.hpp" +#include "HdbppTxParameterEvent.hpp" +#include "HdbppTxUpdateTtl.hpp" +#include "LibUtils.hpp" + +#include <locale> +#include <memory> +#include <vector> + +using namespace std; +using namespace hdbpp_internal; namespace hdbpp { +// declaring this variable here removes it from the header, and keeps the header clean. +// It is allocated in the constructor. It can be abstracted further to allow easy plug +// in of different backends at a later point +unique_ptr<pqxx_conn::DbConnection> Conn; + +// simple class to gather utility functions that were previously part of HdbppTimescaleDb, +// removes them from the header and keeps it clean for includes +struct HdbppTimescaleDbUtils +{ + static string getConfigParam(const map<string, string> &conf, const string ¶m, bool mandatory); + static map<string, string> extractConfig(vector<string> config, const string &separator); +}; + +//============================================================================= +//============================================================================= +map<string, string> HdbppTimescaleDbUtils::extractConfig(vector<string> config, const string &separator) +{ + map<string, string> results; + + for (auto &item : config) + { + auto found_separator = item.find_first_of(separator); + + if (found_separator != string::npos && found_separator > 0) + results.insert(make_pair(item.substr(0, found_separator), item.substr(found_separator + 1))); + } + + return results; +} + +//============================================================================= +//============================================================================= +string HdbppTimescaleDbUtils::getConfigParam(const map<string, string> &conf, const string ¶m, bool mandatory) +{ + auto iter = conf.find(param); + + if (iter == conf.end() && mandatory) + { + std::string msg {"Configuration parsing error: mandatory configuration parameter: " + param + " not found"}; + Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); + } + + // for non-mandatory config params that have not been set, just return + // an empty string + return iter == conf.end() ? 
"" : (*iter).second; +} + +//============================================================================= +//============================================================================= +HdbppTimescaleDb::HdbppTimescaleDb(const vector<string> &configuration) +{ + auto param_to_lower = [](auto param) { + locale loc; + string tmp; + + for (string::size_type i = 0; i < param.length(); ++i) + tmp += tolower(param[i], loc); + + return tmp; + }; + + // convert the config vector to a map + auto libhdb_conf = HdbppTimescaleDbUtils::extractConfig(configuration, "="); + + // logging_level optional config parameter ---- + auto level = param_to_lower(HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "logging_level", false)); + auto log_file = HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "log_file", false); + auto log_console = HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "log_console", false); + auto log_syslog = HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "log_syslog", false); + auto log_file_name = HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "log_file_name", false); + + // init the base logging system + LogConfigurator::initLogging(); + + if (param_to_lower(log_file) == "true") + LogConfigurator::initFileLogging(log_file_name); + + if (param_to_lower(log_console) == "true") + LogConfigurator::initConsoleLogging(); + + if (param_to_lower(log_syslog) == "true") + LogConfigurator::initSyslogLogging(); + + if (level == "error" || level.empty()) + LogConfigurator::setLoggingLevel(spdlog::level::level_enum::err); + else if (level == "warning") + LogConfigurator::setLoggingLevel(spdlog::level::level_enum::warn); + else if (level == "info") + LogConfigurator::setLoggingLevel(spdlog::level::level_enum::info); + else if (level == "debug") + LogConfigurator::setLoggingLevel(spdlog::level::level_enum::debug); + else if (level == "trace") + LogConfigurator::setLoggingLevel(spdlog::level::level_enum::trace); + else if (level == "disabled") + LogConfigurator::setLoggingLevel(spdlog::level::level_enum::off); + else + LogConfigurator::setLoggingLevel(spdlog::level::level_enum::err); + + spdlog::info("Logging level: {}", level); + spdlog::info("Logging to console: {}", log_console); + spdlog::info("Logging to syslog: {}", log_syslog); + spdlog::info("Logging to file: {}", log_file); + spdlog::info("Logfile (if any): {}", log_file_name); + + spdlog::info("Starting libhdbpp-timescale shared library..."); + + // connect_string mandatory config parameter ---- + auto connection_string = HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "connect_string", true); + spdlog::info("Mandatory config parameter connect_string: {}", connection_string); + + // allocate a connection to store data with + Conn = make_unique<pqxx_conn::DbConnection>(pqxx_conn::DbConnection::DbStoreMethod::PreparedStatement); + + // now bring up the connection + Conn->connect(connection_string); + spdlog::info("Started libhdbpp-timescale shared library successfully"); +} + +//============================================================================= +//============================================================================= +HdbppTimescaleDb::~HdbppTimescaleDb() +{ + if (Conn->isOpen()) + Conn->disconnect(); + + LogConfigurator::shutdownLogging(); +} + +//============================================================================= +//============================================================================= +void HdbppTimescaleDb::insert_Attr(Tango::EventData *event_data, HdbEventDataType event_data_type) +{ + 
assert(event_data); + assert(event_data->attr_value); + spdlog::trace("Insert data event for attribute: {}", event_data->attr_name); + + // if there is an error, we store an error, since there will be no data passed in + if (event_data->err) + { + spdlog::trace("Event type is error for attribute: {}", event_data->attr_name); + + // no time data is passed for errors, so make something up + struct timeval tv + {}; + + struct Tango::TimeVal tango_tv + {}; + + gettimeofday(&tv, nullptr); + tango_tv.tv_sec = tv.tv_sec; + tango_tv.tv_usec = tv.tv_usec; + tango_tv.tv_nsec = 0; + + Conn->createTx<HdbppTxDataEventError>() + .withName(event_data->attr_name) + .withTraits(static_cast<Tango::AttrWriteType>(event_data_type.write_type), + static_cast<Tango::AttrDataFormat>(event_data_type.data_format), + static_cast<Tango::CmdArgType>(event_data_type.data_type)) + .withError(string(event_data->errors[0].desc)) + .withEventTime(tango_tv) + .withQuality(event_data->attr_value->get_quality()) + .store(); + } + else + { + spdlog::trace("Event type is data for attribute: {}", event_data->attr_name); + + // build a data event request, this will store 0 or more data elements, + // pending on type, format and quality + Conn->createTx<HdbppTxDataEvent>() + .withName(event_data->attr_name) + .withTraits(static_cast<Tango::AttrWriteType>(event_data_type.write_type), + static_cast<Tango::AttrDataFormat>(event_data_type.data_format), + static_cast<Tango::CmdArgType>(event_data_type.data_type)) + .withAttribute(event_data->attr_value) + .withEventTime(event_data->attr_value->get_date()) + .withQuality(event_data->attr_value->get_quality()) + .store(); + } +} + +//============================================================================= +//============================================================================= +void HdbppTimescaleDb::insert_param_Attr( + Tango::AttrConfEventData *conf_event_data, HdbEventDataType /* event_data_type */) +{ + assert(conf_event_data); + spdlog::trace("Insert parameter event request for attribute: {}", conf_event_data->attr_name); + + Conn->createTx<HdbppTxParameterEvent>() + .withName(conf_event_data->attr_name) + .withEventTime(conf_event_data->get_date()) + .withAttrInfo(*(conf_event_data->attr_conf)) + .store(); +} + +//============================================================================= +//============================================================================= +void HdbppTimescaleDb::configure_Attr( + std::string fqdn_attr_name, int type, int format, int write_type, unsigned int ttl) +{ + assert(!fqdn_attr_name.empty()); + spdlog::trace("Insert new attribute request for attribute: {}", fqdn_attr_name); + + // forgive the ugly casting, but for some reason we receive the enum values + // already cast to ints, we cast them back to enums so they function as + // enums again + Conn->createTx<HdbppTxNewAttribute>() + .withName(fqdn_attr_name) + .withTraits(static_cast<Tango::AttrWriteType>(write_type), + static_cast<Tango::AttrDataFormat>(format), + static_cast<Tango::CmdArgType>(type)) + .withTtl(ttl) + .store(); +} + +//============================================================================= +//============================================================================= +void HdbppTimescaleDb::updateTTL_Attr(std::string fqdn_attr_name, unsigned int ttl) +{ + assert(!fqdn_attr_name.empty()); + spdlog::trace("TTL event request for attribute: {}, with ttl: {}", fqdn_attr_name, ttl); + + 
Conn->createTx<HdbppTxUpdateTtl>().withName(fqdn_attr_name).withTtl(ttl).store(); +} + +//============================================================================= +//============================================================================= +void HdbppTimescaleDb::event_Attr(std::string fqdn_attr_name, unsigned char event) +{ + assert(!fqdn_attr_name.empty()); + spdlog::trace("History event request for attribute: {}", fqdn_attr_name); + Conn->createTx<HdbppTxHistoryEvent>().withName(fqdn_attr_name).withEvent(event).store(); +} + //============================================================================= //============================================================================= -auto HdbppTimescaleDbFactory::create_db(const string &id, const vector<string> &configuration) -> AbstractDB * +AbstractDB *HdbppTimescaleDbFactory::create_db(vector<string> configuration) { - return new hdbpp::HdbppTimescaleDbApi(id, configuration); + return new HdbppTimescaleDb(configuration); } } // namespace hdbpp //============================================================================= //============================================================================= -auto getDBFactory() -> hdbpp::DBFactory * +DBFactory *getDBFactory() { auto *factory = new hdbpp::HdbppTimescaleDbFactory(); - return static_cast<hdbpp::DBFactory *>(factory); + return static_cast<DBFactory *>(factory); } diff --git a/src/HdbppTimescaleDbApi.cpp b/src/HdbppTimescaleDbApi.cpp deleted file mode 100644 index a3fc7b08e27ba7596e23527bbc5805f8f9f28c4f..0000000000000000000000000000000000000000 --- a/src/HdbppTimescaleDbApi.cpp +++ /dev/null @@ -1,286 +0,0 @@ -/* Copyright (C) : 2014-2019 - European Synchrotron Radiation Facility - BP 220, Grenoble 38043, FRANCE - - This file is part of libhdb++timescale. - - libhdb++timescale is free software: you can redistribute it and/or modify - it under the terms of the Lesser GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - libhdb++timescale is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser - GNU General Public License for more details. - - You should have received a copy of the Lesser GNU General Public License - along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>. 
*/ - -#include "HdbppTimescaleDbApi.hpp" - -#include "DbConnection.hpp" -#include "HdbppTxDataEvent.hpp" -#include "HdbppTxDataEventError.hpp" -#include "HdbppTxHistoryEvent.hpp" -#include "HdbppTxNewAttribute.hpp" -#include "HdbppTxParameterEvent.hpp" -#include "HdbppTxUpdateTtl.hpp" -#include "LibUtils.hpp" - -#include <locale> - -using namespace std; -using namespace hdbpp_internal; - -namespace hdbpp -{ -// simple class to gather utility functions -struct HdbppTimescaleDbApiUtils -{ - static auto getConfigParam(const map<string, string> &conf, const string ¶m, bool mandatory) -> string; - static auto extractConfig(const vector<string> &config, const string &separator) -> map<string, string>; -}; - -//============================================================================= -//============================================================================= -auto HdbppTimescaleDbApiUtils::extractConfig(const vector<string> &config, const string &separator) -> map<string, string> -{ - map<string, string> results; - - for (auto &item : config) - { - auto found_separator = item.find_first_of(separator); - - if (found_separator != string::npos && found_separator > 0) - results.insert(make_pair(item.substr(0, found_separator), item.substr(found_separator + 1))); - } - - return results; -} - -//============================================================================= -//============================================================================= -auto HdbppTimescaleDbApiUtils::getConfigParam(const map<string, string> &conf, const string ¶m, bool mandatory) -> string -{ - auto iter = conf.find(param); - - if (iter == conf.end() && mandatory) - { - std::string msg {"Configuration parsing error: mandatory configuration parameter: " + param + " not found"}; - Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); - } - - // for non-mandatory config params that have not been set, just return - // an empty string - return iter == conf.end() ? 
"" : (*iter).second; -} - -//============================================================================= -//============================================================================= -HdbppTimescaleDbApi::HdbppTimescaleDbApi(const string &/*id*/, const vector<string> &configuration) -{ - auto param_to_lower = [](auto param) { - locale loc; - string tmp; - - for (string::size_type i = 0; i < param.length(); ++i) - tmp += tolower(param[i], loc); - - return tmp; - }; - - // convert the config vector to a map - auto libhdb_conf = HdbppTimescaleDbApiUtils::extractConfig(configuration, "="); - - // logging_level optional config parameter ---- - auto level = param_to_lower(HdbppTimescaleDbApiUtils::getConfigParam(libhdb_conf, "logging_level", false)); - auto log_file = HdbppTimescaleDbApiUtils::getConfigParam(libhdb_conf, "log_file", false); - auto log_console = HdbppTimescaleDbApiUtils::getConfigParam(libhdb_conf, "log_console", false); - auto log_syslog = HdbppTimescaleDbApiUtils::getConfigParam(libhdb_conf, "log_syslog", false); - auto log_file_name = HdbppTimescaleDbApiUtils::getConfigParam(libhdb_conf, "log_file_name", false); - - // init the base logging system - LogConfigurator::initLogging(_identity); - - if (param_to_lower(log_file) == "true") - LogConfigurator::initFileLogging(_identity, log_file_name); - - if (param_to_lower(log_console) == "true") - LogConfigurator::initConsoleLogging(_identity); - - if (param_to_lower(log_syslog) == "true") - LogConfigurator::initSyslogLogging(_identity); - - if (level == "error" || level.empty()) - LogConfigurator::setLoggingLevel(spdlog::level::level_enum::err); - else if (level == "warning") - LogConfigurator::setLoggingLevel(spdlog::level::level_enum::warn); - else if (level == "info") - LogConfigurator::setLoggingLevel(spdlog::level::level_enum::info); - else if (level == "debug") - LogConfigurator::setLoggingLevel(spdlog::level::level_enum::debug); - else if (level == "trace") - LogConfigurator::setLoggingLevel(spdlog::level::level_enum::trace); - else if (level == "disabled") - LogConfigurator::setLoggingLevel(spdlog::level::level_enum::off); - else - LogConfigurator::setLoggingLevel(spdlog::level::level_enum::err); - - spdlog::info("Logging level: {}", level); - spdlog::info("Logging to console: {}", log_console); - spdlog::info("Logging to syslog: {}", log_syslog); - spdlog::info("Logging to file: {}", log_file); - spdlog::info("Logfile (if any): {}", log_file_name); - - spdlog::info("Starting libhdbpp-timescale shared library..."); - - // connect_string mandatory config parameter ---- - auto connection_string = HdbppTimescaleDbApiUtils::getConfigParam(libhdb_conf, "connect_string", true); - spdlog::info("Mandatory config parameter connect_string: {}", connection_string); - - // allocate a connection to store data with - _conn = make_unique<pqxx_conn::DbConnection>(pqxx_conn::DbConnection::DbStoreMethod::PreparedStatement); - - // now bring up the connection - _conn->connect(connection_string); - - spdlog::info("Started libhdbpp-timescale shared library successfully"); -} - -//============================================================================= -//============================================================================= -HdbppTimescaleDbApi::~HdbppTimescaleDbApi() -{ - if (_conn->isOpen()) - _conn->disconnect(); - - LogConfigurator::shutdownLogging(_identity); -} - -//============================================================================= -//============================================================================= 
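For reference, the configuration handed to both versions of the constructor is a plain vector of "key=value" strings: extractConfig() splits each entry on its first '=', so a connect_string value may itself contain further '=' characters, and connect_string is the only mandatory key. A caller-side sketch in C++ (the libpq keywords, host and file path are illustrative assumptions; the keys are exactly the ones read above):

#include <string>
#include <vector>

// Hypothetical caller-side setup for the plugin. Later '=' characters in
// connect_string survive because extractConfig() splits on the first one.
const std::vector<std::string> configuration = {
    "connect_string=user=hdb password=hdb host=hdb.example.org dbname=hdb",
    "logging_level=debug", // error|warning|info|debug|trace|disabled
    "log_console=true",
    "log_file=true",
    "log_file_name=/tmp/hdbpp-timescale.log",
};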
-void HdbppTimescaleDbApi::insert_event(Tango::EventData *event_data, const HdbEventDataType &data_type) -{ - assert(event_data); - assert(event_data->attr_value); - spdlog::trace("Insert data event for attribute: {}", event_data->attr_name); - - // if there is an error, we store an error, since there will be no data passed in - if (event_data->err) - { - spdlog::trace("Event type is error for attribute: {}", event_data->attr_name); - - // no time data is passed for errors, so make something up - struct timeval tv - {}; - - struct Tango::TimeVal tango_tv - {}; - - gettimeofday(&tv, nullptr); - tango_tv.tv_sec = tv.tv_sec; - tango_tv.tv_usec = tv.tv_usec; - tango_tv.tv_nsec = 0; - - _conn->createTx<HdbppTxDataEventError>() - .withName(event_data->attr_name) - .withTraits(static_cast<Tango::AttrWriteType>(data_type.write_type), - static_cast<Tango::AttrDataFormat>(data_type.data_format), - static_cast<Tango::CmdArgType>(data_type.data_type)) - .withError(string(event_data->errors[0].desc)) - .withEventTime(tango_tv) - .withQuality(event_data->attr_value->get_quality()) - .store(); - } - else - { - spdlog::trace("Event type is data for attribute: {}", event_data->attr_name); - - // build a data event request, this will store 0 or more data elements, - // pending on type, format and quality - _conn->createTx<HdbppTxDataEvent>() - .withName(event_data->attr_name) - .withTraits(static_cast<Tango::AttrWriteType>(data_type.write_type), - static_cast<Tango::AttrDataFormat>(data_type.data_format), - static_cast<Tango::CmdArgType>(data_type.data_type)) - .withAttribute(event_data->attr_value) - .withEventTime(event_data->attr_value->get_date()) - .withQuality(event_data->attr_value->get_quality()) - .store(); - } -} - -//============================================================================= -//============================================================================= -void HdbppTimescaleDbApi::insert_events(vector<tuple<Tango::EventData *, HdbEventDataType>> events) {} - -//============================================================================= -//============================================================================= -void HdbppTimescaleDbApi::insert_param_event( - Tango::AttrConfEventData *param_event, const HdbEventDataType & /* data_type */) -{ - assert(param_event); - spdlog::trace("Insert parameter event request for attribute: {}", param_event->attr_name); - - _conn->createTx<HdbppTxParameterEvent>() - .withName(param_event->attr_name) - .withEventTime(param_event->get_date()) - .withAttrInfo(*(param_event->attr_conf)) - .store(); -} - -//============================================================================= -//============================================================================= -void HdbppTimescaleDbApi::add_attribute(const std::string &fqdn_attr_name, int type, int format, int write_type) -{ - assert(!fqdn_attr_name.empty()); - spdlog::trace("Insert new attribute request for attribute: {}", fqdn_attr_name); - - // forgive the ugly casting, but for some reason we receive the enum values - // already cast to ints, we cast them back to enums so they function as - // enums again - _conn->createTx<HdbppTxNewAttribute>() - .withName(fqdn_attr_name) - .withTraits(static_cast<Tango::AttrWriteType>(write_type), - static_cast<Tango::AttrDataFormat>(format), - static_cast<Tango::CmdArgType>(type)) - .withTtl(0) - .store(); -} - -//============================================================================= 
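Every storage routine in both versions of this file has the same shape: ask the connection for a transaction object, parameterise it through withX() setters, then call store(). A condensed usage sketch (recordPause and the attribute name are hypothetical; the chain mirrors the history-event calls in this file):

#include "DbConnection.hpp"
#include "HdbppTxHistoryEvent.hpp"

using namespace hdbpp_internal;

// Store a pause event for a made-up attribute; any failure surfaces as a
// Tango exception thrown out of store()
void recordPause(pqxx_conn::DbConnection &conn)
{
    conn.createTx<HdbppTxHistoryEvent>()
        .withName("tango://hdb.example.org:10000/sys/tg_test/1/double_scalar")
        .withEvent(events::PauseEvent)
        .store();
}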
-//============================================================================= -void HdbppTimescaleDbApi::update_ttl(const std::string &fqdn_attr_name, unsigned int ttl) -{ - assert(!fqdn_attr_name.empty()); - spdlog::trace("TTL event request for attribute: {}, with ttl: {}", fqdn_attr_name, ttl); - _conn->createTx<HdbppTxUpdateTtl>().withName(fqdn_attr_name).withTtl(ttl).store(); -} - -//============================================================================= -//============================================================================= -void HdbppTimescaleDbApi::insert_history_event(const std::string &fqdn_attr_name, unsigned char event) -{ - assert(!fqdn_attr_name.empty()); - spdlog::trace("History event request for attribute: {}", fqdn_attr_name); - _conn->createTx<HdbppTxHistoryEvent>().withName(fqdn_attr_name).withEvent(event).store(); -} - -//============================================================================= -//============================================================================= -auto HdbppTimescaleDbApi::supported(HdbppFeatures feature) -> bool -{ - auto supported = false; - - switch (feature) - { - case HdbppFeatures::TTL: supported = true; break; - - case HdbppFeatures::BATCH_INSERTS: supported = true; break; - } - - return supported; -} - -} // namespace hdbpp diff --git a/src/HdbppTimescaleDbApi.hpp b/src/HdbppTimescaleDbApi.hpp deleted file mode 100644 index 07d405301fb619768b6343cf56b9fa43b7e9acf6..0000000000000000000000000000000000000000 --- a/src/HdbppTimescaleDbApi.hpp +++ /dev/null @@ -1,80 +0,0 @@ -/* Copyright (C) : 2014-2019 - European Synchrotron Radiation Facility - BP 220, Grenoble 38043, FRANCE - - This file is part of libhdb++timescale. - - libhdb++timescale is free software: you can redistribute it and/or modify - it under the terms of the Lesser GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - libhdb++timescale is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser - GNU General Public License for more details. - - You should have received a copy of the Lesser GNU General Public License - along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>. */ - -#ifndef _HDBPP_TIMESCALE_IMPL_HPP -#define _HDBPP_TIMESCALE_IMPL_HPP - -#include "DbConnection.hpp" - -#include <hdb++/AbstractDB.h> -#include <memory> -#include <string> -#include <tango.h> -#include <vector> - -namespace hdbpp -{ -class HdbppTimescaleDbApi : public AbstractDB -{ -public: - // Takes a list of configuration parameters to start the driver with - HdbppTimescaleDbApi(const string &id, const std::vector<std::string> &configuration); - - ~HdbppTimescaleDbApi() override; - - // Inserts an attribute archive event for the EventData into the database. If the attribute - // does not exist in the database, then an exception will be raised. If the attr_value - // field of the data parameter if empty, then the attribute is in an error state - // and the error message will be archived. - void insert_event(Tango::EventData *event_data, const HdbEventDataType &data_type) override; - - // Insert multiple attribute archive events. Any attributes that do not exist will - // cause an exception. 
On failure the fallback is to insert events individually
-    void insert_events(std::vector<std::tuple<Tango::EventData *, HdbEventDataType>> events) override;
-
-    // Inserts the attribute configuration data (Tango Attribute Configuration event data)
-    // into the database. The attribute must be configured to be stored in HDB++,
-    // otherwise an exception will be thrown.
-    void insert_param_event(Tango::AttrConfEventData *param_event, const HdbEventDataType & /* data_type */) override;
-
-    // Add an attribute to the database. Trying to add an attribute that already exists will
-    // cause an exception.
-    void add_attribute(const std::string &fqdn_attr_name, int type, int format, int write_type) override;
-
-    // Update the attribute ttl. The attribute must have been configured to be stored in
-    // HDB++, otherwise an exception is raised.
-    void update_ttl(const std::string &fqdn_attr_name, unsigned int ttl) override;
-
-    // Inserts a history event for the attribute name passed to the function. The attribute
-    // must have been configured to be stored in HDB++, otherwise an exception is raised.
-    // This function will also insert an additional CRASH history event before the START
-    // history event if the given event parameter is DB_START and if the last history event
-    // stored was also a START event.
-    void insert_history_event(const std::string &fqdn_attr_name, unsigned char event) override;
-
-    // Check what hdbpp features this library supports. This library supports: TTL, BATCH_INSERTS
-    auto supported(HdbppFeatures feature) -> bool override;
-
-private:
-    std::unique_ptr<hdbpp_internal::pqxx_conn::DbConnection> _conn;
-    std::string _identity;
-};
-
-} // namespace hdbpp
-#endif // _HDBPP_TIMESCALE_IMPL_HPP
diff --git a/src/HdbppTxBase.hpp b/src/HdbppTxBase.hpp
index 7440d0674b438eea72862b423052a3fc682e632b..3716390a0837a831c609db79be3ccddd23681abb 100644
--- a/src/HdbppTxBase.hpp
+++ b/src/HdbppTxBase.hpp
@@ -47,21 +47,21 @@ public:
     // simple feedback that the transaction was successful. Most
     // errors are handled with exceptions which are thrown to the
     // tx creator.
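The fluent chains work because the shared setters, shown in this hunk and in the HdbppTxDataEventBase hunk that follows, return the derived transaction type via CRTP. Stripped to its essentials the mechanism looks like this (TxBase and TxExample are placeholder names for the sketch, not classes from this patch):

#include <string>

template<typename Conn, template<typename> class Derived>
class TxBase
{
public:
    // returning Derived<Conn>& rather than TxBase& keeps the chain fully
    // typed, so setters declared only on the derived class stay reachable
    Derived<Conn> &withName(const std::string &name)
    {
        _name = name;
        return static_cast<Derived<Conn> &>(*this);
    }

private:
    std::string _name;
};

template<typename Conn>
class TxExample : public TxBase<Conn, TxExample>
{
public:
    TxExample<Conn> &store() { return *this; } // the real classes write to the db here
};

// usage: TxExample<int>().withName("some/attr").store();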
- auto result() const noexcept -> bool { return _result; }; + bool result() const noexcept { return _result; }; virtual void print(std::ostream &os) const noexcept { os << "HdbppTxBase(_result: " << _result << ")"; } protected: // access functions for the connection the transaction // is templated with - auto connection() -> Conn & { return _conn; } - auto connection() const -> const Conn & { return _conn; } + Conn &connection() { return _conn; } + const Conn &connection() const { return _conn; } void setResult(bool state) noexcept { _result = state; } // small helper to generate the attribute name for the db consistently // across all the different tx classes - static auto attrNameForStorage(AttributeName &attr_name) -> std::string + static std::string attrNameForStorage(AttributeName &attr_name) { return "tango://" + attr_name.tangoHostWithDomain() + "/" + attr_name.fullAttributeName(); } diff --git a/src/HdbppTxDataEvent.hpp b/src/HdbppTxDataEvent.hpp index 4500cbc31cf9abaf0cadfc3e66498e8ea2bca44c..47f352ea2c20fde3bbf0fa4c6e253fb6e77d11d1 100644 --- a/src/HdbppTxDataEvent.hpp +++ b/src/HdbppTxDataEvent.hpp @@ -38,7 +38,7 @@ private: public: HdbppTxDataEvent(Conn &conn) : HdbppTxDataEventBase<Conn, HdbppTxDataEvent>(conn) {} - auto withAttribute(Tango::DeviceAttribute *dev_attr) -> HdbppTxDataEvent<Conn> & + HdbppTxDataEvent<Conn> &withAttribute(Tango::DeviceAttribute *dev_attr) { // just set the pointer here, we will do a full event data extraction at // point of storage, this reduces complexity but limits the functionality, i.e @@ -48,7 +48,7 @@ public: } // trigger the database storage routines - auto store() -> HdbppTxDataEvent<Conn> &; + HdbppTxDataEvent<Conn> &store(); void print(std::ostream &os) const noexcept override; @@ -66,7 +66,7 @@ private: //============================================================================= //============================================================================= template<typename Conn> -auto HdbppTxDataEvent<Conn>::store() -> HdbppTxDataEvent<Conn> & +HdbppTxDataEvent<Conn> &HdbppTxDataEvent<Conn>::store() { if (Base::attributeName().empty()) { @@ -143,7 +143,7 @@ auto HdbppTxDataEvent<Conn>::store() -> HdbppTxDataEvent<Conn> & break; - case Tango::DEV_ENUM: this->template doStore<int16_t>(read_extractor, write_extractor); break; + //case Tango::DEV_ENUM: this->template doStoreEnum<?>(); break; // TODO //case Tango::DEV_ENCODED: this->template doStoreEncoded<vector<uint8_t>>(); break; // TODO default: std::string msg { @@ -230,7 +230,7 @@ void HdbppTxDataEvent<Conn>::print(std::ostream &os) const noexcept os << "HdbppTxDataEvent(base: "; HdbppTxDataEventBase<Conn, HdbppTxDataEvent>::print(os); -os << ")"; + os << ")"; } } // namespace hdbpp_internal diff --git a/src/HdbppTxDataEventBase.hpp b/src/HdbppTxDataEventBase.hpp index bb74c6a279be190c966ceb71e785cd771603760c..8d0fac95574a25370e5b205c6ca8e5adaf559621 100644 --- a/src/HdbppTxDataEventBase.hpp +++ b/src/HdbppTxDataEventBase.hpp @@ -42,32 +42,32 @@ public: HdbppTxDataEventBase(Conn &conn) : HdbppTxBase<Conn>(conn) {} - auto withName(const std::string &fqdn_attr_name) -> Derived<Conn> & + Derived<Conn> &withName(const std::string &fqdn_attr_name) { _attr_name = AttributeName {fqdn_attr_name}; return static_cast<Derived<Conn> &>(*this); } - auto withTraits(Tango::AttrWriteType write, Tango::AttrDataFormat format, Tango::CmdArgType type) -> Derived<Conn> & + Derived<Conn> &withTraits(Tango::AttrWriteType write, Tango::AttrDataFormat format, Tango::CmdArgType type) { _traits = 
AttributeTraits(write, format, type); return static_cast<Derived<Conn> &>(*this); } - auto withTraits(AttributeTraits &traits) -> Derived<Conn> & + Derived<Conn> &withTraits(AttributeTraits &traits) { _traits = traits; return static_cast<Derived<Conn> &>(*this); } - auto withEventTime(Tango::TimeVal tv) -> Derived<Conn> & + Derived<Conn> &withEventTime(Tango::TimeVal tv) { // convert to something more usable _event_time = tv.tv_sec + tv.tv_usec / 1.0e6; return static_cast<Derived<Conn> &>(*this); } - auto withQuality(Tango::AttrQuality quality) -> Derived<Conn> & + Derived<Conn> &withQuality(Tango::AttrQuality quality) { _quality = quality; return static_cast<Derived<Conn> &>(*this); @@ -78,10 +78,10 @@ public: protected: // release the private data safely for the derived classes - auto attributeName() -> AttributeName & { return _attr_name; } - auto attributeTraits() const -> const AttributeTraits & { return _traits; } - auto quality() const -> Tango::AttrQuality { return _quality; } - auto eventTime() const -> double { return _event_time; } + AttributeName &attributeName() { return _attr_name; } + const AttributeTraits &attributeTraits() const { return _traits; } + Tango::AttrQuality quality() const { return _quality; } + double eventTime() const { return _event_time; } private: AttributeName _attr_name; diff --git a/src/HdbppTxDataEventError.hpp b/src/HdbppTxDataEventError.hpp index fd4de28b3c360238f67c8aa5f8cdd2180ea0adc3..69820f1668cd723a60eda4a036acb37338030570 100644 --- a/src/HdbppTxDataEventError.hpp +++ b/src/HdbppTxDataEventError.hpp @@ -37,14 +37,14 @@ private: public: HdbppTxDataEventError(Conn &conn) : HdbppTxDataEventBase<Conn, HdbppTxDataEventError>(conn) {} - auto withError(const std::string &error_msg) -> HdbppTxDataEventError<Conn> & + HdbppTxDataEventError<Conn> &withError(const std::string &error_msg) { _error_msg = error_msg; return *this; } // trigger the database storage routines - auto store() -> HdbppTxDataEventError<Conn> &; + HdbppTxDataEventError<Conn> &store(); /// @brief Print the HdbppTxDataEventError object to the stream void print(std::ostream &os) const noexcept override; @@ -56,7 +56,7 @@ private: //============================================================================= //============================================================================= template<typename Conn> -auto HdbppTxDataEventError<Conn>::store() -> HdbppTxDataEventError<Conn> & +HdbppTxDataEventError<Conn> &HdbppTxDataEventError<Conn>::store() { if (Base::attributeName().empty()) { diff --git a/src/HdbppTxFactory.hpp b/src/HdbppTxFactory.hpp index d93395e7d0e6519376c6ef67ee72a572d4753f04..eb5a510b4c5061c41822437ac349aafe7d925c31 100644 --- a/src/HdbppTxFactory.hpp +++ b/src/HdbppTxFactory.hpp @@ -31,7 +31,7 @@ public: // this generic method creates transaction objects based on the template // parameter. Any parameters are forward directly to the new object template<template<typename> class Class, typename... Params> - auto createTx(Params &&... params) -> Class<Conn> + Class<Conn> createTx(Params &&... 
params) { return Class<Conn>((static_cast<Conn &>(*this)), std::forward<Params>(params)...); } diff --git a/src/HdbppTxHistoryEvent.hpp b/src/HdbppTxHistoryEvent.hpp index 27b02afeacae34eb8f1a148c5a758df18818c507..95041f19e79717b3c7b4377958ab44ef64c90f88 100644 --- a/src/HdbppTxHistoryEvent.hpp +++ b/src/HdbppTxHistoryEvent.hpp @@ -39,7 +39,7 @@ public: HdbppTxHistoryEvent(Conn &conn) : HdbppTxBase<Conn>(conn) {} - auto withName(const std::string &fqdn_attr_name) -> HdbppTxHistoryEvent<Conn> & + HdbppTxHistoryEvent<Conn> &withName(const std::string &fqdn_attr_name) { _attr_name = AttributeName {fqdn_attr_name}; return *this; @@ -47,17 +47,17 @@ public: // this overload converts the event types defined in libhdb to // usable strings - auto withEvent(unsigned char event) -> HdbppTxHistoryEvent<Conn> &; + HdbppTxHistoryEvent<Conn> &withEvent(unsigned char event); // allow the adding of any type of event - auto withEvent(const std::string &event) -> HdbppTxHistoryEvent<Conn> & + HdbppTxHistoryEvent<Conn> &withEvent(const std::string &event) { _event = event; return *this; } // trigger the database storage routines - auto store() -> HdbppTxHistoryEvent<Conn> &; + HdbppTxHistoryEvent<Conn> &store(); /// @brief Print the HdbppTxHistoryEvent object to the stream void print(std::ostream &os) const noexcept override; @@ -70,7 +70,7 @@ private: //============================================================================= //============================================================================= template<typename Conn> -auto HdbppTxHistoryEvent<Conn>::withEvent(unsigned char event) -> HdbppTxHistoryEvent<Conn> & +HdbppTxHistoryEvent<Conn> &HdbppTxHistoryEvent<Conn>::withEvent(unsigned char event) { // convert the unsigned char history type of a string, we will store the event // based on this string, so its simpler to extract the data at a later point @@ -84,7 +84,8 @@ auto HdbppTxHistoryEvent<Conn>::withEvent(unsigned char event) -> HdbppTxHistory case libhdbpp_compatibility::HdbppInsertParam: _event = events::InsertParamEvent; break; case libhdbpp_compatibility::HdbppPause: _event = events::PauseEvent; break; case libhdbpp_compatibility::HdbppUpdateTTL: _event = events::UpdateTTLEvent; break; - default: { + default: + { std::string msg {"Unknown event type passed, unable to convert this into known event system"}; spdlog::error("Error: {}", msg); Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO); @@ -97,7 +98,7 @@ auto HdbppTxHistoryEvent<Conn>::withEvent(unsigned char event) -> HdbppTxHistory //============================================================================= //============================================================================= template<typename Conn> -auto HdbppTxHistoryEvent<Conn>::store() -> HdbppTxHistoryEvent<Conn> & +HdbppTxHistoryEvent<Conn> &HdbppTxHistoryEvent<Conn>::store() { if (_attr_name.empty()) { diff --git a/src/HdbppTxNewAttribute.hpp b/src/HdbppTxNewAttribute.hpp index 587114cf2d9e75d40ba88478769d3cc03c9000ca..bbcced70b259a9a9ec9ab38f69c5a0a45ec922ef 100644 --- a/src/HdbppTxNewAttribute.hpp +++ b/src/HdbppTxNewAttribute.hpp @@ -37,27 +37,27 @@ class HdbppTxNewAttribute : public HdbppTxBase<Conn> public: HdbppTxNewAttribute(Conn &conn) : HdbppTxBase<Conn>(conn) {} - auto withName(const std::string &fqdn_attr_name) -> HdbppTxNewAttribute<Conn> & + HdbppTxNewAttribute<Conn> &withName(const std::string &fqdn_attr_name) { _attr_name = AttributeName {fqdn_attr_name}; return *this; } - auto withTraits( - Tango::AttrWriteType write, 
Tango::AttrDataFormat format, Tango::CmdArgType type) -> HdbppTxNewAttribute<Conn> & + HdbppTxNewAttribute<Conn> &withTraits( + Tango::AttrWriteType write, Tango::AttrDataFormat format, Tango::CmdArgType type) { _traits = AttributeTraits(write, format, type); return *this; } - auto withTtl(unsigned int ttl) -> HdbppTxNewAttribute<Conn> & + HdbppTxNewAttribute<Conn> &withTtl(unsigned int ttl) { _ttl = ttl; return *this; } // trigger the database storage routines - auto store() -> HdbppTxNewAttribute<Conn> &; + HdbppTxNewAttribute<Conn> &store(); /// @brief Print the HdbppTxNewAttribute object to the stream void print(std::ostream &os) const noexcept override; @@ -71,7 +71,7 @@ private: //============================================================================= //============================================================================= template<typename Conn> -auto HdbppTxNewAttribute<Conn>::store() -> HdbppTxNewAttribute<Conn> & +HdbppTxNewAttribute<Conn> &HdbppTxNewAttribute<Conn>::store() { if (_attr_name.empty()) { @@ -107,7 +107,7 @@ auto HdbppTxNewAttribute<Conn>::store() -> HdbppTxNewAttribute<Conn> & } // unsupported types - if (_traits.type() == Tango::DEV_ENCODED) + if (_traits.type() == Tango::DEV_ENUM || _traits.type() == Tango::DEV_ENCODED) { std::string msg {"Unsupported attribute type: " + tangoEnumToString(_traits.type()) + ". For attribute: " + _attr_name.fqdnAttributeName()}; diff --git a/src/HdbppTxParameterEvent.hpp b/src/HdbppTxParameterEvent.hpp index 2ee37b9ae3480c1770494e657d74f1d9f68f6f72..709e3998851a7c3ab0abcddca3cca18daf771c65 100644 --- a/src/HdbppTxParameterEvent.hpp +++ b/src/HdbppTxParameterEvent.hpp @@ -38,20 +38,20 @@ class HdbppTxParameterEvent : public HdbppTxBase<Conn> public: HdbppTxParameterEvent(Conn &conn) : HdbppTxBase<Conn>(conn) {} - auto withName(const std::string &fqdn_attr_name) -> HdbppTxParameterEvent<Conn> & + HdbppTxParameterEvent<Conn> &withName(const std::string &fqdn_attr_name) { _attr_name = AttributeName {fqdn_attr_name}; return *this; } - auto withAttrInfo(const Tango::AttributeInfoEx &attr_conf) -> HdbppTxParameterEvent<Conn> & + HdbppTxParameterEvent<Conn> &withAttrInfo(const Tango::AttributeInfoEx &attr_conf) { _attr_info_ex = attr_conf; _attr_info_ex_set = true; return *this; } - auto withEventTime(Tango::TimeVal tv) -> HdbppTxParameterEvent<Conn> & + HdbppTxParameterEvent<Conn> &withEventTime(Tango::TimeVal tv) { // convert to a double that can be passed on to the storage api _event_time = tv.tv_sec + tv.tv_usec / 1.0e6; @@ -59,7 +59,7 @@ public: } // trigger the database storage routines - auto store() -> HdbppTxParameterEvent<Conn> &; + HdbppTxParameterEvent<Conn> &store(); /// @brief Print the HdbppTxParameterEvent object to the stream void print(std::ostream &os) const noexcept override; @@ -83,7 +83,7 @@ private: //============================================================================= //============================================================================= template<typename Conn> -auto HdbppTxParameterEvent<Conn>::store() -> HdbppTxParameterEvent<Conn> & +HdbppTxParameterEvent<Conn> &HdbppTxParameterEvent<Conn>::store() { if (_attr_name.empty()) { @@ -118,7 +118,6 @@ auto HdbppTxParameterEvent<Conn>::store() -> HdbppTxParameterEvent<Conn> & HdbppTxBase<Conn>::connection().storeParameterEvent(HdbppTxBase<Conn>::attrNameForStorage(_attr_name), _event_time, _attr_info_ex.label, - _attr_info_ex.enum_labels, _attr_info_ex.unit, _attr_info_ex.standard_unit, _attr_info_ex.display_unit, diff --git 
a/src/HdbppTxUpdateTtl.hpp b/src/HdbppTxUpdateTtl.hpp index 883976dcf1634cdf066ed97e879fff4793effd97..d957585bb22cc6e4d6e4bf3688cf0c05531f3040 100644 --- a/src/HdbppTxUpdateTtl.hpp +++ b/src/HdbppTxUpdateTtl.hpp @@ -36,20 +36,20 @@ class HdbppTxUpdateTtl : public HdbppTxBase<Conn> public: HdbppTxUpdateTtl(Conn &conn) : HdbppTxBase<Conn>(conn) {} - auto withName(const std::string &fqdn_attr_name) -> HdbppTxUpdateTtl<Conn> & + HdbppTxUpdateTtl<Conn> &withName(const std::string &fqdn_attr_name) { _attr_name = AttributeName {fqdn_attr_name}; return *this; } - auto withTtl(unsigned int ttl) -> HdbppTxUpdateTtl<Conn> & + HdbppTxUpdateTtl<Conn> &withTtl(unsigned int ttl) { _ttl = ttl; return *this; } // trigger the database storage routines - auto store() -> HdbppTxUpdateTtl<Conn> &; + HdbppTxUpdateTtl<Conn> &store(); /// @brief Print the HdbppTxUpdateTtl object to the stream void print(std::ostream &os) const noexcept override; @@ -64,7 +64,7 @@ private: //============================================================================= //============================================================================= template<typename Conn> -auto HdbppTxUpdateTtl<Conn>::store() -> HdbppTxUpdateTtl<Conn> & +HdbppTxUpdateTtl<Conn> &HdbppTxUpdateTtl<Conn>::store() { if (_attr_name.empty()) { diff --git a/src/LibUtils.cpp b/src/LibUtils.cpp index 72492b8706d71e1ea650de2d655ef7e2ffecde89..84e1e98177bdd029598fd50a7ad11a7891e6ecf5 100644 --- a/src/LibUtils.cpp +++ b/src/LibUtils.cpp @@ -30,7 +30,7 @@ namespace hdbpp_internal { //============================================================================= //============================================================================= -auto tangoEnumToString(Tango::AttrWriteType write_type) -> string +string tangoEnumToString(Tango::AttrWriteType write_type) { switch (write_type) { @@ -46,7 +46,7 @@ auto tangoEnumToString(Tango::AttrWriteType write_type) -> string //============================================================================= //============================================================================= -auto tangoEnumToString(Tango::AttrDataFormat format) -> string +string tangoEnumToString(Tango::AttrDataFormat format) { switch (format) { @@ -61,7 +61,7 @@ auto tangoEnumToString(Tango::AttrDataFormat format) -> string //============================================================================= //============================================================================= -auto tangoEnumToString(Tango::CmdArgType type) -> string +string tangoEnumToString(Tango::CmdArgType type) { switch (type) { @@ -87,7 +87,7 @@ auto tangoEnumToString(Tango::CmdArgType type) -> string //============================================================================= //============================================================================= -auto tangoEnumToString(Tango::AttrQuality quality) -> string +string tangoEnumToString(Tango::AttrQuality quality) { switch (quality) { @@ -103,7 +103,7 @@ auto tangoEnumToString(Tango::AttrQuality quality) -> string //============================================================================= //============================================================================= -auto operator<<(ostream &os, Tango::AttrWriteType write_type) -> ostream & +ostream &operator<<(ostream &os, Tango::AttrWriteType write_type) { os << tangoEnumToString(write_type); return os; @@ -111,7 +111,7 @@ auto operator<<(ostream &os, Tango::AttrWriteType write_type) -> ostream & 
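These operators allow Tango enums to be streamed straight into ostreams and log messages. A usage sketch (the printed spellings are whatever the tangoEnumToString() switch cases return, so the sample output below is indicative only):

#include "LibUtils.hpp"

#include <sstream>
#include <string>

using namespace hdbpp_internal;

std::string describeTraits()
{
    std::ostringstream os;
    os << Tango::READ << "/" << Tango::SCALAR << "/" << Tango::DEV_DOUBLE;
    return os.str(); // e.g. "READ/SCALAR/DEV_DOUBLE"
}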
//============================================================================= //============================================================================= -auto operator<<(ostream &os, Tango::AttrDataFormat format) -> ostream & +ostream &operator<<(ostream &os, Tango::AttrDataFormat format) { os << tangoEnumToString(format); return os; @@ -119,7 +119,7 @@ auto operator<<(ostream &os, Tango::AttrDataFormat format) -> ostream & //============================================================================= //============================================================================= -auto operator<<(ostream &os, Tango::CmdArgType type) -> ostream & +ostream &operator<<(ostream &os, Tango::CmdArgType type) { os << tangoEnumToString(type); return os; @@ -127,7 +127,7 @@ auto operator<<(ostream &os, Tango::CmdArgType type) -> ostream & //============================================================================= //============================================================================= -auto operator<<(ostream &os, Tango::AttrQuality quality) -> ostream & +ostream &operator<<(ostream &os, Tango::AttrQuality quality) { os << tangoEnumToString(quality); return os; @@ -135,9 +135,9 @@ auto operator<<(ostream &os, Tango::AttrQuality quality) -> ostream & //============================================================================= //============================================================================= -void LogConfigurator::initLogging(const std::string &identity) +void LogConfigurator::initLogging() { - auto logger = spdlog::get(logging_utils::LibLoggerName + "_" + identity); + auto logger = spdlog::get(logging_utils::LibLoggerName); if (!logger) { @@ -147,7 +147,7 @@ void LogConfigurator::initLogging(const std::string &identity) auto dist_sink = make_shared<spdlog::sinks::dist_sink_mt>(); - auto logger = make_shared<spdlog::async_logger>(logging_utils::LibLoggerName + "_" + identity, + auto logger = make_shared<spdlog::async_logger>(logging_utils::LibLoggerName, dist_sink, spdlog::thread_pool(), spdlog::async_overflow_policy::overrun_oldest); @@ -169,15 +169,13 @@ void LogConfigurator::initLogging(const std::string &identity) //============================================================================= //============================================================================= -void LogConfigurator::initSyslogLogging(const std::string &identity) +void LogConfigurator::initSyslogLogging() { try { - auto logger = spdlog::get(logging_utils::LibLoggerName + "_" + identity); + auto logger = spdlog::get(logging_utils::LibLoggerName); auto &sinks_tmp = dynamic_pointer_cast<spdlog::sinks::dist_sink_mt>(*(logger->sinks().begin()))->sinks(); - - sinks_tmp.push_back( - make_shared<spdlog::sinks::syslog_sink_mt>(logging_utils::SyslogIdent + identity, 0, LOG_USER, false)); + sinks_tmp.push_back(make_shared<spdlog::sinks::syslog_sink_mt>(logging_utils::SyslogIdent, 0, LOG_USER, false)); } catch (const spdlog::spdlog_ex &ex) { @@ -189,11 +187,11 @@ void LogConfigurator::initSyslogLogging(const std::string &identity) //============================================================================= //============================================================================= -void LogConfigurator::initConsoleLogging(const std::string &identity) +void LogConfigurator::initConsoleLogging() { try { - auto logger = spdlog::get(logging_utils::LibLoggerName + "_" + identity); + auto logger = spdlog::get(logging_utils::LibLoggerName); auto &sinks_tmp = 
dynamic_pointer_cast<spdlog::sinks::dist_sink_mt>(*(logger->sinks().begin()))->sinks(); sinks_tmp.push_back(make_shared<spdlog::sinks::stdout_color_sink_mt>()); } @@ -207,11 +205,11 @@ void LogConfigurator::initConsoleLogging(const std::string &identity) //============================================================================= //============================================================================= -void LogConfigurator::initFileLogging(const std::string &identity, const std::string &log_file_name) +void LogConfigurator::initFileLogging(const std::string &log_file_name) { try { - auto logger = spdlog::get(logging_utils::LibLoggerName + "_" + identity); + auto logger = spdlog::get(logging_utils::LibLoggerName); auto &sinks_tmp = dynamic_pointer_cast<spdlog::sinks::dist_sink_mt>(*(logger->sinks().begin()))->sinks(); sinks_tmp.push_back(make_shared<spdlog::sinks::rotating_file_sink_mt>(log_file_name, 1024 * 1024 * 10, 3)); } @@ -225,9 +223,9 @@ void LogConfigurator::initFileLogging(const std::string &identity, const std::st //============================================================================= //============================================================================= -void LogConfigurator::shutdownLogging(const std::string &identity) +void LogConfigurator::shutdownLogging() { - auto logger = spdlog::get(logging_utils::LibLoggerName + "_" + identity); + auto logger = spdlog::get(logging_utils::LibLoggerName); if (!logger) { diff --git a/src/LibUtils.hpp b/src/LibUtils.hpp index 19f1cfffa369d109f4a71d261ccf1c2224808c6f..8ec200f4a64bfd76bb0762bbfeba738cf839caca 100644 --- a/src/LibUtils.hpp +++ b/src/LibUtils.hpp @@ -47,25 +47,25 @@ auto operator<<(std::ostream &os, const T &t) -> decltype(t.print(os), static_ca } // to_string functions for tango enums -auto tangoEnumToString(Tango::AttrWriteType write_type) -> std::string; -auto tangoEnumToString(Tango::AttrDataFormat format) -> std::string; -auto tangoEnumToString(Tango::CmdArgType type) -> std::string; -auto tangoEnumToString(Tango::AttrQuality quality) -> std::string; +std::string tangoEnumToString(Tango::AttrWriteType write_type); +std::string tangoEnumToString(Tango::AttrDataFormat format); +std::string tangoEnumToString(Tango::CmdArgType type); +std::string tangoEnumToString(Tango::AttrQuality quality); // some output operators for tango enums -auto operator<<(std::ostream &os, Tango::AttrWriteType write_type) -> std::ostream &; -auto operator<<(std::ostream &os, Tango::AttrDataFormat format) -> std::ostream &; -auto operator<<(std::ostream &os, Tango::CmdArgType type) -> std::ostream &; -auto operator<<(std::ostream &os, Tango::AttrQuality quality) -> std::ostream &; +std::ostream &operator<<(std::ostream &os, Tango::AttrWriteType write_type); +std::ostream &operator<<(std::ostream &os, Tango::AttrDataFormat format); +std::ostream &operator<<(std::ostream &os, Tango::CmdArgType type); +std::ostream &operator<<(std::ostream &os, Tango::AttrQuality quality); struct LogConfigurator { - static void initLogging(const std::string &identity); - static void initSyslogLogging(const std::string &identity); - static void initConsoleLogging(const std::string &identity); - static void initFileLogging(const std::string &identity, const std::string &log_file_name); + static void initLogging(); + static void initSyslogLogging(); + static void initConsoleLogging(); + static void initFileLogging(const std::string &log_file_name); - static void shutdownLogging(const std::string &identity); + static void shutdownLogging(); static 
void setLoggingLevel(spdlog::level::level_enum level); }; @@ -73,10 +73,10 @@ namespace logging_utils { // SPDLOG config and setup const std::string LibLoggerName = "hdbpp"; - const std::string SyslogIdent = "hdbpp-timescale-"; + const std::string SyslogIdent = "hdbpp-timescale"; // get the file name from the __FILE__ variable for error messages - constexpr auto getFileName(const char *const path) -> auto * + constexpr auto *getFileName(const char *const path) { // We silence clang warnings for this funciton, this is a quick and simple // way to produce the file name, and yes we use pointer arithmetic, but diff --git a/src/PqxxExtension.hpp b/src/PqxxExtension.hpp index 285e497b8669a18ee742207632b20dc5e806abdb..1b7092914025c02bdbdd5a455f721f3f64ad6b61 100644 --- a/src/PqxxExtension.hpp +++ b/src/PqxxExtension.hpp @@ -144,7 +144,7 @@ template<typename T> struct string_traits<std::vector<T>> { public: - static constexpr auto name() noexcept -> const char * { return internal::type_name<T>::value; } + static constexpr const char *name() noexcept { return internal::type_name<T>::value; } // NOLINTNEXTLINE (readability-identifier-naming) static constexpr bool has_null() noexcept { return false; } @@ -152,7 +152,7 @@ public: // NOLINTNEXTLINE (readability-identifier-naming) static bool is_null(const std::vector<T> & /*unused*/) { return false; } - [[noreturn]] static auto null() -> std::vector<T> { internal::throw_null_conversion(name()); } + [[noreturn]] static std::vector<T> null() { internal::throw_null_conversion(name()); } // NOLINTNEXTLINE (readability-identifier-naming) static void from_string(const char str[], std::vector<T> &value) @@ -209,7 +209,7 @@ template<> struct string_traits<std::vector<std::string>> { public: - static constexpr auto name() noexcept -> const char * { return "vector<string>"; } + static constexpr const char *name() noexcept { return "vector<string>"; } // NOLINTNEXTLINE (readability-identifier-naming) static constexpr bool has_null() noexcept { return false; } @@ -217,7 +217,7 @@ public: // NOLINTNEXTLINE (readability-identifier-naming) static bool is_null(const std::vector<std::string> & /*unused*/) { return false; } - [[noreturn]] static auto null() -> std::vector<std::string> { internal::throw_null_conversion(name()); } + [[noreturn]] static std::vector<std::string> null() { internal::throw_null_conversion(name()); } // NOLINTNEXTLINE (readability-identifier-naming) static void from_string(const char str[], std::vector<std::string> &value) @@ -272,7 +272,7 @@ template<> struct string_traits<std::vector<bool>> { public: - static constexpr auto name() noexcept -> const char * { return "std::vector<bool>"; } + static constexpr const char *name() noexcept { return "std::vector<bool>"; } // NOLINTNEXTLINE (readability-identifier-naming) static constexpr bool has_null() noexcept { return false; } @@ -280,7 +280,7 @@ public: // NOLINTNEXTLINE (readability-identifier-naming) static bool is_null(const std::vector<bool> & /*unused*/) { return false; } - [[noreturn]] static auto null() -> std::vector<bool> { internal::throw_null_conversion(name()); } + [[noreturn]] static std::vector<bool> null() { internal::throw_null_conversion(name()); } // NOLINTNEXTLINE (readability-identifier-naming) static void from_string(const char str[], std::vector<bool> &value) diff --git a/src/QueryBuilder.cpp b/src/QueryBuilder.cpp index d360bce0eebbedc13e00e755cc1f51082b4731d7..625eef4aad17f3bacc3bc165dc5c51282dd8d788 100644 --- a/src/QueryBuilder.cpp +++ b/src/QueryBuilder.cpp @@ -34,79 
+34,79 @@ namespace pqxx_conn // this is important for the custom types, since the library libpqxx and postgres will // not know how to store them. template<> - auto postgresCast<double>(bool is_array) -> std::string + std::string postgresCast<double>(bool is_array) { return is_array ? "float8[]" : "float8"; } template<> - auto postgresCast<float>(bool is_array) -> std::string + std::string postgresCast<float>(bool is_array) { return is_array ? "float4[]" : "float4"; } template<> - auto postgresCast<string>(bool is_array) -> std::string + std::string postgresCast<string>(bool is_array) { return is_array ? "text[]" : "text"; } template<> - auto postgresCast<bool>(bool is_array) -> std::string + std::string postgresCast<bool>(bool is_array) { return is_array ? "bool[]" : "bool"; } template<> - auto postgresCast<int32_t>(bool is_array) -> std::string + std::string postgresCast<int32_t>(bool is_array) { return is_array ? "int4[]" : "int4"; } template<> - auto postgresCast<uint32_t>(bool is_array) -> std::string + std::string postgresCast<uint32_t>(bool is_array) { return is_array ? "ulong[]" : "ulong"; } template<> - auto postgresCast<int64_t>(bool is_array) -> std::string + std::string postgresCast<int64_t>(bool is_array) { return is_array ? "int8[]" : "int8"; } template<> - auto postgresCast<uint64_t>(bool is_array) -> std::string + std::string postgresCast<uint64_t>(bool is_array) { return is_array ? "ulong64[]" : "ulong64"; } template<> - auto postgresCast<int16_t>(bool is_array) -> std::string + std::string postgresCast<int16_t>(bool is_array) { return is_array ? "int2[]" : "int2"; } template<> - auto postgresCast<uint16_t>(bool is_array) -> std::string + std::string postgresCast<uint16_t>(bool is_array) { return is_array ? "ushort[]" : "ushort"; } template<> - auto postgresCast<uint8_t>(bool is_array) -> std::string + std::string postgresCast<uint8_t>(bool is_array) { return is_array ? "uchar[]" : "uchar"; } template<> - auto postgresCast<vector<uint8_t>>(bool is_array) -> std::string + std::string postgresCast<vector<uint8_t>>(bool is_array) { return is_array ? "bytea[]" : "bytea"; } template<> - auto postgresCast<Tango::DevState>(bool is_array) -> std::string + std::string postgresCast<Tango::DevState>(bool is_array) { return is_array ? 
"int4[]" : "int4"; } @@ -114,7 +114,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::storeDataEventName(const AttributeTraits &traits) -> const string & + const string &QueryBuilder::storeDataEventName(const AttributeTraits &traits) { // generic check and emplace for new items return handleCache(_data_event_query_names, traits, StoreDataEvent); @@ -122,7 +122,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::storeDataEventErrorName(const AttributeTraits &traits) -> const string & + const string &QueryBuilder::storeDataEventErrorName(const AttributeTraits &traits) { // generic check and emplace for new items return handleCache(_data_event_error_query_names, traits, StoreDataEventError); @@ -130,7 +130,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::storeAttributeStatement() -> const string & + const string &QueryBuilder::storeAttributeStatement() { // clang-format off static string query = @@ -168,7 +168,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::storeHistoryStringStatement() -> const string & + const string &QueryBuilder::storeHistoryStringStatement() { // clang-format off static string query = @@ -182,7 +182,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::storeHistoryEventStatement() -> const string & + const string &QueryBuilder::storeHistoryEventStatement() { // clang-format off static string query = @@ -201,7 +201,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::storeParameterEventStatement() -> const string & + const string &QueryBuilder::storeParameterEventStatement() { // clang-format off static string query = @@ -210,7 +210,6 @@ namespace pqxx_conn schema::ParamColId + "," + schema::ParamColEvTime + "," + schema::ParamColLabel + "," + - schema::ParamColEnumLabels + "," + schema::ParamColUnit + "," + schema::ParamColStandardUnit + "," + schema::ParamColDisplayUnit + "," + @@ -219,76 +218,15 @@ namespace pqxx_conn schema::ParamColArchiveAbsChange + "," + schema::ParamColArchivePeriod + "," + schema::ParamColDescription + ") " + - - "VALUES ($1, TO_TIMESTAMP($2), $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)"; + "VALUES ($1, TO_TIMESTAMP($2), $3, $4, $5, $6, $7, $8, $9, $10, $11)"; // clang-format on - return query; - } - - //============================================================================= - //============================================================================= - auto QueryBuilder::storeParameterEventString(const std::string &full_attr_name, - const std::string &event_time, - const std::string &label, - const std::vector<std::string> &enum_labels, - const std::string &unit, - const std::string &standard_unit, - const std::string 
&display_unit, - const std::string &format, - const std::string &archive_rel_change, - const std::string &archive_abs_change, - const std::string &archive_period, - const std::string &description) -> const string & - { - // clang-format off - static string query = - "INSERT INTO " + - schema::ParamTableName + " (" + - schema::ParamColId + "," + - schema::ParamColEvTime + "," + - schema::ParamColLabel + "," + - schema::ParamColEnumLabels + "," + - schema::ParamColUnit + "," + - schema::ParamColStandardUnit + "," + - schema::ParamColDisplayUnit + "," + - schema::ParamColFormat + "," + - schema::ParamColArchiveRelChange + "," + - schema::ParamColArchiveAbsChange + "," + - schema::ParamColArchivePeriod + "," + - schema::ParamColDescription + ") " + - "VALUES ('" + full_attr_name + "'"; - - query = query + ",TO_TIMESTAMP(" + event_time + ")"; - - query = query + ",'" + label + "'"; - auto iter = enum_labels.begin(); - string result = "ARRAY["; - - result = result + "$$" + pqxx::to_string((*iter)) + "$$"; - - for (++iter; iter != enum_labels.end(); ++iter) - { - result += ","; - result += "$$" + pqxx::to_string((*iter)) + "$$"; - } - result += "]"; - query = query + "," + result + "::text[]"; - query = query + ",'" + unit + "'"; - query = query + ",'" + standard_unit + "'"; - query = query + ",'" + display_unit + "'"; - query = query + ",'" + format + "'"; - query = query + ",'" + archive_rel_change + "'"; - query = query + ",'" + archive_abs_change + "'"; - query = query + ",'" + archive_period + "'"; - query = query + ",'" + description + "')"; - return query; } //============================================================================= //============================================================================= - auto QueryBuilder::storeDataEventErrorStatement(const AttributeTraits &traits) -> const string & + const string &QueryBuilder::storeDataEventErrorStatement(const AttributeTraits &traits) { // search the cache for a previous entry auto result = _data_event_error_queries.find(traits); @@ -325,7 +263,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::storeErrorStatement() -> const string & + const string &QueryBuilder::storeErrorStatement() { // clang-format off static string query = @@ -338,7 +276,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::storeTtlStatement() -> const string & + const std::string &QueryBuilder::storeTtlStatement() { // clang-format off static string query = @@ -351,23 +289,23 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::fetchAllValuesStatement( - const string &column_name, const string &table_name, const string &reference) -> const string + const string QueryBuilder::fetchAllValuesStatement( + const string &column_name, const string &table_name, const string &reference) { return "SELECT " + column_name + ", " + reference + " " + "FROM " + table_name; } //============================================================================= //============================================================================= - auto QueryBuilder::fetchValueStatement( - const string &column_name, const string 
&table_name, const string &reference) -> const string + const string QueryBuilder::fetchValueStatement( + const string &column_name, const string &table_name, const string &reference) { return "SELECT " + column_name + " " + "FROM " + table_name + " WHERE " + reference + "=$1"; } //============================================================================= //============================================================================= - auto QueryBuilder::fetchLastHistoryEventStatement() -> const string & + const string &QueryBuilder::fetchLastHistoryEventStatement() { // clang-format off static string query = @@ -385,7 +323,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::fetchAttributeTraitsStatement() -> const string & + const std::string &QueryBuilder::fetchAttributeTraitsStatement() { // clang-format off static string query = @@ -415,7 +353,7 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::tableName(const AttributeTraits &traits) -> string + string QueryBuilder::tableName(const AttributeTraits &traits) { return schema::SchemaTablePrefix + [&traits]() { @@ -453,8 +391,8 @@ namespace pqxx_conn //============================================================================= //============================================================================= - auto QueryBuilder::handleCache( - map<AttributeTraits, string> &cache, const AttributeTraits &traits, const string &stub) -> const string & + const string &QueryBuilder::handleCache( + map<AttributeTraits, string> &cache, const AttributeTraits &traits, const string &stub) { auto result = cache.find(traits); diff --git a/src/QueryBuilder.hpp b/src/QueryBuilder.hpp index e7bbe0792cc81c43f0e7f4d50a15abc5430dd268..0dc52f9543a55347826a70a767fcd79068207844 100644 --- a/src/QueryBuilder.hpp +++ b/src/QueryBuilder.hpp @@ -37,7 +37,7 @@ namespace std template<> struct less<hdbpp_internal::AttributeTraits> { - auto operator()(const hdbpp_internal::AttributeTraits &lhs, const hdbpp_internal::AttributeTraits &rhs) -> bool + bool operator()(const hdbpp_internal::AttributeTraits &lhs, const hdbpp_internal::AttributeTraits &rhs) const { auto a = lhs.type(); auto b = lhs.writeType(); @@ -59,7 +59,7 @@ namespace pqxx_conn // This function generates the postgres cast for the event data insert // queries, it is specialized for all possible tango types template<typename T> - auto postgresCast(bool is_array) -> std::string; + std::string postgresCast(bool is_array); // Convert the given data into a string suitable for storing in the database. These calls // are used to build the string version of the insert command, they are required since we @@ -68,9 +68,9 @@ namespace pqxx_conn template<typename T> struct DataToString { - static auto run(std::unique_ptr<std::vector<T>> &value, bool is_array) -> std::string + static std::string run(std::unique_ptr<std::vector<T>> &value, const AttributeTraits &traits) { - if (! 
is_array)
+            if (traits.isScalar())
                 return pqxx::to_string((*value)[0]);
 
             return "'" + pqxx::to_string(value) + "'";
@@ -81,13 +81,13 @@ namespace pqxx_conn
     template<>
     struct DataToString<bool>
     {
-        static auto run(std::unique_ptr<std::vector<bool>> &value, bool is_array) -> std::string
+        static std::string run(std::unique_ptr<std::vector<bool>> &value, const AttributeTraits &traits)
         {
             // a vector<bool> is not actually a vector<bool>; rather, it's some kind of bitfield. When
             // trying to return an element, we appear to get some kind of bitfield reference,
             // so we copy the value to a local variable to remove the reference to the bitfield, and
             // this ensures it's actually a bool passed into the conversion framework
-            if (!is_array)
+            if (traits.isScalar())
             {
                 bool v = (*value)[0];
                 return pqxx::to_string(v);
@@ -103,12 +103,12 @@ namespace pqxx_conn
     template<>
     struct DataToString<std::string>
     {
-        static auto run(std::unique_ptr<std::vector<std::string>> &value, bool is_array) -> std::string
+        static std::string run(std::unique_ptr<std::vector<std::string>> &value, const AttributeTraits &traits)
         {
             // arrays of strings need both the ARRAY keyword and dollar escaping, so that we
             // do not have to rely on the postgres escape functions, which double and then store
             // escaped characters. This is a mess when extracting the array of strings.
-            if (!is_array)
+            if (traits.isScalar())
             {
                 // use dollars to ensure it saves
                 return "$$" + pqxx::to_string((*value)[0]) + "$$";
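The scalar/array split above is the crux of `DataToString`: a scalar is rendered as a bare literal, an array as a quoted Postgres array (the real builder then appends a cast via `postgresCast<T>()`), with `vector<bool>` and `std::string` needing special cases. Below is a minimal standalone sketch of the same pattern, not the library code: `Traits` is a hypothetical stand-in for `AttributeTraits`, and plain iostream formatting replaces `pqxx::to_string()`.

```cpp
// Standalone sketch of the DataToString pattern (assumptions noted above).
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

struct Traits
{
    bool scalar = true;
    bool isScalar() const { return scalar; }
};

// Generic case: scalars become a bare literal, arrays a quoted Postgres
// array literal (the real code then appends a cast such as ::float8[]).
template<typename T>
struct DataToString
{
    static std::string run(std::unique_ptr<std::vector<T>> &value, const Traits &traits)
    {
        std::ostringstream os;

        if (traits.isScalar())
        {
            os << (*value)[0];
            return os.str();
        }

        os << "'{";
        for (std::size_t i = 0; i < value->size(); ++i)
            os << (i ? "," : "") << (*value)[i];
        os << "}'";
        return os.str();
    }
};

// vector<bool> is a packed bitfield, so (*value)[0] returns a proxy object;
// copying into a local bool first hands the formatter a real bool.
template<>
struct DataToString<bool>
{
    static std::string run(std::unique_ptr<std::vector<bool>> &value, const Traits &traits)
    {
        if (traits.isScalar())
        {
            bool v = (*value)[0]; // strip the proxy reference
            return v ? "true" : "false";
        }

        std::string result = "'{";
        for (std::size_t i = 0; i < value->size(); ++i)
            result += std::string(i ? "," : "") + ((*value)[i] ? "true" : "false");
        return result + "}'";
    }
};

int main()
{
    auto value = std::make_unique<std::vector<bool>>(std::vector<bool> {true});
    std::cout << DataToString<bool>::run(value, Traits {}) << '\n'; // prints "true"
}
```

In the real builder, an empty value vector instead yields `NULL` in the generated INSERT, as exercised by the QueryBuilderTests changes further down.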
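The non-static builder functions declared in the hunk that follows all funnel through `handleCache()`, keyed on `AttributeTraits` via the `std::less<AttributeTraits>` specialisation shown earlier in this header. A minimal standalone sketch of that caching pattern; `TraitsKey` is a simplified hypothetical stand-in for `AttributeTraits`, and the "query" built here is just a placeholder string:

```cpp
// Sketch of a traits-keyed query cache (TraitsKey is an assumption,
// standing in for AttributeTraits).
#include <iostream>
#include <map>
#include <string>
#include <tuple>

struct TraitsKey
{
    int type;       // e.g. Tango::DEV_DOUBLE
    int format;     // e.g. Tango::SCALAR
    int write_type; // e.g. Tango::READ

    // Strict weak ordering so the key can be used in std::map, mirroring
    // the std::less<AttributeTraits> specialisation in this header.
    bool operator<(const TraitsKey &rhs) const
    {
        return std::tie(type, format, write_type) <
            std::tie(rhs.type, rhs.format, rhs.write_type);
    }
};

// Build the query once per distinct traits combination, then serve the
// cached string by const reference on subsequent calls.
const std::string &cachedQuery(
    std::map<TraitsKey, std::string> &cache, const TraitsKey &key, const std::string &stub)
{
    auto result = cache.find(key);

    if (result == cache.end())
        result = cache.emplace(key, stub + "_" + std::to_string(key.type) + "_" +
            std::to_string(key.format) + "_" + std::to_string(key.write_type)).first;

    return result->second;
}

int main()
{
    std::map<TraitsKey, std::string> cache;
    TraitsKey key {1, 0, 0};
    std::cout << cachedQuery(cache, key, "qry") << '\n'; // built on first call
    std::cout << cachedQuery(cache, key, "qry") << '\n'; // served from the cache
}
```

The payoff is that `storeDataEventStatement()` pays the string-building cost once per distinct type/format/write-type combination and afterwards returns a stable const reference.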
@@ -156,69 +156,56 @@ namespace pqxx_conn
 
         // these builder functions require no caching, so can be simple static
         // functions
-        static auto tableName(const AttributeTraits &traits) -> std::string;
-        static auto storeAttributeStatement() -> const std::string &;
-        static auto storeHistoryEventStatement() -> const std::string &;
-        static auto storeHistoryStringStatement() -> const std::string &;
-        static auto storeErrorStatement() -> const std::string &;
-        static auto storeTtlStatement() -> const std::string &;
-        static auto fetchLastHistoryEventStatement() -> const std::string &;
-        static auto fetchAttributeTraitsStatement() -> const std::string &;
-
-        static auto storeParameterEventStatement() -> const std::string &;
-        static auto storeParameterEventString(const std::string &full_attr_name,
-            const std::string &event_time,
-            const std::string &label,
-            const std::vector<std::string> &enum_labels,
-            const std::string &unit,
-            const std::string &standard_unit,
-            const std::string &display_unit,
-            const std::string &format,
-            const std::string &archive_rel_change,
-            const std::string &archive_abs_change,
-            const std::string &archive_period,
-            const std::string &description
-            ) -> const std::string &;
-        static auto fetchValueStatement(
-            const std::string &column_name, const std::string &table_name, const std::string &reference) -> const std::string;
-
-        static auto fetchAllValuesStatement(
-            const std::string &column_name, const std::string &table_name, const std::string &reference) -> const std::string;
+        static std::string tableName(const AttributeTraits &traits);
+        static const std::string &storeAttributeStatement();
+        static const std::string &storeHistoryEventStatement();
+        static const std::string &storeHistoryStringStatement();
+        static const std::string &storeParameterEventStatement();
+        static const std::string &storeErrorStatement();
+        static const std::string &storeTtlStatement();
+        static const std::string &fetchLastHistoryEventStatement();
+        static const std::string &fetchAttributeTraitsStatement();
+
+        static const std::string fetchValueStatement(
+            const std::string &column_name, const std::string &table_name, const std::string &reference);
+
+        static const std::string fetchAllValuesStatement(
+            const std::string &column_name, const std::string &table_name, const std::string &reference);
 
         // Non-static prepared statements
         // these builder functions cache the built queries, therefore they
         // are not static like the others, since they require data storage
-        auto storeDataEventName(const AttributeTraits &traits) -> const std::string &;
-        auto storeDataEventErrorName(const AttributeTraits &traits) -> const std::string &;
+        const std::string &storeDataEventName(const AttributeTraits &traits);
+        const std::string &storeDataEventErrorName(const AttributeTraits &traits);
 
         // Builds a prepared statement for the given traits; the statement is cached
         // internally to improve execution time
         template<typename T>
-        auto storeDataEventStatement(const AttributeTraits &traits) -> const std::string &;
+        const std::string &storeDataEventStatement(const AttributeTraits &traits);
 
         // A variant of storeDataEventStatement that builds a string based on the
         // parameters, which is then passed back to the caller to be executed. No
         // internal caching, so it's less efficient, but it can be chained in a pipe
         // to batch data to the database.
         template<typename T>
-        auto storeDataEventString(const std::string &full_attr_name,
+        const std::string storeDataEventString(const std::string &full_attr_name,
             const std::string &event_time,
             const std::string &quality,
             std::unique_ptr<vector<T>> &value_r,
             std::unique_ptr<vector<T>> &value_w,
-            const AttributeTraits &traits) -> const std::string;
+            const AttributeTraits &traits);
 
         // Builds a prepared statement for data event errors
-        auto storeDataEventErrorStatement(const AttributeTraits &traits) -> const std::string &;
+        const std::string &storeDataEventErrorStatement(const AttributeTraits &traits);
 
         // Utility
         void print(std::ostream &os) const noexcept;
 
     private:
         // generic function to handle caching items into the cache maps
-        auto handleCache(
-            std::map<AttributeTraits, std::string> &cache, const AttributeTraits &traits, const std::string &stub) -> const std::string &;
+        const string &handleCache(
+            std::map<AttributeTraits, std::string> &cache, const AttributeTraits &traits, const std::string &stub);
 
         // cached query names, these are built from the traits object
         std::map<AttributeTraits, std::string> _data_event_query_names;
@@ -232,7 +219,7 @@ namespace pqxx_conn
     //=============================================================================
     //=============================================================================
     template<typename T>
-    auto QueryBuilder::storeDataEventStatement(const AttributeTraits &traits) -> const std::string &
+    const string &QueryBuilder::storeDataEventStatement(const AttributeTraits &traits)
     {
         // search the cache for a previous entry
         auto result = _data_event_queries.find(traits);
@@ -283,12 +270,12 @@ namespace pqxx_conn
     }
 
     template<typename T>
-    auto QueryBuilder::storeDataEventString(const std::string &full_attr_name,
+    const std::string QueryBuilder::storeDataEventString(const std::string &full_attr_name,
         const std::string &event_time,
         const std::string &quality,
         std::unique_ptr<vector<T>> &value_r,
         std::unique_ptr<vector<T>> &value_w,
-        const AttributeTraits &traits) -> const std::string
+        const AttributeTraits &traits)
     {
         auto query = "INSERT INTO " + QueryBuilder::tableName(traits) + " (" + schema::DatColId + "," +
             schema::DatColDataTime;
@@ -312,7 +299,7 @@ namespace
pqxx_conn } else { - query = query + "," + query_utils::DataToString<T>::run(value_r, traits.isArray()) + + query = query + "," + query_utils::DataToString<T>::run(value_r, traits) + "::" + query_utils::postgresCast<T>(traits.isArray()); } } @@ -326,7 +313,7 @@ namespace pqxx_conn } else { - query = query + "," + query_utils::DataToString<T>::run(value_w, traits.isArray()) + + query = query + "," + query_utils::DataToString<T>::run(value_w, traits) + "::" + query_utils::postgresCast<T>(traits.isArray()); } } diff --git a/src/TimescaleSchema.hpp b/src/TimescaleSchema.hpp index 472162c710b4b449caba2aefae4c502d0f19e58e..6a8e48b20ca22c15540b2da51aa83cc2cbbde3c2 100644 --- a/src/TimescaleSchema.hpp +++ b/src/TimescaleSchema.hpp @@ -102,7 +102,6 @@ namespace pqxx_conn const std::string ParamColInsTime = "insert_time"; const std::string ParamColEvTime = "recv_time"; const std::string ParamColLabel = "label"; - const std::string ParamColEnumLabels = "enum_labels"; const std::string ParamColUnit = "unit"; const std::string ParamColStandardUnit = "standard_unit"; const std::string ParamColDisplayUnit = "display_unit"; diff --git a/test/DbConnectionTests.cpp b/test/DbConnectionTests.cpp index 84267ab67c5235148d18107d4e75c9ae91fbefd3..95ad208a851f6c03af337bfca499e4c35298190d 100644 --- a/test/DbConnectionTests.cpp +++ b/test/DbConnectionTests.cpp @@ -26,7 +26,6 @@ #include "catch2/catch.hpp" #include <cfloat> -#include <locale> #include <pqxx/pqxx> #include <string> #include <tuple> @@ -478,69 +477,6 @@ TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture, SUCCEED("Passed"); } -TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture, - "Storing Attributes in the database in uppercase", - "[db-access][hdbpp-db-access][db-connection]") -{ - DbConnection conn(DbConnection::DbStoreMethod::PreparedStatement); - AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_STRING}; - - REQUIRE_NOTHROW(clearTables()); - - auto param_to_upper = [](auto param) { - locale loc; - string tmp; - - for (string::size_type i = 0; i < param.length(); ++i) - tmp += toupper(param[i], loc); - - return tmp; - }; - - testConn().storeAttribute(param_to_upper(attr_name::TestAttrFinalName), - param_to_upper(attr_name::TestAttrCs), - param_to_upper(attr_name::TestAttrDomain), - param_to_upper(attr_name::TestAttrFamily), - param_to_upper(attr_name::TestAttrMember), - param_to_upper(attr_name::TestAttrName), - 100, - traits); - - { - pqxx::work tx {verifyConn()}; - auto attr_row(tx.exec1("SELECT * FROM " + schema::ConfTableName)); - - auto type_row(tx.exec1("SELECT " + schema::ConfTypeColTypeId + " FROM " + schema::ConfTypeTableName + - " WHERE " + schema::ConfTypeColTypeNum + " = " + std::to_string(traits.type()))); - - auto format_row(tx.exec1("SELECT " + schema::ConfFormatColFormatId + " FROM " + schema::ConfFormatTableName + - " WHERE " + schema::ConfFormatColFormatNum + " = " + std::to_string(traits.formatType()))); - - auto access_row(tx.exec1("SELECT " + schema::ConfWriteColWriteId + " FROM " + schema::ConfWriteTableName + - " WHERE " + schema::ConfWriteColWriteNum + " = " + std::to_string(traits.writeType()))); - - tx.commit(); - - REQUIRE(attr_row.at(schema::ConfColName).as<string>() == param_to_upper(attr_name::TestAttrFQDName)); - REQUIRE(attr_row.at(schema::ConfColCsName).as<string>() == param_to_upper(attr_name::TestAttrCs)); - REQUIRE(attr_row.at(schema::ConfColDomain).as<string>() == param_to_upper(attr_name::TestAttrDomain)); - REQUIRE(attr_row.at(schema::ConfColFamily).as<string>() == 
param_to_upper(attr_name::TestAttrFamily)); - REQUIRE(attr_row.at(schema::ConfColMember).as<string>() == param_to_upper(attr_name::TestAttrMember)); - REQUIRE(attr_row.at(schema::ConfColLastName).as<string>() == param_to_upper(attr_name::TestAttrName)); - REQUIRE(attr_row.at(schema::ConfColTableName).as<string>() == QueryBuilder().tableName(traits)); - - REQUIRE(attr_row.at(schema::ConfColTypeId).as<int>() == type_row.at(schema::ConfTypeColTypeId).as<int>()); - - REQUIRE(attr_row.at(schema::ConfColFormatTypeId).as<int>() == - format_row.at(schema::ConfFormatColFormatId).as<int>()); - - REQUIRE( - attr_row.at(schema::ConfColWriteTypeId).as<int>() == access_row.at(schema::ConfWriteColWriteId).as<int>()); - } - - SUCCEED("Passed"); -} - TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture, "Storing a series of the same History Events in the database successfully", "[db-access][hdbpp-db-access][db-connection]") @@ -657,7 +593,6 @@ TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture, REQUIRE_NOTHROW(testConn().storeParameterEvent(attr_name::TestAttrFinalName, event_time, attr_info::AttrInfoLabel, - attr_info::AttrInfoEnumLabels, attr_info::AttrInfoUnit, attr_info::AttrInfoStandardUnit, attr_info::AttrInfoDisplayUnit, @@ -676,8 +611,6 @@ TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture, // TODO check event time //REQUIRE(param_row.at(schema::ParamColEvTime).as<double>() == event_time); REQUIRE(param_row.at(schema::ParamColLabel).as<string>() == attr_info::AttrInfoLabel); - // TODO check enum labels - REQUIRE(param_row.at(schema::ParamColEnumLabels).as<vector<string>>() == attr_info::AttrInfoEnumLabels); REQUIRE(param_row.at(schema::ParamColUnit).as<string>() == attr_info::AttrInfoUnit); REQUIRE(param_row.at(schema::ParamColStandardUnit).as<string>() == attr_info::AttrInfoStandardUnit); REQUIRE(param_row.at(schema::ParamColDisplayUnit).as<string>() == attr_info::AttrInfoDisplayUnit); @@ -694,7 +627,6 @@ TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture, REQUIRE_NOTHROW(testConn().storeParameterEvent(attr_name::TestAttrFinalName, event_time, attr_info::AttrInfoLabel, - attr_info::AttrInfoEnumLabels, attr_info::AttrInfoUnit, attr_info::AttrInfoStandardUnit, attr_info::AttrInfoDisplayUnit, @@ -730,7 +662,6 @@ TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture, REQUIRE_THROWS_AS(conn.storeParameterEvent(attr_name::TestAttrFinalName, event_time, attr_info::AttrInfoLabel, - attr_info::AttrInfoEnumLabels, attr_info::AttrInfoUnit, attr_info::AttrInfoStandardUnit, attr_info::AttrInfoDisplayUnit, @@ -1285,4 +1216,4 @@ TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture, REQUIRE_NOTHROW(clearTables()); REQUIRE_THROWS(testConn().storeAttributeTtl(attr_name::TestAttrFQDName, 100)); SUCCEED("Passed"); -} +} \ No newline at end of file diff --git a/test/HdbppTxDataEventTests.cpp b/test/HdbppTxDataEventTests.cpp index 6531309a0d68977f3bc21f85e0e512971561e680..4d06bce9128c1952c1e75064efebb8d77489286f 100644 --- a/test/HdbppTxDataEventTests.cpp +++ b/test/HdbppTxDataEventTests.cpp @@ -85,9 +85,8 @@ Tango::DeviceAttribute createDeviceAttribute(const AttributeTraits &traits) return Tango::DeviceAttribute( TestAttrFQDName.c_str(), *generateSpectrumData<Tango::DEV_STATE>(false, size_x + size_y)); - case Tango::DEV_ENUM: - return Tango::DeviceAttribute( - TestAttrFQDName.c_str(), *generateSpectrumData<Tango::DEV_ENUM>(false, size_x + size_y)); + //case Tango::DEV_ENUM: + //return Tango::DeviceAttribute(TestAttrFQDName.c_str(), *generateSpectrumData<Tango::DEV_ENUM>(false, size_x 
+ size_y)); //case Tango::DEV_ENCODED: //return Tango::DeviceAttribute(TestAttrFQDName.c_str(), *generateSpectrumData<Tango::DEV_ENCODED>(false, size_x + size_y)); @@ -440,8 +439,8 @@ TEST_CASE("Creating HdbppTxDataEvents for each tango type and storing them", "[d Tango::DEV_USHORT, Tango::DEV_UCHAR, Tango::DEV_STATE, - /* Tango::DEV_ENCODED,*/ - Tango::DEV_ENUM}; + /* Tango::DEV_ENCODED, + Tango::DEV_ENUM */}; vector<Tango::AttrWriteType> write_types {Tango::READ, Tango::WRITE, Tango::READ_WRITE, Tango::READ_WITH_WRITE}; vector<Tango::AttrDataFormat> format_types {Tango::SCALAR, Tango::SPECTRUM}; diff --git a/test/HdbppTxParameterEventTests.cpp b/test/HdbppTxParameterEventTests.cpp index 1d42215e7c2606514fbc21387b15b1d6452d3d24..90e9eca28000f3dfa412e9b0f4af7d05641f6ac2 100644 --- a/test/HdbppTxParameterEventTests.cpp +++ b/test/HdbppTxParameterEventTests.cpp @@ -44,7 +44,6 @@ AttributeInfoEx createAttributeInfoEx() AttributeInfoEx attr_info; attr_info.description = AttrInfoDescription; attr_info.label = AttrInfoLabel; - attr_info.enum_labels = AttrInfoEnumLabels; attr_info.unit = AttrInfoUnit; attr_info.standard_unit = AttrInfoStandardUnit; attr_info.display_unit = AttrInfoDisplayUnit; @@ -69,7 +68,6 @@ public: void storeParameterEvent(const string &full_attr_name, double event_time, const string &label, - const vector<string> &enum_labels, const string &unit, const string &standard_unit, const string &display_unit, @@ -85,7 +83,6 @@ public: att_name = full_attr_name; att_event_time = event_time; att_label = label; - att_enum_labels = enum_labels; att_unit = unit; att_standard_unit = standard_unit; att_display_unit = display_unit; @@ -101,7 +98,6 @@ public: string att_name; double att_event_time = 0; string att_label; - vector<string> att_enum_labels; string att_unit; string att_standard_unit; string att_display_unit; @@ -155,7 +151,6 @@ SCENARIO("Construct and store HdbppTxParameterEvent data without error", "[hdbpp REQUIRE(conn.att_name == TestAttrFinalName); REQUIRE(conn.att_event_time == (tango_tv.tv_sec + tango_tv.tv_usec / 1.0e6)); REQUIRE(conn.att_label == AttrInfoLabel); - REQUIRE(conn.att_enum_labels == AttrInfoEnumLabels); REQUIRE(conn.att_unit == AttrInfoUnit); REQUIRE(conn.att_standard_unit == AttrInfoStandardUnit); REQUIRE(conn.att_display_unit == AttrInfoDisplayUnit); @@ -275,4 +270,4 @@ SCENARIO("HdbppTxParameterEvent Simulated exception received", "[hdbpp-tx][hdbpp THEN("An exception is raised") { REQUIRE_THROWS_AS(tx.store(), runtime_error); } } } -} +} \ No newline at end of file diff --git a/test/QueryBuilderTests.cpp b/test/QueryBuilderTests.cpp index f7bf0bbbc9f96aa93d9cb39f3928b269ad605ebf..3daaf18169959df25440bb2fa93e64d507c985ef 100644 --- a/test/QueryBuilderTests.cpp +++ b/test/QueryBuilderTests.cpp @@ -49,7 +49,7 @@ SCENARIO("storeDataEventString() returns the correct Value fields for the given { REQUIRE_THAT(result, Contains(schema::DatColValueR)); REQUIRE_THAT(result, !Contains(schema::DatColValueW)); - REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits.isArray()))); + REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits))); } } WHEN("Requesting a query string for traits configured for Tango::WRITE") @@ -63,7 +63,7 @@ SCENARIO("storeDataEventString() returns the correct Value fields for the given { REQUIRE_THAT(result, !Contains(schema::DatColValueR)); REQUIRE_THAT(result, Contains(schema::DatColValueW)); - REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, 
traits.isArray()))); + REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits))); } } WHEN("Requesting a query string for traits configured for Tango::READ_WRITE") @@ -77,8 +77,8 @@ SCENARIO("storeDataEventString() returns the correct Value fields for the given { REQUIRE_THAT(result, Contains(schema::DatColValueR)); REQUIRE_THAT(result, Contains(schema::DatColValueW)); - REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits.isArray()))); - REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits.isArray()))); + REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits))); + REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits))); } } WHEN("Requesting a query string for traits configured for Tango::READ_WITH_WRITE") @@ -92,8 +92,8 @@ SCENARIO("storeDataEventString() returns the correct Value fields for the given { REQUIRE_THAT(result, Contains(schema::DatColValueR)); REQUIRE_THAT(result, Contains(schema::DatColValueW)); - REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits.isArray()))); - REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits.isArray()))); + REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits))); + REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits))); } } } @@ -120,7 +120,7 @@ SCENARIO("storeDataEventString() adds a null when value is size zero", "[query-s { REQUIRE_THAT(result, Contains(schema::DatColValueR)); REQUIRE_THAT(result, Contains(schema::DatColValueW)); - REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits.isArray()))); + REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits))); REQUIRE_THAT(result, Contains("NULL")); } } @@ -135,7 +135,7 @@ SCENARIO("storeDataEventString() adds a null when value is size zero", "[query-s { REQUIRE_THAT(result, Contains(schema::DatColValueR)); REQUIRE_THAT(result, Contains(schema::DatColValueW)); - REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits.isArray()))); + REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits))); REQUIRE_THAT(result, Contains("NULL")); } } diff --git a/test/TestHelpers.cpp b/test/TestHelpers.cpp index 2a26f0a7c23607979c9b331a783291157d6f1adb..7450bf531916b8689b2ef2ab404d035f410a67e0 100644 --- a/test/TestHelpers.cpp +++ b/test/TestHelpers.cpp @@ -234,13 +234,5 @@ namespace data_gen return move(value); } - - //============================================================================= - //============================================================================= - template<> - typename TangoTypeTraits<Tango::DEV_ENUM>::array data<Tango::DEV_ENUM>(int size) - { - return move(genericData<int16_t>(size)); - } } // namespace data_gen -} // namespace hdbpp_test +} // namespace hdbpp_test \ No newline at end of file diff --git a/test/TestHelpers.hpp b/test/TestHelpers.hpp index 295a786aee2d8c00a2b053a47823f72db2b8d716..3000fceb95a3d98fb6003cea9dd4027d02a19cce 100644 --- a/test/TestHelpers.hpp +++ b/test/TestHelpers.hpp @@ -72,7 +72,6 @@ namespace attr_info "Description about attribute, its \"quoted\", and 'quoted', yet does it work?"; const std::string AttrInfoLabel = "Label"; - const std::vector<std::string> AttrInfoEnumLabels = {"label1", "label2"}; const std::string AttrInfoUnit = "Unit %"; const 
std::string AttrInfoStandardUnit = "Standard Unit"; const std::string AttrInfoDisplayUnit = "Display Unit $"; @@ -171,14 +170,6 @@ namespace data_gen using type = Tango::DevState; using array = std::unique_ptr<std::vector<Tango::DevState>>; }; - - template<> - struct TangoTypeTraits<Tango::DEV_ENUM> - { - using type = int16_t; - using array = std::unique_ptr<std::vector<int16_t>>; - }; - template<Tango::CmdArgType Type> typename TangoTypeTraits<Type>::array data(int size); @@ -218,9 +209,6 @@ namespace data_gen template<> typename TangoTypeTraits<Tango::DEV_STATE>::array data<Tango::DEV_STATE>(int size); - - template<> - typename TangoTypeTraits<Tango::DEV_ENUM>::array data<Tango::DEV_ENUM>(int size); template<typename T> std::unique_ptr<std::vector<T>> genericData(int size) diff --git a/thirdparty/libhdbpp/.gitignore b/thirdparty/libhdbpp/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d11e5f0f9014da1c6ea125b2a0e1c88263c06bc8 --- /dev/null +++ b/thirdparty/libhdbpp/.gitignore @@ -0,0 +1,39 @@ +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app +bin/ +lib/ + +.nse_depinfo + +# Eclipse +.cproject +.project +.settings + diff --git a/thirdparty/libhdbpp/CHANGELOG.md b/thirdparty/libhdbpp/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..508a544a0c889cc5805875ff0806558684746baf --- /dev/null +++ b/thirdparty/libhdbpp/CHANGELOG.md @@ -0,0 +1,29 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.0.0] - 2017-09-13 + +### Added + +* CHANGELOG.md file. +* INSTALL.md file. +* LICENCE file. +* License headers in code. +* Debian Package build files under debian/ +* CMake build and configuration files. + +### Changed + +* Moved build system from Make to CMake. +* README.md - Added lots of new information. +* Source file headers changed for correct licence. + +### Removed + +* Makefile build system. diff --git a/thirdparty/libhdbpp/CMakeLists.txt b/thirdparty/libhdbpp/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..9de091843b041b9f97756e025f3ac43bca6d8135 --- /dev/null +++ b/thirdparty/libhdbpp/CMakeLists.txt @@ -0,0 +1,115 @@ +# Stop messy in source builds +set(CMAKE_DISABLE_IN_SOURCE_BUILD ON) +set(CMAKE_DISABLE_SOURCE_CHANGES ON) + +if ( ${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR} ) + message( FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt." 
) +endif() + +# Start Build Config ----------------------------------- +cmake_minimum_required(VERSION 3.0) +set(CMAKE_VERBOSE_MAKEFILE ON) +set(CMAKE_COLOR_MAKEFILE ON) + +project(libhdbpp) + +# We only support Release for now +#set(CMAKE_BUILD_TYPE "Release") + +include(cmake/ReleaseVersion.cmake) + +# arch install definitions +include(GNUInstallDirs) + +# Create a list of the source files for this build to be given to +# the target later +set(SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/src/LibHdb++.cpp) + +# Set the output directory to lib to stay consistent with the old build +set(OUTPUT_DIR "lib") +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR}/${OUTPUT_DIR}) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_SOURCE_DIR}/${OUTPUT_DIR}) + +# set up a configuration file to pass variables into the build +configure_file( + "${PROJECT_SOURCE_DIR}/cmake/LibHdb++Config.h.in" + "${PROJECT_BINARY_DIR}/LibHdb++Config.h") + +message(STATUS "Searching for libraries...") + +# Variable to contain a list of all the libs we depend on +set(HDBPP_LIBRARIES dl) + +# allow pkg-config to search the CMAKE_PREFIX_PATH +set(PKG_CONFIG_USE_CMAKE_PREFIX_PATH ON) +list(APPEND CMAKE_PREFIX_PATH "/usr") + +# Find Dependencies --------------------- +include(cmake/FindLibraries.cmake) + +# Find any libraries the user requested +if(HDBPP_LIBRARIES) + find_libraries(LIBRARIES ${HDBPP_LIBRARIES}) + set(HDBPP_FOUND_LIBRARIES ${FOUND_LIBRARIES}) +endif(HDBPP_LIBRARIES) + +# First find tango if it has not already been found. Returns an interface library +# called TangoInterfaceLibrary +set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${CMAKE_CURRENT_SOURCE_DIR}/cmake") +find_package(Tango) + +# Build Targets ----------------------------------- + +# Libhdbpp header library -------- +add_library(libhdbpp_headers INTERFACE) + +target_include_directories(libhdbpp_headers + INTERFACE ${PROJECT_SOURCE_DIR}/include) + +# Libhdbpp shared library -------- +add_library(libhdbpp_shared_library SHARED ${SRC_FILES}) + +target_link_libraries(libhdbpp_shared_library + PUBLIC ${HDBPP_FOUND_LIBRARIES} + PRIVATE TangoInterfaceLibrary) + +target_include_directories(libhdbpp_shared_library + PUBLIC + $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include> + $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}> + PRIVATE + "${PROJECT_BINARY_DIR}") + +set_target_properties(libhdbpp_shared_library + PROPERTIES + OUTPUT_NAME hdb++ + LINK_FLAGS "-Wl,--no-undefined" + POSITION_INDEPENDENT_CODE 1 + VERSION ${LIBRARY_VERSION_STRING} + SOVERSION ${LIBRARY_VERSION_MAJOR}) + +# Libhdbpp shared library -------- +add_library(libhdbpp_static_library STATIC ${SRC_FILES}) + +target_link_libraries(libhdbpp_static_library + PUBLIC ${HDBPP_FOUND_LIBRARIES} + PRIVATE TangoInterfaceLibrary) + +set_target_properties(libhdbpp_static_library + PROPERTIES + OUTPUT_NAME hdb++ + LINK_FLAGS "-Wl,--no-undefined" + EXCLUDE_FROM_ALL 1) + +# Install Config ----------------------------------- +install(TARGETS libhdbpp_shared_library + EXPORT libhdbpp-export + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) + +install(EXPORT libhdbpp-export + FILE LibhdbppTargets.cmake + NAMESPACE LIBHDBPP:: + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/Libhdbpp) + +install(DIRECTORY include/libhdb++ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) diff --git a/thirdparty/libhdbpp/INSTALL.md b/thirdparty/libhdbpp/INSTALL.md new file mode 100644 index 0000000000000000000000000000000000000000..d713a54c838a07e1e6aec343e5351ccd4ea53ced --- /dev/null +++ 
b/thirdparty/libhdbpp/INSTALL.md
@@ -0,0 +1,100 @@
+# Building and Installation
+
+## Dependencies
+
+Ensure the development versions of the dependencies are installed. These are as follows:
+
+* Tango Controls 9 or higher.
+* omniORB release 4 - libomniorb4 and libomnithread.
+* libzmq - libzmq3-dev or libzmq5-dev.
+
+If they have not been installed in a standard location, use the standard CMake flags below to tell the build where to search for them.
+
+The library can be built against a Tango Controls installation from either the Debian package or a source distribution. The currently supported Tango Controls release is 9.2.5a. Ensure the relevant build flags are set if Tango is installed in a custom location.
+
+Toolchain dependencies:
+
+* CMake 3.0.0 or greater is required to perform the build.
+
+## Standard flags
+
+The build system is CMake, so standard CMake flags can be used to influence the build and installation process. Several custom flags are defined to build the correct library. They are:
+
+| Flag | Default | Use |
+|------|---------|-----|
+| HDBPP_BUILD_SHARED | ON | Build the shared library. This will also be installed if make install is run. |
+| HDBPP_BUILD_STATIC | OFF | Build the static library. This will also be installed if make install is run. |
+| HDBPP_DEV_INSTALL | OFF | Install development files and libraries. |
+
+The following is a list of common useful CMake flags and their use:
+
+| Flag | Use |
+|------|-----|
+| CMAKE_INSTALL_PREFIX | Standard CMake flag to modify the install prefix. |
+| CMAKE_INCLUDE_PATH | Standard CMake flag to add include paths to the search path. |
+| CMAKE_LIBRARY_PATH | Standard CMake flag to add paths to the library search path. |
+
+Using the above CMake flags, it is possible to use Tango and other libraries from non-standard locations: just add all paths to the correct flag.
+
+### Passing CMake Lists
+
+Note: to pass multiple paths (i.e. a string list) to CMake, either an escaped semicolon must be used, or the list must be enclosed in quotes. Examples:
+
+* `-DCMAKE_INCLUDE_PATH=/here/there\;/some/where/else`
+* `-DCMAKE_INCLUDE_PATH="/here/there;/some/where/else"`
+* `-DCMAKE_INCLUDE_PATH='/here/there;/some/where/else'`
+
+## Building
+
+### Building Against Tango Controls 9.2.5a
+
+**The Debian package and source install place the headers under /usr/include/tango, yet the code includes tango via `#include <tango.h>` (to be compatible with Tango Controls 10 when it is released), so it is likely you will need to pass at least one path via CMAKE_INCLUDE_PATH. In this case, set CMAKE_INCLUDE_PATH=/usr/include/tango or CMAKE_INCLUDE_PATH=/usr/local/include/tango, depending on your install method. Example:**
+
+```bash
+cmake -DCMAKE_INCLUDE_PATH=/usr/include/tango ..
+```
+
+### Example Build Sequence
+
+First clone the repository:
+
+```bash
+git clone http://github.com/tango-controls-hdbpp/libhdbpp.git
+```
+
+An out-of-source build is required by the CMakeLists file, so create a build directory to run CMake from:
+
+```bash
+mkdir libhdbpp/build
+cd libhdbpp/build
+```
+
+Then configure with cmake:
+
+```bash
+cmake ..
+```
+
+Or, for a build with non-standard dependencies:
+
+```bash
+cmake \
+    -DCMAKE_INSTALL_PREFIX=/my/custom/location \
+    -DCMAKE_INCLUDE_PATH=/path/to/custom/include \
+    -DCMAKE_LIBRARY_PATH=/path/to/custom/library \
+    ..
+``` + +Then build: + +```bash +make +``` + +## Installation + +After the build has completed, simply run: + +``` +make install +``` \ No newline at end of file diff --git a/thirdparty/libhdbpp/LICENSE b/thirdparty/libhdbpp/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..816c0409ee772027e12c5b15dfb97fa662c43f62 --- /dev/null +++ b/thirdparty/libhdbpp/LICENSE @@ -0,0 +1,166 @@ + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. 
You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. 
Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
\ No newline at end of file
diff --git a/thirdparty/libhdbpp/README.md b/thirdparty/libhdbpp/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7df720ed9ae5658ef6f2737d9d434d0eaaeb0aa
--- /dev/null
+++ b/thirdparty/libhdbpp/README.md
@@ -0,0 +1,41 @@
+# Libhdbpp
+
+[](http://www.tango-controls.org) [](https://www.gnu.org/licenses/lgpl-3.0) [](https://github.com/tango-controls-hdbpp/libhdbpp/releases) [](https://bintray.com/tango-controls/debian/libhdb%2B%2B6/_latestVersion)
+
+Interface library for the HDB++ archiving system. Libhdbpp provides an abstract means to archive data events to a Cassandra database, a MySQL database, or potentially other database backends.
+
+## Version
+
+The current release version is 1.0.0.
+
+### **Important Changes** 0.9.1 -> 1.0.0
+
+* The build system has been moved to CMake. Details on building with CMake are below.
+
+## Documentation
+
+* See the Tango documentation [here](http://tango-controls.readthedocs.io/en/latest/administration/services/hdbpp/index.html#hdb-an-archiving-historian-service) for broader information about the HDB++ archiving system and its integration into Tango Controls.
+* The libhdbpp [CHANGELOG.md](https://github.com/tango-controls-hdbpp/libhdbpp/blob/master/CHANGELOG.md) contains the latest changes, both released and in development.
+
+## Bug Reports
+
+Please file bug reports in the issues section above.
+
+## Building and Installation
+
+In its simplest form, clone the repository and, assuming a standard install for all dependencies, run:
+
+```
+cd libhdbpp
+mkdir build
+cd build
+cmake ../
+make
+make install
+```
+
+See the [INSTALL.md](https://github.com/tango-controls-hdbpp/libhdbpp/blob/master/INSTALL.md) file for more detailed instructions on how to build and install libhdbpp.
+
+## License
+
+The source code is released under the LGPL3 license and a copy of this license is provided with the code.
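For orientation, this is roughly how a consumer would drive the `HdbClient` facade declared in `include/libhdb++/LibHdb++.h` further down. This is a sketch, not a drop-in recipe: the backend library name and the `connect_string` entry are illustrative assumptions, since the valid keys depend entirely on the backend selected with `libname=`.

```cpp
// Hypothetical usage sketch for the libhdbpp client API (assumed config values).
#include <libhdb++/LibHdb++.h>

#include <string>
#include <vector>

int main()
{
    // HdbClient parses "key=value" strings and dlopen()s the backend
    // shared library named by the mandatory "libname" entry.
    std::vector<std::string> configuration = {
        "libname=libhdb++timescale.so",           // backend library (example)
        "connect_string=user=hdb host=localhost", // hypothetical backend setting
    };

    HdbClient client(configuration);

    // Configure an attribute for archiving: Tango type, format, write
    // type, and a TTL in hours (0 = keep forever).
    client.configure_Attr("sys/tg_test/1/double_scalar",
        Tango::DEV_DOUBLE, Tango::SCALAR, Tango::READ, 0);

    // Tango::EventData received from an event subscription would then be
    // forwarded with client.insert_Attr(data, ev_data_type).
    return 0;
}
```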
diff --git a/thirdparty/libhdbpp/cmake/FindLibraries.cmake b/thirdparty/libhdbpp/cmake/FindLibraries.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..4db63a57cbba49278997b1283a674d2361babfb8
--- /dev/null
+++ b/thirdparty/libhdbpp/cmake/FindLibraries.cmake
@@ -0,0 +1,32 @@
+include(CMakeParseArguments)
+
+function(find_libraries)
+
+    # Parse the parameters
+    set(MULTIVALUEARGS LIBRARIES SEARCH_PATHS)
+    cmake_parse_arguments(FIND_LIBRARIES "" "" "${MULTIVALUEARGS}" ${ARGN})
+
+    # Clear the found libraries
+    unset(FOUND_LIBRARIES PARENT_SCOPE)
+
+    foreach(LIB ${FIND_LIBRARIES_LIBRARIES})
+
+        # try the user provided paths first
+        find_library(FOUND_LIB_${LIB} ${LIB} PATHS ${FIND_LIBRARIES_SEARCH_PATHS} NO_DEFAULT_PATH)
+
+        # if we could not find it, drop to the system paths
+        if(NOT FOUND_LIB_${LIB})
+            find_library(FOUND_LIB_${LIB} ${LIB})
+        endif(NOT FOUND_LIB_${LIB})
+
+        if(FOUND_LIB_${LIB})
+            message(STATUS "Found " ${LIB} " at: " ${FOUND_LIB_${LIB}})
+            list(APPEND FOUND_LIBRARIES ${FOUND_LIB_${LIB}})
+        else()
+            message(FATAL_ERROR "Could not find " ${LIB})
+        endif(FOUND_LIB_${LIB})
+
+    endforeach(LIB ${LIBRARIES})
+
+    set(FOUND_LIBRARIES ${FOUND_LIBRARIES} PARENT_SCOPE)
+endfunction(find_libraries)
\ No newline at end of file
diff --git a/thirdparty/libhdbpp/cmake/FindTango.cmake b/thirdparty/libhdbpp/cmake/FindTango.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..857d5c6e7012bc62fcf80db89d0cbaa7926054f7
--- /dev/null
+++ b/thirdparty/libhdbpp/cmake/FindTango.cmake
@@ -0,0 +1,26 @@
+if(NOT TARGET TangoInterfaceLibrary)
+
+    # Ensure pkg-config is installed
+    find_package(PkgConfig REQUIRED)
+
+    # Now search for the tango.pc file, this is a required dependency
+    message(STATUS "Search for TANGO package config...")
+    pkg_search_module(TANGO REQUIRED tango>=9.2.5)
+    message(STATUS "Found tango version ${TANGO_VERSION} at ${TANGO_PREFIX}")
+
+    include(FindLibraries)
+    find_libraries(LIBRARIES ${TANGO_LIBRARIES} SEARCH_PATHS ${TANGO_LIBRARY_DIRS})
+
+    # Create an interface library to represent the tango linkage
+    add_library(TangoInterfaceLibrary INTERFACE)
+    #target_include_directories(TangoInterfaceLibrary INTERFACE $<BUILD_INTERFACE:${TANGO_INCLUDE_DIRS}>)
+    #target_link_libraries(TangoInterfaceLibrary INTERFACE $<BUILD_INTERFACE:${FOUND_LIBRARIES}>)
+    #target_compile_options(TangoInterfaceLibrary INTERFACE $<BUILD_INTERFACE:${TANGO_CFLAGS}>)
+    set_target_properties(TangoInterfaceLibrary
+        PROPERTIES
+        INTERFACE_INCLUDE_DIRECTORIES "${TANGO_INCLUDE_DIRS}"
+        INTERFACE_LINK_LIBRARIES "${FOUND_LIBRARIES}"
+        INTERFACE_COMPILE_OPTIONS "${TANGO_CFLAGS}")
+
+    message(STATUS "Configured Tango Interface for TANGO version ${TANGO_VERSION}")
+endif(NOT TARGET TangoInterfaceLibrary)
\ No newline at end of file
diff --git a/thirdparty/libhdbpp/cmake/LibHdb++Config.h.in b/thirdparty/libhdbpp/cmake/LibHdb++Config.h.in
new file mode 100644
index 0000000000000000000000000000000000000000..e35803326fadf5227a8d53e9170e858ce9a0ba96
--- /dev/null
+++ b/thirdparty/libhdbpp/cmake/LibHdb++Config.h.in
@@ -0,0 +1,9 @@
+#ifndef LIBHDPPP_CONFIG_H
+#define LIBHDPPP_CONFIG_H
+
+#define VERSION_MAJOR "${LIBHDBPP_VERSION_MAJOR}"
+#define VERSION_MINOR "${LIBHDBPP_VERSION_MINOR}"
+#define VERSION_REVISION "${LIBHDBPP_VERSION_REVISION}"
+#define BUILD_TIME "${LIBHDBPP_TIMESTAMP}"
+
+#endif /* LIBHDPPP_CONFIG_H */
\ No newline at end of file
diff --git a/thirdparty/libhdbpp/cmake/ReleaseVersion.cmake b/thirdparty/libhdbpp/cmake/ReleaseVersion.cmake
new file mode 100644
index
0000000000000000000000000000000000000000..c700b37bb0851f46915d3fd21ab1895d96e1fce1 --- /dev/null +++ b/thirdparty/libhdbpp/cmake/ReleaseVersion.cmake @@ -0,0 +1,13 @@ +# Project version +set(LIBHDBPP_VERSION_MAJOR "1") +set(LIBHDBPP_VERSION_MINOR "0") +set(LIBHDBPP_VERSION_REVISION "0") +string(TIMESTAMP LIBHDBPP_TIMESTAMP "%Y-%m-%d %H:%M:%S") + +# Version the shared library +set(LIBRARY_VERSION_MAJOR 6) +set(LIBRARY_VERSION_MINOR 0) +set(LIBRARY_VERSION_PATCH 0) + +set(LIBRARY_VERSION_STRING + ${LIBRARY_VERSION_MAJOR}.${LIBRARY_VERSION_MINOR}.${LIBRARY_VERSION_PATCH}) \ No newline at end of file diff --git a/thirdparty/libhdbpp/include/libhdb++/LibHdb++.h b/thirdparty/libhdbpp/include/libhdb++/LibHdb++.h new file mode 100755 index 0000000000000000000000000000000000000000..d645b273a8e76ec496d15d5e68dd60056fe7e0b6 --- /dev/null +++ b/thirdparty/libhdbpp/include/libhdb++/LibHdb++.h @@ -0,0 +1,129 @@ +/* Copyright (C) 2014-2017 + Elettra - Sincrotrone Trieste S.C.p.A. + Strada Statale 14 - km 163,5 in AREA Science Park + 34149 Basovizza, Trieste, Italy. + + This file is part of libhdb++. + + libhdb++ is free software: you can redistribute it and/or modify + it under the terms of the Lesser GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + libhdb++ is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser + GNU General Public License for more details. + + You should have received a copy of the Lesser GNU General Public License + along with libhdb++. If not, see <http://www.gnu.org/licenses/>. */ + +#ifndef _HDBPP_H +#define _HDBPP_H + +#include <tango.h> +#include <vector> +#include <stdint.h> + +#define DB_INSERT 0 +#define DB_START 1 +#define DB_STOP 2 +#define DB_REMOVE 3 +#define DB_INSERT_PARAM 4 +#define DB_PAUSE 5 +#define DB_UPDATETTL 6 + +typedef struct HdbEventDataType_ +{ + string attr_name; + int max_dim_x; + int max_dim_y; + int data_type; + Tango::AttrDataFormat data_format; + int write_type; + +} HdbEventDataType; + +class HdbCmdData +{ +public: + HdbCmdData(Tango::EventData *ev_data_, HdbEventDataType ev_data_type_){ev_data=ev_data_; ev_data_param=NULL; ev_data_type=ev_data_type_; op_code=DB_INSERT;}; + HdbCmdData(Tango::AttrConfEventData *ev_data_param_, HdbEventDataType ev_data_type_){ev_data=NULL; ev_data_param=ev_data_param_; ev_data_type=ev_data_type_; op_code=DB_INSERT_PARAM;}; + HdbCmdData(uint8_t op_code_, string attr_name_){op_code=op_code_; attr_name=attr_name_; ev_data=NULL; ev_data_param=NULL;}; + HdbCmdData(uint8_t op_code_, unsigned int ttl_, string attr_name_){op_code=op_code_; attr_name=attr_name_; ttl=ttl_; ev_data=NULL; ev_data_param=NULL;}; + ~HdbCmdData(){if(ev_data) delete ev_data; if(ev_data_param) delete ev_data_param;}; + Tango::EventData *ev_data; + Tango::AttrConfEventData *ev_data_param; + HdbEventDataType ev_data_type; + uint8_t op_code; //operation code + unsigned int ttl; + string attr_name; + +} ; + +class AbstractDB +{ + +public: + + + virtual void insert_Attr(Tango::EventData *data, HdbEventDataType ev_data_type) = 0; + + virtual void insert_param_Attr(Tango::AttrConfEventData *data, HdbEventDataType ev_data_type) = 0; + + virtual void configure_Attr(string name, int type/*DEV_DOUBLE, DEV_STRING, ..*/, int format/*SCALAR, SPECTRUM, ..*/, int write_type/*READ, READ_WRITE, ..*/, unsigned int ttl/*hours, 0=infinity*/) = 
0; + + virtual void updateTTL_Attr(string name, unsigned int ttl/*hours, 0=infinity*/) = 0; + + virtual void event_Attr(string name, unsigned char event) = 0; + + virtual ~AbstractDB() {} + +}; + +class DBFactory +{ + +public: + + virtual AbstractDB* create_db(vector<string> configuration) = 0; + virtual ~DBFactory(){}; + +}; + +class HdbClient +{ + +private: + AbstractDB *db; + DBFactory *db_factory; + void* hLib; + + DBFactory *getDBFactory(); + void string_explode(string str, string separator, vector<string>* results); + void string_vector2map(vector<string> str, string separator, map<string,string>* results); + +public: + HdbClient(vector<string> configuration); + + void insert_Attr(Tango::EventData *data, HdbEventDataType ev_data_type); + + void insert_param_Attr(Tango::AttrConfEventData *data, HdbEventDataType ev_data_type); + + void configure_Attr(string name, int type/*DEV_DOUBLE, DEV_STRING, ..*/, int format/*SCALAR, SPECTRUM, ..*/, int write_type/*READ, READ_WRITE, ..*/, unsigned int ttl/*hours, 0=infinity*/); + + void updateTTL_Attr(string name, unsigned int ttl/*hours, 0=infinity*/); + + void event_Attr(string name, unsigned char event); + + ~HdbClient(); + +}; + +extern "C" +{ + typedef DBFactory * getDBFactory_t(); + DBFactory *getDBFactory(); +} + +#endif // _HDBPP_H \ No newline at end of file diff --git a/thirdparty/libhdbpp/src/LibHdb++.cpp b/thirdparty/libhdbpp/src/LibHdb++.cpp new file mode 100755 index 0000000000000000000000000000000000000000..228221de8f70ac96ce4fc84f17cb0a1f5f16fc14 --- /dev/null +++ b/thirdparty/libhdbpp/src/LibHdb++.cpp @@ -0,0 +1,132 @@ +/* Copyright (C) 2014-2017 + Elettra - Sincrotrone Trieste S.C.p.A. + Strada Statale 14 - km 163,5 in AREA Science Park + 34149 Basovizza, Trieste, Italy. + + This file is part of libhdb++. + + libhdb++ is free software: you can redistribute it and/or modify + it under the terms of the Lesser GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + libhdb++ is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser + GNU General Public License for more details. + + You should have received a copy of the Lesser GNU General Public License + along with libhdb++. If not, see <http://www.gnu.org/licenses/>. */ + +#include <libhdb++/LibHdb++.h> +#include "LibHdb++Config.h" +#include <dlfcn.h> + +HdbClient::HdbClient(vector<string> configuration) +{ + cout << "Starting version: " + << VERSION_MAJOR << "." << VERSION_MINOR << "." << VERSION_REVISION << ":" + << BUILD_TIME << endl; + + map<string,string> db_conf; + string_vector2map(configuration,"=",&db_conf); + string libname; + + try + { + libname = db_conf.at("libname"); + } + catch(const std::out_of_range& e) + { + cout << __func__<< ": " << "Configuration parsing error looking for key 'libname='" << endl; + exit(1); + } + + if ((hLib = dlopen(libname.c_str(), RTLD_NOW/*|RTLD_GLOBAL*/))) + { + if (getDBFactory_t* create_factory = (getDBFactory_t*)dlsym(hLib, "getDBFactory")) + { + db_factory = create_factory(); + db = db_factory->create_db(configuration); + if(db == NULL) + { + cout << __func__<<": Error creating db" << endl; + exit(1); + } + } + else + { + cout << __func__<<": Error loading symbol getDBFactory from library " << libname << endl; + exit(1); + } + } + else + { + db = NULL; + cout << __func__<<": Error loading library " << libname << ". 
Error report: " << dlerror() << endl; + exit(1); + } +} + +void HdbClient::insert_Attr(Tango::EventData *data, HdbEventDataType ev_data_type) +{ + db->insert_Attr(data, ev_data_type); +} + +void HdbClient::insert_param_Attr(Tango::AttrConfEventData *data, HdbEventDataType ev_data_type) +{ + db->insert_param_Attr(data, ev_data_type); +} + +void HdbClient::configure_Attr(string name, int type/*DEV_DOUBLE, DEV_STRING, ..*/, int format/*SCALAR, SPECTRUM, ..*/, int write_type/*READ, READ_WRITE, ..*/, unsigned int ttl/*hours, 0=infinity*/) +{ + db->configure_Attr(name, type, format, write_type, ttl); +} + +void HdbClient::updateTTL_Attr(string name, unsigned int ttl/*hours, 0=infinity*/) +{ + db->updateTTL_Attr(name, ttl); +} + +void HdbClient::event_Attr(string name, unsigned char event) +{ + db->event_Attr(name, event); +} + +HdbClient::~HdbClient() +{ + delete db; + delete db_factory; + dlclose(hLib); +} + +void HdbClient::string_explode(string str, string separator, vector<string>* results) +{ + string::size_type found; + + found = str.find_first_of(separator); + while(found != string::npos) + { + if(found > 0) + { + results->push_back(str.substr(0,found)); + } + str = str.substr(found+1); + found = str.find_first_of(separator); + } + if(str.length() > 0) + { + results->push_back(str); + } +} + +void HdbClient::string_vector2map(vector<string> str, string separator, map<string,string>* results) +{ + for(vector<string>::iterator it=str.begin(); it != str.end(); it++) + { + string::size_type found_eq; + found_eq = it->find_first_of(separator); + if(found_eq != string::npos && found_eq > 0) + results->insert(make_pair(it->substr(0,found_eq),it->substr(found_eq+1))); + } +}
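LibHdb++.cpp above resolves the storage backend entirely at runtime: it dlopen()s the library named by `libname=`, dlsym()s the `extern "C"` `getDBFactory` symbol, and asks the returned factory for an `AbstractDB` instance. The sketch below shows the minimum a conforming backend would export, under the contract in LibHdb++.h; `NullDB` and `NullDBFactory` are hypothetical names and every method is a stub.

```cpp
// Minimal sketch of a libhdbpp backend matching the dlopen()/dlsym()
// contract in LibHdb++.cpp (NullDB/NullDBFactory are assumptions).
#include <libhdb++/LibHdb++.h>

class NullDB : public AbstractDB
{
public:
    void insert_Attr(Tango::EventData * /*data*/, HdbEventDataType /*ev_data_type*/) override {}
    void insert_param_Attr(Tango::AttrConfEventData * /*data*/, HdbEventDataType /*ev_data_type*/) override {}
    void configure_Attr(std::string /*name*/, int /*type*/, int /*format*/, int /*write_type*/, unsigned int /*ttl*/) override {}
    void updateTTL_Attr(std::string /*name*/, unsigned int /*ttl*/) override {}
    void event_Attr(std::string /*name*/, unsigned char /*event*/) override {}
};

class NullDBFactory : public DBFactory
{
public:
    AbstractDB *create_db(std::vector<std::string> /*configuration*/) override
    {
        return new NullDB();
    }
};

// The symbol HdbClient looks up with dlsym(); C linkage keeps the
// name unmangled in the shared library.
extern "C" DBFactory *getDBFactory()
{
    return new NullDBFactory();
}
```

The `extern "C"` linkage matters here: dlsym() looks the symbol up by its unmangled name, which is why the header also publishes the `getDBFactory_t` typedef for exactly this signature.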