diff --git a/.clang-tidy b/.clang-tidy
index bffacb17a0e31f62c3bdfccd1a038fccfa40e315..af9b18e704e61c996f61e622faeaf4a8d6ed44e0 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -1,7 +1,7 @@
 ---
 Checks:          '-*,clang-diagnostic-*,clang-analyzer-*,-*,modernize*,performance*,readability*,bugprone*,clang-analyzer*,cppcoreguidelines*,misc*,-readability-braces-around-statements,-cppcoreguidelines-pro-bounds-array-to-pointer-decay'
 WarningsAsErrors: ''
-HeaderFilterRegex: ''
+HeaderFilterRegex: '(src|test)/.*'
 AnalyzeTemporaryDtors: false
 FormatStyle:     none
 User:            sjames
@@ -199,6 +199,8 @@ CheckOptions:
     value:           CamelCase
   - key:             readability-identifier-naming.ClassMemberCase
     value:           lower_case
+  - key:             readability-identifier-naming.ClassMemberPrefix
+    value:           _
   - key:             readability-identifier-naming.ClassMethodCase
     value:           camelBack
   - key:             readability-identifier-naming.ConstantCase
@@ -229,5 +231,7 @@ CheckOptions:
     value:           CamelCase
   - key:             readability-identifier-naming.VariableCase
     value:           lower_case
+  - key:             readability-identifier-naming.StaticVariableCase
+    value:           lower_case
 ...
 
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 61d48aaebd50731104612da10f4c3d35864c9d5c..7883609082fd5b611d9aa4e97498c8e0282c7e19 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,16 +7,34 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 
 ## [Unreleased]
 
+## [0.10.0] - 2019-12-06
+
+### Added
+
+- Added configuration documentation
+- Added support for syslog logging.
+- Added support for storing data via insert strings rather than prepared statements:
+  - Tango string spectrum types are always stored via insert strings to remove escape characters
+  - This work provides the basis for future batch saving of data events.
+
 ### Fixed
 
 - Close logging down in destructor so linked device server can be restarted.
+- All unit tests now pass!
 
 ### Changed
 
+- Completely reworked the DbConnectionTest unit tests
+  - Speeds up execution of tests
+  - Prevents database deadlock due to repeated rapid truncation of tables.
 - Build system correctly produces a major version shared object
 - Removed Clang path from build (CMake checks PATH)
 - Corrected static library build
 - Install now places header in include/hdb++/
+- Entire library now uses the global default logger from spdlog.
+- Updated spdlog submodule to release v1.4.2
+- Modernized code via clang-tidy
+- DbConnection::storeParameterEvent() no longer asserts on empty parameters; it now logs a warning instead.
 
 ## [0.9.1] - 2019-07-18
 
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0602e9b62a08f2c4cc10f82dbc4d76520437c5b9..fc968d02d0709adc2f54f06b13f4e7dd74715418 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -19,8 +19,8 @@ set(LIBHDBPP_TIMESCALE_NAME "libhdb++timescale")
 
 # Versioning
 set(VERSION_MAJOR "0")
-set(VERSION_MINOR "9")
-set(VERSION_PATCH "2")
+set(VERSION_MINOR "10")
+set(VERSION_PATCH "0")
 set(VERSION_METADATA "")
 set(VERSION_STRING ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH})
 
@@ -137,7 +137,7 @@ add_subdirectory(src)
 add_library(libhdbpp_timescale_shared_library SHARED ${SRC_FILES})
 
 target_link_libraries(libhdbpp_timescale_shared_library 
-    PUBLIC ${TDB_FOUND_LIBRARIES} pqxx_static libhdbpp_headers spdlog Threads::Threads
+    PUBLIC ${TDB_FOUND_LIBRARIES} pqxx_static libhdbpp_headers spdlog::spdlog_header_only Threads::Threads
     PRIVATE TangoInterfaceLibrary)
 
 target_include_directories(libhdbpp_timescale_shared_library 
@@ -212,4 +212,4 @@ endif(BUILD_UNIT_TESTS)
 
 if(BUILD_BENCHMARK_TESTS)
     add_subdirectory(benchmark)
-endif(BUILD_BENCHMARK_TESTS)
\ No newline at end of file
+endif(BUILD_BENCHMARK_TESTS)
diff --git a/README.md b/README.md
index 7e97541f24281b518e0460a83d26803c465fb90b..7e451472eec5c9c6071592cb19bdbb3f9fb4d63b 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,7 @@
 [![TangoControls](https://img.shields.io/badge/-Tango--Controls-7ABB45.svg?style=flat&logo=%20data%3Aimage%2Fpng%3Bbase64%2CiVBORw0KGgoAAAANSUhEUgAAACAAAAAkCAYAAADo6zjiAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEwAACxMBAJqcGAAAAsFJREFUWIXtl01IFVEYht9zU%2FvTqOxShLowlOgHykWUGEjUKqiocB1FQURB0KJaRdGiaFM7gzZRLWpTq2olhNQyCtpYCP1gNyIoUTFNnxZzRs8dzvw4Q6564XLnfOf73vedc2a%2BmZEKALgHrC3CUUR8CxZFeEoFalsdM4uLmMgFoIlZLJp3A9ZE4S2oKehhlaR1BTnyg2ocnW%2FxsxEDhbYij4EPVncaeASMAavnS%2FwA8NMaqACNQCew3f4as3KZOYh2SuqTVJeQNiFpn6QGSRVjTH9W%2FiThvcCn6H6n4BvQDvQWFT%2BSIDIFDAKfE3KOAQeBfB0XGPeQvgE67P8ZoB44DvTHmFgJdOQRv%2BUjc%2BavA9siNTWemgfA3TwGquCZ3w8szFIL1ALngIZorndvgJOR0GlP2gtJkzH%2Bd0fGFxW07NqY%2FCrx5QRXcYjbCbmxF1dkBSbi8kpACah3Yi2Sys74cVyxMWY6bk5BTwgRe%2BYlSzLmxNpU3aBeJogk4XWWpJKUeiap3RJYCpQj4QWZDQCuyIAk19Auj%2BAFYGZZjTGjksaBESB8P9iaxUBIaJzjZcCQcwHdj%2BS2Al0xPOeBYYKHk4vfmQ3Y8YkIwRUb7wQGU7j2ePrA1URx93ayd8UpD8klyPbSQfCOMIO05MbI%2BDvwBbjsMdGTwlX21AAMZzEerkaI9zFkP4AeYCPBg6gNuEb6I%2FthFgN1KSQupqzoRELOSed4DGiJala1UmOMr2U%2Bl%2FTWEy9Japa%2Fy41IWi%2FJ3d4%2FkkaAw0Bz3AocArqApwTvet3O3GbgV8qqjAM7bf4N4KMztwTodcYVyelywKSCD5V3xphNXoezuTskNSl4bgxJ6jPGVJJqbN0aSV%2Bd0M0aO7FCs19Jo2lExphXaTkxdRVgQFK7DZVDZ8%2BcpdmQh3wuILh7ut3AEyt%2B51%2BL%2F0cUfwFOX0t0StltmQAAAABJRU5ErkJggg%3D%3D)](http://www.tango-controls.org) [![License: GPL v3](https://img.shields.io/badge/License-GPL%20v3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) [![](https://img.shields.io/github/release/tango-controls-hdbpp/libhdbpp-timescale.svg)](https://github.com/tango-controls-hdbpp/libhdbpp-timescale/releases)
 
 - [libhdbpp-timescale](#libhdbpp-timescale)
+  - [v0.9.0 To v0.10.0 Update](#v090-To-v0100-Update)
   - [Cloning](#Cloning)
   - [Bug Reports + Feature Requests](#Bug-Reports--Feature-Requests)
   - [Documentation](#Documentation)
@@ -20,6 +21,12 @@ The library has been build against a number of other projects, these have been i
 * spdlog - Logging system
 * Catch2 - Unit test subsystem
 
+## v0.9.0 To v0.10.0 Update 
+
+This revision changes how both scalar and spectrum strings are stored. In 0.9.0, strings were escaped and quoted before being stored in the database, which meant they were still escaped/quoted when retrieved; scalar strings were stored escaped/quoted as well, for consistency.
+
+To fix this, spectrums of strings are now stored via insert strings using both the ARRAY syntax and the dollar-quoting escape method. This means that when they are retrieved from the database they are no longer escaped/quoted. To match this, scalar strings are also no longer stored escaped/quoted.
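+
+As a rough illustration only (the table and column names below are invented for this example, not the actual schema), the insert generated for a spectrum of strings now has this shape, with dollar quoting keeping the stored values free of escape characters:
+
+```
+INSERT INTO att_array_devstring (att_conf_id, data_time, value_r)
+VALUES (1, '2019-12-06 10:00:00', ARRAY[$$plain string$$, $$a "quoted" string$$]);
+```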
+
 ## Cloning
 
 Currently this project is configured to express its dependencies as submodules. This may change in future if there is time to explore, for example, the Meson build system. To successfully clone the project and all its dependencies use the following:
diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt
index b69cb050aa18a056b9753b1aa32368ffc02044d8..14e78c0fa104c7610389436bdcf08bbc0e1c5dc8 100644
--- a/benchmark/CMakeLists.txt
+++ b/benchmark/CMakeLists.txt
@@ -21,5 +21,11 @@ target_compile_definitions(benchmark-tests
 set_target_properties(benchmark-tests
     PROPERTIES 
         LINK_FLAGS "-Wl,--no-undefined"
-        CXX_CLANG_TIDY ${DO_CLANG_TIDY}
-        CXX_STANDARD 14)
\ No newline at end of file
+        CXX_STANDARD 14)
+
+if(DO_CLANG_TIDY)
+    set_target_properties(benchmark-tests
+        PROPERTIES 
+        CXX_CLANG_TIDY ${DO_CLANG_TIDY})
+endif(DO_CLANG_TIDY)
+
diff --git a/benchmark/QueryBuilderTests.cpp b/benchmark/QueryBuilderTests.cpp
index 1a02b5a7702d4d0206aec979ea60e77b9db98d2e..24970008c1e8e951ea57d525574bc4a97c5535cb 100644
--- a/benchmark/QueryBuilderTests.cpp
+++ b/benchmark/QueryBuilderTests.cpp
@@ -26,8 +26,7 @@ void bmAllocateQueryBuilder(benchmark::State& state)
 {
     // Test - Testing the time it takes to allocate a QueryBuilder, mainly for future test
     // reference
-    hdbpp_internal::LogConfigurator::initLoggingMetrics(false, false);
-    hdbpp_internal::LogConfigurator::setLoggingLevel(spdlog::level::err);
+    hdbpp_internal::LogConfigurator::initLogging();
 
     for (auto _ : state)
         hdbpp_internal::pqxx_conn::QueryBuilder query_builder;
@@ -100,8 +99,7 @@ void bmStoreDataEventQueryNoCache(benchmark::State& state)
 {
     // TEST - Testing how long it takes to build an Insert Data Event query with
     // an empty cache (this forces the full string to be built)
-    hdbpp_internal::LogConfigurator::initLoggingMetrics(false, false);
-    hdbpp_internal::LogConfigurator::setLoggingLevel(spdlog::level::err);
+    hdbpp_internal::LogConfigurator::initLogging();
 
     hdbpp_internal::AttributeTraits traits 
         {static_cast<Tango::AttrWriteType>(state.range(0)), Tango::SCALAR, Tango::DEV_DOUBLE};
@@ -110,7 +108,7 @@ void bmStoreDataEventQueryNoCache(benchmark::State& state)
     {
         // define the builder here so its cache is always fresh
         hdbpp_internal::pqxx_conn::QueryBuilder query_builder;
-        query_builder.storeDataEventQuery<T>(traits);
+        query_builder.storeDataEventStatement<T>(traits);
     }
 }
 
@@ -121,8 +119,7 @@ void bmStoreDataEventQueryCache(benchmark::State& state)
 {
     // TEST - Testing the full lookup for an Insert Data QueryEvent query when the cache
     // map is fully populated 
-    hdbpp_internal::LogConfigurator::initLoggingMetrics(false, false);
-    hdbpp_internal::LogConfigurator::setLoggingLevel(spdlog::level::err);
+    hdbpp_internal::LogConfigurator::initLogging();
 
     hdbpp_internal::AttributeTraits traits 
         {static_cast<Tango::AttrWriteType>(state.range(0)), Tango::SCALAR, Tango::DEV_DOUBLE};
@@ -150,13 +147,13 @@ void bmStoreDataEventQueryCache(benchmark::State& state)
     for (auto &type : types)
         for (auto &format : format_types)
             for (auto &write : write_types)
-                query_builder.storeDataEventQuery<T>(hdbpp_internal::AttributeTraits{write, format, type});
+                query_builder.storeDataEventStatement<T>(hdbpp_internal::AttributeTraits{write, format, type});
 
     for (auto _ : state)
-        query_builder.storeDataEventQuery<T>(traits);
+        query_builder.storeDataEventStatement<T>(traits);
 }
 
 BENCHMARK_TEMPLATE(bmStoreDataEventQueryNoCache, bool)->Apply(writeTypeArgs);
 BENCHMARK_TEMPLATE(bmStoreDataEventQueryCache, bool)->Apply(writeTypeArgs);
 
-BENCHMARK_MAIN();
\ No newline at end of file
+BENCHMARK_MAIN();
diff --git a/doc/README.md b/doc/README.md
index 73dbc154a000beda96361aa5e1ba84021c24ee0a..d4368d95bd5686da5bac2e83a218df8b0af3e554 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -6,6 +6,7 @@ The documentation is purely about getting the shared library running on a correc
   - [About](#About)
   - [Building and Installation](#Building-and-Installation)
   - [DB Schema](#DB-Schema)
+  - [Configuration](#Configuration)
 
 ## About
 
@@ -18,4 +19,8 @@ The overview is in the main project [README](../README.md).
 
 ## DB Schema
 
-* [Schema](db-schema-config) guidelines and setup.
\ No newline at end of file
+* [Schema](db-schema-config) guidelines and setup.
+
+## Configuration
+
+* [Configuration](configuration) parameter details.
\ No newline at end of file
diff --git a/doc/build.md b/doc/build.md
index 451825abc5dc9cc27cc5f2635f809c9715e26d21..3f6496096e3f2c56538eeb00d623189177452306 100644
--- a/doc/build.md
+++ b/doc/build.md
@@ -88,7 +88,15 @@ cmake -DBUILD_UNIT_TESTS=ON ..
 make
 ```
 
-To run all unit tests, a postgresql database node is required with the project schema loaded up. Also note the connection string to this database node needs setting in TestHelpers.hpp. To run all tests:
+To run all unit tests, a PostgreSQL database node with the project schema loaded is required. There is a default connection string in test/TestHelpers.hpp:
+
+```
+user=postgres host=localhost port=5432 dbname=hdb password=password
+```
+
+If you run the hdb timescale Docker image associated with this project locally, the tests will connect automatically. If you wish to use a different database, edit the connection string in test/TestHelpers.hpp.
+
+To run all tests:
 
 ```bash
 ./test/unit-tests
diff --git a/doc/configuration.md b/doc/configuration.md
new file mode 100644
index 0000000000000000000000000000000000000000..a35efb0fc95be6dcd5b7c25bc99b5ef0d2434ad7
--- /dev/null
+++ b/doc/configuration.md
@@ -0,0 +1,40 @@
+# Configuration
+
+## Library Configuration Parameters
+
+Configuration parameters are as follows:
+
+| Parameter | Mandatory | Default | Description |
+|------|-----|-----|-----|
+| libname | true | None | Must be "libhdb++timescale.so" |
+| connect_string | true | None | Postgres connection string, eg user=postgres host=localhost port=5432 dbname=hdb password=password |
+| logging_level | false | error | Logging level. See table below |
+| log_file | false | false | Enable logging to file |
+| log_console | false | false | Enable logging to the console |
+| log_syslog | false | false | Enable logging to syslog |
+| log_file_name | false | None | When logging to file, this is the path and name of the file to use. Ensure the path exists, otherwise this is an error condition. |
+
+The logging_level parameter is case insensitive. Logging levels are as follows:
+
+| Level | Description |
+|------|-----|
+| error | Log only error level events (recommended unless debugging) |
+| warning | Log only warning level events |
+| info | Log info level events |
+| debug | Log debug level events. Good for early install debugging |
+| trace | Trace level logging. Excessive level of debug, good for involved debugging |
+| disabled | Disable logging subsystem |
+
+## Configuration Example
+
+A short example LibConfiguration property value on an EventSubscriber or ConfigManager. You will have to change the various parts to match your system:
+
+```
+connect_string=user=hdb-user password=password host=hdb-database port=5432 dbname=hdb
+logging_level=debug
+log_file=true
+log_syslog=false
+log_console=false
+libname=libhdb++timescale.so
+log_file_name=/tmp/hdb/es-name.log
+```
\ No newline at end of file
diff --git a/src/AttributeName.cpp b/src/AttributeName.cpp
index 97d735b5811bc97c0f178aab653c98e6bf4fb89e..b703ee77efb2edf8ed0d15734a0c64519c53ee30 100644
--- a/src/AttributeName.cpp
+++ b/src/AttributeName.cpp
@@ -276,7 +276,7 @@ void AttributeName::validate()
 
 //=============================================================================
 //=============================================================================
-void AttributeName::print(std::ostream &os) const
+void AttributeName::print(ostream &os) const
 {
     os << "AttributeName(_fqdn_attr_name: " << _fqdn_attr_name << ")";
 }
@@ -290,6 +290,19 @@ AttributeName &AttributeName::operator=(const AttributeName &other)
 
     // now copy the fqdn, we do not copy the cache
     _fqdn_attr_name = other._fqdn_attr_name;
+    return *this;
+}
+
+//=============================================================================
+//=============================================================================
+AttributeName &AttributeName::operator=(AttributeName &&other) noexcept
+{
+    // clear the cache
+    clear();
+
+    // now move the fqdn, we do not copy the cache
+    _fqdn_attr_name = move(other._fqdn_attr_name);
+    return *this;
 }
 
 } // namespace hdbpp_internal
diff --git a/src/AttributeName.hpp b/src/AttributeName.hpp
index 0e19bb3516b99b6c1da4cb29dafe263df079475d..809400a51691095d52a3c3a353a7286884222cc1 100644
--- a/src/AttributeName.hpp
+++ b/src/AttributeName.hpp
@@ -27,9 +27,6 @@
 
 namespace hdbpp_internal
 {
-/// @class AttributeName
-/// @brief Represents a fully qualified domain name (FQDN) for a device server attribute.
-/// @details
 /// Represents a FQDN for a device server attribute. The AttributeName
 /// class must be primed with a valid fully qualified domain attribute name. From
 /// this name the class can extract various fields for the user. Each field is cached,
@@ -42,80 +39,35 @@ class AttributeName
 public:
     // TODO Test all exceptions
 
-    /// @brief Default constructor
     AttributeName() = default;
-
-    /// @brief Default move constructor
     AttributeName(AttributeName &&) = default;
-
-    /// @brief Copy constructor
-    /// @param attr_name AttributeName to construct from
+    ~AttributeName() = default;
     AttributeName(const AttributeName &attr_name) { *this = attr_name; }
-
-    /// @brief Construct an AttributeName object
-    /// @param fqdn_attr_name FQDN attribute name
     AttributeName(const std::string &fqdn_attr_name);
 
-    /// @brief Return the fully qualified attribute name the object was created with.
-    /// @return FQDN attribute name
     const std::string &fqdnAttributeName() const noexcept { return _fqdn_attr_name; }
-
-    /// @brief Return the full attribute name extracted from the fully qualified attribute name.
-    /// @return Full attribute name
-    /// @throw std::invalid_argument
     const std::string &fullAttributeName();
 
-    /// @brief Return the tango host extracted from the fully qualified attribute name.
-    /// @return Tango Host name
-    /// @throw std::invalid_argument
+    // tango host info
     const std::string &tangoHost();
-
-    /// @brief Return the tango host with the domain, i.e. "esrf.fr", appended
-    /// @return Tango Host with domain name appended
-    /// @throw std::invalid_argument
     const std::string &tangoHostWithDomain();
 
-    /// @brief Return the domain element of the full attribute name
-    /// @throw std::invalid_argument
+    // attribute name elements
     const std::string &domain();
-
-    /// @brief Return the family element of the full attribute name
-    /// @throw std::invalid_argument
     const std::string &family();
-
-    /// @brief Return the member element of the full attribute name
-    /// @throw std::invalid_argument
     const std::string &member();
-
-    /// @brief Return the name element of the full attribute name
-    /// @throw std::invalid_argument
     const std::string &name();
 
-    /// @brief Set the contained attribute name
-    /// @param fqdn_attr_name FQDN attribute name
+    // utility functions
     void set(const std::string &fqdn_attr_name);
-
-    /// @brief Clear attribute name and any internal cached items
     void clear() noexcept;
-
-    /// @brief Return the status of the AttributeName setting.
-    /// @return True if empty, False otherwise
     bool empty() const noexcept { return _fqdn_attr_name.empty(); }
-
-    /// @brief Print the AttributeName object to the stream
     void print(std::ostream &os) const;
 
-    /// @brief Equality operator
-    /// @return True if same, False otherwise
     bool operator==(const AttributeName &other) const { return _fqdn_attr_name == other._fqdn_attr_name; }
-
-    /// @brief Inequality operator
-    /// @return True if different, False otherwise
     bool operator!=(const AttributeName &other) const { return !(_fqdn_attr_name == other._fqdn_attr_name); }
-
-    /// @brief Copy operator
-    /// @return Reference to the current AttributeName
     AttributeName &operator=(const AttributeName &other);
+    AttributeName &operator=(AttributeName &&other) noexcept;
 
 private:
     // extract the full attribute name, i.e. domain/family/member/name
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index c7b1a9665d7629bde5b39fdb8e06b3d6a5ebbbc5..41698efbb491e8274a8f0735694283cf472e9991 100755
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -3,6 +3,7 @@ cmake_minimum_required(VERSION 3.6)
 # source files
 set(SRC_FILES ${SRC_FILES}
     ${CMAKE_CURRENT_SOURCE_DIR}/AttributeName.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/AttributeName.hpp
     ${CMAKE_CURRENT_SOURCE_DIR}/AttributeTraits.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/HdbppTimescaleDb.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/LibUtils.cpp
diff --git a/src/ColumnCache.hpp b/src/ColumnCache.hpp
index c451eb76bd0fc32c464af39c8ebe05fff0b90a0f..97d83cc4778b03a0714a3371581f0ad933a7c13f 100644
--- a/src/ColumnCache.hpp
+++ b/src/ColumnCache.hpp
@@ -38,9 +38,9 @@ namespace pqxx_conn
     {
     public:
         ColumnCache(std::shared_ptr<pqxx::connection> conn,
-            const std::string &table_name,
-            const std::string &column_name,
-            const std::string &reference);
+            std::string table_name,
+            std::string column_name,
+            std::string reference);
 
         // query if the reference has a value, if its not cached it will be
         // loaded from the database
@@ -80,22 +80,19 @@ namespace pqxx_conn
         // cache of values to a reference, the unordered map is not sorted
         // so we do not loose time on each insert having it resorted
         std::unordered_map<TRef, TValue> _values;
-
-        // logging subsystem
-        std::shared_ptr<spdlog::logger> _logger;
     };
 
     //=============================================================================
     //=============================================================================
     template<typename TValue, typename TRef>
     ColumnCache<TValue, TRef>::ColumnCache(std::shared_ptr<pqxx::connection> conn,
-        const std::string &table_name,
-        const std::string &column_name,
-        const std::string &reference) :
-        _conn(conn),
-        _table_name(table_name),
-        _column_name(column_name),
-        _reference(reference)
+        std::string table_name,
+        std::string column_name,
+        std::string reference) :
+        _conn(std::move(conn)),
+        _table_name(std::move(table_name)),
+        _column_name(std::move(column_name)),
+        _reference(std::move(reference))
     {
         assert(_conn != nullptr);
         assert(!_table_name.empty());
@@ -106,9 +103,7 @@ namespace pqxx_conn
         _fetch_all_query_name = _column_name + _table_name + _reference + "_all";
         _fetch_id_query_name = _column_name + _table_name + _reference + "_id";
 
-        _logger = spdlog::get(LibLoggerName);
-
-        _logger->trace("Cache created for table: {} using columns {}/{}", _table_name, _column_name, _reference);
+        spdlog::trace("Cache created for table: {} using columns {}/{}", _table_name, _column_name, _reference);
     }
 
     //=============================================================================
@@ -131,9 +126,9 @@ namespace pqxx_conn
                 if (!tx.prepared(_fetch_all_query_name).exists())
                 {
                     tx.conn().prepare(_fetch_all_query_name,
-                        QueryBuilder::fetchAllValuesQuery(_column_name, _table_name, _reference));
+                        QueryBuilder::fetchAllValuesStatement(_column_name, _table_name, _reference));
 
-                    _logger->trace("Created prepared statement for: {}", _fetch_all_query_name);
+                    spdlog::trace("Created prepared statement for: {}", _fetch_all_query_name);
                 }
 
                 auto result = tx.exec_prepared(_fetch_all_query_name);
@@ -143,7 +138,7 @@ namespace pqxx_conn
                 for (const auto &row : result)
                     _values.insert({row[1].template as<TRef>(), row[0].template as<TValue>()});
 
-                _logger->debug("Loaded: {} values into cache", _values.size());
+                spdlog::debug("Loaded: {} values into cache", _values.size());
             });
         }
         catch (const pqxx::pqxx_exception &ex)
@@ -151,9 +146,9 @@ namespace pqxx_conn
             string msg {"The database transaction failed. Unable to fetchAll for column: " + _column_name +
                 " in table: " + _table_name + ". Error: " + ex.base().what()};
 
-            _logger->error("Error: An unexpected error occurred when trying to run the database query");
-            _logger->error("Caught error: \"{}\"", ex.base().what());
-            _logger->error("Throwing storage error with message: \"{}\"", msg);
+            spdlog::error("Error: An unexpected error occurred when trying to run the database query");
+            spdlog::error("Caught error: \"{}\"", ex.base().what());
+            spdlog::error("Throwing storage error with message: \"{}\"", msg);
 
             Tango::Except::throw_exception("Storage Error", msg, LOCATION_INFO);
         }
@@ -170,12 +165,8 @@ namespace pqxx_conn
         // need to go to the database
         auto value_iter = _values.find(reference);
 
-        if (value_iter != _values.end())
-        {
-            // found a cached value, return asap
-            return true;
-        }
-        else
+        // not found, search the database
+        if (value_iter == _values.end())
         {
             try
             {
@@ -186,15 +177,17 @@ namespace pqxx_conn
 
                     if (!tx.prepared(_fetch_id_query_name).exists())
                     {
-                        tx.conn().prepare(
-                            _fetch_id_query_name, QueryBuilder::fetchValueQuery(_column_name, _table_name, _reference));
+                        tx.conn().prepare(_fetch_id_query_name,
+                            QueryBuilder::fetchValueStatement(_column_name, _table_name, _reference));
 
-                        _logger->trace("Created prepared statement for: {}", _fetch_id_query_name);
+                        spdlog::trace("Created prepared statement for: {}", _fetch_id_query_name);
                     }
 
                     auto result = tx.exec_prepared(_fetch_id_query_name, reference);
                     tx.commit();
 
+                    auto value_exists = false;
+
                     // no result is not an error, the value simply does not exist and its
                     // up to the caller to deal with the situation
                     if (!result.empty())
@@ -206,8 +199,8 @@ namespace pqxx_conn
                             auto value = result.at(0).at(0).template as<TValue>();
                             _values.insert({reference, value});
 
-                            _logger->debug("Cached value: \'{} \' with reference: \'{}\'", value, reference);
-                            return true;
+                            spdlog::debug(R"(Cached value: '{} ' with reference: '{}')", value, reference);
+                            value_exists = true;
                         }
                         else
                         {
@@ -216,7 +209,7 @@ namespace pqxx_conn
                         }
                     }
 
-                    return false;
+                    return value_exists;
                 });
             }
             catch (const pqxx::pqxx_exception &ex)
@@ -224,15 +217,15 @@ namespace pqxx_conn
                 string msg {"The database transaction failed. Unable to query column: " + _column_name +
                     " in table: " + _table_name + ". Error: " + ex.base().what()};
 
-                _logger->error("Error: An unexpected error occurred when trying to run the database query");
-                _logger->error("Caught error: \"{}\"", ex.base().what());
-                _logger->error("Throwing storage error with message: \"{}\"", msg);
+                spdlog::error("Error: An unexpected error occurred when trying to run the database query");
+                spdlog::error("Caught error: \"{}\"", ex.base().what());
+                spdlog::error("Throwing storage error with message: \"{}\"", msg);
 
                 Tango::Except::throw_exception("Storage Error", msg, LOCATION_INFO);
             }
         }
 
-        return false;
+        return true;
     }
 
     //=============================================================================
@@ -248,7 +241,7 @@ namespace pqxx_conn
         {
             // this is pretty fatal, we can not store information if it does not exist
             string msg {"Unable to find a value in either the cache or database for reference: " + reference};
-            _logger->error("Error: {}", msg);
+            spdlog::error("Error: {}", msg);
             Tango::Except::throw_exception("Storage Error", msg, LOCATION_INFO);
         }
 
@@ -267,12 +260,12 @@ namespace pqxx_conn
         // the caller to deal with
         if (_values.find(reference) != _values.end())
         {
-            _logger->warn("Value already exists in cache, not caching. Value: {} with reference: {}", value, reference);
+            spdlog::warn("Value already exists in cache, not caching. Value: {} with reference: {}", value, reference);
             return;
         }
 
         _values.insert({reference, value});
-        _logger->debug("Cached new value: {} with reference: {} by request", value, reference);
+        spdlog::debug("Cached new value: {} with reference: {} by request", value, reference);
     }
 
     //=============================================================================
diff --git a/src/DbConnection.cpp b/src/DbConnection.cpp
index 00af4f9876e7270ee9cc3af79f46936678c9fc93..9fcd8768d83700fcc88a2857df2bd5c3d364fda8 100644
--- a/src/DbConnection.cpp
+++ b/src/DbConnection.cpp
@@ -33,13 +33,13 @@ namespace pqxx_conn
 {
     //=============================================================================
     //=============================================================================
-    DbConnection::DbConnection() { _logger = spdlog::get(LibLoggerName); }
+    DbConnection::DbConnection(DbStoreMethod db_store_method) : _db_store_method(db_store_method) {}
 
     //=============================================================================
     //=============================================================================
     void DbConnection::connect(const string &connect_string)
     {
-        _logger->info("Connecting to postgres database with string: \"{}\"", connect_string);
+        spdlog::info("Connecting to postgres database with string: \"{}\"", connect_string);
 
         // construct the database connection
         try
@@ -54,28 +54,29 @@ namespace pqxx_conn
 
             // mark the connected flag as true to cache this state
             _connected = true;
-            _logger->info("Connected to postgres successfully");
+            spdlog::info("Connected to postgres successfully");
         }
         catch (const pqxx::broken_connection &ex)
         {
             string msg {"Failed to connect to database. Exception: "};
             msg += ex.what();
 
-            _logger->error("Error: Connecting to postgres database with connect string: \"{}\"", connect_string);
-            _logger->error("Caught error: \"{}\"", ex.what());
-            _logger->error("Throwing connection error with message: \"{}\"", msg);
+            spdlog::error("Error: Connecting to postgres database with connect string: \"{}\"", connect_string);
+            spdlog::error("Caught error: \"{}\"", ex.what());
+            spdlog::error("Throwing connection error with message: \"{}\"", msg);
             Tango::Except::throw_exception("Connection Error", msg, LOCATION_INFO);
         }
 
         // now create and connect the cache objects to the database connection, this
         // will destroy any existing cache objects managed by the unique pointers
-        _conf_id_cache = make_unique<ColumnCache<int, std::string>>(_conn, CONF_TABLE_NAME, CONF_COL_ID, CONF_COL_NAME);
+        _conf_id_cache = make_unique<ColumnCache<int, std::string>>(
+            _conn, schema::ConfTableName, schema::ConfColId, schema::ConfColName);
 
         _error_desc_id_cache = make_unique<ColumnCache<int, std::string>>(
-            _conn, ERR_TABLE_NAME, ERR_COL_ID, ERR_COL_ERROR_DESC);
+            _conn, schema::ErrTableName, schema::ErrColId, schema::ErrColErrorDesc);
 
         _event_id_cache = make_unique<ColumnCache<int, std::string>>(
-            _conn, HISTORY_EVENT_TABLE_NAME, HISTORY_EVENT_COL_EVENT_ID, HISTORY_EVENT_COL_EVENT);
+            _conn, schema::HistoryEventTableName, schema::HistoryEventColEventId, schema::HistoryEventColEvent);
     }
 
     //=============================================================================
@@ -96,7 +97,7 @@ namespace pqxx_conn
 
         // stop attempts to use the connection
         _connected = false;
-        _logger->debug("Disconnected from the postgres database");
+        spdlog::debug("Disconnected from the postgres database");
     }
 
     //=============================================================================
@@ -121,7 +122,7 @@ namespace pqxx_conn
         assert(_error_desc_id_cache != nullptr);
         assert(_event_id_cache != nullptr);
 
-        _logger->trace("Storing new attribute {} of type {}", full_attr_name, traits);
+        spdlog::trace("Storing new attribute {} of type {}", full_attr_name, traits);
 
         checkConnection(LOCATION_INFO);
 
@@ -132,9 +133,9 @@ namespace pqxx_conn
             string msg {
                 "This attribute [" + full_attr_name + "] already exists in the database. Unable to add it again."};
 
-            _logger->error("Error: The attribute already exists in the database and can not be added again");
-            _logger->error("Attribute details. Name: {} traits: {}", full_attr_name, traits);
-            _logger->error("Throwing consistency error with message: \"{}\"", msg);
+            spdlog::error("Error: The attribute already exists in the database and can not be added again");
+            spdlog::error("Attribute details. Name: {} traits: {}", full_attr_name, traits);
+            spdlog::error("Throwing consistency error with message: \"{}\"", msg);
             Tango::Except::throw_exception("Consistency Error", msg, LOCATION_INFO);
         }
 
@@ -146,14 +147,14 @@ namespace pqxx_conn
 
                 if (!tx.prepared(StoreAttribute).exists())
                 {
-                    tx.conn().prepare(StoreAttribute, QueryBuilder::storeAttributeQuery());
-                    _logger->trace("Created prepared statement for: {}", StoreAttribute);
+                    tx.conn().prepare(StoreAttribute, QueryBuilder::storeAttributeStatement());
+                    spdlog::trace("Created prepared statement for: {}", StoreAttribute);
                 }
 
                 // execute the statement with the expectation that we get a row back
                 auto row = tx.exec_prepared1(StoreAttribute,
                     full_attr_name,
-                    _query_builder.tableName(traits),
+                    QueryBuilder::tableName(traits),
                     control_system,
                     att_domain,
                     att_family,
@@ -171,7 +172,7 @@ namespace pqxx_conn
                 return row.at(0).as<int>();
             });
 
-            _logger->debug("Stored new attribute {} of type {} with db id: {}", full_attr_name, traits, conf_id);
+            spdlog::debug("Stored new attribute {} of type {} with db id: {}", full_attr_name, traits, conf_id);
 
             // cache the new conf id for future use
             _conf_id_cache->cacheValue(conf_id, full_attr_name);
@@ -180,7 +181,7 @@ namespace pqxx_conn
         {
             handlePqxxError("The attribute [" + full_attr_name + "] was not saved.",
                 ex.base().what(),
-                QueryBuilder::storeAttributeQuery(),
+                QueryBuilder::storeAttributeStatement(),
                 LOCATION_INFO);
         }
     }
@@ -196,7 +197,7 @@ namespace pqxx_conn
         assert(_error_desc_id_cache != nullptr);
         assert(_event_id_cache != nullptr);
 
-        _logger->trace("Storing history event {} for attribute {}", event, full_attr_name);
+        spdlog::trace("Storing history event {} for attribute {}", event, full_attr_name);
 
         checkConnection(LOCATION_INFO);
         checkAttributeExists(full_attr_name, LOCATION_INFO);
@@ -210,10 +211,10 @@ namespace pqxx_conn
             string msg {
                 "The event [" + event + "] is missing in both the cache and database, this is an unrecoverable error."};
 
-            _logger->error(
+            spdlog::error(
                 "Event found missing, this occurred when storing event: {} for attribute: {}", event, full_attr_name);
 
-            _logger->error("Throwing consistency error with message: \"{}\"", msg);
+            spdlog::error("Throwing consistency error with message: \"{}\"", msg);
             Tango::Except::throw_exception("Consistency Error", msg, LOCATION_INFO);
         }
 
@@ -225,8 +226,8 @@ namespace pqxx_conn
 
                 if (!tx.prepared(StoreHistoryEvent).exists())
                 {
-                    tx.conn().prepare(StoreHistoryEvent, QueryBuilder::storeHistoryEventQuery());
-                    _logger->trace("Created prepared statement for: {}", StoreHistoryEvent);
+                    tx.conn().prepare(StoreHistoryEvent, QueryBuilder::storeHistoryEventStatement());
+                    spdlog::trace("Created prepared statement for: {}", StoreHistoryEvent);
                 }
 
                 // expect no result, this is an insert only query
@@ -234,13 +235,13 @@ namespace pqxx_conn
                 tx.commit();
             });
 
-            _logger->debug("Stored event {} and for attribute {}", event, full_attr_name);
+            spdlog::debug("Stored event {} and for attribute {}", event, full_attr_name);
         }
         catch (const pqxx::pqxx_exception &ex)
         {
             handlePqxxError("The attribute [" + full_attr_name + "] event [" + event + "] was not saved.",
                 ex.base().what(),
-                QueryBuilder::storeHistoryEventQuery(),
+                QueryBuilder::storeHistoryEventStatement(),
                 LOCATION_INFO);
         }
     }
@@ -260,24 +261,29 @@ namespace pqxx_conn
         const string &description)
     {
         assert(!full_attr_name.empty());
-        assert(!label.empty());
-        assert(!unit.empty());
-        assert(!standard_unit.empty());
-        assert(!display_unit.empty());
-        assert(!format.empty());
-        assert(!archive_rel_change.empty());
-        assert(!archive_abs_change.empty());
-        assert(!archive_period.empty());
-        assert(!description.empty());
         assert(_conn != nullptr);
         assert(_conf_id_cache != nullptr);
         assert(_error_desc_id_cache != nullptr);
         assert(_event_id_cache != nullptr);
 
-        _logger->trace("Storing parameter event for attribute {}", full_attr_name);
+        spdlog::trace("Storing parameter event for attribute {}", full_attr_name);
 
-        _logger->trace("Parmater event data: event_time {}, label {}, unit {}, standard_unit {}, display_unit {}, "
-                       "format {}, archive_rel_change {}, archive_abs_change {}, archive_period {}, description {}",
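+        // rather than asserting on empty parameters, log a warning so the event is still stored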
+        auto check_parameter = [](auto &name, auto &value) {
+            if (value.empty())
+                spdlog::warn("Parameter {} is empty. Please set in the device server", name);
+        };
+
+        check_parameter("label", label);
+        check_parameter("unit", unit);
+        check_parameter("standard_unit", standard_unit);
+        check_parameter("display_unit", display_unit);
+        check_parameter("archive_rel_change", archive_rel_change);
+        check_parameter("archive_abs_change", archive_abs_change);
+        check_parameter("archive_period", archive_period);
+        check_parameter("description", description);
+
+        spdlog::trace("Parmater event data: event_time {}, label {}, unit {}, standard_unit {}, display_unit {}, "
+                      "format {}, archive_rel_change {}, archive_abs_change {}, archive_period {}, description {}",
             event_time,
             label,
             unit,
@@ -300,8 +306,8 @@ namespace pqxx_conn
 
                 if (!tx.prepared(StoreParameterEvent).exists())
                 {
-                    tx.conn().prepare(StoreParameterEvent, QueryBuilder::storeParameterEventQuery());
-                    _logger->trace("Created prepared statement for: {}", StoreParameterEvent);
+                    tx.conn().prepare(StoreParameterEvent, QueryBuilder::storeParameterEventStatement());
+                    spdlog::trace("Created prepared statement for: {}", StoreParameterEvent);
                 }
 
                 // no result expected
@@ -321,13 +327,13 @@ namespace pqxx_conn
                 tx.commit();
             });
 
-            _logger->debug("Stored parameter event and for attribute {}", full_attr_name);
+            spdlog::debug("Stored parameter event and for attribute {}", full_attr_name);
         }
         catch (const pqxx::pqxx_exception &ex)
         {
             handlePqxxError("The attribute [" + full_attr_name + "] parameter event was not saved.",
                 ex.base().what(),
-                QueryBuilder::storeParameterEventQuery(),
+                QueryBuilder::storeParameterEventStatement(),
                 LOCATION_INFO);
         }
     }
@@ -348,7 +354,7 @@ namespace pqxx_conn
         assert(_error_desc_id_cache != nullptr);
         assert(_event_id_cache != nullptr);
 
-        _logger->trace("Storing error message event for attribute {}. Quality: {}. Error message: \"{}\"",
+        spdlog::trace("Storing error message event for attribute {}. Quality: {}. Error message: \"{}\"",
             full_attr_name,
             quality,
             error_msg);
@@ -367,11 +373,11 @@ namespace pqxx_conn
             string msg {"The error message [" + error_msg +
                 "] is missing in both the cache and database, this is an unrecoverable error."};
 
-            _logger->error("Error message found missing, this occurred when storing msg: \"{}\" for attribute: {}",
+            spdlog::error("Error message found missing, this occurred when storing msg: \"{}\" for attribute: {}",
                 error_msg,
                 full_attr_name);
 
-            _logger->error("Throwing consistency error with message: \"{}\"", msg);
+            spdlog::error("Throwing consistency error with message: \"{}\"", msg);
             Tango::Except::throw_exception("Consistency Error", msg, LOCATION_INFO);
         }
 
@@ -384,9 +390,8 @@ namespace pqxx_conn
                 if (!tx.prepared(_query_builder.storeDataEventErrorName(traits)).exists())
                 {
                     tx.conn().prepare(_query_builder.storeDataEventErrorName(traits),
-                        _query_builder.storeDataEventErrorQuery(traits));
-                    _logger->trace(
-                        "Created prepared statement for: {}", _query_builder.storeDataEventErrorName(traits));
+                        _query_builder.storeDataEventErrorStatement(traits));
+                    spdlog::trace("Created prepared statement for: {}", _query_builder.storeDataEventErrorName(traits));
                 }
 
                 // no result expected
@@ -421,7 +426,7 @@ namespace pqxx_conn
         checkConnection(LOCATION_INFO);
         checkAttributeExists(full_attr_name, LOCATION_INFO);
 
-        _logger->trace("Fetching last history event for attribute: {}", full_attr_name);
+        spdlog::trace("Fetching last history event for attribute: {}", full_attr_name);
 
         // the result
         string last_event;
@@ -434,7 +439,7 @@ namespace pqxx_conn
                 pqxx::work tx {(*_conn), FetchLastHistoryEvent};
 
                 if (!tx.prepared(FetchLastHistoryEvent).exists())
-                    tx.conn().prepare(FetchLastHistoryEvent, QueryBuilder::fetchLastHistoryEventQuery());
+                    tx.conn().prepare(FetchLastHistoryEvent, QueryBuilder::fetchLastHistoryEventStatement());
 
                 // unless this is the first time this attribute event history has
                 // been queried, then we expect something back
@@ -452,7 +457,7 @@ namespace pqxx_conn
         {
             handlePqxxError("Can not return last event for attribute [" + full_attr_name + "].",
                 ex.base().what(),
-                QueryBuilder::fetchLastHistoryEventQuery(),
+                QueryBuilder::fetchLastHistoryEventStatement(),
                 LOCATION_INFO);
         }
 
@@ -471,11 +476,11 @@ namespace pqxx_conn
 
         if (_conf_id_cache->valueExists(full_attr_name))
         {
-            _logger->trace("Query attribute archived returns true for: {}", full_attr_name);
+            spdlog::trace("Query attribute archived returns true for: {}", full_attr_name);
             return true;
         }
 
-        _logger->trace("Query attribute archived returns false for: {}", full_attr_name);
+        spdlog::trace("Query attribute archived returns false for: {}", full_attr_name);
         return false;
     }
 
@@ -492,7 +497,7 @@ namespace pqxx_conn
         checkConnection(LOCATION_INFO);
         checkAttributeExists(full_attr_name, LOCATION_INFO);
 
-        _logger->trace("Fetching attribute traits for attribute: {}", full_attr_name);
+        spdlog::trace("Fetching attribute traits for attribute: {}", full_attr_name);
 
         AttributeTraits traits;
 
@@ -504,7 +509,7 @@ namespace pqxx_conn
                 pqxx::work tx {(*_conn), FetchAttributeTraits};
 
                 if (!tx.prepared(FetchAttributeTraits).exists())
-                    tx.conn().prepare(FetchAttributeTraits, QueryBuilder::fetchAttributeTraitsQuery());
+                    tx.conn().prepare(FetchAttributeTraits, QueryBuilder::fetchAttributeTraitsStatement());
 
                 // always expect a result, the type info for the attribute
                 auto row = tx.exec_prepared1(FetchAttributeTraits, full_attr_name);
@@ -519,7 +524,7 @@ namespace pqxx_conn
         {
             handlePqxxError("Can not return the type traits for attribute [" + full_attr_name + "].",
                 ex.base().what(),
-                QueryBuilder::fetchAttributeTraitsQuery(),
+                QueryBuilder::fetchAttributeTraitsStatement(),
                 LOCATION_INFO);
         }
 
@@ -530,7 +535,7 @@ namespace pqxx_conn
     //=============================================================================
     void DbConnection::storeEvent(const std::string &full_attr_name, const std::string &event)
     {
-        _logger->debug("Event {} needs adding to the database, by request of attribute {}", event, full_attr_name);
+        spdlog::debug("Event {} needs adding to the database, by request of attribute {}", event, full_attr_name);
 
         try
         {
@@ -541,8 +546,8 @@ namespace pqxx_conn
 
                 if (!tx.prepared(StoreHistoryString).exists())
                 {
-                    tx.conn().prepare(StoreHistoryString, QueryBuilder::storeHistoryStringQuery());
-                    _logger->trace("Created prepared statement for: {}", StoreHistoryString);
+                    tx.conn().prepare(StoreHistoryString, QueryBuilder::storeHistoryStringStatement());
+                    spdlog::trace("Created prepared statement for: {}", StoreHistoryString);
                 }
 
                 auto row = tx.exec_prepared1(StoreHistoryString, event);
@@ -552,7 +557,7 @@ namespace pqxx_conn
                 return row.at(0).as<int>();
             });
 
-            _logger->debug(
+            spdlog::debug(
                 "Stored event {} for attribute {} and got database id for it: {}", event, full_attr_name, event_id);
 
             // cache the new event id for future use
@@ -562,7 +567,7 @@ namespace pqxx_conn
         {
             handlePqxxError("The event [" + event + "] for attribute [" + full_attr_name + "] was not saved.",
                 ex.base().what(),
-                QueryBuilder::storeHistoryStringQuery(),
+                QueryBuilder::storeHistoryStringStatement(),
                 LOCATION_INFO);
         }
     }
@@ -571,7 +576,7 @@ namespace pqxx_conn
     //=============================================================================
     void DbConnection::storeErrorMsg(const std::string &full_attr_name, const std::string &error_msg)
     {
-        _logger->debug(
+        spdlog::debug(
             "Error message \"{}\" needs adding to the database, by request of attribute {}", error_msg, full_attr_name);
 
         try
@@ -582,8 +587,8 @@ namespace pqxx_conn
 
                 if (!tx.prepared(StoreErrorString).exists())
                 {
-                    tx.conn().prepare(StoreErrorString, QueryBuilder::storeErrorQuery());
-                    _logger->trace("Created prepared statement for: {}", StoreErrorString);
+                    tx.conn().prepare(StoreErrorString, QueryBuilder::storeErrorStatement());
+                    spdlog::trace("Created prepared statement for: {}", StoreErrorString);
                 }
 
                 // expect a single row returned
@@ -594,7 +599,7 @@ namespace pqxx_conn
                 return row.at(0).as<int>();
             });
 
-            _logger->debug("Stored error message \"{}\" for attribute {} and got database id for it: {}",
+            spdlog::debug("Stored error message \"{}\" for attribute {} and got database id for it: {}",
                 error_msg,
                 full_attr_name,
                 error_id);
@@ -606,7 +611,7 @@ namespace pqxx_conn
         {
             handlePqxxError("The error string [" + error_msg + "] for attribute [" + full_attr_name + "] was not saved",
                 ex.base().what(),
-                QueryBuilder::storeErrorQuery(),
+                QueryBuilder::storeErrorStatement(),
                 LOCATION_INFO);
         }
     }
@@ -622,9 +627,9 @@ namespace pqxx_conn
             string msg {"This attribute [" + full_attr_name +
                 "] does not exist in the database. Unable to work with this attribute until it is added."};
 
-            _logger->error("Error: The attribute does not exist in the database, add it first.");
-            _logger->error("Attribute details. Name: {}", full_attr_name);
-            _logger->error("Throwing consistency error with message: \"{}\"", msg);
+            spdlog::error("Error: The attribute does not exist in the database, add it first.");
+            spdlog::error("Attribute details. Name: {}", full_attr_name);
+            spdlog::error("Throwing consistency error with message: \"{}\"", msg);
             Tango::Except::throw_exception("Consistency Error", msg, location);
         }
     }
@@ -638,10 +643,10 @@ namespace pqxx_conn
             string msg {
                 "Connection to database is closed. Ensure it has been opened before trying to use the connection."};
 
-            _logger->error(
+            spdlog::error(
                 "Error: The DbConnection is showing a closed connection status, open it before using store functions");
 
-            _logger->error("Throwing connection error with message: \"{}\"", msg);
+            spdlog::error("Throwing connection error with message: \"{}\"", msg);
             Tango::Except::throw_exception("Connection Error", msg, location);
         }
     }
@@ -652,12 +657,11 @@ namespace pqxx_conn
         const string &msg, const string &what, const string &query, const std::string &location)
     {
         string full_msg {"The database transaction failed. " + msg};
-        _logger->error("Error: An unexpected error occurred when trying to run the database query");
-        _logger->error("Caught error at: {} Error: \"{}\"", location, what);
-        _logger->error("Error: Failed query: {}", query);
-        _logger->error("Throwing storage error with message: \"{}\"", full_msg);
+        spdlog::error("Error: An unexpected error occurred when trying to run the database query");
+        spdlog::error("Caught error at: {} Error: \"{}\"", location, what);
+        spdlog::error("Error: Failed query: {}", query);
+        spdlog::error("Throwing storage error with message: \"{}\"", full_msg);
         Tango::Except::throw_exception("Storage Error", full_msg, location);
     }
-
 } // namespace pqxx_conn
 } // namespace hdbpp_internal
diff --git a/src/DbConnection.hpp b/src/DbConnection.hpp
index c1bfc0d88707b94c42b9da875478c606246ece7f..3749f86b67cb5a23341f906b8069d29d3cddc08a 100644
--- a/src/DbConnection.hpp
+++ b/src/DbConnection.hpp
@@ -43,11 +43,20 @@ namespace pqxx_conn
     {
     public:
         // TODO add options fields - json string
-        // TODO add error feedback
         // TODO add fetch DataType function
 
-        DbConnection();
-        virtual ~DbConnection() {}
+        // Sets the priority to use when accessing the database.
+        enum DbStoreMethod
+        {
+            // Compose insert strings and execute each in turn
+            InsertString,
+
+            // Where possible, use prepared statements, this is quicker than
+            // using strings
+            PreparedStatement
+        };
+
+        DbConnection(DbStoreMethod db_store_method);
 
         // connection API
         void connect(const string &connect_string) override;
@@ -145,8 +154,8 @@ namespace pqxx_conn
         std::unique_ptr<ColumnCache<int, std::string>> _event_id_cache;
         std::unique_ptr<ColumnCache<int, int>> _type_id_cache;
 
-        // logging subsystem
-        std::shared_ptr<spdlog::logger> _logger;
+        // configured db access method
+        DbStoreMethod _db_store_method;
     };
 } // namespace pqxx_conn
 } // namespace hdbpp_internal
diff --git a/src/DbConnection.tpp b/src/DbConnection.tpp
index 6c4e14b618621bcdda25e1bf8e15d83cb814168b..00d0e8657b1b7f984968eead312e985355e1cb12 100644
--- a/src/DbConnection.tpp
+++ b/src/DbConnection.tpp
@@ -33,38 +33,42 @@ namespace pqxx_conn
         //=============================================================================
         //=============================================================================
         template<typename T>
-        struct Preprocess
-        {
-            static void run(std::unique_ptr<std::vector<T>> &, pqxx::work &) {}
-        };
-
-        //=============================================================================
-        //=============================================================================
-        template<>
-        struct Preprocess<std::string>
+        struct Store
         {
-            static void run(std::unique_ptr<std::vector<std::string>> &value, pqxx::work &tx)
+            static void run(std::unique_ptr<std::vector<T>> &value,
+                const AttributeTraits &traits,
+                pqxx::prepare::invocation &inv,
+                pqxx::work & /*unused*/)
             {
-                for (auto &str : *value)
-                    str = tx.quote(str);
+                // for a scalar, store the first element of the vector,
+                // we do not expect more than 1 element, for an array, store
+                // the entire vector
+                if (traits.isScalar())
+                    inv((*value)[0]);
+                else
+                    inv(*value);
             }
         };
 
-        //=============================================================================
-        //=============================================================================
-        template<typename T>
-        struct Store
+        template<>
+        struct Store<std::string>
         {
-            static void run(
-                std::unique_ptr<std::vector<T>> &value, pqxx::prepare::invocation &inv, const AttributeTraits &traits)
+            static void run(std::unique_ptr<std::vector<std::string>> &value,
+                const AttributeTraits &traits,
+                pqxx::prepare::invocation &inv,
+                pqxx::work &tx)
             {
-                // for a scalar, store the first element of the vector,
-                // we do not expect more than 1 element, for an array, store
-                // the entire vector
                 if (traits.isScalar())
                     inv((*value)[0]);
                 else
+                {
+                    // a string needs escaping to be stored via this method, so it does not cause
+                    // an error in the prepared statement
+                    for (auto &str : *value)
+                        str = tx.esc(str);
+
                     inv(*value);
+                }
             }
         };
 
@@ -74,8 +78,9 @@ namespace pqxx_conn
         struct Store<bool>
         {
             static void run(std::unique_ptr<std::vector<bool>> &value,
+                const AttributeTraits &traits,
                 pqxx::prepare::invocation &inv,
-                const AttributeTraits &traits)
+                pqxx::work & /*unused*/)
             {
                 // a vector<bool> is not actually a vector<bool>, rather its some kind of bitfield. When
                 // trying to return an element, we appear to get some kind of bitfield reference (?),
@@ -105,11 +110,11 @@ namespace pqxx_conn
         assert(!full_attr_name.empty());
         assert(traits.isValid());
 
-        _logger->trace("Storing data event for attribute {} with traits {}, value_r valid: {}, value_w valid: {}",
+        spdlog::trace("Storing data event for attribute {} with traits {}, value_r valid: {}, value_w valid: {}",
             full_attr_name,
             traits,
-            value_r->size() > 0,
-            value_w->size() > 0);
+            !value_r->empty(),
+            !value_w->empty());
 
         checkConnection(LOCATION_INFO);
         checkAttributeExists(full_attr_name, LOCATION_INFO);
@@ -119,53 +124,69 @@ namespace pqxx_conn
             return pqxx::perform([&, this]() {
                 pqxx::work tx {(*_conn), StoreDataEvent};
 
-                // prepare as a prepared statement, we are going to use these
-                // queries often
-                if (!tx.prepared(_query_builder.storeDataEventName(traits)).exists())
+                // there is a single special case here: arrays of strings need a different syntax
+                // to be stored, to avoid the quoting. It's likely we will need more for DevEncoded and DevEnum
+                if (_db_store_method == DbStoreMethod::InsertString ||
+                    (traits.isArray() && traits.type() == Tango::DEV_STRING))
                 {
-                    tx.conn().prepare(
-                        _query_builder.storeDataEventName(traits), _query_builder.storeDataEventQuery<T>(traits));
+                    auto query = _query_builder.storeDataEventString<T>(
+                        pqxx::to_string(_conf_id_cache->value(full_attr_name)),
+                        pqxx::to_string(event_time),
+                        pqxx::to_string(quality),
+                        value_r,
+                        value_w,
+                        traits);
+
+                    tx.exec0(query);
                 }
-
-                // get the pqxx prepared statement invocation object to allow us to
-                // bind each parameter in turn, this gives us the flexibility to bind
-                // conditional parameters (as long as the query string matches)
-                auto inv = tx.prepared(_query_builder.storeDataEventName(traits));
-
-                // this lambda stores the data value correctly into the invocation,
-                // we must treat scalar/spectrum in different ways, one is a single
-                // element and the other an array. Further, the unique_ptr may be
-                // empty and signify a null should be stored in the column instead
-                auto store_value = [&tx, &inv, &traits](auto &value) {
-                    if (value && value->size() > 0)
-                    {
-                        // this ensures strings are quoted and escaped, other types are ignored
-                        store_data_utils::Preprocess<T>::run(value, tx);
-                        store_data_utils::Store<T>::run(value, inv, traits);
-                    }
-                    else
+                else
+                {
+                    // prepare as a prepared statement, since we are going to use
+                    // these queries often
+                    if (!tx.prepared(_query_builder.storeDataEventName(traits)).exists())
                     {
-                        // no value was given for this field, simply add a null
-                        // instead, this allows invalid quality attributes to be saved
-                        // with no data
-                        inv();
+                        tx.conn().prepare(_query_builder.storeDataEventName(traits),
+                            _query_builder.storeDataEventStatement<T>(traits));
                     }
-                };
-
-                // bind all the parameters
-                inv(_conf_id_cache->value(full_attr_name));
-                inv(event_time);
-
-                if (traits.hasReadData())
-                    store_value(value_r);
 
-                if (traits.hasWriteData())
-                    store_value(value_w);
-
-                inv(quality);
-
-                // execute
-                inv.exec();
+                    // get the pqxx prepared statement invocation object to allow us to
+                    // bind each parameter in turn; this gives us the flexibility to bind
+                    // conditional parameters (as long as the query string matches)
+                    auto inv = tx.prepared(_query_builder.storeDataEventName(traits));
+
+                    // this lambda stores the data value correctly into the invocation; we must
+                    // treat scalar/spectrum in different ways, since one is a single element and
+                    // the other an array. Further, the unique_ptr may be empty, which signifies
+                    // that a null should be stored in the column instead
+                    auto store_value = [&tx, &traits, &inv](auto &value) {
+                        if (value && !value->empty())
+                        {
+                            store_data_utils::Store<T>::run(value, traits, inv, tx);
+                        }
+                        else
+                        {
+                            // no value was given for this field, simply add a null
+                            // instead, this allows invalid quality attributes to be saved
+                            // with no data
+                            inv();
+                        }
+                    };
+
+                    // bind all the parameters
+                    inv(_conf_id_cache->value(full_attr_name));
+                    inv(event_time);
+
+                    if (traits.hasReadData())
+                        store_value(value_r);
+
+                    if (traits.hasWriteData())
+                        store_value(value_w);
+
+                    inv(quality);
+
+                    // execute
+                    inv.exec();
+                }
 
                 // commit the result
                 tx.commit();
@@ -175,11 +196,10 @@ namespace pqxx_conn
         {
             handlePqxxError("The attribute [" + full_attr_name + "] data event was not saved.",
                 ex.base().what(),
-                _query_builder.storeDataEventQuery<T>(traits),
+                _query_builder.storeDataEventStatement<T>(traits),
                 LOCATION_INFO);
         }
     }
-
 } // namespace pqxx_conn
 } // namespace hdbpp_internal
 #endif // _PSQL_CONNECTION_TPP
diff --git a/src/HdbppTimescaleDb.cpp b/src/HdbppTimescaleDb.cpp
index 164a99b041eecc698fd7b77204ff04dded38e277..99c7f3bdbdb7875fe2918d40e8b756f4e53bbcc3 100644
--- a/src/HdbppTimescaleDb.cpp
+++ b/src/HdbppTimescaleDb.cpp
@@ -104,10 +104,20 @@ HdbppTimescaleDb::HdbppTimescaleDb(const vector<string> &configuration)
     auto level = param_to_lower(HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "logging_level", false));
     auto log_file = HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "log_file", false);
     auto log_console = HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "log_console", false);
+    auto log_syslog = HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "log_syslog", false);
     auto log_file_name = HdbppTimescaleDbUtils::getConfigParam(libhdb_conf, "log_file_name", false);
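+
+    // Illustrative configuration entries for the parameters read above (both the key=value
+    // layout and the values shown here are assumptions for this example):
+    //   logging_level=debug
+    //   log_console=true
+    //   log_syslog=false
+    //   log_file=true
+    //   log_file_name=/var/log/hdbpp/hdbpp.log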
 
-    LogConfigurator::initLogging(
-        param_to_lower(log_file) == "true", param_to_lower(log_console) == "true", log_file_name);
+    // init the base logging system
+    LogConfigurator::initLogging();
+
+    if (param_to_lower(log_file) == "true")
+        LogConfigurator::initFileLogging(log_file_name);
+
+    if (param_to_lower(log_console) == "true")
+        LogConfigurator::initConsoleLogging();
+
+    if (param_to_lower(log_syslog) == "true")
+        LogConfigurator::initSyslogLogging();
 
     if (level == "error" || level.empty())
         LogConfigurator::setLoggingLevel(spdlog::level::level_enum::err);
@@ -125,7 +135,9 @@ HdbppTimescaleDb::HdbppTimescaleDb(const vector<string> &configuration)
         LogConfigurator::setLoggingLevel(spdlog::level::level_enum::err);
 
     spdlog::info("Logging level: {}", level);
-    spdlog::info("Logging to file: {}, logging to console: {}", log_file, log_console);
+    spdlog::info("Logging to console: {}", log_console);
+    spdlog::info("Logging to syslog: {}", log_syslog);
+    spdlog::info("Logging to file: {}", log_file);
     spdlog::info("Logfile (if any): {}", log_file_name);
 
     spdlog::info("Starting libhdbpp-timescale shared library...");
@@ -135,7 +147,7 @@ HdbppTimescaleDb::HdbppTimescaleDb(const vector<string> &configuration)
     spdlog::info("Mandatory config parameter connect_string: {}", connection_string);
 
     // allocate a connection to store data with
-    Conn = make_unique<pqxx_conn::DbConnection>();
+    Conn = make_unique<pqxx_conn::DbConnection>(pqxx_conn::DbConnection::DbStoreMethod::PreparedStatement);
 
     // now bring up the connection
     Conn->connect(connection_string);
@@ -271,5 +283,5 @@ AbstractDB *HdbppTimescaleDbFactory::create_db(vector<string> configuration)
 DBFactory *getDBFactory()
 {
     auto *factory = new hdbpp::HdbppTimescaleDbFactory();
-    return static_cast<DBFactory*>(factory);
+    return static_cast<DBFactory *>(factory);
 }
diff --git a/src/HdbppTxBase.hpp b/src/HdbppTxBase.hpp
index 57e2e29bd1491cbb9fa8681cdcc1c82711268cde..3716390a0837a831c609db79be3ccddd23681abb 100644
--- a/src/HdbppTxBase.hpp
+++ b/src/HdbppTxBase.hpp
@@ -43,7 +43,6 @@ class HdbppTxBase
 {
 public:
     HdbppTxBase(Conn &conn) : _conn(conn) {}
-    virtual ~HdbppTxBase() {}
 
     // simple feedback that the transaction was successfull. Most
     // errors are handled with exceptions which are thrown to the
diff --git a/src/HdbppTxDataEvent.hpp b/src/HdbppTxDataEvent.hpp
index 10c25b369f547a18b40aac91f87ae3acd7a3d440..47f352ea2c20fde3bbf0fa4c6e253fb6e77d11d1 100644
--- a/src/HdbppTxDataEvent.hpp
+++ b/src/HdbppTxDataEvent.hpp
@@ -37,7 +37,6 @@ private:
 
 public:
     HdbppTxDataEvent(Conn &conn) : HdbppTxDataEventBase<Conn, HdbppTxDataEvent>(conn) {}
-    virtual ~HdbppTxDataEvent() {}
 
     HdbppTxDataEvent<Conn> &withAttribute(Tango::DeviceAttribute *dev_attr)
     {
@@ -82,7 +81,7 @@ HdbppTxDataEvent<Conn> &HdbppTxDataEvent<Conn>::store()
         spdlog::error("Error: {}", msg);
         Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO);
     }
-    else if (!_dev_attr)
+    else if (_dev_attr == nullptr)
     {
         std::string msg {"Device Attribute is not set. Unable to complete the transaction."};
         msg += ". For attribute" + Base::attributeName().fqdnAttributeName();
diff --git a/src/HdbppTxDataEventBase.hpp b/src/HdbppTxDataEventBase.hpp
index 73fee32de0cadf31cdde475ac220a7dfef95aaff..8d0fac95574a25370e5b205c6ca8e5adaf559621 100644
--- a/src/HdbppTxDataEventBase.hpp
+++ b/src/HdbppTxDataEventBase.hpp
@@ -41,7 +41,6 @@ public:
     // TODO auto add new attribute feature
 
     HdbppTxDataEventBase(Conn &conn) : HdbppTxBase<Conn>(conn) {}
-    virtual ~HdbppTxDataEventBase() {}
 
     Derived<Conn> &withName(const std::string &fqdn_attr_name)
     {
@@ -75,7 +74,7 @@ public:
     }
 
     /// @brief Print the HdbppTxDataEventBase object to the stream
-    virtual void print(std::ostream &os) const noexcept override;
+    void print(std::ostream &os) const noexcept override;
 
 protected:
     // release the private data safely for the derived classes
diff --git a/src/HdbppTxDataEventError.hpp b/src/HdbppTxDataEventError.hpp
index b4274d99a8c01282de39177f28ec9a9e572f79a1..69820f1668cd723a60eda4a036acb37338030570 100644
--- a/src/HdbppTxDataEventError.hpp
+++ b/src/HdbppTxDataEventError.hpp
@@ -36,7 +36,6 @@ private:
 
 public:
     HdbppTxDataEventError(Conn &conn) : HdbppTxDataEventBase<Conn, HdbppTxDataEventError>(conn) {}
-    virtual ~HdbppTxDataEventError() {}
 
     HdbppTxDataEventError<Conn> &withError(const std::string &error_msg)
     {
diff --git a/src/HdbppTxHistoryEvent.hpp b/src/HdbppTxHistoryEvent.hpp
index abd6f8f1ece7ef51736c2a994327ca2993a8614a..95041f19e79717b3c7b4377958ab44ef64c90f88 100644
--- a/src/HdbppTxHistoryEvent.hpp
+++ b/src/HdbppTxHistoryEvent.hpp
@@ -38,7 +38,6 @@ public:
     // TODO make crash event configurable
 
     HdbppTxHistoryEvent(Conn &conn) : HdbppTxBase<Conn>(conn) {}
-    virtual ~HdbppTxHistoryEvent() {}
 
     HdbppTxHistoryEvent<Conn> &withName(const std::string &fqdn_attr_name)
     {
diff --git a/src/HdbppTxNewAttribute.hpp b/src/HdbppTxNewAttribute.hpp
index 063bf643ea99fe11d27d7cf494ecc19063d70790..5a01203db87ac0e0721a9747c0c6e7de208ecb8d 100644
--- a/src/HdbppTxNewAttribute.hpp
+++ b/src/HdbppTxNewAttribute.hpp
@@ -36,7 +36,6 @@ class HdbppTxNewAttribute : public HdbppTxBase<Conn>
 {
 public:
     HdbppTxNewAttribute(Conn &conn) : HdbppTxBase<Conn>(conn) {}
-    virtual ~HdbppTxNewAttribute() {}
 
     HdbppTxNewAttribute<Conn> &withName(const std::string &fqdn_attr_name)
     {
@@ -75,7 +74,7 @@ HdbppTxNewAttribute<Conn> &HdbppTxNewAttribute<Conn>::store()
     }
     else if (_traits.isInvalid())
     {
-        std::string msg {"AttributeTraits are invalid. Unable to complete the transaction. For attribute" +
+        std::string msg {"AttributeTraits are invalid. Unable to complete the transaction. For attribute: " +
             _attr_name.fqdnAttributeName()};
 
         spdlog::error("Error: {}", msg);
@@ -83,7 +82,7 @@ HdbppTxNewAttribute<Conn> &HdbppTxNewAttribute<Conn>::store()
     }
     else if (HdbppTxBase<Conn>::connection().isClosed())
     {
-        std::string msg {"The connection is reporting it is closed. Unable to store new attribute. For attribute" +
+        std::string msg {"The connection is reporting it is closed. Unable to store new attribute. For attribute: " +
             _attr_name.fqdnAttributeName()};
 
         spdlog::error("Error: {}", msg);
@@ -94,7 +93,7 @@ HdbppTxNewAttribute<Conn> &HdbppTxNewAttribute<Conn>::store()
     if (_traits.isImage())
     {
         std::string msg {
-            "Image type attributes are currently not supported. For attribute" + _attr_name.fqdnAttributeName()};
+            "Image type attributes are currently not supported. For attribute: " + _attr_name.fqdnAttributeName()};
 
         spdlog::error("Error: {}", msg);
         Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO);
@@ -103,8 +102,8 @@ HdbppTxNewAttribute<Conn> &HdbppTxNewAttribute<Conn>::store()
     // unsupported types
     if (_traits.type() == Tango::DEV_ENUM || _traits.type() == Tango::DEV_ENCODED)
     {
-        std::string msg {"Unsupported attribute type: " + tangoEnumToString(_traits.type()) + ". For attribute" +
-            _attr_name.fqdnAttributeName()};
+        std::string msg {"Unsupported attribute type: " + tangoEnumToString(_traits.type()) +
+            ". For attribute: " + _attr_name.fqdnAttributeName()};
 
         spdlog::error("Error: {}", msg);
         Tango::Except::throw_exception("Invalid Argument", msg, LOCATION_INFO);
@@ -123,7 +122,7 @@ HdbppTxNewAttribute<Conn> &HdbppTxNewAttribute<Conn>::store()
         {
             // oops, someone is trying to change types, this is not supported yet, throw an exception
             std::string msg {
-                "Attempt to add an attribute which is already stored with different type information. For attribute" +
+                "Attempt to add an attribute which is already stored with different type information. For attribute: " +
                 _attr_name.fqdnAttributeName()};
 
             spdlog::error("Error: {}", msg);
@@ -151,7 +150,7 @@ HdbppTxNewAttribute<Conn> &HdbppTxNewAttribute<Conn>::store()
         {
             // someone is trying to add the same attribute over and over?
             std::string msg {"The attribute already exists in the database. Can not add again. "};
-            spdlog::warn("Warning: {} For attribute {}", msg, _attr_name.fqdnAttributeName());
+            spdlog::warn("Warning: {} For attribute: {}", msg, _attr_name.fqdnAttributeName());
 
             // bad black box behaviour, this is not an error, in fact, the system
             // built top assume this undocumented behaviour!!
@@ -159,7 +158,7 @@ HdbppTxNewAttribute<Conn> &HdbppTxNewAttribute<Conn>::store()
     }
     else
     {
-        spdlog::info("Adding a new attribute to the system: {}", _attr_name.fqdnAttributeName());
+        spdlog::debug("Adding a new attribute to the system: {}", _attr_name.fqdnAttributeName());
 
         // attempt to store the new attribute into the database for the first time
         HdbppTxBase<Conn>::connection().storeAttribute(prepared_attr_name,
diff --git a/src/HdbppTxParameterEvent.hpp b/src/HdbppTxParameterEvent.hpp
index cd1d30692dbcc7cd6f98b1fbdb77ef42e5fa622a..709e3998851a7c3ab0abcddca3cca18daf771c65 100644
--- a/src/HdbppTxParameterEvent.hpp
+++ b/src/HdbppTxParameterEvent.hpp
@@ -37,7 +37,6 @@ class HdbppTxParameterEvent : public HdbppTxBase<Conn>
 {
 public:
     HdbppTxParameterEvent(Conn &conn) : HdbppTxBase<Conn>(conn) {}
-    virtual ~HdbppTxParameterEvent() {}
 
     HdbppTxParameterEvent<Conn> &withName(const std::string &fqdn_attr_name)
     {
diff --git a/src/LibUtils.cpp b/src/LibUtils.cpp
index 9be85ba0a6d3dcbb4acaf45d74f45c1357844226..84e1e98177bdd029598fd50a7ad11a7891e6ecf5 100644
--- a/src/LibUtils.cpp
+++ b/src/LibUtils.cpp
@@ -20,6 +20,7 @@
 #include "LibUtils.hpp"
 
 #include "spdlog/async.h"
+#include "spdlog/sinks/dist_sink.h"
 #include "spdlog/sinks/null_sink.h"
 #include "spdlog/sinks/rotating_file_sink.h"
 #include "spdlog/sinks/stdout_color_sinks.h"
@@ -134,47 +135,69 @@ ostream &operator<<(ostream &os, Tango::AttrQuality quality)
 
 //=============================================================================
 //=============================================================================
-void LogConfigurator::initLogging(bool enable_file, bool enable_console, const string &log_file_name)
+void LogConfigurator::initLogging()
 {
-    try
-    {
-        spdlog::init_thread_pool(8192, 1);
-
-        vector<spdlog::sink_ptr> sinks;
-
-        // attempt to create a rotating log files of size 10MB and 3 rotations
-        if (enable_file && !log_file_name.empty())
-            sinks.push_back(make_shared<spdlog::sinks::rotating_file_sink_mt>(log_file_name, 1024 * 1024 * 10, 3));
-
-        if (enable_console)
-            sinks.push_back(make_shared<spdlog::sinks::stdout_color_sink_mt>());
+    auto logger = spdlog::get(logging_utils::LibLoggerName);
 
-        if (sinks.empty())
-            sinks.push_back(make_shared<spdlog::sinks::null_sink_mt>());
-
-        auto logger = make_shared<spdlog::async_logger>(LibLoggerName,
-            sinks.begin(),
-            sinks.end(),
-            spdlog::thread_pool(),
-            spdlog::async_overflow_policy::overrun_oldest);
-
-        // set the logger as the default so it can be accessed all over the library
-        spdlog::register_logger(logger);
-        spdlog::flush_every(std::chrono::seconds(1));
-        spdlog::flush_on(spdlog::level::warn);
-        spdlog::set_default_logger(logger);
-
-        spdlog::debug("Initialised the logging system...");
+    if (!logger)
+    {
+        try
+        {
+            spdlog::init_thread_pool(8192, 1);
+
+            auto dist_sink = make_shared<spdlog::sinks::dist_sink_mt>();
+
+            auto logger = make_shared<spdlog::async_logger>(logging_utils::LibLoggerName,
+                dist_sink,
+                spdlog::thread_pool(),
+                spdlog::async_overflow_policy::overrun_oldest);
+
+            // set the logger as the default so it can be accessed all over the library
+            spdlog::register_logger(logger);
+            spdlog::flush_every(std::chrono::seconds(1));
+            spdlog::flush_on(spdlog::level::warn);
+            spdlog::set_default_logger(logger);
+        }
+        catch (const spdlog::spdlog_ex &ex)
+        {
+            string msg {"Failed to initialise the logging system, caught error: " + string(ex.what())};
+            cout << msg << endl;
+            Tango::Except::throw_exception("Runtime Error", msg, LOCATION_INFO);
+        }
+    }
+}
 
-        if (enable_file && !log_file_name.empty())
-            spdlog::debug("File logging enabled. Log file at: {}", log_file_name);
+//=============================================================================
+//=============================================================================
+void LogConfigurator::initSyslogLogging()
+{
+    try
+    {
+        auto logger = spdlog::get(logging_utils::LibLoggerName);
+        auto &sinks_tmp = dynamic_pointer_cast<spdlog::sinks::dist_sink_mt>(*(logger->sinks().begin()))->sinks();
+        sinks_tmp.push_back(make_shared<spdlog::sinks::syslog_sink_mt>(logging_utils::SyslogIdent, 0, LOG_USER, false));
+    }
+    catch (const spdlog::spdlog_ex &ex)
+    {
+        string msg {"Failed to initialise the syslog logging system, caught error: " + string(ex.what())};
+        cout << msg << endl;
+        Tango::Except::throw_exception("Runtime Error", msg, LOCATION_INFO);
+    }
+}
 
-        if (enable_console)
-            spdlog::debug("Console logging enabled.");
+//=============================================================================
+//=============================================================================
+void LogConfigurator::initConsoleLogging()
+{
+    try
+    {
+        auto logger = spdlog::get(logging_utils::LibLoggerName);
+        auto &sinks_tmp = dynamic_pointer_cast<spdlog::sinks::dist_sink_mt>(*(logger->sinks().begin()))->sinks();
+        sinks_tmp.push_back(make_shared<spdlog::sinks::stdout_color_sink_mt>());
     }
     catch (const spdlog::spdlog_ex &ex)
     {
-        string msg {"Failed to initialise the logging system, caught error: " + string(ex.what())};
+        string msg {"Failed to initialise the console logging system, caught error: " + string(ex.what())};
         cout << msg << endl;
         Tango::Except::throw_exception("Runtime Error", msg, LOCATION_INFO);
     }
@@ -182,17 +205,27 @@ void LogConfigurator::initLogging(bool enable_file, bool enable_console, const s
 
 //=============================================================================
 //=============================================================================
-void LogConfigurator::initLoggingMetrics(bool enable_file, bool enable_console, const string &log_file_name)
+void LogConfigurator::initFileLogging(const std::string &log_file_name)
 {
-    auto logger = spdlog::get(LibLoggerName);
-    if (!logger) initLogging(enable_file, enable_console, log_file_name);
+    try
+    {
+        auto logger = spdlog::get(logging_utils::LibLoggerName);
+        auto &sinks_tmp = dynamic_pointer_cast<spdlog::sinks::dist_sink_mt>(*(logger->sinks().begin()))->sinks();
+        sinks_tmp.push_back(make_shared<spdlog::sinks::rotating_file_sink_mt>(log_file_name, 1024 * 1024 * 10, 3));
+    }
+    catch (const spdlog::spdlog_ex &ex)
+    {
+        string msg {"Failed to initialise the file logging system, caught error: " + string(ex.what())};
+        cout << msg << endl;
+        Tango::Except::throw_exception("Runtime Error", msg, LOCATION_INFO);
+    }
 }
 
 //=============================================================================
 //=============================================================================
 void LogConfigurator::shutdownLogging()
 {
-    auto logger = spdlog::get(LibLoggerName);
+    auto logger = spdlog::get(logging_utils::LibLoggerName);
 
     if (!logger)
     {
diff --git a/src/LibUtils.hpp b/src/LibUtils.hpp
index 266c7e0c7e11d1ad5248f04a526ddd59ad9cffc1..8ec200f4a64bfd76bb0762bbfeba738cf839caca 100644
--- a/src/LibUtils.hpp
+++ b/src/LibUtils.hpp
@@ -58,40 +58,50 @@ std::ostream &operator<<(std::ostream &os, Tango::AttrDataFormat format);
 std::ostream &operator<<(std::ostream &os, Tango::CmdArgType type);
 std::ostream &operator<<(std::ostream &os, Tango::AttrQuality quality);
 
-// SPDLOG config and setup
-const std::string LibLoggerName = "hdbpp";
-
 struct LogConfigurator
 {
-    static void initLogging(bool enable_file, bool enable_console, const std::string &log_file_name = "");
-
-    // this version is used for metrics testing, and ignores the call if the
-    // logger already exists
-    static void initLoggingMetrics(bool enable_file, bool enable_console, const std::string &log_file_name = "");
+    static void initLogging();
+    static void initSyslogLogging();
+    static void initConsoleLogging();
+    static void initFileLogging(const std::string &log_file_name);
 
     static void shutdownLogging();
     static void setLoggingLevel(spdlog::level::level_enum level);
 };
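+
+// Typical call sequence (illustrative; mirrors the HdbppTimescaleDb constructor):
+//   LogConfigurator::initLogging();          // create the logger and its dist_sink
+//   LogConfigurator::initConsoleLogging();   // then attach whichever sinks are enabled
+//   LogConfigurator::initFileLogging("hdbpp.log");
+//   LogConfigurator::setLoggingLevel(spdlog::level::level_enum::debug);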
 
-// get the file name from the __FILE__ variable for error messages
-constexpr auto* getFileName(const char* const path)
+namespace logging_utils
 {
-    const auto* start_position = path;
-
-    for (const auto* current_character = path; *current_character != '\0'; ++current_character)
-        if (*current_character == '\\' || *current_character == '/')
-            start_position = current_character;
-
-    if (start_position != path)
-        ++start_position;
-
-    return start_position;
-}
+    // SPDLOG config and setup
+    const std::string LibLoggerName = "hdbpp";
+    const std::string SyslogIdent = "hdbpp-timescale";
+
+    // get the file name from the __FILE__ variable for error messages
+    constexpr auto *getFileName(const char *const path)
+    {
+        // We silence clang warnings for this function; this is a quick and simple
+        // way to produce the file name, and yes we use pointer arithmetic, but
+        // the alternatives can be messy or overworked.
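+        // e.g. getFileName("src/LibUtils.cpp") yields a pointer to "LibUtils.cpp"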
+        const auto *start_position = path;
+
+        // NOLINTNEXTLINE
+        for (const auto *current_character = path; *current_character != '\0'; ++current_character)
+            // NOLINTNEXTLINE
+            if (*current_character == '\\' || *current_character == '/')
+                start_position = current_character;
+
+        if (start_position != path)
+            // NOLINTNEXTLINE
+            ++start_position;
+
+        return start_position;
+    }
+} // namespace logging_utils
 
 // Macros to get the location for reporting errors
 #define S1(x) #x
 #define S2(x) S1(x)
-#define LOCATION_INFO string(getFileName(__FILE__)) + ":" + string(__func__) + ":" S2(__LINE__)
+
+#define LOCATION_INFO std::string(logging_utils::getFileName(__FILE__)) + ":" + std::string(__func__) + ":" S2(__LINE__)
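+// e.g. LOCATION_INFO expands to a string such as "QueryBuilder.cpp:tableName:341" (illustrative values)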
 
 }; // namespace hdbpp_internal
 #endif // _LIBUTILS_H
diff --git a/src/PqxxExtension.hpp b/src/PqxxExtension.hpp
index c64de39d04af85a21eb6ce8ad6345fed98c48a48..fbaeaf3de184063e5a5207055c9af5121b6e09c0 100644
--- a/src/PqxxExtension.hpp
+++ b/src/PqxxExtension.hpp
@@ -176,8 +176,9 @@ public:
     }
 };
 
-// This specialisation is for string types to ensure the string is quoted
-// for storage
+// This specialisation is for string types. Unlike other types the string type requires
+// the use of the ARRAY notation and dollar quoting to ensure the strings are stored
+// without escape characters.
 template<>
 struct string_traits<std::vector<std::string>>
 {
@@ -197,29 +198,35 @@ public:
 
         value.clear();
 
-        // TODO this extraction is not yet complete. Required to complete unit tests
+        std::pair<array_parser::juncture, std::string> output;
 
-        // not the best solution right now, but we are using this for testing only
-        // currently. Copy the str into a std::string so we can work with it more easily.
-        std::string in(str + 1, str + (strlen(str) - 1));
-        std::string sep {"','"};
-        size_t last_position = 1;
-        size_t position = in.find(sep, 0);
-
-        // as mention above, this could probably be sped up, i.e. preallocate
-        // the vector etc, but for now we just need it for testing, so improvements
-        // are left till later
-        while (position != std::string::npos)
+        // use pqxx array parser features to get each element from the array
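+        // e.g. (illustrative) an array literal such as {"one","two"} yields the elements "one" then "two"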
+        array_parser parser(str);
+        output = parser.get_next();
+
+        if (output.first == array_parser::juncture::row_start)
         {
-            value.push_back(in.substr(last_position, position - last_position));
-            last_position = position + 3;
-            position = in.find(sep, position + 1);
+            output = parser.get_next();
+
+            // loop and extract each string in turn
+            while (output.first == array_parser::juncture::string_value)
+            {
+                value.push_back(output.second);
+                output = parser.get_next();
+
+                if (output.first == array_parser::juncture::row_end)
+                    break;
+
+                if (output.first == array_parser::juncture::done)
+                    break;
+            }
         }
     }
 
     static std::string to_string(const std::vector<std::string> &value)
     {
-        // simply use the pqxx utilities for this, rather than reinvent the wheel
+        // This function should not be used in normal operation, so we only do a simple
+        // conversion here for testing purposes
         return "{" + separated_list(",", value.begin(), value.end()) + "}";
     }
 };
diff --git a/src/QueryBuilder.cpp b/src/QueryBuilder.cpp
index eac7987d3527fa511ee17bfffe7e4019aa6e6b4a..cbffde264407e3d291b0cf8adc0592f8c78f9e81 100644
--- a/src/QueryBuilder.cpp
+++ b/src/QueryBuilder.cpp
@@ -130,36 +130,36 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const string &QueryBuilder::storeAttributeQuery()
+    const string &QueryBuilder::storeAttributeStatement()
     {
         // clang-format off
         static string query =
-            "INSERT INTO " + CONF_TABLE_NAME + " (" +
-                CONF_COL_NAME + "," +
-                CONF_COL_TYPE_ID + "," +
-                CONF_COL_FORMAT_TYPE_ID + "," +
-                CONF_COL_WRITE_TYPE_ID + "," +
-                CONF_COL_TABLE_NAME + "," +
-                CONF_COL_CS_NAME + "," + 
-                CONF_COL_DOMAIN + "," +
-                CONF_COL_FAMILY + "," +
-                CONF_COL_MEMBER + "," +
-                CONF_COL_LAST_NAME + "," + 
-                CONF_COL_HIDE + ") (" +
+            "INSERT INTO " + schema::ConfTableName + " (" +
+                schema::ConfColName + "," +
+                schema::ConfColTypeId + "," +
+                schema::ConfColFormatTypeId + "," +
+                schema::ConfColWriteTypeId + "," +
+                schema::ConfColTableName + "," +
+                schema::ConfColCsName + "," + 
+                schema::ConfColDomain + "," +
+                schema::ConfColFamily + "," +
+                schema::ConfColMember + "," +
+                schema::ConfColLastName + "," + 
+                schema::ConfColHide + ") (" +
                 "SELECT " + 
                     "$1," + 
-                    CONF_TYPE_COL_TYPE_ID + "," + 
-                    CONF_FORMAT_COL_FORMAT_ID + "," + 
-                    CONF_WRITE_COL_WRITE_ID + 
+                    schema::ConfTypeColTypeId + "," + 
+                    schema::ConfFormatColFormatId + "," + 
+                    schema::ConfWriteColWriteId + 
                     ",$2,$3,$4,$5,$6,$7,$8 " +
                 "FROM " + 
-                    CONF_TYPE_TABLE_NAME + ", " +
-                    CONF_FORMAT_TABLE_NAME + ", " +
-                    CONF_WRITE_TABLE_NAME + " " +
-                "WHERE " + CONF_TYPE_TABLE_NAME + "." + CONF_TYPE_COL_TYPE_NUM + " = $9 " + 
-                "AND " + CONF_FORMAT_TABLE_NAME + "." + CONF_FORMAT_COL_FORMAT_NUM + " = $10 " + 
-                "AND " + CONF_WRITE_TABLE_NAME + "." + CONF_WRITE_COL_WRITE_NUM + " = $11) " +
-                "RETURNING " + CONF_COL_ID;
+                    schema::ConfTypeTableName + ", " +
+                    schema::ConfFormatTableName + ", " +
+                    schema::ConfWriteTableName + " " +
+                "WHERE " + schema::ConfTypeTableName + "." + schema::ConfTypeColTypeNum + " = $9 " + 
+                "AND " + schema::ConfFormatTableName + "." + schema::ConfFormatColFormatNum + " = $10 " + 
+                "AND " + schema::ConfWriteTableName + "." + schema::ConfWriteColWriteNum + " = $11) " +
+                "RETURNING " + schema::ConfColId;
         // clang-format on
 
         return query;
@@ -167,13 +167,13 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const string &QueryBuilder::storeHistoryStringQuery()
+    const string &QueryBuilder::storeHistoryStringStatement()
     {
         // clang-format off
         static string query = 
-            "INSERT INTO " + HISTORY_EVENT_TABLE_NAME + " (" +
-                HISTORY_EVENT_COL_EVENT + 
-                ") VALUES ($1) RETURNING " + HISTORY_EVENT_COL_EVENT_ID;
+            "INSERT INTO " + schema::HistoryEventTableName + " (" +
+                schema::HistoryEventColEvent + 
+                ") VALUES ($1) RETURNING " + schema::HistoryEventColEventId;
         // clang-format on
 
         return query;
@@ -181,18 +181,18 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const string &QueryBuilder::storeHistoryEventQuery()
+    const string &QueryBuilder::storeHistoryEventStatement()
     {
         // clang-format off
         static string query =
-            "INSERT INTO " + HISTORY_TABLE_NAME + " (" + 
-                HISTORY_COL_ID + "," +
-                HISTORY_COL_EVENT_ID + "," +
-                HISTORY_COL_TIME + ") " +
+            "INSERT INTO " + schema::HistoryTableName + " (" + 
+                schema::HistoryColId + "," +
+                schema::HistoryColEventId + "," +
+                schema::HistoryColTime + ") " +
                 "SELECT " +
-                    "$1," + HISTORY_EVENT_COL_EVENT_ID + ",CURRENT_TIMESTAMP(6)" +
-                " FROM " + HISTORY_EVENT_TABLE_NAME +
-                " WHERE " + HISTORY_EVENT_COL_EVENT + " = $2";
+                    "$1," + schema::HistoryEventColEventId + ",CURRENT_TIMESTAMP(6)" +
+                " FROM " + schema::HistoryEventTableName +
+                " WHERE " + schema::HistoryEventColEvent + " = $2";
         // clang-format on
 
         return query;
@@ -200,23 +200,23 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const string &QueryBuilder::storeParameterEventQuery()
+    const string &QueryBuilder::storeParameterEventStatement()
     {
         // clang-format off
         static string query =
             "INSERT INTO " +
-            PARAM_TABLE_NAME + " (" +
-            PARAM_COL_ID + "," +
-            PARAM_COL_EV_TIME + "," +
-            PARAM_COL_LABEL + "," +
-            PARAM_COL_UNIT + "," +
-            PARAM_COL_STANDARDUNIT + "," +
-            PARAM_COL_DISPLAYUNIT + "," +
-            PARAM_COL_FORMAT + "," +
-            PARAM_COL_ARCHIVERELCHANGE + "," +
-            PARAM_COL_ARCHIVEABSCHANGE + "," +
-            PARAM_COL_ARCHIVEPERIOD + "," +
-            PARAM_COL_DESCRIPTION + ") " +
+            schema::ParamTableName + " (" +
+            schema::ParamColId + "," +
+            schema::ParamColEvTime + "," +
+            schema::ParamColLabel + "," +
+            schema::ParamColUnit + "," +
+            schema::ParamColStandardUnit + "," +
+            schema::ParamColDisplayUnit + "," +
+            schema::ParamColFormat + "," +
+            schema::ParamColArchiveRelChange + "," +
+            schema::ParamColArchiveAbsChange + "," +
+            schema::ParamColArchivePeriod + "," +
+            schema::ParamColDescription + ") " +
             "VALUES ($1, TO_TIMESTAMP($2), $3, $4, $5, $6, $7, $8, $9, $10, $11)";
         // clang-format on
 
@@ -225,7 +225,7 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const string &QueryBuilder::storeDataEventErrorQuery(const AttributeTraits &traits)
+    const string &QueryBuilder::storeDataEventErrorStatement(const AttributeTraits &traits)
     {
         // search the cache for a previous entry
         auto result = _data_event_error_queries.find(traits);
@@ -233,10 +233,12 @@ namespace pqxx_conn
         if (result == _data_event_error_queries.end())
         {
             auto param_number = 0;
-            auto query = "INSERT INTO " + QueryBuilder::tableName(traits) + " (" + DAT_COL_ID + "," + DAT_COL_DATA_TIME;
+
+            auto query = "INSERT INTO " + QueryBuilder::tableName(traits) + " (" + schema::DatColId + "," +
+                schema::DatColDataTime;
 
             // split to ensure increments are in the correct order
-            query = query + "," + DAT_COL_QUALITY + "," + DAT_COL_ERROR_DESC_ID + ") VALUES ($" +
+            query = query + "," + schema::DatColQuality + "," + schema::DatColErrorDescId + ") VALUES ($" +
                 to_string(++param_number);
 
             query = query + ",TO_TIMESTAMP($" + to_string(++param_number) + ")";
@@ -247,8 +249,8 @@ namespace pqxx_conn
             // cache the query string against the traits
             _data_event_error_queries.emplace(traits, query);
 
-            _logger->debug("Built new data event error query and cached it against traits: {}", traits);
-            _logger->debug("New data event error query is: {}", query);
+            spdlog::debug("Built new data event error query and cached it against traits: {}", traits);
+            spdlog::debug("New data event error query is: {}", query);
 
             // now return it (must dereference the map again to get the static version)
             return _data_event_error_queries[traits];
@@ -260,12 +262,12 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const string &QueryBuilder::storeErrorQuery()
+    const string &QueryBuilder::storeErrorStatement()
     {
         // clang-format off
         static string query = 
-            "INSERT INTO " + ERR_TABLE_NAME + " (" +
-                ERR_COL_ERROR_DESC + ") VALUES ($1) RETURNING " + ERR_COL_ID;
+            "INSERT INTO " + schema::ErrTableName + " (" +
+                schema::ErrColErrorDesc + ") VALUES ($1) RETURNING " + schema::ErrColId;
         // clang-format on
 
         return query;
@@ -273,7 +275,7 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const string QueryBuilder::fetchAllValuesQuery(
+    const string QueryBuilder::fetchAllValuesStatement(
         const string &column_name, const string &table_name, const string &reference)
     {
         return "SELECT " + column_name + ", " + reference + " " + "FROM " + table_name;
@@ -281,7 +283,7 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const string QueryBuilder::fetchValueQuery(
+    const string QueryBuilder::fetchValueStatement(
         const string &column_name, const string &table_name, const string &reference)
     {
         return "SELECT " + column_name + " " + "FROM " + table_name + " WHERE " + reference + "=$1";
@@ -289,17 +291,17 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const string &QueryBuilder::fetchLastHistoryEventQuery()
+    const string &QueryBuilder::fetchLastHistoryEventStatement()
     {
         // clang-format off
         static string query = 
-            "SELECT " + HISTORY_EVENT_COL_EVENT +
-                " FROM " + HISTORY_TABLE_NAME +
-                " JOIN " + HISTORY_EVENT_TABLE_NAME +
-                " ON " + HISTORY_EVENT_TABLE_NAME + "." + 
-                    HISTORY_EVENT_COL_EVENT_ID + "=" + HISTORY_TABLE_NAME + "." + HISTORY_COL_EVENT_ID +
-                " WHERE " + HISTORY_COL_ID + " =$1" +
-                " ORDER BY " + HISTORY_COL_TIME + " DESC LIMIT 1";
+            "SELECT " + schema::HistoryEventColEvent +
+                " FROM " + schema::HistoryTableName +
+                " JOIN " + schema::HistoryEventTableName +
+                " ON " + schema::HistoryEventTableName + "." + 
+                    schema::HistoryEventColEventId + "=" + schema::HistoryTableName + "." + schema::HistoryColEventId +
+                " WHERE " + schema::HistoryColId + " =$1" +
+                " ORDER BY " + schema::HistoryColTime + " DESC LIMIT 1";
         // clang-format on
 
         return query;
@@ -307,29 +309,29 @@ namespace pqxx_conn
 
     //=============================================================================
     //=============================================================================
-    const std::string &QueryBuilder::fetchAttributeTraitsQuery()
+    const std::string &QueryBuilder::fetchAttributeTraitsStatement()
     {
         // clang-format off
         static string query = 
             "SELECT " + 
-                CONF_TYPE_COL_TYPE_NUM + "," +
-                CONF_FORMAT_COL_FORMAT_NUM + "," +
-                CONF_WRITE_COL_WRITE_NUM + " " +
+                schema::ConfTypeColTypeNum + "," +
+                schema::ConfFormatColFormatNum + "," +
+                schema::ConfWriteColWriteNum + " " +
             "FROM " +
-	            CONF_TYPE_TABLE_NAME + " t," +
-                CONF_FORMAT_TABLE_NAME + " f," + 
-                CONF_WRITE_TABLE_NAME + " w, " + 
+	            schema::ConfTypeTableName + " t," +
+                schema::ConfFormatTableName + " f," + 
+                schema::ConfWriteTableName + " w, " + 
                 "(SELECT " + 
-                    CONF_COL_TYPE_ID + "," + 
-                    CONF_COL_FORMAT_TYPE_ID + "," + 
-                    CONF_COL_WRITE_TYPE_ID + " " + 
-                "FROM " + CONF_TABLE_NAME + " WHERE " + CONF_COL_NAME + "=$1) AS tmp " + 
+                    schema::ConfColTypeId + "," + 
+                    schema::ConfColFormatTypeId + "," + 
+                    schema::ConfColWriteTypeId + " " + 
+                "FROM " + schema::ConfTableName + " WHERE " + schema::ConfColName + "=$1) AS tmp " + 
             "WHERE " +
-	            "t." + CONF_COL_TYPE_ID + "=tmp." + CONF_COL_TYPE_ID + " " +
+	            "t." + schema::ConfColTypeId + "=tmp." + schema::ConfColTypeId + " " +
             "AND " +  
-            	"f." + CONF_COL_FORMAT_TYPE_ID + "=tmp." + CONF_COL_FORMAT_TYPE_ID + " " +
+            	"f." + schema::ConfColFormatTypeId + "=tmp." + schema::ConfColFormatTypeId + " " +
             "AND " + 
-	            "w." + CONF_COL_WRITE_TYPE_ID  + "=tmp." + CONF_COL_WRITE_TYPE_ID;
+	            "w." + schema::ConfColWriteTypeId  + "=tmp." + schema::ConfColWriteTypeId;
         // clang-format on
 
         return query;
@@ -339,13 +341,13 @@ namespace pqxx_conn
     //=============================================================================
     string QueryBuilder::tableName(const AttributeTraits &traits)
     {
-        return SCHEMA_TABLE_PREFIX +
+        return schema::SchemaTablePrefix +
             [&traits]() {
                 switch (traits.formatType())
                 {
-                    case Tango::SCALAR: return TYPE_SCALAR;
-                    case Tango::SPECTRUM: return TYPE_ARRAY;
-                    case Tango::IMAGE: return TYPE_IMAGE;
+                    case Tango::SCALAR: return schema::TypeScalar;
+                    case Tango::SPECTRUM: return schema::TypeArray;
+                    case Tango::IMAGE: return schema::TypeImage;
                 }
 
                 return string("Unknown");
@@ -353,20 +355,20 @@ namespace pqxx_conn
             "_" + [&traits]() {
                 switch (traits.type())
                 {
-                    case Tango::DEV_DOUBLE: return TYPE_DEV_DOUBLE;
-                    case Tango::DEV_FLOAT: return TYPE_DEV_FLOAT;
-                    case Tango::DEV_STRING: return TYPE_DEV_STRING;
-                    case Tango::DEV_LONG: return TYPE_DEV_LONG;
-                    case Tango::DEV_ULONG: return TYPE_DEV_ULONG;
-                    case Tango::DEV_LONG64: return TYPE_DEV_LONG64;
-                    case Tango::DEV_ULONG64: return TYPE_DEV_ULONG64;
-                    case Tango::DEV_SHORT: return TYPE_DEV_SHORT;
-                    case Tango::DEV_USHORT: return TYPE_DEV_USHORT;
-                    case Tango::DEV_BOOLEAN: return TYPE_DEV_BOOLEAN;
-                    case Tango::DEV_UCHAR: return TYPE_DEV_UCHAR;
-                    case Tango::DEV_STATE: return TYPE_DEV_STATE;
-                    case Tango::DEV_ENCODED: return TYPE_DEV_ENCODED;
-                    case Tango::DEV_ENUM: return TYPE_DEV_ENUM;
+                    case Tango::DEV_DOUBLE: return schema::TypeDevDouble;
+                    case Tango::DEV_FLOAT: return schema::TypeDevFloat;
+                    case Tango::DEV_STRING: return schema::TypeDevString;
+                    case Tango::DEV_LONG: return schema::TypeDevLong;
+                    case Tango::DEV_ULONG: return schema::TypeDevUlong;
+                    case Tango::DEV_LONG64: return schema::TypeDevLong64;
+                    case Tango::DEV_ULONG64: return schema::TypeDevUlong64;
+                    case Tango::DEV_SHORT: return schema::TypeDevShort;
+                    case Tango::DEV_USHORT: return schema::TypeDevUshort;
+                    case Tango::DEV_BOOLEAN: return schema::TypeDevBoolean;
+                    case Tango::DEV_UCHAR: return schema::TypeDevUchar;
+                    case Tango::DEV_STATE: return schema::TypeDevState;
+                    case Tango::DEV_ENCODED: return schema::TypeDevEncoded;
+                    case Tango::DEV_ENUM: return schema::TypeDevEnum;
                 }
 
                 return string("Unknown");
@@ -392,7 +394,7 @@ namespace pqxx_conn
             // add to the cache for future hits
             cache.emplace(traits, new_name);
 
-            _logger->debug("New query name: {} cached against traits:", new_name, traits);
+            spdlog::debug("New query name: {} cached against traits: {}", new_name, traits);
             return cache[traits];
         }
 
diff --git a/src/QueryBuilder.hpp b/src/QueryBuilder.hpp
index 79cb042d34af857a38124de69cac861db0d85e1d..4555f6f9cb4faec0486188c3977e657d5b735067 100644
--- a/src/QueryBuilder.hpp
+++ b/src/QueryBuilder.hpp
@@ -22,6 +22,7 @@
 
 #include "AttributeTraits.hpp"
 #include "HdbppDefines.hpp"
+#include "PqxxExtension.hpp"
 #include "TimescaleSchema.hpp"
 #include "spdlog/spdlog.h"
 
@@ -56,9 +57,78 @@ namespace pqxx_conn
     namespace query_utils
     {
         // This function generates the postgres cast for the event data insert
-        // queries, it is specialized for all possible tango types in the source file
+        // queries; it is specialized for all possible tango types
         template<typename T>
         std::string postgresCast(bool is_array);
+
+        // Convert the given data into a string suitable for storing in the database. These calls
+        // are used to build the string version of the insert command; they are required since we
+        // need to specialise for strings (to ensure we do not store escape characters) and bools
+        // (which are in fact a bitfield internally and won't convert via the pqxx routines)
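+        // e.g. (illustrative) a scalar double 3.5 renders as "3.5", while a spectrum {1.5, 2.5}
+        // renders as the quoted array literal '{1.5,2.5}' via the vector string_traits in
+        // PqxxExtension.hpp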
+        template<typename T>
+        struct DataToString
+        {
+            static std::string run(std::unique_ptr<std::vector<T>> &value, const AttributeTraits &traits)
+            {
+                if (traits.isScalar())
+                    return pqxx::to_string((*value)[0]);
+
+                return "'" + pqxx::to_string(value) + "'";
+            }
+        };
+
+        // Convert a vector<bool> to a postgres array
+        template<>
+        struct DataToString<bool>
+        {
+            static std::string run(std::unique_ptr<std::vector<bool>> &value, const AttributeTraits &traits)
+            {
+                // a vector<bool> is not actually a vector<bool>, rather it's some kind of bitfield. When
+                // trying to return an element, we appear to get some kind of bitfield reference,
+                // so we copy the value to a local variable to remove the reference to the bitfield and
+                // ensure it is actually a bool passed into the conversion framework
+                if (traits.isScalar())
+                {
+                    bool v = (*value)[0];
+                    return pqxx::to_string(v);
+                }
+
+                // handled by our own extensions in PqxxExtensions.hpp
+                return "'" + pqxx::to_string(value) + "'";
+            }
+        };
+
+        // This specialisation for strings uses the ARRAY syntax and dollar quoting to
+        // ensure arrays of strings are stored without escape characters
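+        // e.g. (illustrative) a scalar string "on" becomes $$on$$, while the array {"on","off"}
+        // becomes ARRAY[$$on$$,$$off$$]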
+        template<>
+        struct DataToString<std::string>
+        {
+            static std::string run(std::unique_ptr<std::vector<std::string>> &value, const AttributeTraits &traits)
+            {
+                // arrays of strings need both the ARRAY keyword and dollar quoting; this is so we
+                // do not have to rely on the postgres escape functions, which double and then store
+                // escaped characters. This is a mess when extracting the array of strings.
+                if (traits.isScalar())
+                {
+                    // use dollar quoting so the string is stored without escape characters
+                    return "$$" + pqxx::to_string((*value)[0]) + "$$";
+                }
+
+                auto iter = value->begin();
+                std::string result = "ARRAY[";
+
+                result = result + "$$" + pqxx::to_string((*iter)) + "$$";
+
+                for (++iter; iter != value->end(); ++iter)
+                {
+                    result += ",";
+                    result += "$$" + pqxx::to_string((*iter)) + "$$";
+                }
+
+                result += "]";
+                return result;
+            }
+        };
     }; // namespace query_utils
 
     // these are used as transactions names for pqxx, some are used to as prepared
@@ -81,42 +151,54 @@ namespace pqxx_conn
     class QueryBuilder
     {
     public:
-        QueryBuilder() { _logger = spdlog::get(LibLoggerName); }
+        // Static Prepared statement strings
+        // these builder functions require no caching, so can be simple static
+        // functions
+
+        static std::string tableName(const AttributeTraits &traits);
+        static const std::string &storeAttributeStatement();
+        static const std::string &storeHistoryEventStatement();
+        static const std::string &storeHistoryStringStatement();
+        static const std::string &storeParameterEventStatement();
+        static const std::string &storeErrorStatement();
+        static const std::string &fetchLastHistoryEventStatement();
+        static const std::string &fetchAttributeTraitsStatement();
+
+        static const std::string fetchValueStatement(
+            const std::string &column_name, const std::string &table_name, const std::string &reference);
+
+        static const std::string fetchAllValuesStatement(
+            const std::string &column_name, const std::string &table_name, const std::string &reference);
 
-        // Non-static methods
+        // Non-static prepared statements
+        // these builder functions cache the built queries, so they are not static
+        // like the others, since they require internal data storage
 
-        // these builder functions cache the built query names, therefore they
-        // are not static like the others
         const std::string &storeDataEventName(const AttributeTraits &traits);
         const std::string &storeDataEventErrorName(const AttributeTraits &traits);
 
-        // like the query name functions, these cache data internally to speed up the
-        // process of putting data int the db
+        // Builds a prepared statement for the given traits; the statement is cached
+        // internally to improve execution time
         template<typename T>
-        const std::string &storeDataEventQuery(const AttributeTraits &traits);
-
-        const std::string &storeDataEventErrorQuery(const AttributeTraits &traits);
-        std::string tableName(const AttributeTraits &traits);
+        const std::string &storeDataEventStatement(const AttributeTraits &traits);
 
-        void print(std::ostream &os) const noexcept;
-
-        // Static methods
-
-        static const std::string &storeAttributeQuery();
-        static const std::string &storeHistoryEventQuery();
-        static const std::string &storeHistoryStringQuery();
-        static const std::string &storeParameterEventQuery();
-        static const std::string &storeErrorQuery();
-        static const std::string &fetchLastHistoryEventQuery();
-        static const std::string &fetchAttributeTraitsQuery();
+        // A variant of storeDataEventStatement that builds a string based on the
+        // parameters; this is then passed back to the caller to be executed. There is no
+        // internal caching, so it's less efficient, but it can be chained in a pipe
+        // to batch data to the database.
+        template<typename T>
+        const std::string storeDataEventString(const std::string &full_attr_name,
+            const std::string &event_time,
+            const std::string &quality,
+            std::unique_ptr<vector<T>> &value_r,
+            std::unique_ptr<vector<T>> &value_w,
+            const AttributeTraits &traits);
 
-        // these query strings are built each call, so are cached in the class
-        // that requests them
-        static const std::string fetchValueQuery(
-            const std::string &column_name, const std::string &table_name, const std::string &reference);
+        // Builds a prepared statement for data event errors
+        const std::string &storeDataEventErrorStatement(const AttributeTraits &traits);
 
-        static const std::string fetchAllValuesQuery(
-            const std::string &column_name, const std::string &table_name, const std::string &reference);
+        // Utility
+        void print(std::ostream &os) const noexcept;
 
     private:
         // generic function to handle caching items into the cache maps
@@ -128,17 +210,14 @@ namespace pqxx_conn
         std::map<AttributeTraits, std::string> _data_event_error_query_names;
 
         // cached insert query strings built from the traits object
-        map<AttributeTraits, std::string> _data_event_queries;
-        map<AttributeTraits, std::string> _data_event_error_queries;
-
-        // logging subsystem
-        std::shared_ptr<spdlog::logger> _logger;
+        std::map<AttributeTraits, std::string> _data_event_queries;
+        std::map<AttributeTraits, std::string> _data_event_error_queries;
     };
 
     //=============================================================================
     //=============================================================================
     template<typename T>
-    const string &QueryBuilder::storeDataEventQuery(const AttributeTraits &traits)
+    const string &QueryBuilder::storeDataEventStatement(const AttributeTraits &traits)
     {
         // search the cache for a previous entry
         auto result = _data_event_queries.find(traits);
@@ -149,16 +228,17 @@ namespace pqxx_conn
             // attribute traits and then cached.
             auto param_number = 0;
 
-            auto query = "INSERT INTO " + QueryBuilder::tableName(traits) + " (" + DAT_COL_ID + "," + DAT_COL_DATA_TIME;
+            auto query = "INSERT INTO " + QueryBuilder::tableName(traits) + " (" + schema::DatColId + "," +
+                schema::DatColDataTime;
 
             if (traits.hasReadData())
-                query = query + "," + DAT_COL_VALUE_R;
+                query = query + "," + schema::DatColValueR;
 
             if (traits.hasWriteData())
-                query = query + "," + DAT_COL_VALUE_W;
+                query = query + "," + schema::DatColValueW;
 
             // split to ensure increments are in the correct order
-            query = query + "," + DAT_COL_QUALITY + ") VALUES ($" + to_string(++param_number);
+            query = query + "," + schema::DatColQuality + ") VALUES ($" + to_string(++param_number);
             query = query + ",TO_TIMESTAMP($" + to_string(++param_number) + ")";
 
             // add the read parameter with cast
@@ -176,8 +256,8 @@ namespace pqxx_conn
             // cache the query string against the traits
             _data_event_queries.emplace(traits, query);
 
-            _logger->debug("Built new data event query and cached it against traits: {}", traits);
-            _logger->debug("New data event query is: {}", query);
+            spdlog::debug("Built new data event query and cached it against traits: {}", traits);
+            spdlog::debug("New data event query is: {}", query);
 
             // now return it (must dereference the map again to get the static version)
             return _data_event_queries[traits];
@@ -186,6 +266,62 @@ namespace pqxx_conn
         // return the previously cached example
         return result->second;
     }
+
+    template<typename T>
+    const std::string QueryBuilder::storeDataEventString(const std::string &full_attr_name,
+        const std::string &event_time,
+        const std::string &quality,
+        std::unique_ptr<vector<T>> &value_r,
+        std::unique_ptr<vector<T>> &value_w,
+        const AttributeTraits &traits)
+    {
+        auto query = "INSERT INTO " + QueryBuilder::tableName(traits) + " (" + schema::DatColId + "," +
+            schema::DatColDataTime;
+
+        if (traits.hasReadData())
+            query = query + "," + schema::DatColValueR;
+
+        if (traits.hasWriteData())
+            query = query + "," + schema::DatColValueW;
+
+        // split to ensure increments are in the correct order
+        query = query + "," + schema::DatColQuality + ") VALUES ('" + full_attr_name + "'";
+        query = query + ",TO_TIMESTAMP(" + event_time + ")";
+
+        // add the read parameter with cast
+        if (traits.hasReadData())
+        {
+            if (value_r->empty())
+            {
+                query = query + ",NULL";
+            }
+            else
+            {
+                query = query + "," + query_utils::DataToString<T>::run(value_r, traits) +
+                    "::" + query_utils::postgresCast<T>(traits.isArray());
+            }
+        }
+
+        // add the write parameter with cast
+        if (traits.hasWriteData())
+        {
+            if (value_w->empty())
+            {
+                query = query + ",NULL";
+            }
+            else
+            {
+                query = query + "," + query_utils::DataToString<T>::run(value_w, traits) +
+                    "::" + query_utils::postgresCast<T>(traits.isArray());
+            }
+        }
+
+        query = query + "," + quality + ")";
+
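+        // For illustration only: with a scalar READ double attribute the built string has
+        // roughly the shape
+        //   INSERT INTO <table> (att_conf_id,data_time,value_r,quality)
+        //       VALUES ('<full_attr_name>',TO_TIMESTAMP(<event_time>),<value_r>::<cast>,<quality>)
+        // where <table>, <value_r> and <cast> come from tableName(), DataToString and postgresCast.
+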
+        // now return the built query
+        return query;
+    }
+
 } // namespace pqxx_conn
 } // namespace hdbpp_internal
 #endif // _QUERY_BUILDER_HPP
diff --git a/src/TimescaleSchema.hpp b/src/TimescaleSchema.hpp
index f0c05c23dbb6a9d41e9f20b6673ee06e9b74f489..6a8e48b20ca22c15540b2da51aa83cc2cbbde3c2 100644
--- a/src/TimescaleSchema.hpp
+++ b/src/TimescaleSchema.hpp
@@ -26,115 +26,116 @@ namespace hdbpp_internal
 {
 namespace pqxx_conn
 {
-    // general schema related strings
-    const std::string SCHEMA_TABLE_PREFIX = "att_";
-
-    // attribute type information
-    const std::string TYPE_SCALAR = "scalar";
-    const std::string TYPE_ARRAY = "array";
-    const std::string TYPE_IMAGE = "image";
-    const std::string TYPE_DEV_BOOLEAN = "devboolean";
-    const std::string TYPE_DEV_UCHAR = "devuchar";
-    const std::string TYPE_DEV_SHORT = "devshort";
-    const std::string TYPE_DEV_USHORT = "devushort";
-    const std::string TYPE_DEV_LONG = "devlong";
-    const std::string TYPE_DEV_ULONG = "devulong";
-    const std::string TYPE_DEV_LONG64 = "devlong64";
-    const std::string TYPE_DEV_ULONG64 = "devulong64";
-    const std::string TYPE_DEV_FLOAT = "devfloat";
-    const std::string TYPE_DEV_DOUBLE = "devdouble";
-    const std::string TYPE_DEV_STRING = "devstring";
-    const std::string TYPE_DEV_STATE = "devstate";
-    const std::string TYPE_DEV_ENCODED = "devencoded";
-    const std::string TYPE_DEV_ENUM = "devenum";
-
-    // att_conf table
-    const std::string CONF_TABLE_NAME = "att_conf";
-    const std::string CONF_COL_ID = "att_conf_id";
-    const std::string CONF_COL_NAME = "att_name";
-    const std::string CONF_COL_TYPE_ID = "att_conf_type_id";
-    const std::string CONF_COL_FORMAT_TYPE_ID = "att_conf_format_id";
-    const std::string CONF_COL_WRITE_TYPE_ID = "att_conf_write_id";
-    const std::string CONF_COL_TABLE_NAME = "table_name";
-    const std::string CONF_COL_CS_NAME = "cs_name";
-    const std::string CONF_COL_DOMAIN = "domain";
-    const std::string CONF_COL_FAMILY = "family";
-    const std::string CONF_COL_MEMBER = "member";
-    const std::string CONF_COL_LAST_NAME = "name";
-    const std::string CONF_COL_TTL = "ttl";
-    const std::string CONF_COL_HIDE = "hide";
-
-    // att_conf_type table
-    const std::string CONF_TYPE_TABLE_NAME = "att_conf_type";
-    const std::string CONF_TYPE_COL_TYPE_ID = "att_conf_type_id";
-    const std::string CONF_TYPE_COL_TYPE = "type";
-    const std::string CONF_TYPE_COL_TYPE_NUM = "type_num";
-
-    // att_conf_format table
-    const std::string CONF_FORMAT_TABLE_NAME = "att_conf_format";
-    const std::string CONF_FORMAT_COL_FORMAT_ID = "att_conf_format_id";
-    const std::string CONF_FORMAT_COL_FORMAT = "format";
-    const std::string CONF_FORMAT_COL_FORMAT_NUM = "format_num";
-
-    // att_conf_write table
-    const std::string CONF_WRITE_TABLE_NAME = "att_conf_write";
-    const std::string CONF_WRITE_COL_WRITE_ID = "att_conf_write_id";
-    const std::string CONF_WRITE_COL_WRITE = "write";
-    const std::string CONF_WRITE_COL_WRITE_NUM = "write_num";
-
-    // att_history table
-    const std::string HISTORY_TABLE_NAME = "att_history";
-    const std::string HISTORY_COL_ID = "att_conf_id";
-    const std::string HISTORY_COL_EVENT_ID = "att_history_event_id";
-    const std::string HISTORY_COL_TIME = "event_time";
-    const std::string HISTORY_COL_DETAILS = "details";
-
-    // att_history_event table
-    const std::string HISTORY_EVENT_TABLE_NAME = "att_history_event";
-    const std::string HISTORY_EVENT_COL_EVENT_ID = "att_history_event_id";
-    const std::string HISTORY_EVENT_COL_EVENT = "event";
-
-    // att_parameter table
-    const std::string PARAM_TABLE_NAME = "att_parameter";
-    const std::string PARAM_COL_ID = "att_conf_id";
-    const std::string PARAM_COL_INS_TIME = "insert_time";
-    const std::string PARAM_COL_EV_TIME = "recv_time";
-    const std::string PARAM_COL_LABEL = "label";
-    const std::string PARAM_COL_UNIT = "unit";
-    const std::string PARAM_COL_STANDARDUNIT = "standard_unit";
-    const std::string PARAM_COL_DISPLAYUNIT = "display_unit";
-    const std::string PARAM_COL_FORMAT = "format";
-    const std::string PARAM_COL_ARCHIVERELCHANGE = "archive_rel_change";
-    const std::string PARAM_COL_ARCHIVEABSCHANGE = "archive_abs_change";
-    const std::string PARAM_COL_ARCHIVEPERIOD = "archive_period";
-    const std::string PARAM_COL_DESCRIPTION = "description";
-    const std::string PARAM_COL_DETAILS = "details";
-
-    // att_error_desc table
-    const std::string ERR_TABLE_NAME = "att_error_desc";
-    const std::string ERR_COL_ID = "att_error_desc_id";
-    const std::string ERR_COL_ERROR_DESC = "error_desc";
-
-    // data tables
-    const std::string DAT_COL_ID = "att_conf_id";
-    const std::string DAT_COL_INS_TIME = "insert_time";
-    const std::string DAT_COL_RCV_TIME = "recv_time";
-    const std::string DAT_COL_DATA_TIME = "data_time";
-    const std::string DAT_COL_VALUE_R = "value_r";
-    const std::string DAT_COL_VALUE_W = "value_w";
-    const std::string DAT_COL_QUALITY = "quality";
-    const std::string DAT_COL_ERROR_DESC_ID = "att_error_desc_id";
-    const std::string DAT_COL_DETAILS = "details";
-
-    // special fields for enums
-    const std::string DAT_COL_DAT_COL_VALUE_R_LABEL = "value_r_label";
-    const std::string DAT_COL_DAT_COL_VALUE_W_LABEL = "value_w_label";
-
-    // special fields for image tables
-    const std::string DAT_IMG_COL_DIMX_R = "dim_x_r";
-    const std::string DAT_IMG_COL_DIMY_R = "dim_y_r";
-    const std::string DAT_IMG_COL_DIMX_W = "dim_x_w";
-    const std::string DAT_IMG_COL_DIMY_W = "dim_y_w";
+    namespace schema
+    {
+        // general schema related strings
+        const std::string SchemaTablePrefix = "att_";
+
+        // attribute type information
+        const std::string TypeScalar = "scalar";
+        const std::string TypeArray = "array";
+        const std::string TypeImage = "image";
+        const std::string TypeDevBoolean = "devboolean";
+        const std::string TypeDevUchar = "devuchar";
+        const std::string TypeDevShort = "devshort";
+        const std::string TypeDevUshort = "devushort";
+        const std::string TypeDevLong = "devlong";
+        const std::string TypeDevUlong = "devulong";
+        const std::string TypeDevLong64 = "devlong64";
+        const std::string TypeDevUlong64 = "devulong64";
+        const std::string TypeDevFloat = "devfloat";
+        const std::string TypeDevDouble = "devdouble";
+        const std::string TypeDevString = "devstring";
+        const std::string TypeDevState = "devstate";
+        const std::string TypeDevEncoded = "devencoded";
+        const std::string TypeDevEnum = "devenum";
+
+        // att_conf table
+        const std::string ConfTableName = "att_conf";
+        const std::string ConfColId = "att_conf_id";
+        const std::string ConfColName = "att_name";
+        const std::string ConfColTypeId = "att_conf_type_id";
+        const std::string ConfColFormatTypeId = "att_conf_format_id";
+        const std::string ConfColWriteTypeId = "att_conf_write_id";
+        const std::string ConfColTableName = "table_name";
+        const std::string ConfColCsName = "cs_name";
+        const std::string ConfColDomain = "domain";
+        const std::string ConfColFamily = "family";
+        const std::string ConfColMember = "member";
+        const std::string ConfColLastName = "name";
+        const std::string ConfColTtl = "ttl";
+        const std::string ConfColHide = "hide";
+
+        // att_conf_type table
+        const std::string ConfTypeTableName = "att_conf_type";
+        const std::string ConfTypeColTypeId = "att_conf_type_id";
+        const std::string ConfTypeColType = "type";
+        const std::string ConfTypeColTypeNum = "type_num";
+
+        // att_conf_format table
+        const std::string ConfFormatTableName = "att_conf_format";
+        const std::string ConfFormatColFormatId = "att_conf_format_id";
+        const std::string ConfFormatColFormat = "format";
+        const std::string ConfFormatColFormatNum = "format_num";
+
+        // att_conf_write table
+        const std::string ConfWriteTableName = "att_conf_write";
+        const std::string ConfWriteColWriteId = "att_conf_write_id";
+        const std::string ConfWriteColWrite = "write";
+        const std::string ConfWriteColWriteNum = "write_num";
+
+        // att_history table
+        const std::string HistoryTableName = "att_history";
+        const std::string HistoryColId = "att_conf_id";
+        const std::string HistoryColEventId = "att_history_event_id";
+        const std::string HistoryColTime = "event_time";
+        const std::string HistoryColDetails = "details";
+
+        // att_history_event table
+        const std::string HistoryEventTableName = "att_history_event";
+        const std::string HistoryEventColEventId = "att_history_event_id";
+        const std::string HistoryEventColEvent = "event";
+
+        // att_parameter table
+        const std::string ParamTableName = "att_parameter";
+        const std::string ParamColId = "att_conf_id";
+        const std::string ParamColInsTime = "insert_time";
+        const std::string ParamColEvTime = "recv_time";
+        const std::string ParamColLabel = "label";
+        const std::string ParamColUnit = "unit";
+        const std::string ParamColStandardUnit = "standard_unit";
+        const std::string ParamColDisplayUnit = "display_unit";
+        const std::string ParamColFormat = "format";
+        const std::string ParamColArchiveRelChange = "archive_rel_change";
+        const std::string ParamColArchiveAbsChange = "archive_abs_change";
+        const std::string ParamColArchivePeriod = "archive_period";
+        const std::string ParamColDescription = "description";
+        const std::string ParamColDetails = "details";
+
+        // att_error_desc table
+        const std::string ErrTableName = "att_error_desc";
+        const std::string ErrColId = "att_error_desc_id";
+        const std::string ErrColErrorDesc = "error_desc";
+
+        // data tables
+        const std::string DatColId = "att_conf_id";
+        const std::string DatColDataTime = "data_time";
+        const std::string DatColValueR = "value_r";
+        const std::string DatColValueW = "value_w";
+        const std::string DatColQuality = "quality";
+        const std::string DatColErrorDescId = "att_error_desc_id";
+        const std::string DatColDetails = "details";
+
+        // special fields for enums
+        const std::string DatColDatColValueRLabel = "value_r_label";
+        const std::string DatColDatColValueWLabel = "value_w_label";
+
+        // special fields for image tables
+        const std::string DatImgColDimxR = "dim_x_r";
+        const std::string DatImgColDimyR = "dim_y_r";
+        const std::string DatImgColDimxW = "dim_x_w";
+        const std::string DatImgColDimyW = "dim_y_w";
+    } // namespace schema
 } // namespace pqxx_conn
 } // namespace hdbpp_internal
 #endif // _TIMESCALE_SCHEMA_HPP
diff --git a/test/AttributeTraitsTests.cpp b/test/AttributeTraitsTests.cpp
index 697c78039ce887dd18c3e2d219d9356264cfff5c..6c12ecfb09a579b108bf20fe32339f8dce33b818 100644
--- a/test/AttributeTraitsTests.cpp
+++ b/test/AttributeTraitsTests.cpp
@@ -18,17 +18,14 @@
    along with libhdb++timescale.  If not, see <http://www.gnu.org/licenses/>. */
 
 #include "AttributeTraits.hpp"
-#include "catch2/catch.hpp"
-
 #include "LibUtils.hpp"
+#include "catch2/catch.hpp"
 
 using namespace std;
 using namespace hdbpp_internal;
 
 SCENARIO("Attribute format returns expected results", "[attribute-traits]")
 {
-    cout << LOCATION_INFO << endl;
-
     GIVEN("Constructed AttributeTraits as an Array")
     {
         AttributeTraits traits {Tango::READ, Tango::SPECTRUM, Tango::DEV_BOOLEAN};
@@ -244,10 +241,10 @@ SCENARIO("Attribute traits are invalid if not set with valid traits", "[attribut
 
         WHEN("Checking if traits type is valid")
         {
-            THEN("Result is false") 
-            { 
-                REQUIRE(!traits.isValid()); 
-                REQUIRE(traits.isInvalid()); 
+            THEN("Result is false")
+            {
+                REQUIRE(!traits.isValid());
+                REQUIRE(traits.isInvalid());
             }
         }
     }
@@ -257,10 +254,10 @@ SCENARIO("Attribute traits are invalid if not set with valid traits", "[attribut
 
         WHEN("Checking if traits type is valid")
         {
-            THEN("Result is false") 
-            { 
-                REQUIRE(!traits.isValid()); 
-                REQUIRE(traits.isInvalid()); 
+            THEN("Result is false")
+            {
+                REQUIRE(!traits.isValid());
+                REQUIRE(traits.isInvalid());
             }
         }
     }
@@ -270,10 +267,10 @@ SCENARIO("Attribute traits are invalid if not set with valid traits", "[attribut
 
         WHEN("Checking if traits type is valid")
         {
-            THEN("Result is false") 
-            { 
-                REQUIRE(!traits.isValid()); 
-                REQUIRE(traits.isInvalid()); 
+            THEN("Result is false")
+            {
+                REQUIRE(!traits.isValid());
+                REQUIRE(traits.isInvalid());
             }
         }
     }
@@ -283,11 +280,11 @@ SCENARIO("Attribute traits are invalid if not set with valid traits", "[attribut
 
         WHEN("Checking if traits type is valid")
         {
-            THEN("Result is true") 
-            { 
-                REQUIRE(traits.isValid()); 
-                REQUIRE(!traits.isInvalid()); 
+            THEN("Result is true")
+            {
+                REQUIRE(traits.isValid());
+                REQUIRE(!traits.isInvalid());
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/ColumnCacheTests.cpp b/test/ColumnCacheTests.cpp
index 02eaa9f673822dc9c6e31a7b0ae4b2da692fff78..604ebe67843f92a0d2e92ec3eab59c89801c561b 100644
--- a/test/ColumnCacheTests.cpp
+++ b/test/ColumnCacheTests.cpp
@@ -27,7 +27,7 @@
 using namespace std;
 using namespace hdbpp_internal;
 using namespace hdbpp_internal::pqxx_conn;
-using namespace hdbpp_test::psql_conn_test;
+using namespace hdbpp_test::psql_connection;
 
 namespace value_cache_test
 {
@@ -72,7 +72,7 @@ shared_ptr<pqxx::connection> connectDb()
 
 using namespace value_cache_test;
 
-SCENARIO("ColumnCache can access and retrieve data from the database", "[db-access][column-cache][psql]")
+SCENARIO("ColumnCache can access and retrieve data from the database", "[db-access][column-cache]")
 {
     auto conn = connectDb();
 
@@ -108,7 +108,7 @@ SCENARIO("ColumnCache can access and retrieve data from the database", "[db-acce
     conn->disconnect();
 }
 
-SCENARIO("ColumnCache can grown in size when caching", "[db-access][column-cache][psql]")
+SCENARIO("ColumnCache can grown in size when caching", "[db-access][column-cache]")
 {
     auto conn = connectDb();
 
@@ -172,7 +172,7 @@ SCENARIO("ColumnCache can grown in size when caching", "[db-access][column-cache
     conn->disconnect();
 }
 
-SCENARIO("ColumnCache will handle the same value being added twice", "[db-access][column-cache][psql]")
+SCENARIO("ColumnCache will handle the same value being added twice", "[db-access][column-cache]")
 {
     auto conn = connectDb();
 
@@ -203,7 +203,7 @@ SCENARIO("ColumnCache will handle the same value being added twice", "[db-access
     conn->disconnect();
 }
 
-SCENARIO("It is an error to request invalid values", "[db-access][column-cache][psql]")
+SCENARIO("It is an error to request invalid values", "[db-access][column-cache]")
 {
     auto conn = connectDb();
 
@@ -223,7 +223,7 @@ SCENARIO("It is an error to request invalid values", "[db-access][column-cache][
     conn->disconnect();
 }
 
-SCENARIO("Clearing the cache does not stop entries being cached again", "[db-access][column-cache][psql]")
+SCENARIO("Clearing the cache does not stop entries being cached again", "[db-access][column-cache]")
 {
     auto conn = connectDb();
 
@@ -256,7 +256,7 @@ SCENARIO("Clearing the cache does not stop entries being cached again", "[db-acc
 }
 
 SCENARIO("ColumnCache will fetch values from db and cache them as they are requested by reference",
-    "[db-access][column-cache][psql]")
+    "[db-access][column-cache]")
 {
     auto conn = connectDb();
 
diff --git a/test/DbConnectionTests.cpp b/test/DbConnectionTests.cpp
index 52349892f84dae93a085f4e9f1a9134d8a11d001..e83015eb339f02c334c633f131ef611762e78db0 100644
--- a/test/DbConnectionTests.cpp
+++ b/test/DbConnectionTests.cpp
@@ -34,53 +34,12 @@ using namespace std;
 using namespace pqxx;
 using namespace hdbpp_internal;
 using namespace hdbpp_internal::pqxx_conn;
-using namespace hdbpp_test::attr_name;
-using namespace hdbpp_test::attr_info;
-using namespace hdbpp_test::psql_conn_test;
+using namespace hdbpp_test;
+using namespace hdbpp_test::psql_connection;
 using namespace hdbpp_test::data_gen;
 
-namespace psql_conn_test
+namespace pqxx_conn_test
 {
-// define it globally so we can use its cache during tests
-QueryBuilder TestQueryBuilder;
-
-void clearTable(pqxx::connection &conn, const string &table_name)
-{
-    {
-        work tx {conn};
-        tx.exec("TRUNCATE " + table_name + "  RESTART IDENTITY CASCADE");
-        tx.commit();
-    }
-}
-
-// wrapper to store an attribute
-void storeTestAttribute(DbConnection &conn, const AttributeTraits &traits)
-{
-    REQUIRE_NOTHROW(conn.storeAttribute(
-        TestAttrFinalName, TestAttrCs, TestAttrDomain, TestAttrFamily, TestAttrMember, TestAttrName, traits));
-}
-
-// wrapper to store some event data, and return the data for comparison
-template<Tango::CmdArgType Type>
-tuple<vector<typename TangoTypeTraits<Type>::type>, vector<typename TangoTypeTraits<Type>::type>> storeTestEventData(
-    DbConnection &conn, const AttributeTraits &traits, int quality = Tango::ATTR_VALID)
-{
-    struct timeval tv
-    {};
-    gettimeofday(&tv, nullptr);
-    double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
-
-    auto r = generateData<Type>(traits, !traits.hasReadData());
-    auto w = generateData<Type>(traits, !traits.hasWriteData());
-
-    // make a copy for the consistency check
-    auto ret = make_tuple((*r), (*w));
-
-    REQUIRE_NOTHROW(conn.storeDataEvent(TestAttrFinalName, event_time, quality, move(r), move(w), traits));
-
-    return ret;
-}
-
 // generic compare for most types
 template<typename T>
 bool compareData(T lhs, T rhs)
@@ -152,989 +111,1061 @@ bool compareVector<double>(const vector<double> &lhs, const vector<double> &rhs)
     return true;
 }
 
-// taking the original data as a reference, this function loads the last line of data and compares
-// it to the reference data as a test
-template<typename T>
-void checkStoreTestEventData(
-    pqxx::connection &test_conn, const AttributeTraits &traits, const tuple<vector<T>, vector<T>> &data)
+class DbConnectionTestsFixture
 {
-    pqxx::work tx {test_conn};
-
-    auto data_row(tx.exec1(
-        "SELECT * FROM " + TestQueryBuilder.tableName(traits) + " ORDER BY " + DAT_COL_DATA_TIME + " LIMIT 1"));
-
-    auto attr_row(tx.exec1("SELECT * FROM " + CONF_TABLE_NAME));
-    tx.commit();
+private:
+    DbConnection::DbStoreMethod _db_access = DbConnection::DbStoreMethod::PreparedStatement;
+    std::unique_ptr<DbConnection> _test_conn;
 
-    REQUIRE(data_row.at(DAT_COL_ID).as<int>() == attr_row.at(CONF_COL_ID).as<int>());
+    static std::unique_ptr<pqxx::connection> _verify_conn;
+    static QueryBuilder _query_builder;
 
-    if (traits.isScalar() && traits.hasReadData())
-    {
-        REQUIRE(data_row.at(DAT_COL_VALUE_R).size() > 0);
-        REQUIRE(compareData(data_row.at(DAT_COL_VALUE_R).as<T>(), get<0>(data)[0]) == true);
-    }
-    else if (traits.isArray() && traits.hasReadData())
+protected:
+    void resetDbAccess(DbConnection::DbStoreMethod db_access)
     {
-        REQUIRE(data_row.at(DAT_COL_VALUE_R).size() > 0);
-        REQUIRE(compareVector(data_row.at(DAT_COL_VALUE_R).as<vector<T>>(), get<0>(data)) == true);
+        _db_access = db_access;
+        _test_conn.reset(nullptr);
     }
 
-    if (traits.isScalar() && traits.hasWriteData())
-    {
-        REQUIRE(data_row.at(DAT_COL_VALUE_W).size() > 0);
-        REQUIRE(compareData(data_row.at(DAT_COL_VALUE_W).as<T>(), get<1>(data)[0]) == true);
-    }
-    else if (traits.isArray() && traits.hasWriteData())
+    DbConnection &testConn();
+    pqxx::connection &verifyConn();
+
+    void clearTables();
+    void storeAttribute(const AttributeTraits &traits);
+    string storeAttributeByTraits(const AttributeTraits &traits);
+
+    template<Tango::CmdArgType Type>
+    tuple<vector<typename TangoTypeTraits<Type>::type>, vector<typename TangoTypeTraits<Type>::type>>
+        storeTestEventData(const string &att_name, const AttributeTraits &traits, int quality = Tango::ATTR_VALID);
+
+    template<typename T>
+    void checkStoreTestEventData(
+        const string &att_name, const AttributeTraits &traits, const tuple<vector<T>, vector<T>> &data);
+
+    QueryBuilder &queryBuilder() { return _query_builder; }
+    vector<AttributeTraits> getTraits() const;
+    vector<AttributeTraits> getTraitsImplemented() const;
+
+public:
+    DbConnectionTestsFixture() = default;
+};
+
+std::unique_ptr<pqxx::connection> DbConnectionTestsFixture::_verify_conn = std::unique_ptr<pqxx::connection> {};
+
+//=============================================================================
+//=============================================================================
+DbConnection &DbConnectionTestsFixture::testConn()
+{
+    if (_test_conn == nullptr)
     {
-        REQUIRE(data_row.at(DAT_COL_VALUE_W).size() > 0);
-        REQUIRE(compareVector(data_row.at(DAT_COL_VALUE_W).as<vector<T>>(), get<1>(data)) == true);
+        _test_conn = make_unique<DbConnection>(_db_access);
+        REQUIRE_NOTHROW(_test_conn->connect(postgres_db::HdbppConnectionString));
     }
+
+    return *_test_conn;
 }
-}; // namespace psql_conn_test
 
-SCENARIO("The DbConnection class can open a valid connection to a postgres node",
-    "[db-access][hdbpp-db-access][db-connection][psql]")
+//=============================================================================
+//=============================================================================
+pqxx::connection &DbConnectionTestsFixture::verifyConn()
 {
-    GIVEN("An unconnected DbConnection object")
+    if (_verify_conn == nullptr)
+        _verify_conn = make_unique<pqxx::connection>(postgres_db::HdbppConnectionString);
+
+    return *_verify_conn;
+}
+
+//=============================================================================
+//=============================================================================
+void DbConnectionTestsFixture::clearTables()
+{
+    vector<AttributeTraits> traits_array = getTraits();
+
     {
-        DbConnection conn;
+        string query = "TRUNCATE ";
+
+        work tx {verifyConn()};
 
-        WHEN("Requesting a connection with a given connect string")
+        for (auto &traits : traits_array)
         {
-            REQUIRE_NOTHROW(conn.connect(postgres_db::ConnectionString));
+            query += QueryBuilder::tableName(traits);
+            query += ",";
+        }
 
-            THEN("A connection is opened and reported as open") { REQUIRE(conn.isOpen()); }
-            AND_WHEN("The connection is disconnected")
-            {
-                REQUIRE_NOTHROW(conn.disconnect());
+        query += schema::ErrTableName + ",";
+        query += schema::ParamTableName + ",";
+        query += schema::HistoryEventTableName + ",";
+        query += schema::HistoryTableName + ",";
+        query += schema::ConfTableName + " RESTART IDENTITY";
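+        // schematically this builds: TRUNCATE <one table per traits combination>,att_error_desc,
+        //   att_parameter,att_history_event,att_history,att_conf RESTART IDENTITY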
 
-                THEN("The connection reports closed") { REQUIRE(conn.isClosed()); }
-            }
-        }
+        REQUIRE_NOTHROW(tx.exec(query));
+        tx.commit();
     }
 }
 
-SCENARIO("The DbConnection class handles a bad connection attempt with an exception",
-    "[db-access][hdbpp-db-access][db-connection][psql]")
+//=============================================================================
+//=============================================================================
+void DbConnectionTestsFixture::storeAttribute(const AttributeTraits &traits)
 {
-    GIVEN("An unconnected DbConnection object")
-    {
-        DbConnection conn;
+    REQUIRE_NOTHROW(testConn().storeAttribute(attr_name::TestAttrFinalName,
+        attr_name::TestAttrCs,
+        attr_name::TestAttrDomain,
+        attr_name::TestAttrFamily,
+        attr_name::TestAttrMember,
+        attr_name::TestAttrName,
+        traits));
+}
 
-        WHEN("Requesting a connection with an invalid host")
-        {
-            THEN("A connection_error is thrown")
-            {
-                REQUIRE_THROWS_AS(conn.connect("user=postgres password=password host=unknown"), Tango::DevFailed);
-            }
-        }
-        WHEN("Requesting a connection with an invalid user")
-        {
-            THEN("A connection_error is thrown")
-            {
-                REQUIRE_THROWS_AS(conn.connect("user=invalid password=password host=hdb1"), Tango::DevFailed);
-            }
-        }
-        WHEN("Requesting a connection with an invalid password")
-        {
-            THEN("A connection_error is thrown")
-            {
-                REQUIRE_THROWS_AS(conn.connect("user=postgres password=invalid host=hdb1"), Tango::DevFailed);
-            }
-        }
-    }
+//=============================================================================
+//=============================================================================
+string DbConnectionTestsFixture::storeAttributeByTraits(const AttributeTraits &traits)
+{
+    auto name = attr_name::TestAttrFinalName + "_" + tangoEnumToString(traits.type()) + "_" +
+        tangoEnumToString(traits.writeType()) + "_" + tangoEnumToString(traits.formatType());
+
+    REQUIRE_NOTHROW(testConn().storeAttribute(name,
+        attr_name::TestAttrCs,
+        attr_name::TestAttrDomain,
+        attr_name::TestAttrFamily,
+        attr_name::TestAttrMember,
+        attr_name::TestAttrName,
+        traits));
+
+    return name;
 }
 
-SCENARIO("Storing Attributes in the database", "[db-access][hdbpp-db-access][db-connection][psql]")
+//=============================================================================
+//=============================================================================
+vector<AttributeTraits> DbConnectionTestsFixture::getTraits() const
 {
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
+    vector<AttributeTraits> traits_array {};
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
-    psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
+    vector<Tango::CmdArgType> types {Tango::DEV_BOOLEAN,
+        Tango::DEV_DOUBLE,
+        Tango::DEV_FLOAT,
+        Tango::DEV_STRING,
+        Tango::DEV_LONG,
+        Tango::DEV_ULONG,
+        Tango::DEV_LONG64,
+        Tango::DEV_ULONG64,
+        Tango::DEV_SHORT,
+        Tango::DEV_USHORT,
+        Tango::DEV_UCHAR,
+        Tango::DEV_STATE,
+        Tango::DEV_ENCODED,
+        Tango::DEV_ENUM};
 
-    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    vector<Tango::AttrWriteType> write_types {Tango::READ, Tango::WRITE, Tango::READ_WRITE, Tango::READ_WITH_WRITE};
+    vector<Tango::AttrDataFormat> format_types {Tango::SCALAR, Tango::SPECTRUM};
 
-    GIVEN("A valid DbConnection connected to a hdbpp database")
-    {
-        WHEN("Storing a test attribute data set to the database")
-        {
-            psql_conn_test::storeTestAttribute(conn, traits);
+    // loop over every combination of Tango type, format and write type
+    for (auto &type : types)
+        for (auto &format : format_types)
+            for (auto &write : write_types)
+                traits_array.emplace_back(AttributeTraits {write, format, type});
 
-            THEN("The data exists in the database, and can be read back and verified")
-            {
-                {
-                    pqxx::work tx {test_conn};
-                    auto attr_row(tx.exec1("SELECT * FROM " + CONF_TABLE_NAME));
-
-                    auto type_row(tx.exec1("SELECT " + CONF_TYPE_COL_TYPE_ID + " FROM " + CONF_TYPE_TABLE_NAME +
-                        " WHERE " + CONF_TYPE_COL_TYPE_NUM + " = " + std::to_string(traits.type())));
-
-                    auto format_row(tx.exec1("SELECT " + CONF_FORMAT_COL_FORMAT_ID + " FROM " + CONF_FORMAT_TABLE_NAME +
-                        " WHERE " + CONF_FORMAT_COL_FORMAT_NUM + " = " + std::to_string(traits.formatType())));
-
-                    auto access_row(tx.exec1("SELECT " + CONF_WRITE_COL_WRITE_ID + " FROM " + CONF_WRITE_TABLE_NAME +
-                        " WHERE " + CONF_WRITE_COL_WRITE_NUM + " = " + std::to_string(traits.writeType())));
-
-                    tx.commit();
-
-                    REQUIRE(attr_row.at(CONF_COL_NAME).as<string>() == TestAttrFQDName);
-                    REQUIRE(attr_row.at(CONF_COL_CS_NAME).as<string>() == TestAttrCs);
-                    REQUIRE(attr_row.at(CONF_COL_DOMAIN).as<string>() == TestAttrDomain);
-                    REQUIRE(attr_row.at(CONF_COL_FAMILY).as<string>() == TestAttrFamily);
-                    REQUIRE(attr_row.at(CONF_COL_MEMBER).as<string>() == TestAttrMember);
-                    REQUIRE(attr_row.at(CONF_COL_LAST_NAME).as<string>() == TestAttrName);
-                    REQUIRE(attr_row.at(CONF_COL_TABLE_NAME).as<string>() == QueryBuilder().tableName(traits));
-                    REQUIRE(attr_row.at(CONF_COL_TYPE_ID).as<int>() == type_row.at(CONF_TYPE_COL_TYPE_ID).as<int>());
-                    REQUIRE(attr_row.at(CONF_COL_FORMAT_TYPE_ID).as<int>() ==
-                        format_row.at(CONF_FORMAT_COL_FORMAT_ID).as<int>());
-                    REQUIRE(attr_row.at(CONF_COL_WRITE_TYPE_ID).as<int>() ==
-                        access_row.at(CONF_WRITE_COL_WRITE_ID).as<int>());
-                }
-            }
-            AND_WHEN("Trying to store the attribute again")
-            {
-                THEN("An exception is throw as the entry already exists in the database")
-                {
-                    REQUIRE_THROWS_AS(conn.storeAttribute(TestAttrFinalName,
-                                          TestAttrCs,
-                                          TestAttrDomain,
-                                          TestAttrFamily,
-                                          TestAttrMember,
-                                          TestAttrName,
-                                          traits),
-                        Tango::DevFailed);
-                }
-            }
-        }
-    }
+    return traits_array;
+}
+
+//=============================================================================
+//=============================================================================
+vector<AttributeTraits> DbConnectionTestsFixture::getTraitsImplemented() const
+{
+    vector<AttributeTraits> traits_array {};
+
+    vector<Tango::CmdArgType> types {Tango::DEV_BOOLEAN,
+        Tango::DEV_DOUBLE,
+        Tango::DEV_FLOAT,
+        Tango::DEV_STRING,
+        Tango::DEV_LONG,
+        Tango::DEV_ULONG,
+        Tango::DEV_LONG64,
+        Tango::DEV_ULONG64,
+        Tango::DEV_SHORT,
+        Tango::DEV_USHORT,
+        Tango::DEV_UCHAR,
+        Tango::DEV_STATE};
+
+    vector<Tango::AttrWriteType> write_types {Tango::READ, Tango::WRITE, Tango::READ_WRITE, Tango::READ_WITH_WRITE};
+    vector<Tango::AttrDataFormat> format_types {Tango::SCALAR, Tango::SPECTRUM};
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
+    // loop over every combination of Tango type, format and write type
+    for (auto &type : types)
+        for (auto &format : format_types)
+            for (auto &write : write_types)
+                traits_array.emplace_back(AttributeTraits {write, format, type});
 
-    if (test_conn.is_open())
-        test_conn.disconnect();
+    return traits_array;
 }
 
-SCENARIO("Storing Attributes in a disconnected state", "[db-access][hdbpp-db-access][db-connection][psql]")
+//=============================================================================
+//=============================================================================
+template<Tango::CmdArgType Type>
+tuple<vector<typename TangoTypeTraits<Type>::type>, vector<typename TangoTypeTraits<Type>::type>>
+    DbConnectionTestsFixture::storeTestEventData(const string &att_name, const AttributeTraits &traits, int quality)
 {
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
+    struct timeval tv
+    {};
+    gettimeofday(&tv, nullptr);
+    double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
-    psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
+    auto r = generateData<Type>(traits, !traits.hasReadData());
+    auto w = generateData<Type>(traits, !traits.hasWriteData());
 
-    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    // make a copy for the consistency check
+    auto ret = make_tuple((*r), (*w));
+
+    REQUIRE_NOTHROW(testConn().storeDataEvent(att_name, event_time, quality, move(r), move(w), traits));
+
+    return ret;
+}
+
+//=============================================================================
+//=============================================================================
+template<typename T>
+void DbConnectionTestsFixture::checkStoreTestEventData(
+    const string &att_name, const AttributeTraits &traits, const tuple<vector<T>, vector<T>> &data)
+{
+    pqxx::work tx {verifyConn()};
+
+    // get the attribute id
+    auto attr_row(
+        tx.exec1("SELECT * FROM " + schema::ConfTableName + " WHERE " + schema::ConfColName + "='" + att_name + "'"));
+
+    // now get the last row stored
+    auto data_row(tx.exec1("SELECT * FROM " + QueryBuilder::tableName(traits) + " WHERE " + schema::DatColId + "=" +
+        to_string(attr_row.at(schema::ConfColId).as<int>()) + " " + " ORDER BY " + schema::DatColDataTime +
+        " DESC LIMIT 1"));
 
-    GIVEN("A valid DbConnection connected to a hdbpp database")
+    tx.commit();
+
+    REQUIRE(data_row.at(schema::DatColId).as<int>() == attr_row.at(schema::ConfColId).as<int>());
+
+    if (traits.isScalar() && traits.hasReadData())
     {
-        WHEN("Disconnecting from the database and trying to store event")
-        {
-            conn.disconnect();
+        REQUIRE(data_row.at(schema::DatColValueR).size() > 0);
+        REQUIRE(compareData(data_row.at(schema::DatColValueR).as<T>(), get<0>(data)[0]) == true);
+    }
 
-            THEN("An exception is throw as the database connection is down")
-            {
-                REQUIRE_THROWS_AS(conn.storeAttribute(TestAttrFinalName,
-                                      TestAttrCs,
-                                      TestAttrDomain,
-                                      TestAttrFamily,
-                                      TestAttrMember,
-                                      TestAttrName,
-                                      traits),
-                    Tango::DevFailed);
-            }
-        }
+    if (traits.isArray() && traits.hasReadData())
+    {
+        REQUIRE(data_row.at(schema::DatColValueR).size() > 0);
+        REQUIRE(compareVector(data_row.at(schema::DatColValueR).as<vector<T>>(), get<0>(data)) == true);
     }
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
+    if (traits.isScalar() && traits.hasWriteData())
+    {
+        REQUIRE(data_row.at(schema::DatColValueW).size() > 0);
+        REQUIRE(compareData(data_row.at(schema::DatColValueW).as<T>(), get<1>(data)[0]) == true);
+    }
 
-    if (test_conn.is_open())
-        test_conn.disconnect();
+    if (traits.isArray() && traits.hasWriteData())
+    {
+        REQUIRE(data_row.at(schema::DatColValueW).size() > 0);
+        REQUIRE(compareVector(data_row.at(schema::DatColValueW).as<vector<T>>(), get<1>(data)) == true);
+    }
 }
+}; // namespace pqxx_conn_test
 
-SCENARIO("Storing History Events in the database", "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "The DbConnection class can open a valid connection to a postgresql node",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
+    DbConnection conn(DbConnection::DbStoreMethod::PreparedStatement);
+    REQUIRE_NOTHROW(conn.connect(postgres_db::ConnectionString));
+    REQUIRE(conn.isOpen());
+    REQUIRE_NOTHROW(conn.disconnect());
+    REQUIRE(conn.isClosed());
+    SUCCEED("Passed");
+}
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "The DbConnection class handles a bad connection attempts with an exception",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    DbConnection conn(DbConnection::DbStoreMethod::PreparedStatement);
+    REQUIRE_THROWS_AS(conn.connect("user=postgres password=password host=unknown"), Tango::DevFailed);
+    REQUIRE_THROWS_AS(conn.connect("user=invalid password=password host=hdb1"), Tango::DevFailed);
+    REQUIRE_THROWS_AS(conn.connect("user=postgres password=invalid host=hdb1"), Tango::DevFailed);
+    SUCCEED("Passed");
+}
+
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing Attributes in the database succeeds",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+
+    REQUIRE_NOTHROW(clearTables());
+    REQUIRE_NOTHROW(storeAttribute(traits));
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with an attribute stored in it")
     {
-        psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_EVENT_TABLE_NAME);
+        pqxx::work tx {verifyConn()};
+        auto attr_row(tx.exec1("SELECT * FROM " + schema::ConfTableName));
 
-        AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-        psql_conn_test::storeTestAttribute(conn, traits);
+        auto type_row(tx.exec1("SELECT " + schema::ConfTypeColTypeId + " FROM " + schema::ConfTypeTableName +
+            " WHERE " + schema::ConfTypeColTypeNum + " = " + std::to_string(traits.type())));
 
-        WHEN("Storing a new history event in the database")
-        {
-            REQUIRE_NOTHROW(conn.storeHistoryEvent(TestAttrFQDName, events::PauseEvent));
+        auto format_row(tx.exec1("SELECT " + schema::ConfFormatColFormatId + " FROM " + schema::ConfFormatTableName +
+            " WHERE " + schema::ConfFormatColFormatNum + " = " + std::to_string(traits.formatType())));
 
-            THEN("Then both the event and history event exists in the database, and can be read back and verified")
-            {
-                {
-                    pqxx::work tx {test_conn};
-                    auto event_row(tx.exec1("SELECT * FROM " + HISTORY_EVENT_TABLE_NAME));
-                    auto history_row(tx.exec1("SELECT * FROM " + HISTORY_TABLE_NAME));
-                    auto attr_row(tx.exec1("SELECT * FROM " + CONF_TABLE_NAME));
-                    tx.commit();
-
-                    // check event type
-                    REQUIRE(event_row.at(HISTORY_EVENT_COL_EVENT).as<string>() == events::PauseEvent);
-
-                    // check event id matches event table id
-                    REQUIRE(event_row.at(HISTORY_EVENT_COL_EVENT_ID).as<int>() ==
-                        history_row.at(HISTORY_COL_EVENT_ID).as<int>());
-
-                    // check attribute id match
-                    REQUIRE(attr_row.at(CONF_COL_ID).as<int>() == history_row.at(HISTORY_COL_ID).as<int>());
-                }
-            }
-            AND_WHEN("Trying to store a second history event with the same event")
-            {
-                REQUIRE_NOTHROW(conn.storeHistoryEvent(TestAttrFQDName, events::PauseEvent));
-
-                THEN("A second history event is added to the database")
-                {
-                    {
-                        pqxx::work tx {test_conn};
-                        auto event_result(tx.exec1("SELECT * FROM " + HISTORY_EVENT_TABLE_NAME));
-                        auto history_row(tx.exec_n(2, "SELECT * FROM " + HISTORY_TABLE_NAME));
-                        auto attr_row(tx.exec1("SELECT * FROM " + CONF_TABLE_NAME));
-                        tx.commit();
-
-                        REQUIRE(event_result.at(HISTORY_EVENT_COL_EVENT).as<string>() == events::PauseEvent);
-
-                        // check event type
-                        for (const auto &row : history_row)
-                        {
-                            REQUIRE(attr_row.at(CONF_COL_ID).as<int>() == row.at(HISTORY_COL_ID).as<int>());
-
-                            // check event id matches event table id
-                            REQUIRE(row.at(HISTORY_COL_EVENT_ID).as<int>() ==
-                                event_result.at(HISTORY_COL_EVENT_ID).as<int>());
-                        }
-                    }
-                }
-            }
-        }
-        WHEN("Storing a two different history event in the database in a row")
-        {
-            vector<string> events {events::StartEvent, events::PauseEvent};
+        auto access_row(tx.exec1("SELECT " + schema::ConfWriteColWriteId + " FROM " + schema::ConfWriteTableName +
+            " WHERE " + schema::ConfWriteColWriteNum + " = " + std::to_string(traits.writeType())));
 
-            REQUIRE_NOTHROW(conn.storeHistoryEvent(TestAttrFQDName, events[0]));
-            REQUIRE_NOTHROW(conn.storeHistoryEvent(TestAttrFQDName, events[1]));
+        tx.commit();
 
-            THEN("Then both the events exists in the history event table, and can be read back and verified")
-            {
-                {
-                    pqxx::work tx {test_conn};
-                    auto result(tx.exec_n(2, "SELECT * FROM " + HISTORY_EVENT_TABLE_NAME));
-                    tx.commit();
+        REQUIRE(attr_row.at(schema::ConfColName).as<string>() == attr_name::TestAttrFQDName);
+        REQUIRE(attr_row.at(schema::ConfColCsName).as<string>() == attr_name::TestAttrCs);
+        REQUIRE(attr_row.at(schema::ConfColDomain).as<string>() == attr_name::TestAttrDomain);
+        REQUIRE(attr_row.at(schema::ConfColFamily).as<string>() == attr_name::TestAttrFamily);
+        REQUIRE(attr_row.at(schema::ConfColMember).as<string>() == attr_name::TestAttrMember);
+        REQUIRE(attr_row.at(schema::ConfColLastName).as<string>() == attr_name::TestAttrName);
+        REQUIRE(attr_row.at(schema::ConfColTableName).as<string>() == QueryBuilder().tableName(traits));
+        REQUIRE(attr_row.at(schema::ConfColTypeId).as<int>() == type_row.at(schema::ConfTypeColTypeId).as<int>());
 
-                    int i = 0;
+        REQUIRE(attr_row.at(schema::ConfColFormatTypeId).as<int>() ==
+            format_row.at(schema::ConfFormatColFormatId).as<int>());
 
-                    // check event type
-                    for (auto row : result)
-                        REQUIRE(row.at(HISTORY_EVENT_COL_EVENT).as<string>() == events[i++]);
-                }
-            }
-        }
+        REQUIRE(
+            attr_row.at(schema::ConfColWriteTypeId).as<int>() == access_row.at(schema::ConfWriteColWriteId).as<int>());
     }
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
+    SUCCEED("Passed");
+}
 
-    if (test_conn.is_open())
-        test_conn.disconnect();
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing the same attribute in the database twice fails",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    REQUIRE_NOTHROW(storeAttribute(traits));
+
+    REQUIRE_THROWS_AS(testConn().storeAttribute(attr_name::TestAttrFinalName,
+                          attr_name::TestAttrCs,
+                          attr_name::TestAttrDomain,
+                          attr_name::TestAttrFamily,
+                          attr_name::TestAttrMember,
+                          attr_name::TestAttrName,
+                          traits),
+        Tango::DevFailed);
+
+    SUCCEED("Passed");
 }
 
-SCENARIO("Storing History Events unrelated to any known Attribute", "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing Attributes in a disconnected state",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
+    DbConnection conn(DbConnection::DbStoreMethod::PreparedStatement);
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
+    REQUIRE_THROWS_AS(conn.storeAttribute(attr_name::TestAttrFinalName,
+                          attr_name::TestAttrCs,
+                          attr_name::TestAttrDomain,
+                          attr_name::TestAttrFamily,
+                          attr_name::TestAttrMember,
+                          attr_name::TestAttrName,
+                          traits),
+        Tango::DevFailed);
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with no attribute stored in it")
-    {
-        psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_EVENT_TABLE_NAME);
+    SUCCEED("Passed");
+}
 
-        WHEN("Storing a new history event in the database")
-        {
-            THEN("An exception is raised")
-            {
-                REQUIRE_THROWS_AS(conn.storeHistoryEvent(TestAttrFQDName, events::PauseEvent), Tango::DevFailed);
-            }
-        }
-    }
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing a series of the same History Events in the database successfully",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    REQUIRE_NOTHROW(storeAttribute(traits));
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
+    REQUIRE_NOTHROW(testConn().storeHistoryEvent(attr_name::TestAttrFQDName, events::PauseEvent));
 
-    if (test_conn.is_open())
-        test_conn.disconnect();
-}
+    {
+        pqxx::work tx {verifyConn()};
+        auto event_row(tx.exec1("SELECT * FROM " + schema::HistoryEventTableName));
+        auto history_row(tx.exec1("SELECT * FROM " + schema::HistoryTableName));
+        auto attr_row(tx.exec1("SELECT * FROM " + schema::ConfTableName));
+        tx.commit();
 
-SCENARIO("Storing History Events in a disconnected state", "[db-access][hdbpp-db-access][db-connection][psql]")
-{
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
+        // check event type
+        REQUIRE(event_row.at(schema::HistoryEventColEvent).as<string>() == events::PauseEvent);
+
+        // check event id matches event table id
+        REQUIRE(event_row.at(schema::HistoryEventColEventId).as<int>() ==
+            history_row.at(schema::HistoryColEventId).as<int>());
+
+        // check attribute id match
+        REQUIRE(attr_row.at(schema::ConfColId).as<int>() == history_row.at(schema::HistoryColId).as<int>());
+    }
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
+    REQUIRE_NOTHROW(testConn().storeHistoryEvent(attr_name::TestAttrFQDName, events::PauseEvent));
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with an attribute stored in it")
     {
-        psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_EVENT_TABLE_NAME);
+        pqxx::work tx {verifyConn()};
+        auto event_result(tx.exec1("SELECT * FROM " + schema::HistoryEventTableName));
+        auto history_row(tx.exec_n(2, "SELECT * FROM " + schema::HistoryTableName));
+        auto attr_row(tx.exec1("SELECT * FROM " + schema::ConfTableName));
+        tx.commit();
 
-        AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-        psql_conn_test::storeTestAttribute(conn, traits);
+        REQUIRE(event_result.at(schema::HistoryEventColEvent).as<string>() == events::PauseEvent);
 
-        WHEN("Disconnecting from the database and trying again")
+        // check each history row references the correct attribute and event
+        for (const auto &row : history_row)
         {
-            REQUIRE_NOTHROW(conn.disconnect());
+            REQUIRE(attr_row.at(schema::ConfColId).as<int>() == row.at(schema::HistoryColId).as<int>());
 
-            THEN("An exception is throw as the database connection is down")
-            {
-                REQUIRE_THROWS_AS(conn.storeHistoryEvent(TestAttrFQDName, events::PauseEvent), Tango::DevFailed);
-            }
+            // check event id matches event table id
+            REQUIRE(
+                row.at(schema::HistoryColEventId).as<int>() == event_result.at(schema::HistoryColEventId).as<int>());
         }
     }
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
-
-    if (test_conn.is_open())
-        test_conn.disconnect();
+    SUCCEED("Passed");
 }
 
-SCENARIO("Storing Parameter Events in the database", "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing a series of different History Events in the database successfully",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
-    struct timeval tv
-    {};
-    gettimeofday(&tv, nullptr);
-    double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    REQUIRE_NOTHROW(storeAttribute(traits));
 
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
+    vector<string> events {events::StartEvent, events::PauseEvent};
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
+    REQUIRE_NOTHROW(testConn().storeHistoryEvent(attr_name::TestAttrFQDName, events[0]));
+    REQUIRE_NOTHROW(testConn().storeHistoryEvent(attr_name::TestAttrFQDName, events[1]));
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with an attribute stored in it")
     {
-        psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, PARAM_TABLE_NAME);
+        pqxx::work tx {verifyConn()};
+        auto result(tx.exec_n(2, "SELECT * FROM " + schema::HistoryEventTableName));
+        tx.commit();
 
-        AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-        psql_conn_test::storeTestAttribute(conn, traits);
+        int i = 0;
 
-        WHEN("Storing a new parameter event in the database")
-        {
-            REQUIRE_NOTHROW(conn.storeParameterEvent(TestAttrFinalName,
-                event_time,
-                AttrInfoLabel,
-                AttrInfoUnit,
-                AttrInfoStandardUnit,
-                AttrInfoDisplayUnit,
-                AttrInfoFormat,
-                AttrInfoRel,
-                AttrInfoAbs,
-                AttrInfoPeriod,
-                AttrInfoDescription));
-
-            THEN("The data exists in the database, and can be read back and verified")
-            {
-                {
-                    pqxx::work tx {test_conn};
-                    auto attr_row(tx.exec1("SELECT * FROM " + CONF_TABLE_NAME));
-                    auto param_row(tx.exec1("SELECT * FROM " + PARAM_TABLE_NAME));
-                    tx.commit();
-
-                    // TODO check event time
-                    //REQUIRE(param_row.at(PARAM_COL_EV_TIME).as<double>() == event_time);
-                    REQUIRE(param_row.at(PARAM_COL_LABEL).as<string>() == AttrInfoLabel);
-                    REQUIRE(param_row.at(PARAM_COL_UNIT).as<string>() == AttrInfoUnit);
-                    REQUIRE(param_row.at(PARAM_COL_STANDARDUNIT).as<string>() == AttrInfoStandardUnit);
-                    REQUIRE(param_row.at(PARAM_COL_DISPLAYUNIT).as<string>() == AttrInfoDisplayUnit);
-                    REQUIRE(param_row.at(PARAM_COL_FORMAT).as<string>() == AttrInfoFormat);
-                    REQUIRE(param_row.at(PARAM_COL_ARCHIVERELCHANGE).as<string>() == AttrInfoRel);
-                    REQUIRE(param_row.at(PARAM_COL_ARCHIVEABSCHANGE).as<string>() == AttrInfoAbs);
-                    REQUIRE(param_row.at(PARAM_COL_ARCHIVEPERIOD).as<string>() == AttrInfoPeriod);
-                    REQUIRE(param_row.at(PARAM_COL_DESCRIPTION).as<string>() == AttrInfoDescription);
-
-                    // check attribute id match
-                    REQUIRE(attr_row.at(CONF_COL_ID).as<int>() == param_row.at(PARAM_COL_ID).as<int>());
-                }
-            }
-            AND_WHEN("Trying to store another parameter event for the same attribute")
-            {
-                REQUIRE_NOTHROW(conn.storeParameterEvent(TestAttrFinalName,
-                    event_time,
-                    AttrInfoLabel,
-                    AttrInfoUnit,
-                    AttrInfoStandardUnit,
-                    AttrInfoDisplayUnit,
-                    AttrInfoFormat,
-                    AttrInfoRel,
-                    AttrInfoAbs,
-                    AttrInfoPeriod,
-                    AttrInfoDescription));
-
-                THEN("A second parameter event is added to the database")
-                {
-                    {
-                        pqxx::work tx {test_conn};
-                        auto result(tx.exec_n(2, "SELECT * FROM " + PARAM_TABLE_NAME));
-                        tx.commit();
-
-                        REQUIRE(result.size() == 2);
-                    }
-                }
-            }
-        }
+        // check event type
+        for (auto row : result)
+            REQUIRE(row.at(schema::HistoryEventColEvent).as<string>() == events[i++]);
     }
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
+    SUCCEED("Passed");
+}
+
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing History Events unrelated to any known Attribute",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    REQUIRE_NOTHROW(clearTables());
+    REQUIRE_THROWS_AS(testConn().storeHistoryEvent(attr_name::TestAttrFQDName, events::PauseEvent), Tango::DevFailed);
+    SUCCEED("Passed");
+}
 
-    if (test_conn.is_open())
-        test_conn.disconnect();
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing History Events in a disconnected state",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    DbConnection conn(DbConnection::DbStoreMethod::PreparedStatement);
+    REQUIRE_THROWS_AS(conn.storeHistoryEvent(attr_name::TestAttrFQDName, events::PauseEvent), Tango::DevFailed);
+    SUCCEED("Passed");
 }
 
-SCENARIO("Storing Parameter Events in a disconnected state", "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing Parameter Events in the database",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
     struct timeval tv
     {};
+
     gettimeofday(&tv, nullptr);
     double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
 
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
-
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    REQUIRE_NOTHROW(storeAttribute(traits));
+
+    REQUIRE_NOTHROW(testConn().storeParameterEvent(attr_name::TestAttrFinalName,
+        event_time,
+        attr_info::AttrInfoLabel,
+        attr_info::AttrInfoUnit,
+        attr_info::AttrInfoStandardUnit,
+        attr_info::AttrInfoDisplayUnit,
+        attr_info::AttrInfoFormat,
+        attr_info::AttrInfoRel,
+        attr_info::AttrInfoAbs,
+        attr_info::AttrInfoPeriod,
+        attr_info::AttrInfoDescription));
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with an attribute stored in it")
     {
-        psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_EVENT_TABLE_NAME);
+        pqxx::work tx {verifyConn()};
+        auto attr_row(tx.exec1("SELECT * FROM " + schema::ConfTableName));
+        auto param_row(tx.exec1("SELECT * FROM " + schema::ParamTableName));
+        tx.commit();
 
-        AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-        psql_conn_test::storeTestAttribute(conn, traits);
+        // TODO check event time
+        //REQUIRE(param_row.at(schema::ParamColEvTime).as<double>() == event_time);
+        REQUIRE(param_row.at(schema::ParamColLabel).as<string>() == attr_info::AttrInfoLabel);
+        REQUIRE(param_row.at(schema::ParamColUnit).as<string>() == attr_info::AttrInfoUnit);
+        REQUIRE(param_row.at(schema::ParamColStandardUnit).as<string>() == attr_info::AttrInfoStandardUnit);
+        REQUIRE(param_row.at(schema::ParamColDisplayUnit).as<string>() == attr_info::AttrInfoDisplayUnit);
+        REQUIRE(param_row.at(schema::ParamColFormat).as<string>() == attr_info::AttrInfoFormat);
+        REQUIRE(param_row.at(schema::ParamColArchiveRelChange).as<string>() == attr_info::AttrInfoRel);
+        REQUIRE(param_row.at(schema::ParamColArchiveAbsChange).as<string>() == attr_info::AttrInfoAbs);
+        REQUIRE(param_row.at(schema::ParamColArchivePeriod).as<string>() == attr_info::AttrInfoPeriod);
+        REQUIRE(param_row.at(schema::ParamColDescription).as<string>() == attr_info::AttrInfoDescription);
+
+        // check attribute id match
+        REQUIRE(attr_row.at(schema::ConfColId).as<int>() == param_row.at(schema::ParamColId).as<int>());
+    }
 
-        WHEN("Disconnecting from the database and trying again")
-        {
-            conn.disconnect();
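+    // a second parameter event for the same attribute should simply add another row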
+    REQUIRE_NOTHROW(testConn().storeParameterEvent(attr_name::TestAttrFinalName,
+        event_time,
+        attr_info::AttrInfoLabel,
+        attr_info::AttrInfoUnit,
+        attr_info::AttrInfoStandardUnit,
+        attr_info::AttrInfoDisplayUnit,
+        attr_info::AttrInfoFormat,
+        attr_info::AttrInfoRel,
+        attr_info::AttrInfoAbs,
+        attr_info::AttrInfoPeriod,
+        attr_info::AttrInfoDescription));
 
-            THEN("An exception is throw as the database connection is down")
-            {
-                REQUIRE_THROWS_AS(conn.storeParameterEvent(TestAttrFinalName,
-                                      event_time,
-                                      AttrInfoLabel,
-                                      AttrInfoUnit,
-                                      AttrInfoStandardUnit,
-                                      AttrInfoDisplayUnit,
-                                      AttrInfoFormat,
-                                      AttrInfoRel,
-                                      AttrInfoAbs,
-                                      AttrInfoPeriod,
-                                      AttrInfoDescription),
-                    Tango::DevFailed);
-            }
-        }
-    }
+    {
+        pqxx::work tx {verifyConn()};
+        auto result(tx.exec_n(2, "SELECT * FROM " + schema::ParamTableName));
+        tx.commit();
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
+        REQUIRE(result.size() == 2);
+    }
 
-    if (test_conn.is_open())
-        test_conn.disconnect();
+    SUCCEED("Passed");
 }
 
-SCENARIO("Storing event data which is invalid", "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing Parameter Events in a disconnected state",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
     struct timeval tv
     {};
+
     gettimeofday(&tv, nullptr);
     double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
 
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
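+    // a connection that is never opened; the store call must raise Tango::DevFailed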
+    DbConnection conn(DbConnection::DbStoreMethod::PreparedStatement);
+
+    REQUIRE_THROWS_AS(conn.storeParameterEvent(attr_name::TestAttrFinalName,
+                          event_time,
+                          attr_info::AttrInfoLabel,
+                          attr_info::AttrInfoUnit,
+                          attr_info::AttrInfoStandardUnit,
+                          attr_info::AttrInfoDisplayUnit,
+                          attr_info::AttrInfoFormat,
+                          attr_info::AttrInfoRel,
+                          attr_info::AttrInfoAbs,
+                          attr_info::AttrInfoPeriod,
+                          attr_info::AttrInfoDescription),
+        Tango::DevFailed);
+
+    SUCCEED("Passed");
+}
+
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing events which has no data",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    struct timeval tv
+    {};
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
+    gettimeofday(&tv, nullptr);
+    double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with an attribute stored in it")
-    {
-        psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
+    vector<DbConnection::DbStoreMethod> access_methods {
+        DbConnection::DbStoreMethod::PreparedStatement, DbConnection::DbStoreMethod::InsertString};
 
-        WHEN("Storing a read only scalar data event with no data")
-        {
-            AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-            psql_conn_test::storeTestAttribute(conn, traits);
+    vector<Tango::AttrWriteType> write_types {Tango::READ, Tango::WRITE, Tango::READ_WRITE, Tango::READ_WITH_WRITE};
 
-            REQUIRE_NOTHROW(conn.storeDataEvent(TestAttrFinalName,
-                event_time,
-                Tango::ATTR_VALID,
-                move(make_unique<std::vector<double>>()),
-                move(make_unique<std::vector<double>>()),
-                traits));
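+    // exercise both storage paths: prepared statements and insert strings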
+    for (auto access : access_methods)
+    {
+        resetDbAccess(access);
+        REQUIRE_NOTHROW(clearTables());
 
-            THEN("The event is stored, with no data, and can be read back")
-            {
-                {
-                    pqxx::work tx {test_conn};
-                    auto data_row(tx.exec1("SELECT * FROM " + psql_conn_test::TestQueryBuilder.tableName(traits) +
-                        " ORDER BY " + DAT_COL_DATA_TIME + " LIMIT 1"));
-                    auto attr_row(tx.exec1("SELECT * FROM " + CONF_TABLE_NAME));
-                    tx.commit();
-
-                    REQUIRE(data_row.at(DAT_COL_ID).as<int>() == attr_row.at(CONF_COL_ID).as<int>());
-                    REQUIRE(data_row.at(DAT_COL_VALUE_R).is_null() == true);
-                }
-            }
-        }
-        WHEN("Storing a read/write spectrum data event with no data")
+        for (auto write_type : write_types)
         {
-            AttributeTraits traits {Tango::READ_WRITE, Tango::SPECTRUM, Tango::DEV_DOUBLE};
-            psql_conn_test::storeTestAttribute(conn, traits);
+            AttributeTraits traits {write_type, Tango::SCALAR, Tango::DEV_DOUBLE};
+            auto att_name = storeAttributeByTraits(traits);
 
-            REQUIRE_NOTHROW(conn.storeDataEvent(TestAttrFinalName,
+            REQUIRE_NOTHROW(testConn().storeDataEvent(att_name,
                 event_time,
                 Tango::ATTR_VALID,
                 move(make_unique<std::vector<double>>()),
                 move(make_unique<std::vector<double>>()),
                 traits));
 
-            THEN("The event is stored, with no data, and can be read back")
             {
-                {
-                    pqxx::work tx {test_conn};
-                    auto data_row(tx.exec1("SELECT * FROM " + psql_conn_test::TestQueryBuilder.tableName(traits) +
-                        " ORDER BY " + DAT_COL_DATA_TIME + " LIMIT 1"));
-                    auto attr_row(tx.exec1("SELECT * FROM " + CONF_TABLE_NAME));
-                    tx.commit();
-
-                    REQUIRE(data_row.at(DAT_COL_ID).as<int>() == attr_row.at(CONF_COL_ID).as<int>());
-                    REQUIRE(data_row.at(DAT_COL_VALUE_R).is_null() == true);
-                    REQUIRE(data_row.at(DAT_COL_VALUE_W).is_null() == true);
-                }
+                pqxx::work tx {verifyConn()};
+
+                string query = "SELECT * FROM ";
+                query += schema::ConfTableName;
+                query += " WHERE ";
+                query += schema::ConfColName;
+                query += "='";
+                query += att_name;
+                query += "'";
+
+                // get the attribute id
+                auto attr_row(tx.exec1(query));
+
+                query = "SELECT * FROM ";
+                query += QueryBuilder::tableName(traits);
+                query += " WHERE ";
+                query += schema::DatColId;
+                query += "=";
+                query += to_string(attr_row.at(schema::ConfColId).as<int>());
+                query += " ";
+                query += " ORDER BY ";
+                query += schema::DatColDataTime;
+                query += " DESC LIMIT 1";
+
+                // now get the last row stored
+                auto data_row(tx.exec1(query));
+
+                tx.commit();
+
+                REQUIRE(data_row.at(schema::DatColId).as<int>() == attr_row.at(schema::ConfColId).as<int>());
+
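+                // an event stored with empty vectors must leave the value columns NULL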
+                if (traits.hasReadData())
+                    REQUIRE(data_row.at(schema::DatColValueR).is_null() == true);
+
+                if (traits.hasWriteData())
+                    REQUIRE(data_row.at(schema::DatColValueW).is_null() == true);
             }
         }
     }
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
-
-    if (test_conn.is_open())
-        test_conn.disconnect();
+    SUCCEED("Passed");
 }
 
-TEST_CASE("Storing event data of all Tango type combinations in the database",
-    "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing event data for all Tango type combinations in the database (prepared statements)",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
-
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
-    psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
-
-    vector<Tango::CmdArgType> types {
-        Tango::DEV_BOOLEAN,
-        Tango::DEV_DOUBLE,
-        Tango::DEV_FLOAT,
-        Tango::DEV_STRING,
-        Tango::DEV_LONG,
-        Tango::DEV_ULONG,
-        Tango::DEV_LONG64,
-        Tango::DEV_ULONG64,
-        Tango::DEV_SHORT,
-        Tango::DEV_USHORT,
-        Tango::DEV_UCHAR,
-        Tango::DEV_STATE,
-        //Tango::DEV_ENCODED, Tango::DEV_ENUM
-    };
-
-    vector<Tango::AttrWriteType> write_types {Tango::READ, Tango::WRITE, Tango::READ_WRITE, Tango::READ_WITH_WRITE};
-    vector<Tango::AttrDataFormat> format_types {Tango::SCALAR, Tango::SPECTRUM};
+    auto traits_array = getTraitsImplemented();
+    REQUIRE_NOTHROW(clearTables());
+    resetDbAccess(DbConnection::DbStoreMethod::PreparedStatement);
 
-    // loop for every combination of type in Tango
-    for (auto &type : types)
+    for (auto &traits : traits_array)
     {
-        for (auto &format : format_types)
-        {
-            for (auto &write : write_types)
-            {
-                AttributeTraits traits {write, format, type};
+        INFO("Inserting data for traits: " << traits);
+        auto name = storeAttributeByTraits(traits);
 
-                DYNAMIC_SECTION("Storing a traits: " << traits)
-                {
-                    psql_conn_test::storeTestAttribute(conn, traits);
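+        // dispatch on the Tango type so the templated helpers generate and verify data of the matching C++ type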
+        switch (traits.type())
+        {
+            case Tango::DEV_BOOLEAN:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_BOOLEAN>(name, traits));
+                break;
 
-                    switch (traits.type())
-                    {
-                        case Tango::DEV_BOOLEAN:
-                            psql_conn_test::checkStoreTestEventData(test_conn,
-                                traits,
-                                psql_conn_test::storeTestEventData<Tango::DEV_BOOLEAN>(conn, traits));
+            case Tango::DEV_SHORT:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_SHORT>(name, traits));
+                break;
 
-                            break;
+            case Tango::DEV_LONG:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_LONG>(name, traits));
+                break;
 
-                        case Tango::DEV_SHORT:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_SHORT>(conn, traits));
+            case Tango::DEV_LONG64:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_LONG64>(name, traits));
+                break;
 
-                            break;
+            case Tango::DEV_FLOAT:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_FLOAT>(name, traits));
+                break;
 
-                        case Tango::DEV_LONG:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_LONG>(conn, traits));
-                            break;
+            case Tango::DEV_DOUBLE:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_DOUBLE>(name, traits));
+                break;
 
-                        case Tango::DEV_LONG64:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_LONG64>(conn, traits));
+            case Tango::DEV_UCHAR:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_UCHAR>(name, traits));
+                break;
 
-                            break;
+            case Tango::DEV_USHORT:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_USHORT>(name, traits));
+                break;
 
-                        case Tango::DEV_FLOAT:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_FLOAT>(conn, traits));
+            case Tango::DEV_ULONG:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_ULONG>(name, traits));
+                break;
 
-                            break;
+            case Tango::DEV_ULONG64:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_ULONG64>(name, traits));
+                break;
 
-                        case Tango::DEV_DOUBLE:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_DOUBLE>(conn, traits));
+            case Tango::DEV_STRING:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_STRING>(name, traits));
+                break;
 
-                            break;
+            case Tango::DEV_STATE:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_STATE>(name, traits));
+                break;
 
-                        case Tango::DEV_UCHAR:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_UCHAR>(conn, traits));
+            default: throw "Should not be here!";
+        }
+    }
 
-                            break;
+    SUCCEED("Passed");
+}
 
-                        case Tango::DEV_USHORT:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_USHORT>(conn, traits));
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing event data for all Tango type combinations in the database (insert strings)",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    auto traits_array = getTraitsImplemented();
+    REQUIRE_NOTHROW(clearTables());
+    resetDbAccess(DbConnection::DbStoreMethod::InsertString);
 
-                            break;
+    for (auto &traits : traits_array)
+    {
+        INFO("Inserting data for traits: " << traits);
+        auto name = storeAttributeByTraits(traits);
 
-                        case Tango::DEV_ULONG:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_ULONG>(conn, traits));
+        switch (traits.type())
+        {
+            case Tango::DEV_BOOLEAN:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_BOOLEAN>(name, traits));
+                break;
 
-                            break;
+            case Tango::DEV_SHORT:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_SHORT>(name, traits));
+                break;
 
-                        case Tango::DEV_ULONG64:
-                            psql_conn_test::checkStoreTestEventData(test_conn,
-                                traits,
-                                psql_conn_test::storeTestEventData<Tango::DEV_ULONG64>(conn, traits));
+            case Tango::DEV_LONG:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_LONG>(name, traits));
+                break;
 
-                            break;
+            case Tango::DEV_LONG64:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_LONG64>(name, traits));
+                break;
 
-                        case Tango::DEV_STRING:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_STRING>(conn, traits));
+            case Tango::DEV_FLOAT:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_FLOAT>(name, traits));
+                break;
 
-                            break;
+            case Tango::DEV_DOUBLE:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_DOUBLE>(name, traits));
+                break;
 
-                        case Tango::DEV_STATE:
-                            psql_conn_test::checkStoreTestEventData(
-                                test_conn, traits, psql_conn_test::storeTestEventData<Tango::DEV_STATE>(conn, traits));
+            case Tango::DEV_UCHAR:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_UCHAR>(name, traits));
+                break;
 
-                            break;
+            case Tango::DEV_USHORT:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_USHORT>(name, traits));
+                break;
 
-                            //case Tango::DEV_ENUM:
-                            //psql_conn_test::checkStoreTestEventData(
-                            //test_conn, traits, psql_conn_test::storeTestEventData(conn, traits));
+            case Tango::DEV_ULONG:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_ULONG>(name, traits));
+                break;
 
-                            //break;
+            case Tango::DEV_ULONG64:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_ULONG64>(name, traits));
+                break;
 
-                            //case Tango::DEV_ENCODED:
-                            //psql_conn_test::checkStoreTestEventData(
-                            //test_conn, traits, psql_conn_test::storeTestEventData<hdbpp_encoded_t>(conn, traits));
+            case Tango::DEV_STRING:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_STRING>(name, traits));
+                break;
 
-                            //break;
+            case Tango::DEV_STATE:
+                checkStoreTestEventData(name, traits, storeTestEventData<Tango::DEV_STATE>(name, traits));
+                break;
 
-                        default: throw "Should not be here!";
-                    }
-                }
-            }
+            default: throw "Should not be here!";
         }
     }
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
-
-    if (test_conn.is_open())
-        test_conn.disconnect();
+    SUCCEED("Passed");
 }
 
-SCENARIO("Storing data events in a disconnected state", "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing complex arrays of strings containing postgres escape characters",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
-
     struct timeval tv
     {};
+
     gettimeofday(&tv, nullptr);
     double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
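+    // strings covering characters postgres must escape: braces, quotes, commas, backslashes and newlines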
+    string s1 = "test brackets } {} with comma,";
+    string s2 = "quotes '' and commas, and 'quoted, comma', escaped \"double quote\"";
+    string s3 = R"(test two slash \ test four slash \\)";
+    string s4 = "line feed \n and return \r";
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with an attribute stored in it")
-    {
-        psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
-        AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-        psql_conn_test::storeTestAttribute(conn, traits);
+    auto value_r = std::make_unique<std::vector<std::string>>();
+    value_r->push_back(s1);
+    value_r->push_back(s2);
+    value_r->push_back(s3);
+    value_r->push_back(s4);
 
-        WHEN("Disconnecting from the database and trying again")
-        {
-            REQUIRE_NOTHROW(conn.disconnect());
+    auto value_w = std::make_unique<std::vector<std::string>>();
+    value_w->push_back(s1);
+    value_w->push_back(s2);
+    value_w->push_back(s3);
+    value_w->push_back(s4);
 
-            THEN("An exception is throw as the database connection is down")
-            {
-                REQUIRE_THROWS_AS(conn.storeDataEvent(TestAttrFinalName,
-                                      event_time,
-                                      Tango::ATTR_VALID,
-                                      move(make_unique<std::vector<double>>()),
-                                      move(make_unique<std::vector<double>>()),
-                                      traits),
-                    Tango::DevFailed);
-            }
-        }
-    }
+    auto original_values = make_tuple((*value_r), (*value_w));
+
+    AttributeTraits traits {Tango::READ_WRITE, Tango::SPECTRUM, Tango::DEV_STRING};
+    REQUIRE_NOTHROW(clearTables());
+    auto name = storeAttributeByTraits(traits);
+
+    // this only ever stores by insert string, so no need to test prepared statements.
+    REQUIRE_NOTHROW(
+        testConn().storeDataEvent(name, event_time, Tango::ATTR_VALID, move(value_r), move(value_w), traits));
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
+    checkStoreTestEventData(name, traits, original_values);
 
-    if (test_conn.is_open())
-        test_conn.disconnect();
+    SUCCEED("Passed");
 }
 
-SCENARIO("Storing data events as errors", "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing complex strings containing postgres escape characters",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
-    string error_msg = "A Test Error, 'Message'";
-
     struct timeval tv
     {};
-    gettimeofday(&tv, nullptr);
-    double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
-
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
-
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
-
-    GIVEN("A valid DbConnection connected to a hdbpp database with an attribute stored in it")
-    {
-        psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, ERR_TABLE_NAME);
-
-        AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-        psql_conn_test::storeTestAttribute(conn, traits);
-
-        WHEN("Storing a new error message in the database")
-        {
-            REQUIRE_NOTHROW(
-                conn.storeDataEventError(TestAttrFinalName, event_time, Tango::ATTR_VALID, error_msg, traits));
 
-            THEN("Then both the event and history event exists in the database, and can be read back and verified")
-            {
-                {
-                    pqxx::work tx {test_conn};
+    vector<string> values = {"test brackets } {} with comma,",
+        "quotes '' and commas, and 'quoted, comma', escaped \"double quote\"",
+        R"(test two slash \ test four slash \\)",
+        "line feed \n and return \r"};
 
-                    auto data_row(tx.exec1("SELECT * FROM " + psql_conn_test::TestQueryBuilder.tableName(traits) +
-                        " ORDER BY " + DAT_COL_DATA_TIME + " LIMIT 1"));
+    AttributeTraits traits {Tango::READ_WRITE, Tango::SCALAR, Tango::DEV_STRING};
 
-                    auto attr_row(tx.exec1("SELECT * FROM " + CONF_TABLE_NAME));
-                    auto error_row(tx.exec1("SELECT * FROM " + ERR_TABLE_NAME));
-                    tx.commit();
+    vector<DbConnection::DbStoreMethod> access_methods {
+        DbConnection::DbStoreMethod::PreparedStatement, DbConnection::DbStoreMethod::InsertString};
 
-                    REQUIRE(data_row.at(DAT_COL_ID).as<int>() == attr_row.at(CONF_COL_ID).as<int>());
-                    REQUIRE(data_row.at(DAT_COL_ERROR_DESC_ID).as<int>() == error_row.at(ERR_COL_ID).as<int>());
+    REQUIRE_NOTHROW(clearTables());
+    auto name = storeAttributeByTraits(traits);
 
-                    REQUIRE(error_row.at(ERR_COL_ERROR_DESC).as<string>() == error_msg);
-                }
-            }
-            AND_WHEN("A second error is stored with the same message")
-            {
-                gettimeofday(&tv, nullptr);
-                event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
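+    // store and verify each string via both prepared statements and insert strings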
+    for (auto access : access_methods)
+    {
+        resetDbAccess(access);
 
-                REQUIRE_NOTHROW(
-                    conn.storeDataEventError(TestAttrFinalName, event_time, Tango::ATTR_VALID, error_msg, traits));
+        for (auto &str : values)
+        {
+            gettimeofday(&tv, nullptr);
+            double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
 
-                THEN("The same error id is used in the event data")
-                {
-                    {
-                        pqxx::work tx {test_conn};
+            auto value_r = std::make_unique<std::vector<std::string>>();
+            value_r->push_back(str);
 
-                        auto data_row(tx.exec1("SELECT * FROM " + psql_conn_test::TestQueryBuilder.tableName(traits) +
-                            " ORDER BY " + DAT_COL_DATA_TIME + " LIMIT 1"));
+            auto value_w = std::make_unique<std::vector<std::string>>();
+            value_w->push_back(str);
 
-                        auto attr_row(tx.exec1("SELECT * FROM " + CONF_TABLE_NAME));
-                        auto error_row(tx.exec1("SELECT * FROM " + ERR_TABLE_NAME));
-                        tx.commit();
+            auto original_values = make_tuple((*value_r), (*value_w));
 
-                        REQUIRE(data_row.at(DAT_COL_ID).as<int>() == attr_row.at(CONF_COL_ID).as<int>());
-                        REQUIRE(data_row.at(DAT_COL_ERROR_DESC_ID).as<int>() == error_row.at(ERR_COL_ID).as<int>());
+            REQUIRE_NOTHROW(
+                testConn().storeDataEvent(name, event_time, Tango::ATTR_VALID, move(value_r), move(value_w), traits));
 
-                        REQUIRE(error_row.at(ERR_COL_ERROR_DESC).as<string>() == error_msg);
-                    }
-                }
-            }
+            checkStoreTestEventData(name, traits, original_values);
         }
     }
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
-
-    if (test_conn.is_open())
-        test_conn.disconnect();
+    SUCCEED("Passed");
 }
 
-SCENARIO("Fetching the last event after it has just been stored", "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing data events in a disconnected state",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
-
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
-
-    GIVEN("A valid DbConnection connected to a hdbpp database with an attribute and history event stored in it")
-    {
-        psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_EVENT_TABLE_NAME);
+    struct timeval tv
+    {};
 
-        AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-        psql_conn_test::storeTestAttribute(conn, traits);
+    gettimeofday(&tv, nullptr);
+    double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
 
-        REQUIRE_NOTHROW(conn.storeHistoryEvent(TestAttrFQDName, events::PauseEvent));
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    auto name = storeAttributeByTraits(traits);
 
-        WHEN("Fetching the last history event for the attribute")
-        {
-            string event;
-            REQUIRE_NOTHROW(event = conn.fetchLastHistoryEvent(TestAttrFQDName));
+    testConn().disconnect();
 
-            THEN("It is equal to the event just stored") { REQUIRE(event == events::PauseEvent); }
-            AND_WHEN("Storing a second event and fetching it")
-            {
-                REQUIRE_NOTHROW(conn.storeHistoryEvent(TestAttrFQDName, events::StartEvent));
+    REQUIRE_THROWS_AS(testConn().storeDataEvent(name,
+                          event_time,
+                          Tango::ATTR_VALID,
+                          move(make_unique<std::vector<double>>()),
+                          move(make_unique<std::vector<double>>()),
+                          traits),
+        Tango::DevFailed);
 
-                string event;
-                REQUIRE_NOTHROW(event = conn.fetchLastHistoryEvent(TestAttrFQDName));
+    SUCCEED("Passed");
+}
 
-                THEN("It is equal to the event just stored") { REQUIRE(event == events::StartEvent); }
-            }
-        }
-    }
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Storing data events as errors",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    string error_msg = "A Test Error, 'Message'";
 
-    if (conn.isOpen())
-        REQUIRE_NOTHROW(conn.disconnect());
+    struct timeval tv
+    {};
 
-    if (test_conn.is_open())
-        test_conn.disconnect();
-}
+    gettimeofday(&tv, nullptr);
+    double event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
 
-SCENARIO("When no events have been stored, no error is thrown requesting the last event",
-    "[db-access][hdbpp-db-access][db-connection][psql]")
-{
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    auto name = storeAttributeByTraits(traits);
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
+    REQUIRE_NOTHROW(testConn().storeDataEventError(name, event_time, Tango::ATTR_VALID, error_msg, traits));
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with no attribute nor history event stored in it")
     {
-        psql_conn_test::clearTable(test_conn, HISTORY_TABLE_NAME);
-        psql_conn_test::clearTable(test_conn, HISTORY_EVENT_TABLE_NAME);
+        pqxx::work tx {verifyConn()};
+
+        string query = "SELECT * FROM ";
+        query += schema::ConfTableName;
+        query += " WHERE ";
+        query += schema::ConfColName;
+        query += "='";
+        query += name;
+        query += "'";
+
+        // get the attribute id
+        pqxx::row attr_row;
+        REQUIRE_NOTHROW(attr_row = tx.exec1(query));
+
+        query = "SELECT * FROM ";
+        query += QueryBuilder::tableName(traits);
+        query += " WHERE ";
+        query += schema::DatColId;
+        query += "=";
+        query += to_string(attr_row.at(schema::ConfColId).as<int>());
+        query += " ";
+        query += " ORDER BY ";
+        query += schema::DatColDataTime;
+        query += " DESC LIMIT 1";
+
+        // now get the last row stored
+        pqxx::row data_row;
+        REQUIRE_NOTHROW(data_row = tx.exec1(query));
+
+        query = "SELECT * FROM ";
+        query += schema::ErrTableName;
+        query += " WHERE ";
+        query += schema::ErrColId + "=";
+        query += to_string(data_row.at(schema::DatColErrorDescId).as<int>());
+
+        pqxx::row error_row;
+        REQUIRE_NOTHROW(error_row = tx.exec1(query));
 
-        WHEN("Requesting the last event")
-        {
-            string event;
+        tx.commit();
 
-            THEN("No error occurs, and no event is returned")
-            {
-                REQUIRE_NOTHROW(event = conn.fetchLastHistoryEvent(TestAttrFQDName));
-                REQUIRE(event.empty());
-            }
-        }
+        REQUIRE(error_row.at(schema::ErrColErrorDesc).as<string>() == error_msg);
     }
-}
 
-SCENARIO("The archive of an attribute can be determined by fetchAttributeArchived()",
-    "[db-access][hdbpp-db-access][db-connection][psql]")
-{
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
+    gettimeofday(&tv, nullptr);
+    event_time = tv.tv_sec + tv.tv_usec / 1.0e6;
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
-    psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
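+    // a second error with the same message should reuse the existing error description id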
+    REQUIRE_NOTHROW(testConn().storeDataEventError(name, event_time, Tango::ATTR_VALID, error_msg, traits));
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with no attribute in it")
     {
-        WHEN("Requesting the archive state of the test attribute")
-        {
-            THEN("The archive state is false") { REQUIRE(!conn.fetchAttributeArchived(TestAttrFQDName)); }
-        }
-        WHEN("Storing the test attribute and checking its archive state")
-        {
-            AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-            psql_conn_test::storeTestAttribute(conn, traits);
+        pqxx::work tx {verifyConn()};
+
+        string query = "SELECT * FROM ";
+        query += schema::ConfTableName;
+        query += " WHERE ";
+        query += schema::ConfColName;
+        query += "='";
+        query += name;
+        query += "'";
+
+        // get the attribute id
+        pqxx::row attr_row;
+        REQUIRE_NOTHROW(attr_row = tx.exec1(query));
+
+        query = "SELECT * FROM ";
+        query += QueryBuilder::tableName(traits);
+        query += " WHERE ";
+        query += schema::DatColId;
+        query += "=";
+        query += to_string(attr_row.at(schema::ConfColId).as<int>());
+        query += " ";
+        query += " ORDER BY ";
+        query += schema::DatColDataTime;
+        query += " DESC LIMIT 2";
+
+        // now get the last two rows stored
+        pqxx::result data_result;
+        REQUIRE_NOTHROW(data_result = tx.exec(query));
+        tx.commit();
 
-            THEN("The archive state is true") { REQUIRE(conn.fetchAttributeArchived(TestAttrFQDName)); }
-        }
+        REQUIRE(data_result[0].at(schema::DatColErrorDescId).as<int>() ==
+            data_result[1].at(schema::DatColErrorDescId).as<int>());
     }
+
+    SUCCEED("Passed");
 }
 
-SCENARIO("The type traits of an archived attribute can be returned by fetchAttributeTraits()",
-    "[db-access][hdbpp-db-access][db-connection][psql]")
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "Fetching the last history event after it has just been stored",
+    "[db-access][hdbpp-db-access][db-connection]")
 {
-    DbConnection conn;
-    REQUIRE_NOTHROW(conn.connect(postgres_db::HdbppConnectionString));
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    auto name = storeAttributeByTraits(traits);
+    REQUIRE_NOTHROW(testConn().storeHistoryEvent(name, events::PauseEvent));
+    string event;
+    REQUIRE_NOTHROW(event = testConn().fetchLastHistoryEvent(name));
+    REQUIRE(event == events::PauseEvent);
+    REQUIRE_NOTHROW(testConn().storeHistoryEvent(name, events::StartEvent));
+    REQUIRE_NOTHROW(event = testConn().fetchLastHistoryEvent(name));
+    REQUIRE(event == events::StartEvent);
+    SUCCEED("Passed");
+}
 
-    // used for verification
-    pqxx::connection test_conn(postgres_db::HdbppConnectionString);
-    psql_conn_test::clearTable(test_conn, CONF_TABLE_NAME);
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "When no history events have been stored, no error is thrown requesting the last event",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    storeAttribute(traits);
+    string event;
+    REQUIRE_NOTHROW(event = testConn().fetchLastHistoryEvent(attr_name::TestAttrFQDName));
+    REQUIRE(event.empty());
+    SUCCEED("Passed");
+}
 
-    GIVEN("A valid DbConnection connected to a hdbpp database with a attribute in it")
-    {
-        WHEN("Requesting the attribute type traits state of the test attribute")
-        {
-            THEN("An exception is thrown") { REQUIRE_THROWS(conn.fetchAttributeTraits(TestAttrFQDName)); }
-        }
-        WHEN("Storing the test attribute and checking its type traits")
-        {
-            AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-            psql_conn_test::storeTestAttribute(conn, traits);
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "The archive state of an attribute can be determined by fetchAttributeArchived()",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    REQUIRE(!testConn().fetchAttributeArchived(attr_name::TestAttrFQDName));
+    storeAttribute(traits);
+    REQUIRE(testConn().fetchAttributeArchived(attr_name::TestAttrFQDName));
+    SUCCEED("Passed");
+}
 
-            THEN("The returned traits match those it was stored with")
-            {
-                REQUIRE(conn.fetchAttributeTraits(TestAttrFQDName) == traits);
-            }
-        }
-    }
-}
\ No newline at end of file
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "fetchAttributeTraits() throws an exception when the attribute is not archived",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    REQUIRE_THROWS(testConn().fetchAttributeTraits(attr_name::TestAttrFQDName));
+    SUCCEED("Passed");
+}
+
+TEST_CASE_METHOD(pqxx_conn_test::DbConnectionTestsFixture,
+    "The type traits of an archived attribute can be returned by fetchAttributeTraits()",
+    "[db-access][hdbpp-db-access][db-connection]")
+{
+    AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+    REQUIRE_NOTHROW(clearTables());
+    storeAttribute(traits);
+    REQUIRE(testConn().fetchAttributeTraits(attr_name::TestAttrFQDName) == traits);
+    SUCCEED("Passed");
+}
diff --git a/test/HdbppTxDataEventTests.cpp b/test/HdbppTxDataEventTests.cpp
index 8861eab72ed71e67a8d91444cf8ddc332b119607..8288c14cd2e461370a1db882bd67dc2fb907f6ec 100644
--- a/test/HdbppTxDataEventTests.cpp
+++ b/test/HdbppTxDataEventTests.cpp
@@ -173,8 +173,10 @@ SCENARIO("Construct a valid HdbppTxDataEvent data event for storage", "[hdbpp-tx
     // ugly, how is this dealt with in Tango!?!
     struct timeval tv
     {};
+
     struct Tango::TimeVal tango_tv
     {};
+
     gettimeofday(&tv, nullptr);
     tango_tv.tv_sec = tv.tv_sec;
     tango_tv.tv_usec = tv.tv_usec;
@@ -240,8 +242,10 @@ SCENARIO("An invalid quality results in an HdbppTxDataEvent event with no data",
     // ugly, how is this dealt with in Tango!?!
     struct timeval tv
     {};
+
     struct Tango::TimeVal tango_tv
     {};
+
     gettimeofday(&tv, nullptr);
     tango_tv.tv_sec = tv.tv_sec;
     tango_tv.tv_usec = tv.tv_usec;
@@ -284,8 +288,10 @@ SCENARIO("A DeviceAttribute with no data results in an HdbppTxDataEvent event wi
     // ugly, how is this dealt with in Tango!?!
     struct timeval tv
     {};
+
     struct Tango::TimeVal tango_tv
     {};
+
     gettimeofday(&tv, nullptr);
     tango_tv.tv_sec = tv.tv_sec;
     tango_tv.tv_usec = tv.tv_usec;
@@ -328,8 +334,10 @@ SCENARIO(
     // ugly, how is this dealt with in Tango!?!
     struct timeval tv
     {};
+
     struct Tango::TimeVal tango_tv
     {};
+
     gettimeofday(&tv, nullptr);
     tango_tv.tv_sec = tv.tv_sec;
     tango_tv.tv_usec = tv.tv_usec;
@@ -409,8 +417,10 @@ TEST_CASE("Creating HdbppTxDataEvents for each tango type and storing them", "[d
     // ugly, how is this dealt with in Tango!?!
     struct timeval tv
     {};
+
     struct Tango::TimeVal tango_tv
     {};
+
     gettimeofday(&tv, nullptr);
     tango_tv.tv_sec = tv.tv_sec;
     tango_tv.tv_usec = tv.tv_usec;
@@ -505,4 +515,4 @@ SCENARIO("HdbppTxDataEvent Simulated exception received", "[hdbpp-tx][hdbpp-tx-d
             THEN("An exception is raised") { REQUIRE_THROWS_AS(tx.store(), runtime_error); }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/QueryBuilderTests.cpp b/test/QueryBuilderTests.cpp
index 16feaf0b956023b1f3589f0b6cd522d353efd984..3daaf18169959df25440bb2fa93e64d507c985ef 100644
--- a/test/QueryBuilderTests.cpp
+++ b/test/QueryBuilderTests.cpp
@@ -18,29 +18,145 @@
    along with libhdb++timescale.  If not, see <http://www.gnu.org/licenses/>. */
 
 #include "QueryBuilder.hpp"
+#include "TestHelpers.hpp"
 #include "TimescaleSchema.hpp"
 #include "catch2/catch.hpp"
 
 using namespace std;
 using namespace hdbpp_internal;
 using namespace hdbpp_internal::pqxx_conn;
+using namespace hdbpp_test::attr_name;
 using namespace Catch::Matchers;
 
-SCENARIO("storeDataEventQuery() returns the correct Value fields for the given traits", "[query-string]")
+SCENARIO("storeDataEventString() returns the correct Value fields for the given traits", "[query-string]")
 {
     GIVEN("A query builder object with nothing cached")
     {
         QueryBuilder query_builder;
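+        // sample read/write vectors used to check which value columns appear in the generated query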
+        auto value_r = make_unique<vector<double>>(vector<double> {1.1, 2.2});
+        auto value_r_empty = make_unique<vector<double>>();
+        auto value_w = make_unique<vector<double>>(vector<double> {3.3, 4.4});
+        auto value_w_empty = make_unique<vector<double>>();
 
         WHEN("Requesting a query string for traits configured for Tango::READ")
         {
             AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
-            auto result = query_builder.storeDataEventQuery<double>(traits);
 
-            THEN("The result must include the DAT_COL_VALUE_R field only")
+            auto result = query_builder.storeDataEventString<double>(
+                TestAttrFQDName, string("0"), string("1"), value_r, value_w_empty, traits);
+
+            THEN("The result must include the schema::DatColValueR field only")
+            {
+                REQUIRE_THAT(result, Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, !Contains(schema::DatColValueW));
+                REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits)));
+            }
+        }
+        WHEN("Requesting a query string for traits configured for Tango::WRITE")
+        {
+            AttributeTraits traits {Tango::WRITE, Tango::SCALAR, Tango::DEV_DOUBLE};
+
+            auto result = query_builder.storeDataEventString<double>(
+                TestAttrFQDName, string("0"), string("1"), value_r_empty, value_w, traits);
+
+            THEN("The result must include the schema::DatColValueW field only")
+            {
+                REQUIRE_THAT(result, !Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, Contains(schema::DatColValueW));
+                REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits)));
+            }
+        }
+        WHEN("Requesting a query string for traits configured for Tango::READ_WRITE")
+        {
+            AttributeTraits traits {Tango::READ_WRITE, Tango::SCALAR, Tango::DEV_DOUBLE};
+
+            auto result = query_builder.storeDataEventString<double>(
+                TestAttrFQDName, string("0"), string("1"), value_r, value_w, traits);
+
+            THEN("The result must include both the schema::DatColValueR and schema::DatColValueW field")
+            {
+                REQUIRE_THAT(result, Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, Contains(schema::DatColValueW));
+                REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits)));
+                REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits)));
+            }
+        }
+        WHEN("Requesting a query string for traits configured for Tango::READ_WITH_WRITE")
+        {
+            AttributeTraits traits {Tango::READ_WITH_WRITE, Tango::SCALAR, Tango::DEV_DOUBLE};
+
+            auto result = query_builder.storeDataEventString<double>(
+                TestAttrFQDName, string("0"), string("1"), value_r, value_w, traits);
+
+            THEN("The result must include both the schema::DatColValueR and schema::DatColValueW field")
+            {
+                REQUIRE_THAT(result, Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, Contains(schema::DatColValueW));
+                REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits)));
+                REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits)));
+            }
+        }
+    }
+}
+
+SCENARIO("storeDataEventString() adds a null when value is size zero", "[query-string]")
+{
+    GIVEN("A query builder object with nothing cached")
+    {
+        QueryBuilder query_builder;
+        auto value_r = make_unique<vector<double>>(vector<double> {1.1, 2.2});
+        auto value_r_empty = make_unique<vector<double>>();
+        auto value_w = make_unique<vector<double>>(vector<double> {3.3, 4.4});
+        auto value_w_empty = make_unique<vector<double>>();
+
+        WHEN("Requesting a query string with a size zero read value")
+        {
+            AttributeTraits traits {Tango::READ_WRITE, Tango::SCALAR, Tango::DEV_DOUBLE};
+
+            auto result = query_builder.storeDataEventString<double>(
+                TestAttrFQDName, string("0"), string("1"), value_r, value_w_empty, traits);
+
+            THEN("The result must include both the schema::DatColValueR and schema::DatColValueW field")
+            {
+                REQUIRE_THAT(result, Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, Contains(schema::DatColValueW));
+                REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_r, traits)));
+                REQUIRE_THAT(result, Contains("NULL"));
+            }
+        }
+        WHEN("Requesting a query string with a size zero write value")
+        {
+            AttributeTraits traits {Tango::READ_WRITE, Tango::SCALAR, Tango::DEV_DOUBLE};
+
+            auto result = query_builder.storeDataEventString<double>(
+                TestAttrFQDName, string("0"), string("1"), value_r_empty, value_w, traits);
+
+            THEN("The result must include both the schema::DatColValueR and schema::DatColValueW field")
+            {
+                REQUIRE_THAT(result, Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, Contains(schema::DatColValueW));
+                REQUIRE_THAT(result, Contains(query_utils::DataToString<double>::run(value_w, traits)));
+                REQUIRE_THAT(result, Contains("NULL"));
+            }
+        }
+    }
+}
+
+SCENARIO("storeDataEventStatement() returns the correct Value fields for the given traits", "[query-string]")
+{
+    GIVEN("A query builder object with nothing cached")
+    {
+        QueryBuilder query_builder;
+
+        WHEN("Requesting a query string for traits configured for Tango::READ")
+        {
+            AttributeTraits traits {Tango::READ, Tango::SCALAR, Tango::DEV_DOUBLE};
+            auto result = query_builder.storeDataEventStatement<double>(traits);
+
+            THEN("The result must include the schema::DatColValueR field only")
             {
-                REQUIRE_THAT(result, Contains(DAT_COL_VALUE_R));
-                REQUIRE_THAT(result, !Contains(DAT_COL_VALUE_W));
+                REQUIRE_THAT(result, Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, !Contains(schema::DatColValueW));
                 REQUIRE_THAT(result, Contains("$4"));
                 REQUIRE_THAT(result, !Contains("$5"));
             }
@@ -48,12 +164,12 @@ SCENARIO("storeDataEventQuery() returns the correct Value fields for the given t
         WHEN("Requesting a query string for traits configured for Tango::WRITE")
         {
             AttributeTraits traits {Tango::WRITE, Tango::SCALAR, Tango::DEV_DOUBLE};
-            auto result = query_builder.storeDataEventQuery<double>(traits);
+            auto result = query_builder.storeDataEventStatement<double>(traits);
 
-            THEN("The result must include the DAT_COL_VALUE_W field only")
+            THEN("The result must include the schema::DatColValueW field only")
             {
-                REQUIRE_THAT(result, !Contains(DAT_COL_VALUE_R));
-                REQUIRE_THAT(result, Contains(DAT_COL_VALUE_W));
+                REQUIRE_THAT(result, !Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, Contains(schema::DatColValueW));
                 REQUIRE_THAT(result, Contains("$4"));
                 REQUIRE_THAT(result, !Contains("$5"));
             }
@@ -61,12 +177,12 @@ SCENARIO("storeDataEventQuery() returns the correct Value fields for the given t
         WHEN("Requesting a query string for traits configured for Tango::READ_WRITE")
         {
             AttributeTraits traits {Tango::READ_WRITE, Tango::SCALAR, Tango::DEV_DOUBLE};
-            auto result = query_builder.storeDataEventQuery<double>(traits);
+            auto result = query_builder.storeDataEventStatement<double>(traits);
 
-            THEN("The result must include both the DAT_COL_VALUE_R and DAT_COL_VALUE_W field")
+            THEN("The result must include both the schema::DatColValueR and schema::DatColValueW field")
             {
-                REQUIRE_THAT(result, Contains(DAT_COL_VALUE_R));
-                REQUIRE_THAT(result, Contains(DAT_COL_VALUE_W));
+                REQUIRE_THAT(result, Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, Contains(schema::DatColValueW));
                 REQUIRE_THAT(result, Contains("$4"));
                 REQUIRE_THAT(result, Contains("$5"));
             }
@@ -74,12 +190,12 @@ SCENARIO("storeDataEventQuery() returns the correct Value fields for the given t
         WHEN("Requesting a query string for traits configured for Tango::READ_WITH_WRITE")
         {
             AttributeTraits traits {Tango::READ_WITH_WRITE, Tango::SCALAR, Tango::DEV_DOUBLE};
-            auto result = query_builder.storeDataEventQuery<double>(traits);
+            auto result = query_builder.storeDataEventStatement<double>(traits);
 
-            THEN("The result must include both the DAT_COL_VALUE_R and DAT_COL_VALUE_W field")
+            THEN("The result must include both the schema::DatColValueR and schema::DatColValueW field")
             {
-                REQUIRE_THAT(result, Contains(DAT_COL_VALUE_R));
-                REQUIRE_THAT(result, Contains(DAT_COL_VALUE_W));
+                REQUIRE_THAT(result, Contains(schema::DatColValueR));
+                REQUIRE_THAT(result, Contains(schema::DatColValueW));
                 REQUIRE_THAT(result, Contains("$4"));
                 REQUIRE_THAT(result, Contains("$5"));
             }
@@ -97,11 +213,11 @@ SCENARIO("Creating valid insert queries with storeDataEventErrorQuery()", "[quer
 
         WHEN("Requesting a table name for the traits")
         {
-            auto result = query_builder.tableName(traits);
+            auto result = QueryBuilder::tableName(traits);
 
-            THEN("The result must include the TYPE_SCALAR from the schema")
+            THEN("The result must include the schema::TypeScalar from the schema")
             {
-                REQUIRE_THAT(result, Contains(TYPE_SCALAR));
+                REQUIRE_THAT(result, Contains(schema::TypeScalar));
             }
         }
     }
@@ -127,22 +243,22 @@ TEST_CASE("Creating valid database table names for types", "[query-string]")
     vector<Tango::AttrWriteType> write_types {Tango::READ, Tango::WRITE, Tango::READ_WRITE, Tango::READ_WITH_WRITE};
     vector<Tango::AttrDataFormat> format_types {Tango::SCALAR, Tango::SPECTRUM, Tango::IMAGE};
 
-    vector<string> types_str {TYPE_DEV_DOUBLE,
-        TYPE_DEV_FLOAT,
-        TYPE_DEV_STRING,
-        TYPE_DEV_LONG,
-        TYPE_DEV_ULONG,
-        TYPE_DEV_LONG64,
-        TYPE_DEV_ULONG64,
-        TYPE_DEV_SHORT,
-        TYPE_DEV_USHORT,
-        TYPE_DEV_BOOLEAN,
-        TYPE_DEV_UCHAR,
-        TYPE_DEV_STATE,
-        TYPE_DEV_ENCODED,
-        TYPE_DEV_ENUM};
-
-    vector<string> format_types_str {TYPE_SCALAR, TYPE_ARRAY, TYPE_IMAGE};
+    vector<string> types_str {schema::TypeDevDouble,
+        schema::TypeDevFloat,
+        schema::TypeDevString,
+        schema::TypeDevLong,
+        schema::TypeDevUlong,
+        schema::TypeDevLong64,
+        schema::TypeDevUlong64,
+        schema::TypeDevShort,
+        schema::TypeDevUshort,
+        schema::TypeDevBoolean,
+        schema::TypeDevUchar,
+        schema::TypeDevState,
+        schema::TypeDevEncoded,
+        schema::TypeDevEnum};
+
+    vector<string> format_types_str {schema::TypeScalar, schema::TypeArray, schema::TypeImage};
 
     // loop for every combination of type in Tango
     for (unsigned int t = 0; t < types.size(); ++t)
@@ -151,16 +267,15 @@ TEST_CASE("Creating valid database table names for types", "[query-string]")
         {
             for (auto &write_type : write_types)
             {
-                QueryBuilder query_builder;
                 AttributeTraits traits {write_type, format_types[f], types[t]};
 
                 DYNAMIC_SECTION("Testing table name for traits: " << traits)
                 {
-                    auto result = query_builder.tableName(traits);
+                    auto result = QueryBuilder::tableName(traits);
                     REQUIRE_THAT(result, Contains(types_str[t]));
                     REQUIRE_THAT(result, Contains(format_types_str[f]));
                 }
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/test/TestHelpers.hpp b/test/TestHelpers.hpp
index 2a3b0872658b1d2ef64995e5f914fede5124ed5c..3000fceb95a3d98fb6003cea9dd4027d02a19cce 100644
--- a/test/TestHelpers.hpp
+++ b/test/TestHelpers.hpp
@@ -31,18 +31,15 @@
 
 namespace hdbpp_test
 {
-namespace psql_conn_test
+namespace psql_connection
 {
     namespace postgres_db
     {
         // connection strings
-        const std::string ConnectionString =
-            "user=hdb_admin_tester password=hdbpp host=hdb-services port=5000 dbname=hdb_test";
-
-        const std::string HdbppConnectionString =
-            "user=hdb_admin_tester password=hdbpp host=hdb-services port=5000 dbname=hdb_test";
+        const std::string ConnectionString = "user=postgres host=localhost port=5432 dbname=hdb password=password";
+        const std::string HdbppConnectionString = "user=postgres host=localhost port=5432 dbname=hdb password=password";
     } // namespace postgres_db
-} // namespace psql_conn_test
+} // namespace psql_connection
 
 namespace attr_name
 {
@@ -73,7 +70,7 @@ namespace attr_info
 {
     const std::string AttrInfoDescription =
         "Description about attribute, its \"quoted\",  and 'quoted', yet does it work?";
-        
+
     const std::string AttrInfoLabel = "Label";
     const std::string AttrInfoUnit = "Unit %";
     const std::string AttrInfoStandardUnit = "Standard Unit";
@@ -234,7 +231,7 @@ namespace data_gen
     }
 
     template<Tango::CmdArgType Type>
-    typename TangoTypeTraits<Type>::array generateSpectrumData(bool empty_data = false, int size = 1024)
+    typename TangoTypeTraits<Type>::array generateSpectrumData(bool empty_data = false, int size = 10)
     {
         return std::move(data<Type>(empty_data ? 0 : size));
     }
@@ -250,4 +247,4 @@ namespace data_gen
     }
 } // namespace data_gen
 } // namespace hdbpp_test
-#endif // _TEST_HELPERS_HPP
\ No newline at end of file
+#endif // _TEST_HELPERS_HPP
diff --git a/test/main.cpp b/test/main.cpp
index 2be52fded2c5230c8a1c9ad15202fc2fd2689e6a..0b69841a622379923a6cc5ecab354e55cf969b58 100644
--- a/test/main.cpp
+++ b/test/main.cpp
@@ -18,13 +18,15 @@
    along with libhdb++timescale.  If not, see <http://www.gnu.org/licenses/>. */
 
 #define CATCH_CONFIG_RUNNER
+#undef CATCH_CONFIG_FAST_COMPILE
 
 #include "LibUtils.hpp"
 #include "catch2/catch.hpp"
 
 int main(int argc, char *argv[])
 {
-    hdbpp_internal::LogConfigurator::initLogging(false, true, "/tmp/hdb/test.log");
+    hdbpp_internal::LogConfigurator::initLogging();
+    //hdbpp_internal::LogConfigurator::initConsoleLogging();
     hdbpp_internal::LogConfigurator::setLoggingLevel(spdlog::level::err);
 
     int result = Catch::Session().run(argc, argv);
diff --git a/thirdparty/spdlog b/thirdparty/spdlog
index a7148b718ea2fabb8387cb90aee9bf448da63e65..1549ff12f1aa61ffc4d9a8727c519034724392a0 160000
--- a/thirdparty/spdlog
+++ b/thirdparty/spdlog
@@ -1 +1 @@
-Subproject commit a7148b718ea2fabb8387cb90aee9bf448da63e65
+Subproject commit 1549ff12f1aa61ffc4d9a8727c519034724392a0