diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4b4dd9211ae01862e9e5afa62f625e409c9ab340..8437f0727d43d009280282235e589254b42d74c0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -97,7 +97,7 @@ docker_build_image_all: - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh unb2-sim latest - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-apsct latest - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-apspu latest - - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-beam latest + - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-tilebeam latest - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-beamlet latest - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-boot latest - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-docker latest @@ -252,17 +252,17 @@ docker_build_image_device_apspu: script: # Do not remove 'bash' or statement will be ignored by primitive docker shell - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-apspu $tag -docker_build_image_device_beam: +docker_build_image_device_tilebeam: extends: .base_docker_images_except only: refs: - merge_requests changes: - - docker-compose/device-beam.yml + - docker-compose/device-tilebeam.yml - docker-compose/lofar-device-base/* script: # Do not remove 'bash' or statement will be ignored by primitive docker shell - - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-beam $tag + - bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-tilebeam $tag docker_build_image_device_beamlet: extends: .base_docker_images_except only: diff --git a/CDB/LOFAR_ConfigDb.json b/CDB/LOFAR_ConfigDb.json index 621c845891df2b53856d3b997c1b9b462fb1f41e..a2ed6f1742c94908c4080e8e47b9e8bfad078461 100644 --- a/CDB/LOFAR_ConfigDb.json +++ b/CDB/LOFAR_ConfigDb.json @@ -14,10 +14,10 @@ } } }, - "Beam": { + "TileBeam": { "STAT": { - "Beam": { - "STAT/Beam/1": {} + "TileBeam": { + "STAT/TileBeam/1": {} } } }, 
diff --git a/docker-compose/archiver-timescale.yml b/docker-compose/archiver-timescale.yml index 385f1ce4a26138afbe46f3d75ee0e95e64640b4b..a154b96d3338a039237b5d6f1933933c0969ecb3 100644 --- a/docker-compose/archiver-timescale.yml +++ b/docker-compose/archiver-timescale.yml @@ -46,7 +46,7 @@ services: /bin/bash -c " wait-for-it.sh archiver-timescale:5432 --timeout=30 --strict -- wait-for-it.sh ${TANGO_HOST} --timeout=30 --strict -- - hdbppcm-srv 02" + hdbppcm-srv 01" logging: driver: syslog options: @@ -74,7 +74,7 @@ services: /bin/bash -c " wait-for-it.sh archiver-timescale:5432 --timeout=30 --strict -- wait-for-it.sh ${TANGO_HOST} --timeout=30 --strict -- - hdbppes-srv 02" + hdbppes-srv 01" logging: driver: syslog options: diff --git a/docker-compose/device-beam.yml b/docker-compose/device-tilebeam.yml similarity index 82% rename from docker-compose/device-beam.yml rename to docker-compose/device-tilebeam.yml index 6a276157f5593fcfe5bd90f1b231ccf0560b37e4..1d243a6e18dc34f43309ec95f0d65e2c34cf2ee2 100644 --- a/docker-compose/device-beam.yml +++ b/docker-compose/device-tilebeam.yml @@ -8,8 +8,8 @@ volumes: iers-data: {} services: - device-beam: - image: device-beam + device-tilebeam: + image: device-tilebeam # build explicitly, as docker-compose does not understand a local image # being shared among services. 
build: @@ -17,7 +17,7 @@ services: dockerfile: docker-compose/lofar-device-base/Dockerfile args: SOURCE_IMAGE: ${DOCKER_REGISTRY_HOST}/${DOCKER_REGISTRY_USER}-tango-itango:${TANGO_ITANGO_VERSION} - container_name: ${CONTAINER_NAME_PREFIX}device-beam + container_name: ${CONTAINER_NAME_PREFIX}device-tilebeam logging: driver: "json-file" options: @@ -39,5 +39,5 @@ services: - bin/start-ds.sh # configure CORBA to _listen_ on 0:port, but tell others we're _reachable_ through ${HOSTNAME}:port, since CORBA # can't know about our Docker port forwarding - - l2ss-beam Beam STAT -v -ORBendPoint giop:tcp:0:5711 -ORBendPointPublish giop:tcp:${HOSTNAME}:5711 + - l2ss-tilebeam TileBeam STAT -v -ORBendPoint giop:tcp:0:5711 -ORBendPointPublish giop:tcp:${HOSTNAME}:5711 restart: unless-stopped diff --git a/docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py b/docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py index 39497983356d061978acea77086db0d7b22cb88c..82afee40e570b375aec7339ddfa81013d9b39f61 100644 --- a/docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py +++ b/docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py @@ -7,9 +7,9 @@ sst = DeviceProxy("STAT/SST/1") xst = DeviceProxy("STAT/XST/1") unb2 = DeviceProxy("STAT/UNB2/1") boot = DeviceProxy("STAT/Boot/1") -beam = DeviceProxy("STAT/Beam/1") +tilebeam = DeviceProxy("STAT/TileBeam/1") beamlet = DeviceProxy("STAT/Beamlet/1") docker = DeviceProxy("STAT/Docker/1") # Put them in a list in case one wants to iterate -devices = [apsct, apspu, recv, sdp, sst, xst, unb2, boot, beam, beamlet, docker] +devices = [apsct, apspu, recv, sdp, sst, xst, unb2, boot, tilebeam, beamlet, docker] diff --git a/docker-compose/timescaledb/Dockerfile b/docker-compose/timescaledb/Dockerfile index f97faca9aaa8ab6f14299f64e9dbaa4fbe9ef599..88e81b1ba2a3ffbb98f1016cfdcdfc255daec575 100644 --- 
a/docker-compose/timescaledb/Dockerfile +++ b/docker-compose/timescaledb/Dockerfile @@ -1,10 +1,25 @@ FROM timescale/timescaledb:latest-pg12 - +# Set admin role to perform DB creation COPY resources/01_admin.sh docker-entrypoint-initdb.d/002_admin.sh +# Create DB schema (tables, indexes, etc.) COPY resources/02_hdb_schema.sql docker-entrypoint-initdb.d/003_hdb_schema.sql +# Create DB roles COPY resources/03_hdb_roles.sql docker-entrypoint-initdb.d/004_hdb_roles.sql -COPY resources/04_hdb_ext_aggregates.sql docker-entrypoint-initdb.d/005_hdb_ext_aggregates.sql -COPY resources/05_lofar_func.sh docker-entrypoint-initdb.d/006_lofar_func.sh -COPY resources/06_lofar_views.sql docker-entrypoint-initdb.d/007_lofar_views.sql -COPY resources/07_cleanup.sql docker-entrypoint-initdb.d/008_cleanup.sql +# Create further roles +COPY resources/04_hdb_ext_users.sql docker-entrypoint-initdb.d/005_hdb_ext_users.sql +# Add further functions +COPY resources/05_hdb_ext_import.sql docker-entrypoint-initdb.d/006_hdb_ext_import.sql +# Create timescaledb aggregates +COPY resources/06_hdb_ext_aggregates.sql docker-entrypoint-initdb.d/007_hdb_ext_aggregates.sql +COPY resources/07_hdb_ext_arrays_aggregates_helper.sql docker-entrypoint-initdb.d/008_hdb_ext_arrays_aggregates_helper.sql +COPY resources/08_hdb_ext_arrays_aggregates.sql docker-entrypoint-initdb.d/009_hdb_ext_arrays_aggregates.sql +# Add compress policy +COPY resources/09_hdb_ext_compress_policy.sql docker-entrypoint-initdb.d/010_hdb_ext_compress_policy.sql +# Add reorder policy +COPY resources/10_hdb_ext_reorder_policy.sql docker-entrypoint-initdb.d/011_hdb_ext_reorder_policy.sql +# Add LOFAR functions and views +COPY resources/11_lofar_func.sh docker-entrypoint-initdb.d/012_lofar_func.sh +COPY resources/12_lofar_views.sql docker-entrypoint-initdb.d/013_lofar_views.sql +# Cleanup admin role +COPY resources/13_cleanup.sql docker-entrypoint-initdb.d/014_cleanup.sql diff --git 
a/docker-compose/timescaledb/resources/04_hdb_ext_users.sql b/docker-compose/timescaledb/resources/04_hdb_ext_users.sql new file mode 100644 index 0000000000000000000000000000000000000000..1ce744e8675d4833f983cdf040107c4e1dd7c346 --- /dev/null +++ b/docker-compose/timescaledb/resources/04_hdb_ext_users.sql @@ -0,0 +1,32 @@ +-- ----------------------------------------------------------------------------- +-- This file is part of the hdbpp-timescale-project +-- +-- Copyright (C) : 2014-2019 +-- European Synchrotron Radiation Facility +-- BP 220, Grenoble 38043, FRANCE +-- +-- libhdb++timescale is free software: you can redistribute it and/or modify +-- it under the terms of the Lesser GNU General Public License as published by +-- the Free Software Foundation, either version 3 of the License, or +-- (at your option) any later version. +-- +-- libhdb++timescale is distributed in the hope that it will be useful, +-- but WITHOUT ANY WARRANTY; without even the implied warranty of +-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser +-- GNU General Public License for more details. +-- +-- You should have received a copy of the Lesser GNU General Public License +-- along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>. 
+-- ----------------------------------------------------------------------------- + +\c hdb + +-- Some useful users for a basic system +CREATE ROLE hdb_cfg_man WITH LOGIN PASSWORD 'hdbpp'; +GRANT readwrite TO hdb_cfg_man; + +CREATE ROLE hdb_event_sub WITH LOGIN PASSWORD 'hdbpp'; +GRANT readwrite TO hdb_event_sub; + +CREATE ROLE hdb_data_reporter WITH LOGIN PASSWORD 'hdbpp'; +GRANT readonly TO hdb_data_reporter; diff --git a/docker-compose/timescaledb/resources/05_hdb_ext_import.sql b/docker-compose/timescaledb/resources/05_hdb_ext_import.sql new file mode 100644 index 0000000000000000000000000000000000000000..f08cc3610df1a96bb207eef24fca29e274c8e5d7 --- /dev/null +++ b/docker-compose/timescaledb/resources/05_hdb_ext_import.sql @@ -0,0 +1,19 @@ +\c hdb + +CREATE OR REPLACE FUNCTION expand_name() RETURNS TRIGGER AS $$ +DECLARE + len integer; +BEGIN + IF (NEW.cs_name <> '' AND NEW.domain <> '' AND NEW.family <> '' AND NEW.member <> '' AND NEW.name <> '') IS NOT TRUE THEN + len = (SELECT cardinality((SELECT regexp_split_to_array(NEW.att_name, E'/')))); + NEW.name := (SELECT split_part(NEW.att_name, '/', len)); + NEW.member := (SELECT split_part(NEW.att_name, '/', len - 1)); + NEW.family := (SELECT split_part(NEW.att_name, '/', len - 2)); + NEW.domain := (SELECT split_part(NEW.att_name, '/', len - 3)); + NEW.cs_name := (SELECT split_part(NEW.att_name, '/', len - 4)); + END IF; + RETURN NEW; +END +$$ LANGUAGE plpgsql; + +CREATE TRIGGER expand_name_trigger BEFORE INSERT ON att_conf FOR EACH ROW EXECUTE PROCEDURE expand_name(); diff --git a/docker-compose/timescaledb/resources/04_hdb_ext_aggregates.sql b/docker-compose/timescaledb/resources/06_hdb_ext_aggregates.sql similarity index 99% rename from docker-compose/timescaledb/resources/04_hdb_ext_aggregates.sql rename to docker-compose/timescaledb/resources/06_hdb_ext_aggregates.sql index 7c3484299f5ab982be7021bd5d48491f08ec67e2..a253ccd04f3ef74728cdacc4884c7fb31806ab17 100644 --- 
a/docker-compose/timescaledb/resources/04_hdb_ext_aggregates.sql +++ b/docker-compose/timescaledb/resources/06_hdb_ext_aggregates.sql @@ -1290,3 +1290,4 @@ GRANT SELECT ON cagg_scalar_devushort_1day TO readonly; -- DROP VIEW cagg_scalar_devushort_1hour CASCADE; -- DROP VIEW cagg_scalar_devushort_8hour CASCADE; -- DROP VIEW cagg_scalar_devushort_1day CASCADE; + diff --git a/docker-compose/timescaledb/resources/07_hdb_ext_arrays_aggregates_helper.sql b/docker-compose/timescaledb/resources/07_hdb_ext_arrays_aggregates_helper.sql new file mode 100644 index 0000000000000000000000000000000000000000..7c67f65366984268f337133c938b9bda909e95fe --- /dev/null +++ b/docker-compose/timescaledb/resources/07_hdb_ext_arrays_aggregates_helper.sql @@ -0,0 +1,2336 @@ +-- ----------------------------------------------------------------------------- +-- This file is part of the hdbpp-timescale-project +-- +-- Copyright (C) : 2014-2019 +-- European Synchrotron Radiation Facility +-- BP 220, Grenoble 38043, FRANCE +-- +-- libhdb++timescale is free software: you can redistribute it and/or modify +-- it under the terms of the Lesser GNU General Public License as published by +-- the Free Software Foundation, either version 3 of the License, or +-- (at your option) any later version. +-- +-- libhdb++timescale is distributed in the hope that it will be useful, +-- but WITHOUT ANY WARRANTY; without even the implied warranty of +-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser +-- GNU General Public License for more details. +-- +-- You should have received a copy of the Lesser GNU General Public License +-- along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>. +-- ----------------------------------------------------------------------------- + +-- Aggregates function helper for the continuous aggregates views for the array attributes. 
+ +\c hdb + +/* +NOT USED +Keep as a different approach, should be benchmarked for execution speed + +-- Special type to be used as input by compute_element_agg +-- It contains past aggregates result and the new values +create type double_agg_input as ( + value_r double precision, + value_w double precision, + count_r integer, + count_nan_r integer, + avg_r decimal, + min_r double precision, + max_r double precision, + stddev_r decimal, + count_w integer, + count_nan_w integer, + avg_w decimal, + min_w double precision, + max_w double precision, + stddev_w decimal +); + +-- Function to compute the aggregates from the new values and old aggregates +-- result. +-- It computes the result for an array of input and return a table so that it +-- can be used in a FROM clause +CREATE OR REPLACE FUNCTION compute_element_agg(inp_arr double_agg_input[] + ) RETURNS SETOF RECORD as $$ + +DECLARE + ret RECORD; + inp double_agg_input; + value_r double precision; + value_w double precision; + count_r integer; + count_nan_r integer; + avg_r decimal; + min_r double precision; + max_r double precision; + stddev_r decimal; + count_w integer; + count_nan_w integer; + avg_w decimal; + min_w double precision; + max_w double precision; + stddev_w decimal; + n_count_r integer; + n_count_nan_r integer; + n_avg_r decimal; + n_min_r double precision; + n_max_r double precision; + n_stddev_r decimal; + n_count_w integer; + n_count_nan_w integer; + n_avg_w decimal; + n_min_w double precision; + n_max_w double precision; + n_stddev_w decimal; + +BEGIN + FOREACH inp IN ARRAY inp_arr + LOOP + + value_r := inp.value_r; + value_w := inp.value_w; + count_r := inp.count_r; + count_nan_r := inp.count_nan_r; + avg_r := inp.avg_r; + min_r := inp.min_r; + max_r := inp.max_r; + stddev_r := inp.stddev_r; + count_w := inp.count_w; + count_nan_w := inp.count_nan_w; + avg_w := inp.avg_w; + min_w := inp.min_w; + stddev_w := inp.stddev_w; + + IF value_r IS NULL OR value_r='NaN'::float8 OR value_r='Infinity' OR 
value_r='-Infinity' + THEN + + IF count_r IS NULL + THEN + n_count_r = 0; + ELSE + n_count_r = count_r; + END IF; + + IF value_r IS NULL + THEN + + IF count_nan_r IS NULL + THEN + n_count_nan_r = 0; + ELSE + n_count_nan_r = count_nan_r; + END IF; + + ELSE + + IF count_nan_r IS NULL + THEN + n_count_nan_r = 1; + ELSE + n_count_nan_r = count_nan_r + 1; + END IF; + END IF; + + n_avg_r = avg_r; + n_min_r = min_r; + n_max_r = max_r; + n_stddev_r = stddev_r; + + ELSE + + IF count_nan_r IS NULL + THEN + n_count_nan_r = 0; + ELSE + n_count_nan_r = count_nan_r; + END IF; + + IF count_r IS NULL + THEN + n_count_r = 1; + ELSE + n_count_r = count_r + 1; + END IF; + + IF avg_r IS NULL + THEN + n_avg_r = value_r; + ELSE + n_avg_r = avg_r + (value_r-avg_r)/(count_r+1.)::decimal; + END IF; + + n_min_r = LEAST(value_r, min_r); + n_max_r = GREATEST(value_r, max_r); + + IF stddev_r IS NULL + THEN + n_stddev_r = 0; + ELSE + n_stddev_r = stddev_r + ((count_r + 0.)/(count_r+1.))*power(value_r - avg_r, 2); + END IF; + END IF; + + IF value_w IS NULL OR value_w='NaN'::float8 OR value_w='Infinity' OR value_w='-Infinity' + THEN + + IF count_w IS NULL + THEN + n_count_w = 0; + ELSE + n_count_w = count_w; + END IF; + + IF value_w IS NULL + THEN + + IF count_nan_w IS NULL + THEN + n_count_nan_w = 0; + ELSE + n_count_nan_w = count_nan_w; + END IF; + + ELSE + + IF count_nan_w IS NULL + THEN + n_count_nan_w = 1; + ELSE + n_count_nan_w = count_nan_w + 1; + END IF; + END IF; + + n_avg_w = avg_w; + n_min_w = min_w; + n_max_w = max_w; + n_stddev_w = stddev_w; + + ELSE + + IF count_nan_w IS NULL + THEN + n_count_nan_w = 0; + ELSE + n_count_nan_w = count_nan_w; + END IF; + + IF count_w IS NULL + THEN + n_count_w = 1; + ELSE + n_count_w = count_w + 1; + END IF; + + IF avg_w IS NULL + THEN + n_avg_w = value_w; + ELSE + n_avg_w = avg_w + (value_w-avg_w)/(count_w+1); + END IF; + + n_min_w = LEAST(value_w, min_w); + n_max_w = GREATEST(value_w, max_w); + + IF stddev_w IS NULL + THEN + n_stddev_w = 0; + ELSE + 
n_stddev_w = stddev_w + ((count_w + 0.)/(count_w+1.)*power(value_w - avg_w, 2)); + END IF; + END IF; + + ret := (n_count_r, n_count_nan_r, n_avg_r, n_min_r, n_max_r, n_stddev_r + , n_count_w, n_count_nan_w, n_avg_w, n_min_w, n_max_w, n_stddev_w); + + return next ret; + END LOOP; +END; +$$ +LANGUAGE 'plpgsql'; +*/ + + +-- Special types to store the aggregations data during computation +create type double_array_agg_state as ( + count integer, + count_errors integer, + count_r integer[], + count_nan_r integer[], + avg_r decimal[], + min_r double precision[], + max_r double precision[], + stddev_r decimal[], + count_w integer[], + count_nan_w integer[], + avg_w decimal[], + min_w double precision[], + max_w double precision[], + stddev_w decimal[] +); + +create type float_array_agg_state as ( + count integer, + count_errors integer, + count_r integer[], + count_nan_r integer[], + avg_r decimal[], + min_r real[], + max_r real[], + stddev_r decimal[], + count_w integer[], + count_nan_w integer[], + avg_w decimal[], + min_w real[], + max_w real[], + stddev_w decimal[] +); + +create type long_array_agg_state as ( + count integer, + count_errors integer, + count_r integer[], + avg_r decimal[], + min_r integer[], + max_r integer[], + stddev_r decimal[], + count_w integer[], + avg_w decimal[], + min_w integer[], + max_w integer[], + stddev_w decimal[] +); + +create type long64_array_agg_state as ( + count integer, + count_errors integer, + count_r integer[], + avg_r decimal[], + min_r bigint[], + max_r bigint[], + stddev_r decimal[], + count_w integer[], + avg_w decimal[], + min_w bigint[], + max_w bigint[], + stddev_w decimal[] +); + +create type short_array_agg_state as ( + count integer, + count_errors integer, + count_r integer[], + avg_r decimal[], + min_r smallint[], + max_r smallint[], + stddev_r decimal[], + count_w integer[], + avg_w decimal[], + min_w smallint[], + max_w smallint[], + stddev_w decimal[] +); + +create type ulong_array_agg_state as ( + count integer, 
+ count_errors integer, + count_r integer[], + avg_r decimal[], + min_r ulong[], + max_r ulong[], + stddev_r decimal[], + count_w integer[], + avg_w decimal[], + min_w ulong[], + max_w ulong[], + stddev_w decimal[] +); + +create type ulong64_array_agg_state as ( + count integer, + count_errors integer, + count_r integer[], + avg_r decimal[], + min_r ulong64[], + max_r ulong64[], + stddev_r decimal[], + count_w integer[], + avg_w decimal[], + min_w ulong64[], + max_w ulong64[], + stddev_w decimal[] +); + +create type ushort_array_agg_state as ( + count integer, + count_errors integer, + count_r integer[], + avg_r decimal[], + min_r ushort[], + max_r ushort[], + stddev_r decimal[], + count_w integer[], + avg_w decimal[], + min_w ushort[], + max_w ushort[], + stddev_w decimal[] +); + +-- Function to combine to aggregate state into a new one +-- needed for the aggregate function to be used for partial aggregation +CREATE OR REPLACE FUNCTION fn_double_combine(double_array_agg_state, double_array_agg_state) + RETURNS double_array_agg_state AS $$ + +DECLARE + state1 ALIAS FOR $1; + state2 ALIAS FOR $2; + count integer; + count_errors integer; + result double_array_agg_state%ROWTYPE; + +BEGIN + + -- Limit cases. 
+ IF state1 is NULL + THEN + return state2; + END IF; + + IF state2 is NULL + THEN + return state1; + END IF; + + -- if there is a discrepancy in the arrays sizes + IF CARDINALITY(state1.avg_r) != CARDINALITY(state2.avg_r) OR CARDINALITY(state1.avg_w) != CARDINALITY(state2.avg_w) THEN + SELECT 0, 0, + ARRAY[]::integer[], ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::float8[], ARRAY[]::float8[], ARRAY[]::decimal[], + ARRAY[]::integer[], ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::float8[], ARRAY[]::float8[], ARRAY[]::decimal[] + INTO result; + ELSE + + count := state1.count + state2.count; + count_errors := state1.count_errors + state2.count_errors; + + WITH arrays AS( + SELECT + UNNEST(state1.count_r) AS count_r1, UNNEST(state1.count_nan_r) AS nan_r1, UNNEST(state1.avg_r) AS avg_r1, + UNNEST(state1.min_r) AS min_r1, UNNEST(state1.max_r) AS max_r1, UNNEST(state1.stddev_r) AS stddev_r1, + UNNEST(state1.count_w) AS count_w1, UNNEST(state1.count_nan_w) AS nan_w1, UNNEST(state1.avg_w) AS avg_w1, + UNNEST(state1.min_w) AS min_w1, UNNEST(state1.max_w) AS max_w1, UNNEST(state1.stddev_w) AS stddev_w1, + UNNEST(state2.count_r) AS count_r2, UNNEST(state2.count_nan_r) AS nan_r2, UNNEST(state2.avg_r) AS avg_r2, + UNNEST(state2.min_r) AS min_r2, UNNEST(state2.max_r) AS max_r2, UNNEST(state2.stddev_r) AS stddev_r2, + UNNEST(state2.count_w) AS count_w2, UNNEST(state2.count_nan_w) AS nan_w2, UNNEST(state2.avg_w) AS avg_w2, + UNNEST(state2.min_w) AS min_w2, UNNEST(state2.max_w) AS max_w2, UNNEST(state2.stddev_w) AS stddev_w2 + ) + SELECT count, count_errors, + array_agg(count_r1+count_r2), array_agg(count_nan_r1+count_nan_r2), + array_agg(avg_r1 + (count_r2/(count_r1+count_r2))*(avg_r2-avg_r1)), array_agg(LEAST(min_r1, min_r2)), array_agg(GREATEST(max_r1, max_r2)), + array_agg(stddev_r1 + (count_r2*count_r1/count_r1+count_r2)*power(avg_r2 - avg_r1, 2)), + array_agg(count_w1+count_w2), array_agg(count_nan_w1+count_nan_w2), + array_agg(avg_w1 + 
(count_w2/(count_w1+count_w2))*(avg_w2-avg_w1)), array_agg(LEAST(min_w1, min_w2)), array_agg(GREATEST(max_w1, max_w2)), + array_agg(stddev_w1 + (count_w2*count_w1/count_w1+count_w2)*power(avg_w2 - avg_w1, 2)) + INTO result FROM arrays; + END IF; + + return result; +END; +$$ +LANGUAGE 'plpgsql'; + +CREATE OR REPLACE FUNCTION fn_float_combine(float_array_agg_state, float_array_agg_state) + RETURNS float_array_agg_state AS $$ + +DECLARE + state1 ALIAS FOR $1; + state2 ALIAS FOR $2; + count integer; + count_errors integer; + result float_array_agg_state%ROWTYPE; + +BEGIN + + -- Limit cases. + IF state1 is NULL + THEN + return state2; + END IF; + + IF state2 is NULL + THEN + return state1; + END IF; + + -- if there is a discrepancy in the arrays sizes + IF CARDINALITY(state1.avg_r) != CARDINALITY(state2.avg_r) OR CARDINALITY(state1.avg_w) != CARDINALITY(state2.avg_w) THEN + SELECT 0, 0, + ARRAY[]::integer[], ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::real[], ARRAY[]::real[], ARRAY[]::decimal[], + ARRAY[]::integer[], ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::real[], ARRAY[]::real[], ARRAY[]::decimal[] + INTO result; + ELSE + + count := state1.count + state2.count; + count_errors := state1.count_errors + state2.count_errors; + + WITH arrays AS( + SELECT + UNNEST(state1.count_r) AS count_r1, UNNEST(state1.count_nan_r) AS nan_r1, UNNEST(state1.avg_r) AS avg_r1, + UNNEST(state1.min_r) AS min_r1, UNNEST(state1.max_r) AS max_r1, UNNEST(state1.stddev_r) AS stddev_r1, + UNNEST(state1.count_w) AS count_w1, UNNEST(state1.count_nan_w) AS nan_w1, UNNEST(state1.avg_w) AS avg_w1, + UNNEST(state1.min_w) AS min_w1, UNNEST(state1.max_w) AS max_w1, UNNEST(state1.stddev_w) AS stddev_w1, + UNNEST(state2.count_r) AS count_r2, UNNEST(state2.count_nan_r) AS nan_r2, UNNEST(state2.avg_r) AS avg_r2, + UNNEST(state2.min_r) AS min_r2, UNNEST(state2.max_r) AS max_r2, UNNEST(state2.stddev_r) AS stddev_r2, + UNNEST(state2.count_w) AS count_w2, UNNEST(state2.count_nan_w) AS nan_w2, 
UNNEST(state2.avg_w) AS avg_w2, + UNNEST(state2.min_w) AS min_w2, UNNEST(state2.max_w) AS max_w2, UNNEST(state2.stddev_w) AS stddev_w2 + ) + SELECT count, count_errors, + array_agg(count_r1+count_r2), array_agg(count_nan_r1+count_nan_r2), + array_agg(avg_r1 + (count_r2/(count_r1+count_r2))*(avg_r2-avg_r1)), array_agg(LEAST(min_r1, min_r2)), array_agg(GREATEST(max_r1, max_r2)), + array_agg(stddev_r1 + (count_r2*count_r1/count_r1+count_r2)*power(avg_r2 - avg_r1, 2)), + array_agg(count_w1+count_w2), array_agg(count_nan_w1+count_nan_w2), + array_agg(avg_w1 + (count_w2/(count_w1+count_w2))*(avg_w2-avg_w1)), array_agg(LEAST(min_w1, min_w2)), array_agg(GREATEST(max_w1, max_w2)), + array_agg(stddev_w1 + (count_w2*count_w1/count_w1+count_w2)*power(avg_w2 - avg_w1, 2)) + INTO result FROM arrays; + END IF; + + return result; +END; +$$ +LANGUAGE 'plpgsql'; + +CREATE OR REPLACE FUNCTION fn_long_combine(long_array_agg_state, long_array_agg_state) + RETURNS long_array_agg_state AS $$ + +DECLARE + state1 ALIAS FOR $1; + state2 ALIAS FOR $2; + count integer; + count_errors integer; + result long_array_agg_state%ROWTYPE; + +BEGIN + + -- Limit cases. 
+ IF state1 is NULL + THEN + return state2; + END IF; + + IF state2 is NULL + THEN + return state1; + END IF; + + -- if there is a discrepancy in the arrays sizes + IF CARDINALITY(state1.avg_r) != CARDINALITY(state2.avg_r) OR CARDINALITY(state1.avg_w) != CARDINALITY(state2.avg_w) THEN + SELECT 0, 0, + ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::integer[], ARRAY[]::integer[], ARRAY[]::decimal[], + ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::integer[], ARRAY[]::integer[], ARRAY[]::decimal[] + INTO result; + ELSE + + count := state1.count + state2.count; + count_errors := state1.count_errors + state2.count_errors; + + WITH arrays AS( + SELECT + UNNEST(state1.count_r) AS count_r1, UNNEST(state1.count_nan_r) AS nan_r1, UNNEST(state1.avg_r) AS avg_r1, + UNNEST(state1.min_r) AS min_r1, UNNEST(state1.max_r) AS max_r1, UNNEST(state1.stddev_r) AS stddev_r1, + UNNEST(state1.count_w) AS count_w1, UNNEST(state1.count_nan_w) AS nan_w1, UNNEST(state1.avg_w) AS avg_w1, + UNNEST(state1.min_w) AS min_w1, UNNEST(state1.max_w) AS max_w1, UNNEST(state1.stddev_w) AS stddev_w1, + UNNEST(state2.count_r) AS count_r2, UNNEST(state2.count_nan_r) AS nan_r2, UNNEST(state2.avg_r) AS avg_r2, + UNNEST(state2.min_r) AS min_r2, UNNEST(state2.max_r) AS max_r2, UNNEST(state2.stddev_r) AS stddev_r2, + UNNEST(state2.count_w) AS count_w2, UNNEST(state2.count_nan_w) AS nan_w2, UNNEST(state2.avg_w) AS avg_w2, + UNNEST(state2.min_w) AS min_w2, UNNEST(state2.max_w) AS max_w2, UNNEST(state2.stddev_w) AS stddev_w2 + ) + SELECT count, count_errors, + array_agg(count_r1+count_r2), + array_agg(avg_r1 + (count_r2/(count_r1+count_r2))*(avg_r2-avg_r1)), array_agg(LEAST(min_r1, min_r2)), array_agg(GREATEST(max_r1, max_r2)), + array_agg(stddev_r1 + (count_r2*count_r1/count_r1+count_r2)*power(avg_r2 - avg_r1, 2)), + array_agg(count_w1+count_w2), + array_agg(avg_w1 + (count_w2/(count_w1+count_w2))*(avg_w2-avg_w1)), array_agg(LEAST(min_w1, min_w2)), array_agg(GREATEST(max_w1, max_w2)), + array_agg(stddev_w1 + 
(count_w2*count_w1/count_w1+count_w2)*power(avg_w2 - avg_w1, 2)) + INTO result FROM arrays; + END IF; + + return result; +END; +$$ +LANGUAGE 'plpgsql'; + +CREATE OR REPLACE FUNCTION fn_long64_combine(long64_array_agg_state, long64_array_agg_state) + RETURNS long64_array_agg_state AS $$ + +DECLARE + state1 ALIAS FOR $1; + state2 ALIAS FOR $2; + count integer; + count_errors integer; + result long64_array_agg_state%ROWTYPE; + +BEGIN + + -- Limit cases. + IF state1 is NULL + THEN + return state2; + END IF; + + IF state2 is NULL + THEN + return state1; + END IF; + + -- if there is a discrepancy in the arrays sizes + IF CARDINALITY(state1.avg_r) != CARDINALITY(state2.avg_r) OR CARDINALITY(state1.avg_w) != CARDINALITY(state2.avg_w) THEN + SELECT 0, 0, + ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::bigint[], ARRAY[]::bigint[], ARRAY[]::decimal[], + ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::bigint[], ARRAY[]::bigint[], ARRAY[]::decimal[] + INTO result; + ELSE + + count := state1.count + state2.count; + count_errors := state1.count_errors + state2.count_errors; + + WITH arrays AS( + SELECT + UNNEST(state1.count_r) AS count_r1, UNNEST(state1.avg_r) AS avg_r1, + UNNEST(state1.min_r) AS min_r1, UNNEST(state1.max_r) AS max_r1, UNNEST(state1.stddev_r) AS stddev_r1, + UNNEST(state1.count_w) AS count_w1, UNNEST(state1.avg_w) AS avg_w1, + UNNEST(state1.min_w) AS min_w1, UNNEST(state1.max_w) AS max_w1, UNNEST(state1.stddev_w) AS stddev_w1, + UNNEST(state2.count_r) AS count_r2, UNNEST(state2.avg_r) AS avg_r2, + UNNEST(state2.min_r) AS min_r2, UNNEST(state2.max_r) AS max_r2, UNNEST(state2.stddev_r) AS stddev_r2, + UNNEST(state2.count_w) AS count_w2, UNNEST(state2.avg_w) AS avg_w2, + UNNEST(state2.min_w) AS min_w2, UNNEST(state2.max_w) AS max_w2, UNNEST(state2.stddev_w) AS stddev_w2 + ) + SELECT count, count_errors, + array_agg(count_r1+count_r2), + array_agg(avg_r1 + (count_r2/(count_r1+count_r2))*(avg_r2-avg_r1)), array_agg(LEAST(min_r1, min_r2)), 
array_agg(GREATEST(max_r1, max_r2)), + array_agg(stddev_r1 + (count_r2*count_r1/count_r1+count_r2)*power(avg_r2 - avg_r1, 2)), + array_agg(count_w1+count_w2), + array_agg(avg_w1 + (count_w2/(count_w1+count_w2))*(avg_w2-avg_w1)), array_agg(LEAST(min_w1, min_w2)), array_agg(GREATEST(max_w1, max_w2)), + array_agg(stddev_w1 + (count_w2*count_w1/count_w1+count_w2)*power(avg_w2 - avg_w1, 2)) + INTO result FROM arrays; + END IF; + + return result; +END; +$$ +LANGUAGE 'plpgsql'; + +CREATE OR REPLACE FUNCTION fn_short_combine(short_array_agg_state, short_array_agg_state) + RETURNS short_array_agg_state AS $$ + +DECLARE + state1 ALIAS FOR $1; + state2 ALIAS FOR $2; + count integer; + count_errors integer; + result short_array_agg_state%ROWTYPE; + +BEGIN + + -- Limit cases. + IF state1 is NULL + THEN + return state2; + END IF; + + IF state2 is NULL + THEN + return state1; + END IF; + + -- if there is a discrepancy in the arrays sizes + IF CARDINALITY(state1.avg_r) != CARDINALITY(state2.avg_r) OR CARDINALITY(state1.avg_w) != CARDINALITY(state2.avg_w) THEN + SELECT 0, 0, + ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::smallint[], ARRAY[]::smallint[], ARRAY[]::decimal[], + ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::smallint[], ARRAY[]::smallint[], ARRAY[]::decimal[] + INTO result; + ELSE + + count := state1.count + state2.count; + count_errors := state1.count_errors + state2.count_errors; + + WITH arrays AS( + SELECT + UNNEST(state1.count_r) AS count_r1, UNNEST(state1.avg_r) AS avg_r1, + UNNEST(state1.min_r) AS min_r1, UNNEST(state1.max_r) AS max_r1, UNNEST(state1.stddev_r) AS stddev_r1, + UNNEST(state1.count_w) AS count_w1, UNNEST(state1.avg_w) AS avg_w1, + UNNEST(state1.min_w) AS min_w1, UNNEST(state1.max_w) AS max_w1, UNNEST(state1.stddev_w) AS stddev_w1, + UNNEST(state2.count_r) AS count_r2, UNNEST(state2.avg_r) AS avg_r2, + UNNEST(state2.min_r) AS min_r2, UNNEST(state2.max_r) AS max_r2, UNNEST(state2.stddev_r) AS stddev_r2, + UNNEST(state2.count_w) AS count_w2, 
UNNEST(state2.avg_w) AS avg_w2, + UNNEST(state2.min_w) AS min_w2, UNNEST(state2.max_w) AS max_w2, UNNEST(state2.stddev_w) AS stddev_w2 + ) + SELECT count, count_errors, + array_agg(count_r1+count_r2), + array_agg(avg_r1 + (count_r2/(count_r1+count_r2))*(avg_r2-avg_r1)), array_agg(LEAST(min_r1, min_r2)), array_agg(GREATEST(max_r1, max_r2)), + array_agg(stddev_r1 + (count_r2*count_r1/count_r1+count_r2)*power(avg_r2 - avg_r1, 2)), + array_agg(count_w1+count_w2), + array_agg(avg_w1 + (count_w2/(count_w1+count_w2))*(avg_w2-avg_w1)), array_agg(LEAST(min_w1, min_w2)), array_agg(GREATEST(max_w1, max_w2)), + array_agg(stddev_w1 + (count_w2*count_w1/count_w1+count_w2)*power(avg_w2 - avg_w1, 2)) + INTO result FROM arrays; + END IF; + + return result; +END; +$$ +LANGUAGE 'plpgsql'; + +CREATE OR REPLACE FUNCTION fn_ulong_combine(ulong_array_agg_state, ulong_array_agg_state) + RETURNS ulong_array_agg_state AS $$ + +DECLARE + state1 ALIAS FOR $1; + state2 ALIAS FOR $2; + count integer; + count_errors integer; + result ulong_array_agg_state%ROWTYPE; + +BEGIN + + -- Limit cases. 
+ IF state1 is NULL + THEN + return state2; + END IF; + + IF state2 is NULL + THEN + return state1; + END IF; + + -- if there is a discrepancy in the arrays sizes + IF CARDINALITY(state1.avg_r) != CARDINALITY(state2.avg_r) OR CARDINALITY(state1.avg_w) != CARDINALITY(state2.avg_w) THEN + SELECT 0, 0, + ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::ulong[], ARRAY[]::ulong[], ARRAY[]::decimal[], + ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::ulong[], ARRAY[]::ulong[], ARRAY[]::decimal[] + INTO result; + ELSE + + count := state1.count + state2.count; + count_errors := state1.count_errors + state2.count_errors; + + WITH arrays AS( + SELECT + UNNEST(state1.count_r) AS count_r1, UNNEST(state1.avg_r) AS avg_r1, + UNNEST(state1.min_r) AS min_r1, UNNEST(state1.max_r) AS max_r1, UNNEST(state1.stddev_r) AS stddev_r1, + UNNEST(state1.count_w) AS count_w1, UNNEST(state1.avg_w) AS avg_w1, + UNNEST(state1.min_w) AS min_w1, UNNEST(state1.max_w) AS max_w1, UNNEST(state1.stddev_w) AS stddev_w1, + UNNEST(state2.count_r) AS count_r2, UNNEST(state2.avg_r) AS avg_r2, + UNNEST(state2.min_r) AS min_r2, UNNEST(state2.max_r) AS max_r2, UNNEST(state2.stddev_r) AS stddev_r2, + UNNEST(state2.count_w) AS count_w2, UNNEST(state2.avg_w) AS avg_w2, + UNNEST(state2.min_w) AS min_w2, UNNEST(state2.max_w) AS max_w2, UNNEST(state2.stddev_w) AS stddev_w2 + ) + SELECT count, count_errors, + array_agg(count_r1+count_r2), + array_agg(avg_r1 + (count_r2/(count_r1+count_r2))*(avg_r2-avg_r1)), array_agg(LEAST(min_r1, min_r2)), array_agg(GREATEST(max_r1, max_r2)), + array_agg(stddev_r1 + (count_r2*count_r1/count_r1+count_r2)*power(avg_r2 - avg_r1, 2)), + array_agg(count_w1+count_w2), + array_agg(avg_w1 + (count_w2/(count_w1+count_w2))*(avg_w2-avg_w1)), array_agg(LEAST(min_w1, min_w2)), array_agg(GREATEST(max_w1, max_w2)), + array_agg(stddev_w1 + (count_w2*count_w1/count_w1+count_w2)*power(avg_w2 - avg_w1, 2)) + INTO result FROM arrays; + END IF; + + return result; +END; +$$ +LANGUAGE 'plpgsql'; + 
-- Aggregate-state helper functions for the HDB++ TimescaleDB schema.
--
-- This span originally spelled out nine nearly identical plpgsql functions by
-- hand:
--   * fn_ulong64_combine / fn_ushort_combine        — parallel merge of two
--     partial aggregation states (one per integer-like Tango type);
--   * fn_double_array_agg / fn_float_array_agg      — per-row accumulation,
--     NaN / +-Infinity aware (floating-point Tango types);
--   * fn_long / long64 / short / ulong / ulong64_array_agg — per-row
--     accumulation for integer Tango types.
-- They are now generated from three shared templates so the common logic
-- exists exactly once; the created functions keep the same names, argument
-- types and return types as before.
--
-- Bug fixes relative to the hand-expanded originals (in the *_combine
-- variance/mean merge only; the *_array_agg accumulators are unchanged):
--   1. The merged-variance cross term was written
--        (count2*count1/count1+count2) * (avg2-avg1)^2
--      which parses as (count2 + count2); the parallel (Chan et al.) formula
--      requires count1*count2/(count1+count2).
--   2. The second state's M2 accumulator (stddev_*2) was dropped entirely
--      from the merge.
--   3. count2/(count1+count2) used integer division, truncating the
--      mean-merge ratio to 0 whenever count1 > 0 (the merged average stayed
--      stuck at avg1).
--   4. Sides with count = 0 (NULL avg/stddev) are now handled explicitly
--      instead of poisoning the merge with NULLs or dividing by zero.
--
-- The stddev_* state fields hold the running sum of squared deviations (M2);
-- the corresponding fn_*_final functions take sqrt(M2/count) at the end.
--
-- NOTE(review): the state row types (*_array_agg_state), the input row types
-- (att_array_dev*) and the domains ulong/ulong64/ushort are assumed to be
-- declared earlier in this schema file; confirm their field order matches the
-- SELECT ... INTO column lists below.

DO $generate$
DECLARE
    spec text[];
BEGIN
    ------------------------------------------------------------------
    -- 1) Parallel "combine" step: merge two partial aggregation states.
    --    spec = [type tag, SQL element type of the min/max arrays]
    ------------------------------------------------------------------
    FOREACH spec SLICE 1 IN ARRAY ARRAY[
        ARRAY['ulong64', 'ulong64'],
        ARRAY['ushort',  'ushort']
    ] LOOP
        EXECUTE format($fmt$
CREATE OR REPLACE FUNCTION fn_%1$s_combine(%1$s_array_agg_state, %1$s_array_agg_state)
    RETURNS %1$s_array_agg_state AS $fn$
DECLARE
    state1 ALIAS FOR $1;
    state2 ALIAS FOR $2;
    count integer;
    count_errors integer;
    result %1$s_array_agg_state%%ROWTYPE;
BEGIN
    -- Limit cases: one side empty -> the other side is the answer.
    IF state1 IS NULL THEN
        RETURN state2;
    END IF;
    IF state2 IS NULL THEN
        RETURN state1;
    END IF;

    -- On an array-size discrepancy the aggregate is meaningless: reset it.
    IF CARDINALITY(state1.avg_r) != CARDINALITY(state2.avg_r)
       OR CARDINALITY(state1.avg_w) != CARDINALITY(state2.avg_w) THEN
        SELECT 0, 0,
            ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::%2$s[], ARRAY[]::%2$s[], ARRAY[]::decimal[],
            ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::%2$s[], ARRAY[]::%2$s[], ARRAY[]::decimal[]
        INTO result;
    ELSE
        count := state1.count + state2.count;
        count_errors := state1.count_errors + state2.count_errors;

        WITH arrays AS (
            SELECT
                UNNEST(state1.count_r) AS count_r1, UNNEST(state1.avg_r) AS avg_r1,
                UNNEST(state1.min_r) AS min_r1, UNNEST(state1.max_r) AS max_r1, UNNEST(state1.stddev_r) AS stddev_r1,
                UNNEST(state1.count_w) AS count_w1, UNNEST(state1.avg_w) AS avg_w1,
                UNNEST(state1.min_w) AS min_w1, UNNEST(state1.max_w) AS max_w1, UNNEST(state1.stddev_w) AS stddev_w1,
                UNNEST(state2.count_r) AS count_r2, UNNEST(state2.avg_r) AS avg_r2,
                UNNEST(state2.min_r) AS min_r2, UNNEST(state2.max_r) AS max_r2, UNNEST(state2.stddev_r) AS stddev_r2,
                UNNEST(state2.count_w) AS count_w2, UNNEST(state2.avg_w) AS avg_w2,
                UNNEST(state2.min_w) AS min_w2, UNNEST(state2.max_w) AS max_w2, UNNEST(state2.stddev_w) AS stddev_w2
        )
        SELECT count, count_errors,
            array_agg(count_r1 + count_r2),
            -- Weighted mean merge; ::decimal prevents integer truncation.
            array_agg(CASE
                WHEN count_r1 = 0 THEN avg_r2
                WHEN count_r2 = 0 THEN avg_r1
                ELSE avg_r1 + (count_r2::decimal / (count_r1 + count_r2)) * (avg_r2 - avg_r1)
            END),
            array_agg(LEAST(min_r1, min_r2)), array_agg(GREATEST(max_r1, max_r2)),
            -- Chan's parallel merge of the two M2 (sum of squared deviations) terms.
            array_agg(CASE
                WHEN count_r1 = 0 THEN stddev_r2
                WHEN count_r2 = 0 THEN stddev_r1
                ELSE stddev_r1 + stddev_r2
                     + (count_r1::decimal * count_r2 / (count_r1 + count_r2)) * power(avg_r2 - avg_r1, 2)
            END),
            array_agg(count_w1 + count_w2),
            array_agg(CASE
                WHEN count_w1 = 0 THEN avg_w2
                WHEN count_w2 = 0 THEN avg_w1
                ELSE avg_w1 + (count_w2::decimal / (count_w1 + count_w2)) * (avg_w2 - avg_w1)
            END),
            array_agg(LEAST(min_w1, min_w2)), array_agg(GREATEST(max_w1, max_w2)),
            array_agg(CASE
                WHEN count_w1 = 0 THEN stddev_w2
                WHEN count_w2 = 0 THEN stddev_w1
                ELSE stddev_w1 + stddev_w2
                     + (count_w1::decimal * count_w2 / (count_w1 + count_w2)) * power(avg_w2 - avg_w1, 2)
            END)
        INTO result FROM arrays;
    END IF;

    RETURN result;
END;
$fn$ LANGUAGE plpgsql;
$fmt$, spec[1], spec[2]);
    END LOOP;

    ------------------------------------------------------------------
    -- 2) Per-row accumulation, floating point (NaN / +-Infinity aware).
    --    spec = [type tag, input row type, SQL element type]
    --    Non-finite samples are excluded from count/avg/min/max/stddev
    --    and tallied separately in count_nan_*.
    ------------------------------------------------------------------
    FOREACH spec SLICE 1 IN ARRAY ARRAY[
        ARRAY['double', 'att_array_devdouble', 'float8'],
        ARRAY['float',  'att_array_devfloat',  'real']
    ] LOOP
        EXECUTE format($fmt$
CREATE OR REPLACE FUNCTION fn_%1$s_array_agg(%1$s_array_agg_state, new_row %2$s)
    RETURNS %1$s_array_agg_state AS $fn$
DECLARE
    state ALIAS FOR $1;
    count integer;
    count_err integer;
    result %1$s_array_agg_state%%ROWTYPE;
BEGIN
    -- Increment the error count when the row carries an error description.
    IF new_row.att_error_desc_id > 0 THEN
        count_err := 0 + 1;
    ELSE
        count_err := 0;
    END IF;

    IF state IS NULL THEN
        -- First row: seed the state from this row alone.
        -- (min/max deliberately start from the raw value, NaN/Inf included,
        -- matching the historical behaviour of this schema.)
        WITH arrays AS (
            SELECT UNNEST(new_row.value_r) AS read, UNNEST(new_row.value_w) AS write)
        SELECT 1, count_err,
            array_agg(CASE
                WHEN read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN 0
                WHEN read IS NOT NULL THEN 1
                ELSE 0
            END),
            array_agg(CASE
                WHEN read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN 1
                ELSE 0
            END),
            array_agg(CASE
                WHEN read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN NULL
                ELSE read::decimal
            END),
            array_agg(read), array_agg(read),
            array_agg(CASE
                WHEN read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN NULL
                WHEN read IS NOT NULL THEN 0
                ELSE NULL
            END),
            array_agg(CASE
                WHEN write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN 0
                WHEN write IS NOT NULL THEN 1
                ELSE 0
            END),
            array_agg(CASE
                WHEN write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN 1
                ELSE 0
            END),
            array_agg(CASE
                WHEN write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN NULL
                ELSE write::decimal
            END),
            array_agg(write), array_agg(write),
            array_agg(CASE
                WHEN write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN NULL
                WHEN write IS NOT NULL THEN 0
                ELSE NULL
            END)
        INTO result FROM arrays;
    ELSE
        IF CARDINALITY(state.avg_r) != CARDINALITY(new_row.value_r)
           OR CARDINALITY(state.avg_w) != CARDINALITY(new_row.value_w) THEN
            -- Array size changed mid-interval: reset to an empty aggregate.
            SELECT 0, 0,
                ARRAY[]::integer[], ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::%3$s[], ARRAY[]::%3$s[], ARRAY[]::decimal[],
                ARRAY[]::integer[], ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::%3$s[], ARRAY[]::%3$s[], ARRAY[]::decimal[]
            INTO result;
        ELSE
            count := state.count + 1;
            WITH arrays AS (
                SELECT UNNEST(new_row.value_r) AS read, UNNEST(new_row.value_w) AS write,
                    UNNEST(state.count_r) AS count_r, UNNEST(state.count_nan_r) AS nan_r, UNNEST(state.avg_r) AS avg_r,
                    UNNEST(state.min_r) AS min_r, UNNEST(state.max_r) AS max_r, UNNEST(state.stddev_r) AS stddev_r,
                    UNNEST(state.count_w) AS count_w, UNNEST(state.count_nan_w) AS nan_w, UNNEST(state.avg_w) AS avg_w,
                    UNNEST(state.min_w) AS min_w, UNNEST(state.max_w) AS max_w, UNNEST(state.stddev_w) AS stddev_w
            )
            SELECT count, state.count_errors + count_err,
                array_agg(CASE
                    WHEN read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN count_r
                    WHEN read IS NOT NULL THEN count_r + 1
                    ELSE count_r
                END),
                array_agg(CASE
                    WHEN read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN nan_r + 1
                    ELSE nan_r
                END),
                -- Welford running mean, skipping NULL and non-finite samples.
                array_agg(CASE
                    WHEN read IS NULL OR read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN avg_r
                    WHEN avg_r IS NULL THEN read::decimal
                    ELSE avg_r + (read::decimal - avg_r) / (count_r + 1.)::decimal
                END),
                array_agg(CASE
                    WHEN read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN min_r
                    ELSE LEAST(read, min_r)
                END),
                array_agg(CASE
                    WHEN read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN max_r
                    ELSE GREATEST(read, max_r)
                END),
                -- Welford M2 update: stddev holds the running sum of squared deviations.
                array_agg(CASE
                    WHEN read IS NULL OR read = 'NaN'::float8 OR read = 'Infinity'::float8 OR read = '-Infinity'::float8 THEN stddev_r
                    WHEN stddev_r IS NULL THEN 0
                    ELSE stddev_r + ((count_r + 0.) / (count_r + 1.))::decimal * power(read::decimal - avg_r, 2)
                END),
                array_agg(CASE
                    WHEN write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN count_w
                    WHEN write IS NOT NULL THEN count_w + 1
                    ELSE count_w
                END),
                array_agg(CASE
                    WHEN write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN nan_w + 1
                    ELSE nan_w
                END),
                array_agg(CASE
                    WHEN write IS NULL OR write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN avg_w
                    WHEN avg_w IS NULL THEN write::decimal
                    ELSE avg_w + (write::decimal - avg_w) / (count_w + 1.)::decimal
                END),
                array_agg(CASE
                    WHEN write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN min_w
                    ELSE LEAST(write, min_w)
                END),
                array_agg(CASE
                    WHEN write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN max_w
                    ELSE GREATEST(write, max_w)
                END),
                array_agg(CASE
                    WHEN write IS NULL OR write = 'NaN'::float8 OR write = 'Infinity'::float8 OR write = '-Infinity'::float8 THEN stddev_w
                    WHEN stddev_w IS NULL THEN 0
                    ELSE stddev_w + ((count_w + 0.) / (count_w + 1.))::decimal * power(write::decimal - avg_w, 2)
                END)
            INTO result FROM arrays;
        END IF;
    END IF;

    RETURN result;
END;
$fn$ LANGUAGE plpgsql;
$fmt$, spec[1], spec[2], spec[3]);
    END LOOP;

    ------------------------------------------------------------------
    -- 3) Per-row accumulation, integer types (no NaN handling needed).
    --    spec = [type tag, input row type, SQL element type]
    ------------------------------------------------------------------
    FOREACH spec SLICE 1 IN ARRAY ARRAY[
        ARRAY['long',    'att_array_devlong',    'integer'],
        ARRAY['long64',  'att_array_devlong64',  'bigint'],
        ARRAY['short',   'att_array_devshort',   'smallint'],
        ARRAY['ulong',   'att_array_devulong',   'ulong'],
        ARRAY['ulong64', 'att_array_devulong64', 'ulong64']
    ] LOOP
        EXECUTE format($fmt$
CREATE OR REPLACE FUNCTION fn_%1$s_array_agg(%1$s_array_agg_state, new_row %2$s)
    RETURNS %1$s_array_agg_state AS $fn$
DECLARE
    state ALIAS FOR $1;
    count integer;
    count_err integer;
    result %1$s_array_agg_state%%ROWTYPE;
BEGIN
    -- Increment the error count when the row carries an error description.
    IF new_row.att_error_desc_id > 0 THEN
        count_err := 0 + 1;
    ELSE
        count_err := 0;
    END IF;

    IF state IS NULL THEN
        -- First row: seed the state from this row alone.
        WITH arrays AS (
            SELECT UNNEST(new_row.value_r) AS read, UNNEST(new_row.value_w) AS write)
        SELECT 1, count_err,
            array_agg(CASE WHEN read IS NOT NULL THEN 1 ELSE 0 END),
            array_agg(read::decimal), array_agg(read), array_agg(read),
            array_agg(CASE WHEN read IS NOT NULL THEN 0 ELSE NULL END),
            array_agg(CASE WHEN write IS NOT NULL THEN 1 ELSE 0 END),
            array_agg(write::decimal), array_agg(write), array_agg(write),
            array_agg(CASE WHEN write IS NOT NULL THEN 0 ELSE NULL END)
        INTO result FROM arrays;
    ELSE
        IF CARDINALITY(state.avg_r) != CARDINALITY(new_row.value_r)
           OR CARDINALITY(state.avg_w) != CARDINALITY(new_row.value_w) THEN
            -- Array size changed mid-interval: reset to an empty aggregate.
            SELECT 0, 0,
                ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::%3$s[], ARRAY[]::%3$s[], ARRAY[]::decimal[],
                ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::%3$s[], ARRAY[]::%3$s[], ARRAY[]::decimal[]
            INTO result;
        ELSE
            count := state.count + 1;
            WITH arrays AS (
                SELECT UNNEST(new_row.value_r) AS read, UNNEST(new_row.value_w) AS write,
                    UNNEST(state.count_r) AS count_r, UNNEST(state.avg_r) AS avg_r,
                    UNNEST(state.min_r) AS min_r, UNNEST(state.max_r) AS max_r, UNNEST(state.stddev_r) AS stddev_r,
                    UNNEST(state.count_w) AS count_w, UNNEST(state.avg_w) AS avg_w,
                    UNNEST(state.min_w) AS min_w, UNNEST(state.max_w) AS max_w, UNNEST(state.stddev_w) AS stddev_w
            )
            SELECT count, state.count_errors + count_err,
                array_agg(CASE WHEN read IS NOT NULL THEN count_r + 1 ELSE count_r END),
                -- Welford running mean; NULL samples leave the state untouched.
                array_agg(CASE
                    WHEN read IS NULL THEN avg_r
                    WHEN avg_r IS NULL THEN read::decimal
                    ELSE avg_r + (read::decimal - avg_r) / (count_r + 1.)::decimal
                END),
                array_agg(LEAST(read, min_r)), array_agg(GREATEST(read, max_r)),
                -- Welford M2 update: stddev holds the running sum of squared deviations.
                array_agg(CASE
                    WHEN read IS NULL THEN stddev_r
                    WHEN stddev_r IS NULL THEN 0
                    ELSE stddev_r + ((count_r + 0.) / (count_r + 1.))::decimal * power(read::decimal - avg_r, 2)
                END),
                array_agg(CASE WHEN write IS NOT NULL THEN count_w + 1 ELSE count_w END),
                array_agg(CASE
                    WHEN write IS NULL THEN avg_w
                    WHEN avg_w IS NULL THEN write::decimal
                    ELSE avg_w + (write::decimal - avg_w) / (count_w + 1.)::decimal
                END),
                array_agg(LEAST(write, min_w)), array_agg(GREATEST(write, max_w)),
                array_agg(CASE
                    WHEN write IS NULL THEN stddev_w
                    WHEN stddev_w IS NULL THEN 0
                    ELSE stddev_w + ((count_w + 0.) / (count_w + 1.))::decimal * power(write::decimal - avg_w, 2)
                END)
            INTO result FROM arrays;
        END IF;
    END IF;

    RETURN result;
END;
$fn$ LANGUAGE plpgsql;
$fmt$, spec[1], spec[2], spec[3]);
    END LOOP;
END
$generate$;
+
+CREATE OR REPLACE FUNCTION fn_ushort_array_agg(ushort_array_agg_state,new_row att_array_devushort)
+    RETURNS ushort_array_agg_state AS $$
+
+DECLARE
+    state ALIAS FOR $1;
+    count integer;
+    count_err integer;
+    result ushort_array_agg_state%ROWTYPE;
+
+BEGIN
+
+    -- Increment error count if needed
+    IF new_row.att_error_desc_id > 0 THEN
+        count_err = 1;
+    ELSE
+        count_err = 0;
+    END IF;
+
+    IF state is NULL
+    THEN
+        WITH arrays AS(
+            SELECT UNNEST(new_row.value_r) AS read, UNNEST(new_row.value_w) AS write)
+        SELECT 1, count_err,
+            array_agg(
+                CASE
+                    WHEN read IS NOT NULL THEN 1
+                    ELSE 0
+                END
+            ), array_agg(read::decimal), array_agg(read), array_agg(read), array_agg(
+                CASE
+                    WHEN read IS NOT NULL THEN 0
+                    ELSE read
+                END
+            ),
+            array_agg(
+                CASE
+                    WHEN write IS NOT NULL THEN 1
+                    ELSE 0
+                END
+            ), array_agg(write::decimal), array_agg(write), array_agg(write), array_agg(
+                CASE
+                    WHEN write IS NOT NULL THEN 0
+                    ELSE write
+                END
+            )
+        INTO result FROM arrays;
+    ELSE
+
+        IF CARDINALITY(state.avg_r) != CARDINALITY(new_row.value_r) or CARDINALITY(state.avg_w) != CARDINALITY(new_row.value_w)
+        THEN
+            SELECT 0, 0,
+                ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::ushort[], ARRAY[]::ushort[], ARRAY[]::decimal[],
+                ARRAY[]::integer[], ARRAY[]::decimal[], ARRAY[]::ushort[], ARRAY[]::ushort[], ARRAY[]::decimal[]
+            INTO result;
+        ELSE
+
+            count := state.count + 1;
+            WITH arrays AS(
+                SELECT UNNEST(new_row.value_r) AS read, UNNEST(new_row.value_w) AS write,
+                    UNNEST(state.count_r) AS count_r, UNNEST(state.avg_r) AS avg_r,
+                    UNNEST(state.min_r) AS min_r, UNNEST(state.max_r) AS max_r, UNNEST(state.stddev_r) AS stddev_r,
+                    UNNEST(state.count_w) AS count_w, UNNEST(state.avg_w) AS avg_w,
+                    UNNEST(state.min_w) AS min_w, UNNEST(state.max_w) AS max_w, UNNEST(state.stddev_w) AS stddev_w
+            )
+            SELECT count, state.count_errors+count_err
+                , array_agg(CASE
+                    WHEN read IS NOT NULL THEN count_r+1
+                    ELSE count_r
+                    END
+                ), array_agg(CASE
+                    WHEN read IS NULL THEN avg_r
+                    WHEN avg_r IS NULL THEN read
+                    ELSE avg_r + (read::decimal-avg_r)/(count_r+1.)::decimal
+                    END
+                ), array_agg(LEAST(read, min_r)), array_agg(GREATEST(read, max_r))
+                , array_agg(CASE
+                    WHEN read IS NULL THEN stddev_r
+                    WHEN stddev_r IS NULL THEN 0
+                    ELSE stddev_r + ((count_r+0.)/(count_r+1.))::decimal*power(read::decimal - avg_r, 2)
+                    END
+                ), array_agg(CASE
+                    WHEN write IS NOT NULL THEN count_w+1
+                    ELSE count_w
+                    END
+                ), array_agg(CASE
+                    WHEN write IS NULL THEN avg_w
+                    WHEN avg_w IS NULL THEN write
+                    ELSE avg_w + (write::decimal-avg_w)/(count_w+1.)::decimal
+                    END
+                ), array_agg(LEAST(write, min_w)), array_agg(GREATEST(write, max_w))
+                , array_agg(CASE
+                    WHEN write IS NULL THEN stddev_w
+                    WHEN stddev_w IS NULL THEN 0
+                    ELSE stddev_w + ((count_w+0.)/(count_w+1.))::decimal*power(write::decimal - avg_w, 2)
+                    END
+                )
+            INTO result FROM arrays;
+        END IF;
+    END IF;
+
+    return result;
+
+END;
+$$
+LANGUAGE 'plpgsql';
+
+-- Function to compute the real aggregate results from the internal state
+-- in this case only the stddev has to be computed
+CREATE OR REPLACE FUNCTION fn_double_array_final(double_array_agg_state)
+    RETURNS double_array_agg_state AS $$
+
+DECLARE
+    state ALIAS FOR $1;
+    result double_array_agg_state%ROWTYPE;
+
+BEGIN
+
+    IF state IS NULL
+    THEN
+        return NULL;
+    END IF;
+
+    IF state.count = 0 THEN
+        return NULL;
+
+    ELSE
+        WITH arrays AS(
+            SELECT UNNEST(state.count_r) AS count_r, UNNEST(state.stddev_r) AS stddev_r,
+                UNNEST(state.count_w) AS count_w, UNNEST(state.stddev_w) AS stddev_w
+        )
+        SELECT state.count, state.count_errors,
+            state.count_r, state.count_nan_r, state.avg_r,
+            state.min_r, state.max_r, array_agg(CASE
+                WHEN count_r=0 THEN NULL
+                WHEN count_r=1 THEN 0
+                ELSE sqrt(stddev_r/(count_r))
+                END
+            ),
+            state.count_w, state.count_nan_w, state.avg_w,
+            state.min_w, state.max_w, array_agg(CASE
+                WHEN count_w=0 THEN NULL -- fix: guard write-side stats with count_w (was count_r)
+                WHEN count_w=1 THEN 0
+                ELSE sqrt(stddev_w/(count_w))
+                END
+            )
+        INTO result FROM arrays;
+
+        return result;
+
+    END IF;
+END;
+$$
+LANGUAGE
'plpgsql';
+
+CREATE OR REPLACE FUNCTION fn_float_array_final(float_array_agg_state)
+    RETURNS float_array_agg_state AS $$
+
+DECLARE
+    state ALIAS FOR $1;
+    result float_array_agg_state%ROWTYPE;
+
+BEGIN
+
+    IF state IS NULL
+    THEN
+        return NULL;
+    END IF;
+
+    IF state.count = 0 THEN
+        return NULL;
+
+    ELSE
+        WITH arrays AS(
+            SELECT UNNEST(state.count_r) AS count_r, UNNEST(state.stddev_r) AS stddev_r,
+                UNNEST(state.count_w) AS count_w, UNNEST(state.stddev_w) AS stddev_w
+        )
+        SELECT state.count, state.count_errors,
+            state.count_r, state.count_nan_r, state.avg_r,
+            state.min_r, state.max_r, array_agg(CASE
+                WHEN count_r=0 THEN NULL
+                WHEN count_r=1 THEN 0
+                ELSE sqrt(stddev_r/(count_r))
+                END
+            ),
+            state.count_w, state.count_nan_w, state.avg_w,
+            state.min_w, state.max_w, array_agg(CASE
+                WHEN count_w=0 THEN NULL -- fix: guard write-side stats with count_w (was count_r)
+                WHEN count_w=1 THEN 0
+                ELSE sqrt(stddev_w/(count_w))
+                END)
+        INTO result FROM arrays;
+
+        return result;
+
+    END IF;
+END;
+$$
+LANGUAGE 'plpgsql';
+
+CREATE OR REPLACE FUNCTION fn_long_array_final(long_array_agg_state)
+    RETURNS long_array_agg_state AS $$
+
+DECLARE
+    state ALIAS FOR $1;
+    result long_array_agg_state%ROWTYPE;
+
+BEGIN
+
+    IF state IS NULL
+    THEN
+        return NULL;
+    END IF;
+
+    IF state.count = 0 THEN
+        return NULL;
+
+    ELSE
+        WITH arrays AS(
+            SELECT UNNEST(state.count_r) AS count_r, UNNEST(state.stddev_r) AS stddev_r,
+                UNNEST(state.count_w) AS count_w, UNNEST(state.stddev_w) AS stddev_w
+        )
+        SELECT state.count, state.count_errors,
+            state.count_r, state.avg_r,
+            state.min_r, state.max_r, array_agg(CASE
+                WHEN count_r=0 THEN NULL
+                WHEN count_r=1 THEN 0
+                ELSE sqrt(stddev_r/(count_r))
+                END
+            ),
+            state.count_w, state.avg_w,
+            state.min_w, state.max_w, array_agg(CASE
+                WHEN count_w=0 THEN NULL -- fix: guard write-side stats with count_w (was count_r)
+                WHEN count_w=1 THEN 0
+                ELSE sqrt(stddev_w/(count_w))
+                END)
+        INTO result FROM arrays;
+
+        return result;
+
+    END IF;
+END;
+$$
+LANGUAGE 'plpgsql';
+
+CREATE OR REPLACE FUNCTION fn_long64_array_final(long64_array_agg_state)
+    RETURNS
long64_array_agg_state AS $$
+
+DECLARE
+    state ALIAS FOR $1;
+    result long64_array_agg_state%ROWTYPE;
+
+BEGIN
+
+    IF state IS NULL
+    THEN
+        return NULL;
+    END IF;
+
+    IF state.count = 0 THEN
+        return NULL;
+
+    ELSE
+        WITH arrays AS(
+            SELECT UNNEST(state.count_r) AS count_r, UNNEST(state.stddev_r) AS stddev_r,
+                UNNEST(state.count_w) AS count_w, UNNEST(state.stddev_w) AS stddev_w
+        )
+        SELECT state.count, state.count_errors,
+            state.count_r, state.avg_r,
+            state.min_r, state.max_r, array_agg(CASE
+                WHEN count_r=0 THEN NULL
+                WHEN count_r=1 THEN 0
+                ELSE sqrt(stddev_r/(count_r))
+                END
+            ),
+            state.count_w, state.avg_w,
+            state.min_w, state.max_w, array_agg(CASE
+                WHEN count_w=0 THEN NULL -- fix: guard write-side stats with count_w (was count_r)
+                WHEN count_w=1 THEN 0
+                ELSE sqrt(stddev_w/(count_w))
+                END)
+        INTO result FROM arrays;
+
+        return result;
+
+    END IF;
+END;
+$$
+LANGUAGE 'plpgsql';
+
+CREATE OR REPLACE FUNCTION fn_short_array_final(short_array_agg_state)
+    RETURNS short_array_agg_state AS $$
+
+DECLARE
+    state ALIAS FOR $1;
+    result short_array_agg_state%ROWTYPE;
+
+BEGIN
+
+    IF state IS NULL
+    THEN
+        return NULL;
+    END IF;
+
+    IF state.count = 0 THEN
+        return NULL;
+
+    ELSE
+        WITH arrays AS(
+            SELECT UNNEST(state.count_r) AS count_r, UNNEST(state.stddev_r) AS stddev_r,
+                UNNEST(state.count_w) AS count_w, UNNEST(state.stddev_w) AS stddev_w
+        )
+        SELECT state.count, state.count_errors,
+            state.count_r, state.avg_r,
+            state.min_r, state.max_r, array_agg(CASE
+                WHEN count_r=0 THEN NULL
+                WHEN count_r=1 THEN 0
+                ELSE sqrt(stddev_r/(count_r))
+                END
+            ),
+            state.count_w, state.avg_w,
+            state.min_w, state.max_w, array_agg(CASE
+                WHEN count_w=0 THEN NULL -- fix: guard write-side stats with count_w (was count_r)
+                WHEN count_w=1 THEN 0
+                ELSE sqrt(stddev_w/(count_w))
+                END)
+        INTO result FROM arrays;
+
+        return result;
+
+    END IF;
+END;
+$$
+LANGUAGE 'plpgsql';
+
+CREATE OR REPLACE FUNCTION fn_ulong_array_final(ulong_array_agg_state)
+    RETURNS ulong_array_agg_state AS $$
+
+DECLARE
+    state ALIAS FOR $1;
+    result ulong_array_agg_state%ROWTYPE;
+
+BEGIN
+
+    IF state IS NULL
+
THEN
+        return NULL;
+    END IF;
+
+    IF state.count = 0 THEN
+        return NULL;
+
+    ELSE
+        WITH arrays AS(
+            SELECT UNNEST(state.count_r) AS count_r, UNNEST(state.stddev_r) AS stddev_r,
+                UNNEST(state.count_w) AS count_w, UNNEST(state.stddev_w) AS stddev_w
+        )
+        SELECT state.count, state.count_errors,
+            state.count_r, state.avg_r,
+            state.min_r, state.max_r, array_agg(CASE
+                WHEN count_r=0 THEN NULL
+                WHEN count_r=1 THEN 0
+                ELSE sqrt(stddev_r/(count_r))
+                END
+            ),
+            state.count_w, state.avg_w,
+            state.min_w, state.max_w, array_agg(CASE
+                WHEN count_w=0 THEN NULL -- fix: guard write-side stats with count_w (was count_r)
+                WHEN count_w=1 THEN 0
+                ELSE sqrt(stddev_w/(count_w))
+                END)
+        INTO result FROM arrays;
+
+        return result;
+
+    END IF;
+END;
+$$
+LANGUAGE 'plpgsql';
+
+CREATE OR REPLACE FUNCTION fn_ulong64_array_final(ulong64_array_agg_state)
+    RETURNS ulong64_array_agg_state AS $$
+
+DECLARE
+    state ALIAS FOR $1;
+    result ulong64_array_agg_state%ROWTYPE;
+
+BEGIN
+
+    IF state IS NULL
+    THEN
+        return NULL;
+    END IF;
+
+    IF state.count = 0 THEN
+        return NULL;
+
+    ELSE
+        WITH arrays AS(
+            SELECT UNNEST(state.count_r) AS count_r, UNNEST(state.stddev_r) AS stddev_r,
+                UNNEST(state.count_w) AS count_w, UNNEST(state.stddev_w) AS stddev_w
+        )
+        SELECT state.count, state.count_errors,
+            state.count_r, state.avg_r,
+            state.min_r, state.max_r, array_agg(CASE
+                WHEN count_r=0 THEN NULL
+                WHEN count_r=1 THEN 0
+                ELSE sqrt(stddev_r/(count_r))
+                END
+            ),
+            state.count_w, state.avg_w,
+            state.min_w, state.max_w, array_agg(CASE
+                WHEN count_w=0 THEN NULL -- fix: guard write-side stats with count_w (was count_r)
+                WHEN count_w=1 THEN 0
+                ELSE sqrt(stddev_w/(count_w))
+                END)
+        INTO result FROM arrays;
+
+        return result;
+
+    END IF;
+END;
+$$
+LANGUAGE 'plpgsql';
+
+CREATE OR REPLACE FUNCTION fn_ushort_array_final(ushort_array_agg_state)
+    RETURNS ushort_array_agg_state AS $$
+
+DECLARE
+    state ALIAS FOR $1;
+    result ushort_array_agg_state%ROWTYPE;
+
+BEGIN
+
+    IF state IS NULL
+    THEN
+        return NULL;
+    END IF;
+
+    IF state.count = 0 THEN
+        return NULL;
+
+    ELSE
+        WITH arrays AS(
+            SELECT
UNNEST(state.count_r) AS count_r, UNNEST(state.stddev_r) AS stddev_r,
+                UNNEST(state.count_w) AS count_w, UNNEST(state.stddev_w) AS stddev_w
+        )
+        SELECT state.count, state.count_errors,
+            state.count_r, state.avg_r,
+            state.min_r, state.max_r, array_agg(CASE
+                WHEN count_r=0 THEN NULL
+                WHEN count_r=1 THEN 0
+                ELSE sqrt(stddev_r/(count_r))
+                END
+            ),
+            state.count_w, state.avg_w,
+            state.min_w, state.max_w, array_agg(CASE
+                WHEN count_w=0 THEN NULL -- fix: guard write-side stats with count_w (was count_r)
+                WHEN count_w=1 THEN 0
+                ELSE sqrt(stddev_w/(count_w))
+                END)
+        INTO result FROM arrays;
+
+        return result;
+
+    END IF;
+END;
+$$
+LANGUAGE 'plpgsql';
+
+-- Aggregate function declaration
+CREATE AGGREGATE double_array_aggregate(att_array_devdouble)
+(
+    sfunc = fn_double_array_agg,
+    stype = double_array_agg_state,
+    combinefunc = fn_double_combine,
+    finalfunc = fn_double_array_final
+);
+
+CREATE AGGREGATE float_array_aggregate(att_array_devfloat)
+(
+    sfunc = fn_float_array_agg,
+    stype = float_array_agg_state,
+    combinefunc = fn_float_combine,
+    finalfunc = fn_float_array_final
+);
+
+CREATE AGGREGATE long_array_aggregate(att_array_devlong)
+(
+    sfunc = fn_long_array_agg,
+    stype = long_array_agg_state,
+    combinefunc = fn_long_combine,
+    finalfunc = fn_long_array_final
+);
+
+CREATE AGGREGATE long64_array_aggregate(att_array_devlong64)
+(
+    sfunc = fn_long64_array_agg,
+    stype = long64_array_agg_state,
+    combinefunc = fn_long64_combine,
+    finalfunc = fn_long64_array_final
+);
+
+CREATE AGGREGATE short_array_aggregate(att_array_devshort)
+(
+    sfunc = fn_short_array_agg,
+    stype = short_array_agg_state,
+    combinefunc = fn_short_combine,
+    finalfunc = fn_short_array_final
+);
+
+CREATE AGGREGATE ulong_array_aggregate(att_array_devulong)
+(
+    sfunc = fn_ulong_array_agg,
+    stype = ulong_array_agg_state,
+    combinefunc = fn_ulong_combine,
+    finalfunc = fn_ulong_array_final
+);
+
+CREATE AGGREGATE ulong64_array_aggregate(att_array_devulong64)
+(
+    sfunc = fn_ulong64_array_agg,
+    stype = ulong64_array_agg_state,
+
combinefunc = fn_ulong64_combine,
+    finalfunc = fn_ulong64_array_final
+);
+
+CREATE AGGREGATE ushort_array_aggregate(att_array_devushort)
+(
+    sfunc = fn_ushort_array_agg,
+    stype = ushort_array_agg_state,
+    combinefunc = fn_ushort_combine,
+    finalfunc = fn_ushort_array_final
+);
diff --git a/docker-compose/timescaledb/resources/08_hdb_ext_arrays_aggregates.sql b/docker-compose/timescaledb/resources/08_hdb_ext_arrays_aggregates.sql
new file mode 100644
index 0000000000000000000000000000000000000000..028712989defdbd6f30bcdde61e98790a9940192
--- /dev/null
+++ b/docker-compose/timescaledb/resources/08_hdb_ext_arrays_aggregates.sql
@@ -0,0 +1,328 @@
+-- -----------------------------------------------------------------------------
+-- This file is part of the hdbpp-timescale-project
+--
+-- Copyright (C) : 2014-2019
+--   European Synchrotron Radiation Facility
+--   BP 220, Grenoble 38043, FRANCE
+--
+-- libhdb++timescale is free software: you can redistribute it and/or modify
+-- it under the terms of the Lesser GNU General Public License as published by
+-- the Free Software Foundation, either version 3 of the License, or
+-- (at your option) any later version.
+--
+-- libhdb++timescale is distributed in the hope that it will be useful,
+-- but WITHOUT ANY WARRANTY; without even the implied warranty of
+-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser
+-- GNU General Public License for more details.
+--
+-- You should have received a copy of the Lesser GNU General Public License
+-- along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>.
+-- -----------------------------------------------------------------------------
+
+-- Continuous aggregate views computing per-bucket statistics for the array attribute tables.
+\c hdb
+-- Double attributes (1 hour / 8 hours / 1 day buckets)
+CREATE MATERIALIZED VIEW cagg_array_devdouble_1hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, count_nan_r, mean_r, min_r, max_r, stddev_r
+    , count_w, count_nan_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 hour', data_time), (double_array_aggregate(t)).count, (double_array_aggregate(t)).count_errors
+    , (double_array_aggregate(t)).count_r, (double_array_aggregate(t)).count_nan_r, (double_array_aggregate(t)).avg_r::float8[], (double_array_aggregate(t)).min_r, (double_array_aggregate(t)).max_r, (double_array_aggregate(t)).stddev_r::float8[]
+    , (double_array_aggregate(t)).count_w, (double_array_aggregate(t)).count_nan_w, (double_array_aggregate(t)).avg_w::float8[], (double_array_aggregate(t)).min_w, (double_array_aggregate(t)).max_w, (double_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devdouble as t
+    GROUP BY time_bucket('1 hour', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devdouble_8hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, count_nan_r, mean_r, min_r, max_r, stddev_r
+    , count_w, count_nan_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('8 hours', data_time), (double_array_aggregate(t)).count, (double_array_aggregate(t)).count_errors
+    , (double_array_aggregate(t)).count_r, (double_array_aggregate(t)).count_nan_r, (double_array_aggregate(t)).avg_r::float8[], (double_array_aggregate(t)).min_r, (double_array_aggregate(t)).max_r, (double_array_aggregate(t)).stddev_r::float8[]
+    , (double_array_aggregate(t)).count_w, (double_array_aggregate(t)).count_nan_w, (double_array_aggregate(t)).avg_w::float8[], (double_array_aggregate(t)).min_w, (double_array_aggregate(t)).max_w, (double_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devdouble as t
+    GROUP BY time_bucket('8 hours', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devdouble_1day(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, count_nan_r, mean_r, min_r, max_r, stddev_r
+    , count_w, count_nan_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 day', data_time), (double_array_aggregate(t)).count, (double_array_aggregate(t)).count_errors
+    , (double_array_aggregate(t)).count_r, (double_array_aggregate(t)).count_nan_r, (double_array_aggregate(t)).avg_r::float8[], (double_array_aggregate(t)).min_r, (double_array_aggregate(t)).max_r, (double_array_aggregate(t)).stddev_r::float8[]
+    , (double_array_aggregate(t)).count_w, (double_array_aggregate(t)).count_nan_w, (double_array_aggregate(t)).avg_w::float8[], (double_array_aggregate(t)).min_w, (double_array_aggregate(t)).max_w, (double_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devdouble as t
+    GROUP BY time_bucket('1 day', data_time), att_conf_id;
+
+-- Float attributes (1 hour / 8 hours / 1 day buckets)
+CREATE MATERIALIZED VIEW cagg_array_devfloat_1hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, count_nan_r, mean_r, min_r, max_r, stddev_r
+    , count_w, count_nan_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 hour', data_time), (float_array_aggregate(t)).count, (float_array_aggregate(t)).count_errors
+    , (float_array_aggregate(t)).count_r, (float_array_aggregate(t)).count_nan_r, (float_array_aggregate(t)).avg_r::float8[], (float_array_aggregate(t)).min_r, (float_array_aggregate(t)).max_r, (float_array_aggregate(t)).stddev_r::float8[]
+    , (float_array_aggregate(t)).count_w, (float_array_aggregate(t)).count_nan_w, (float_array_aggregate(t)).avg_w::float8[], (float_array_aggregate(t)).min_w, (float_array_aggregate(t)).max_w, (float_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devfloat as t
+    GROUP BY time_bucket('1 hour', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devfloat_8hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, count_nan_r, mean_r, min_r, max_r, stddev_r
+    , count_w, count_nan_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('8 hours', data_time), (float_array_aggregate(t)).count, (float_array_aggregate(t)).count_errors
+    , (float_array_aggregate(t)).count_r, (float_array_aggregate(t)).count_nan_r, (float_array_aggregate(t)).avg_r::float8[], (float_array_aggregate(t)).min_r, (float_array_aggregate(t)).max_r, (float_array_aggregate(t)).stddev_r::float8[]
+    , (float_array_aggregate(t)).count_w, (float_array_aggregate(t)).count_nan_w, (float_array_aggregate(t)).avg_w::float8[], (float_array_aggregate(t)).min_w, (float_array_aggregate(t)).max_w, (float_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devfloat as t
+    GROUP BY time_bucket('8 hours', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devfloat_1day(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, count_nan_r, mean_r, min_r, max_r, stddev_r
+    , count_w, count_nan_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 day', data_time), (float_array_aggregate(t)).count, (float_array_aggregate(t)).count_errors
+    , (float_array_aggregate(t)).count_r, (float_array_aggregate(t)).count_nan_r, (float_array_aggregate(t)).avg_r::float8[], (float_array_aggregate(t)).min_r, (float_array_aggregate(t)).max_r, (float_array_aggregate(t)).stddev_r::float8[]
+    , (float_array_aggregate(t)).count_w, (float_array_aggregate(t)).count_nan_w, (float_array_aggregate(t)).avg_w::float8[], (float_array_aggregate(t)).min_w, (float_array_aggregate(t)).max_w, (float_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devfloat as t
+    GROUP BY time_bucket('1 day', data_time), att_conf_id;
+
+-- Long attributes (1 hour / 8 hours / 1 day buckets)
+CREATE MATERIALIZED VIEW cagg_array_devlong_1hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 hour', data_time), (long_array_aggregate(t)).count, (long_array_aggregate(t)).count_errors
+    , (long_array_aggregate(t)).count_r, (long_array_aggregate(t)).avg_r::float8[], (long_array_aggregate(t)).min_r, (long_array_aggregate(t)).max_r, (long_array_aggregate(t)).stddev_r::float8[]
+    , (long_array_aggregate(t)).count_w, (long_array_aggregate(t)).avg_w::float8[], (long_array_aggregate(t)).min_w, (long_array_aggregate(t)).max_w, (long_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devlong as t
+    GROUP BY time_bucket('1 hour', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devlong_8hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('8 hours', data_time), (long_array_aggregate(t)).count, (long_array_aggregate(t)).count_errors
+    , (long_array_aggregate(t)).count_r, (long_array_aggregate(t)).avg_r::float8[], (long_array_aggregate(t)).min_r, (long_array_aggregate(t)).max_r, (long_array_aggregate(t)).stddev_r::float8[]
+    , (long_array_aggregate(t)).count_w, (long_array_aggregate(t)).avg_w::float8[], (long_array_aggregate(t)).min_w, (long_array_aggregate(t)).max_w, (long_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devlong as t
+    GROUP BY time_bucket('8 hours', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devlong_1day(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 day', data_time), (long_array_aggregate(t)).count, (long_array_aggregate(t)).count_errors
+    , (long_array_aggregate(t)).count_r, (long_array_aggregate(t)).avg_r::float8[], (long_array_aggregate(t)).min_r, (long_array_aggregate(t)).max_r, (long_array_aggregate(t)).stddev_r::float8[]
+    , (long_array_aggregate(t)).count_w, (long_array_aggregate(t)).avg_w::float8[], (long_array_aggregate(t)).min_w, (long_array_aggregate(t)).max_w, (long_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devlong as t
+    GROUP BY time_bucket('1 day', data_time), att_conf_id;
+
+-- Long 64 attributes (1 hour / 8 hours / 1 day buckets)
+CREATE MATERIALIZED VIEW cagg_array_devlong64_1hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 hour', data_time), (long64_array_aggregate(t)).count, (long64_array_aggregate(t)).count_errors
+    , (long64_array_aggregate(t)).count_r, (long64_array_aggregate(t)).avg_r::float8[], (long64_array_aggregate(t)).min_r, (long64_array_aggregate(t)).max_r, (long64_array_aggregate(t)).stddev_r::float8[]
+    , (long64_array_aggregate(t)).count_w, (long64_array_aggregate(t)).avg_w::float8[], (long64_array_aggregate(t)).min_w, (long64_array_aggregate(t)).max_w, (long64_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devlong64 as t
+    GROUP BY time_bucket('1 hour', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devlong64_8hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('8 hours', data_time), (long64_array_aggregate(t)).count, (long64_array_aggregate(t)).count_errors
+    , (long64_array_aggregate(t)).count_r, (long64_array_aggregate(t)).avg_r::float8[], (long64_array_aggregate(t)).min_r, (long64_array_aggregate(t)).max_r, (long64_array_aggregate(t)).stddev_r::float8[]
+    , (long64_array_aggregate(t)).count_w, (long64_array_aggregate(t)).avg_w::float8[], (long64_array_aggregate(t)).min_w, (long64_array_aggregate(t)).max_w, (long64_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devlong64 as t
+    GROUP BY time_bucket('8 hours', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devlong64_1day(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 day', data_time), (long64_array_aggregate(t)).count, (long64_array_aggregate(t)).count_errors
+    , (long64_array_aggregate(t)).count_r, (long64_array_aggregate(t)).avg_r::float8[], (long64_array_aggregate(t)).min_r, (long64_array_aggregate(t)).max_r, (long64_array_aggregate(t)).stddev_r::float8[]
+    , (long64_array_aggregate(t)).count_w, (long64_array_aggregate(t)).avg_w::float8[], (long64_array_aggregate(t)).min_w, (long64_array_aggregate(t)).max_w, (long64_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devlong64 as t
+    GROUP BY time_bucket('1 day', data_time), att_conf_id;
+
+-- Short attributes (1 hour / 8 hours / 1 day buckets)
+CREATE MATERIALIZED VIEW cagg_array_devshort_1hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 hour', data_time), (short_array_aggregate(t)).count, (short_array_aggregate(t)).count_errors
+    , (short_array_aggregate(t)).count_r, (short_array_aggregate(t)).avg_r::float8[], (short_array_aggregate(t)).min_r, (short_array_aggregate(t)).max_r, (short_array_aggregate(t)).stddev_r::float8[]
+    , (short_array_aggregate(t)).count_w, (short_array_aggregate(t)).avg_w::float8[], (short_array_aggregate(t)).min_w, (short_array_aggregate(t)).max_w, (short_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devshort as t
+    GROUP BY time_bucket('1 hour', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devshort_8hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('8 hours', data_time), (short_array_aggregate(t)).count, (short_array_aggregate(t)).count_errors
+    , (short_array_aggregate(t)).count_r, (short_array_aggregate(t)).avg_r::float8[], (short_array_aggregate(t)).min_r, (short_array_aggregate(t)).max_r, (short_array_aggregate(t)).stddev_r::float8[]
+    , (short_array_aggregate(t)).count_w, (short_array_aggregate(t)).avg_w::float8[], (short_array_aggregate(t)).min_w, (short_array_aggregate(t)).max_w, (short_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devshort as t
+    GROUP BY time_bucket('8 hours', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devshort_1day(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 day', data_time), (short_array_aggregate(t)).count, (short_array_aggregate(t)).count_errors
+    , (short_array_aggregate(t)).count_r, (short_array_aggregate(t)).avg_r::float8[], (short_array_aggregate(t)).min_r, (short_array_aggregate(t)).max_r, (short_array_aggregate(t)).stddev_r::float8[]
+    , (short_array_aggregate(t)).count_w, (short_array_aggregate(t)).avg_w::float8[], (short_array_aggregate(t)).min_w, (short_array_aggregate(t)).max_w, (short_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devshort as t
+    GROUP BY time_bucket('1 day', data_time), att_conf_id;
+
+-- Unsigned long attributes (1 hour / 8 hours / 1 day buckets)
+CREATE MATERIALIZED VIEW cagg_array_devulong_1hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 hour', data_time), (ulong_array_aggregate(t)).count, (ulong_array_aggregate(t)).count_errors
+    , (ulong_array_aggregate(t)).count_r, (ulong_array_aggregate(t)).avg_r::float8[], (ulong_array_aggregate(t)).min_r, (ulong_array_aggregate(t)).max_r, (ulong_array_aggregate(t)).stddev_r::float8[]
+    , (ulong_array_aggregate(t)).count_w, (ulong_array_aggregate(t)).avg_w::float8[], (ulong_array_aggregate(t)).min_w, (ulong_array_aggregate(t)).max_w, (ulong_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devulong as t
+    GROUP BY time_bucket('1 hour', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devulong_8hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('8 hours', data_time), (ulong_array_aggregate(t)).count, (ulong_array_aggregate(t)).count_errors
+    , (ulong_array_aggregate(t)).count_r, (ulong_array_aggregate(t)).avg_r::float8[], (ulong_array_aggregate(t)).min_r, (ulong_array_aggregate(t)).max_r, (ulong_array_aggregate(t)).stddev_r::float8[]
+    , (ulong_array_aggregate(t)).count_w, (ulong_array_aggregate(t)).avg_w::float8[], (ulong_array_aggregate(t)).min_w, (ulong_array_aggregate(t)).max_w, (ulong_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devulong as t
+    GROUP BY time_bucket('8 hours', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devulong_1day(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 day', data_time), (ulong_array_aggregate(t)).count, (ulong_array_aggregate(t)).count_errors
+    , (ulong_array_aggregate(t)).count_r, (ulong_array_aggregate(t)).avg_r::float8[], (ulong_array_aggregate(t)).min_r, (ulong_array_aggregate(t)).max_r, (ulong_array_aggregate(t)).stddev_r::float8[]
+    , (ulong_array_aggregate(t)).count_w, (ulong_array_aggregate(t)).avg_w::float8[], (ulong_array_aggregate(t)).min_w, (ulong_array_aggregate(t)).max_w, (ulong_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devulong as t
+    GROUP BY time_bucket('1 day', data_time), att_conf_id;
+
+-- Unsigned long 64 attributes (1 hour / 8 hours / 1 day buckets)
+CREATE MATERIALIZED VIEW cagg_array_devulong64_1hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 hour', data_time), (ulong64_array_aggregate(t)).count, (ulong64_array_aggregate(t)).count_errors
+    , (ulong64_array_aggregate(t)).count_r, (ulong64_array_aggregate(t)).avg_r::float8[], (ulong64_array_aggregate(t)).min_r, (ulong64_array_aggregate(t)).max_r, (ulong64_array_aggregate(t)).stddev_r::float8[]
+    , (ulong64_array_aggregate(t)).count_w, (ulong64_array_aggregate(t)).avg_w::float8[], (ulong64_array_aggregate(t)).min_w, (ulong64_array_aggregate(t)).max_w, (ulong64_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devulong64 as t
+    GROUP BY time_bucket('1 hour', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devulong64_8hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('8 hours', data_time), (ulong64_array_aggregate(t)).count, (ulong64_array_aggregate(t)).count_errors
+    , (ulong64_array_aggregate(t)).count_r, (ulong64_array_aggregate(t)).avg_r::float8[], (ulong64_array_aggregate(t)).min_r, (ulong64_array_aggregate(t)).max_r, (ulong64_array_aggregate(t)).stddev_r::float8[]
+    , (ulong64_array_aggregate(t)).count_w, (ulong64_array_aggregate(t)).avg_w::float8[], (ulong64_array_aggregate(t)).min_w, (ulong64_array_aggregate(t)).max_w, (ulong64_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devulong64 as t
+    GROUP BY time_bucket('8 hours', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devulong64_1day(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 day', data_time), (ulong64_array_aggregate(t)).count, (ulong64_array_aggregate(t)).count_errors
+    , (ulong64_array_aggregate(t)).count_r, (ulong64_array_aggregate(t)).avg_r::float8[], (ulong64_array_aggregate(t)).min_r, (ulong64_array_aggregate(t)).max_r, (ulong64_array_aggregate(t)).stddev_r::float8[]
+    , (ulong64_array_aggregate(t)).count_w, (ulong64_array_aggregate(t)).avg_w::float8[], (ulong64_array_aggregate(t)).min_w, (ulong64_array_aggregate(t)).max_w, (ulong64_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devulong64 as t
+    GROUP BY time_bucket('1 day', data_time), att_conf_id;
+
+-- Unsigned short attributes (1 hour / 8 hours / 1 day buckets)
+CREATE MATERIALIZED VIEW cagg_array_devushort_1hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 hour', data_time), (ushort_array_aggregate(t)).count, (ushort_array_aggregate(t)).count_errors
+    , (ushort_array_aggregate(t)).count_r, (ushort_array_aggregate(t)).avg_r::float8[], (ushort_array_aggregate(t)).min_r, (ushort_array_aggregate(t)).max_r, (ushort_array_aggregate(t)).stddev_r::float8[]
+    , (ushort_array_aggregate(t)).count_w, (ushort_array_aggregate(t)).avg_w::float8[], (ushort_array_aggregate(t)).min_w, (ushort_array_aggregate(t)).max_w, (ushort_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devushort as t
+    GROUP BY time_bucket('1 hour', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devushort_8hour(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id,
time_bucket('8 hours', data_time), (ushort_array_aggregate(t)).count, (ushort_array_aggregate(t)).count_errors
+    , (ushort_array_aggregate(t)).count_r, (ushort_array_aggregate(t)).avg_r::float8[], (ushort_array_aggregate(t)).min_r, (ushort_array_aggregate(t)).max_r, (ushort_array_aggregate(t)).stddev_r::float8[]
+    , (ushort_array_aggregate(t)).count_w, (ushort_array_aggregate(t)).avg_w::float8[], (ushort_array_aggregate(t)).min_w, (ushort_array_aggregate(t)).max_w, (ushort_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devushort as t
+    GROUP BY time_bucket('8 hours', data_time), att_conf_id;
+
+CREATE MATERIALIZED VIEW cagg_array_devushort_1day(
+    att_conf_id, data_time, count_rows, count_errors
+    , count_r, mean_r, min_r, max_r, stddev_r
+    , count_w, mean_w, min_w, max_w, stddev_w
+    ) WITH (timescaledb.continuous)
+    AS SELECT att_conf_id, time_bucket('1 day', data_time), (ushort_array_aggregate(t)).count, (ushort_array_aggregate(t)).count_errors
+    , (ushort_array_aggregate(t)).count_r, (ushort_array_aggregate(t)).avg_r::float8[], (ushort_array_aggregate(t)).min_r, (ushort_array_aggregate(t)).max_r, (ushort_array_aggregate(t)).stddev_r::float8[]
+    , (ushort_array_aggregate(t)).count_w, (ushort_array_aggregate(t)).avg_w::float8[], (ushort_array_aggregate(t)).min_w, (ushort_array_aggregate(t)).max_w, (ushort_array_aggregate(t)).stddev_w::float8[]
+    FROM att_array_devushort as t
+    GROUP BY time_bucket('1 day', data_time), att_conf_id;
+
+-- Maintenance helpers: drop all the continuous aggregate views
+-- (these are materialized views, so DROP MATERIALIZED VIEW is required)
+-- DROP MATERIALIZED VIEW cagg_array_devdouble_1hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devdouble_8hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devdouble_1day CASCADE;
+
+-- DROP MATERIALIZED VIEW cagg_array_devfloat_1hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devfloat_8hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devfloat_1day CASCADE;
+
+-- DROP MATERIALIZED VIEW cagg_array_devlong_1hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devlong_8hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devlong_1day CASCADE;
+
+-- DROP MATERIALIZED VIEW cagg_array_devlong64_1hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devlong64_8hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devlong64_1day CASCADE;
+
+-- DROP MATERIALIZED VIEW cagg_array_devshort_1hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devshort_8hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devshort_1day CASCADE;
+
+-- DROP MATERIALIZED VIEW cagg_array_devulong_1hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devulong_8hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devulong_1day CASCADE;
+
+-- DROP MATERIALIZED VIEW cagg_array_devulong64_1hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devulong64_8hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devulong64_1day CASCADE;
+
+-- DROP MATERIALIZED VIEW cagg_array_devushort_1hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devushort_8hour CASCADE;
+-- DROP MATERIALIZED VIEW cagg_array_devushort_1day CASCADE;
+
diff --git a/docker-compose/timescaledb/resources/09_hdb_ext_compress_policy.sql b/docker-compose/timescaledb/resources/09_hdb_ext_compress_policy.sql
new file mode 100644
index 0000000000000000000000000000000000000000..a815950e82b9d88d6e992fda23e8e3a5d5b84a26
--- /dev/null
+++ b/docker-compose/timescaledb/resources/09_hdb_ext_compress_policy.sql
@@ -0,0 +1,118 @@
+-- -----------------------------------------------------------------------------
+-- This file is part of the hdbpp-timescale-project
+--
+-- Copyright (C) : 2014-2019
+--   European Synchrotron Radiation Facility
+--   BP 220, Grenoble 38043, FRANCE
+--
+-- libhdb++timescale is free software: you can redistribute it and/or modify
+-- it under the terms of the Lesser GNU General Public License as published by
+-- the Free Software Foundation, either version 3 of the License, or
+-- (at your option) any later version.
+--
+-- libhdb++timescale is distributed in the hope that it will be useful,
+-- but WITHOUT ANY WARRANTY; without even the implied warranty of
+-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Lesser
+-- GNU General Public License for more details.
+--
+-- You should have received a copy of the Lesser GNU General Public License
+-- along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>.
+-- ----------------------------------------------------------------------------- +\c hdb +-- Compress chunk policy +-- Allow compression on the table +ALTER TABLE att_scalar_devboolean SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devdouble SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devfloat SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devencoded SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devenum SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devstate SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devstring SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devuchar SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devulong SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devulong64 SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devlong64 SET(timescaledb.compress, 
timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devlong SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devushort SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_scalar_devshort SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); + +ALTER TABLE att_array_devboolean SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devdouble SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devfloat SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devencoded SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devenum SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devstate SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devstring SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devuchar SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, 
att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devulong SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devulong64 SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devlong64 SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devlong SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devushort SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); +ALTER TABLE att_array_devshort SET(timescaledb.compress, timescaledb.compress_segmentby = 'att_conf_id, att_error_desc_id', timescaledb.compress_orderby = 'data_time DESC'); + +DO $$ BEGIN + IF (SELECT extversion>'2.0.0' FROM pg_extension where extname = 'timescaledb') THEN + -- If using timescaledb v2 + PERFORM add_compression_policy('att_scalar_devboolean', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devdouble', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devfloat', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devencoded', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devenum', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devstate', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devstring', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devuchar', 
INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devulong', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devulong64', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devlong64', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devlong', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devushort', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_scalar_devshort', INTERVAL '200d', if_not_exists => true); + + PERFORM add_compression_policy('att_array_devboolean', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devdouble', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devfloat', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devencoded', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devenum', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devstate', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devstring', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devuchar', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devulong', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devulong64', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devlong64', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devlong', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devushort', INTERVAL '200d', if_not_exists => true); + PERFORM add_compression_policy('att_array_devshort', INTERVAL '200d', if_not_exists => true); + ELSE + -- If 
using timescaledb v1.7 + PERFORM add_compress_chunks_policy('att_scalar_devboolean', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devdouble', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devfloat', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devencoded', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devenum', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devstate', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devstring', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devuchar', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devulong', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devulong64', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devlong64', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devlong', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devushort', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_scalar_devshort', INTERVAL '200d', if_not_exists => true); + + PERFORM add_compress_chunks_policy('att_array_devboolean', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devdouble', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devfloat', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devencoded', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devenum', INTERVAL '200d', if_not_exists => true); + PERFORM 
add_compress_chunks_policy('att_array_devstate', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devstring', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devuchar', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devulong', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devulong64', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devlong64', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devlong', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devushort', INTERVAL '200d', if_not_exists => true); + PERFORM add_compress_chunks_policy('att_array_devshort', INTERVAL '200d', if_not_exists => true); + END IF; +END $$; diff --git a/docker-compose/timescaledb/resources/10_hdb_ext_reorder_policy.sql b/docker-compose/timescaledb/resources/10_hdb_ext_reorder_policy.sql new file mode 100644 index 0000000000000000000000000000000000000000..e8e0f3911b6945159945d8e9b0caafeaad1741e1 --- /dev/null +++ b/docker-compose/timescaledb/resources/10_hdb_ext_reorder_policy.sql @@ -0,0 +1,53 @@ +-- ----------------------------------------------------------------------------- +-- This file is part of the hdbpp-timescale-project +-- +-- Copyright (C) : 2014-2019 +-- European Synchrotron Radiation Facility +-- BP 220, Grenoble 38043, FRANCE +-- +-- libhdb++timescale is free software: you can redistribute it and/or modify +-- it under the terms of the Lesser GNU General Public License as published by +-- the Free Software Foundation, either version 3 of the License, or +-- (at your option) any later version. +-- +-- libhdb++timescale is distributed in the hope that it will be useful, +-- but WITHOUT ANY WARRANTY; without even the implied warranty of +-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the Lesser +-- GNU General Public License for more details. +-- +-- You should have received a copy of the Lesser GNU General Public License +-- along with libhdb++timescale. If not, see <http://www.gnu.org/licenses/>. +-- ----------------------------------------------------------------------------- + +\c hdb +-- Reorder chunk policy + +SELECT add_reorder_policy('att_scalar_devboolean', 'att_scalar_devboolean_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devdouble', 'att_scalar_devdouble_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devfloat', 'att_scalar_devfloat_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devencoded', 'att_scalar_devencoded_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devenum', 'att_scalar_devenum_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devstate', 'att_scalar_devstate_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devstring', 'att_scalar_devstring_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devuchar', 'att_scalar_devuchar_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devulong', 'att_scalar_devulong_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devulong64', 'att_scalar_devulong64_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devlong64', 'att_scalar_devlong64_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devlong', 'att_scalar_devlong_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_scalar_devushort', 'att_scalar_devushort_att_conf_id_data_time_idx', if_not_exists => true); +SELECT 
add_reorder_policy('att_scalar_devshort', 'att_scalar_devshort_att_conf_id_data_time_idx', if_not_exists => true); + +SELECT add_reorder_policy('att_array_devboolean', 'att_array_devboolean_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devdouble', 'att_array_devdouble_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devfloat', 'att_array_devfloat_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devencoded', 'att_array_devencoded_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devenum', 'att_array_devenum_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devstate', 'att_array_devstate_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devstring', 'att_array_devstring_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devuchar', 'att_array_devuchar_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devulong', 'att_array_devulong_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devulong64', 'att_array_devulong64_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devlong64', 'att_array_devlong64_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devlong', 'att_array_devlong_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devushort', 'att_array_devushort_att_conf_id_data_time_idx', if_not_exists => true); +SELECT add_reorder_policy('att_array_devshort', 'att_array_devshort_att_conf_id_data_time_idx', if_not_exists => true); diff --git a/docker-compose/timescaledb/resources/05_lofar_func.sh b/docker-compose/timescaledb/resources/11_lofar_func.sh similarity index 100% rename from 
docker-compose/timescaledb/resources/05_lofar_func.sh rename to docker-compose/timescaledb/resources/11_lofar_func.sh diff --git a/docker-compose/timescaledb/resources/06_lofar_views.sql b/docker-compose/timescaledb/resources/12_lofar_views.sql similarity index 100% rename from docker-compose/timescaledb/resources/06_lofar_views.sql rename to docker-compose/timescaledb/resources/12_lofar_views.sql diff --git a/docker-compose/timescaledb/resources/07_cleanup.sql b/docker-compose/timescaledb/resources/13_cleanup.sql similarity index 100% rename from docker-compose/timescaledb/resources/07_cleanup.sql rename to docker-compose/timescaledb/resources/13_cleanup.sql diff --git a/jupyter-notebooks/Beamforming_Test.ipynb b/jupyter-notebooks/Beamforming_Test.ipynb index 0ae1c7631d123aafe85e5bc4c91ffa0c200f8c0f..323982e348f818f54349f6f7b526adc6a793730a 100644 --- a/jupyter-notebooks/Beamforming_Test.ipynb +++ b/jupyter-notebooks/Beamforming_Test.ipynb @@ -105,11 +105,11 @@ } ], "source": [ - "# Beam device\n", - "b_name = 'STAT/Beam/1'\n", - "b = DeviceProxy(b_name)\n", - "state = str(b.state())\n", - "print(b_name + ' : ' + state)" + "# TileBeam device\n", + "tb_name = 'STAT/TileBeam/1'\n", + "tb = DeviceProxy(tb_name)\n", + "state = str(tb.state())\n", + "print(tb_name + ' : ' + state)" ] }, { @@ -130,15 +130,15 @@ "# Start Beam device\n", "if state == \"OFF\":\n", " time.sleep(1)\n", - " b.initialise()\n", + " tb.initialise()\n", " time.sleep(1)\n", - "state = str(b.state())\n", + "state = str(tb.state())\n", "if state == \"STANDBY\":\n", - " b.set_defaults()\n", - " b.on()\n", - "state = str(b.state())\n", + " tb.set_defaults()\n", + " tb.on()\n", + "state = str(tb.state())\n", "if state == \"ON\":\n", - " print(f\"Device {b_name} is now in ON state\")" + " print(f\"Device {tb_name} is now in ON state\")" ] }, { @@ -158,9 +158,9 @@ ], "source": [ "# Test Beam attribute\n", - "beam = b\n", - "print(96==len(beam.HBAT_pointing_direction_R))\n", - 
"print(96==len(beam.HBAT_pointing_timestamp_R))" + "tilebeam = tb\n", + "print(96==len(tilebeam.HBAT_pointing_direction_R))\n", + "print(96==len(tilebeam.HBAT_pointing_timestamp_R))" ] }, { @@ -182,7 +182,7 @@ "source": [ "# Test HBAT delays\n", "pointing_direction = numpy.array([[\"J2000\",\"0deg\",\"0deg\"]] * 96).flatten()\n", - "delays = b.HBAT_delays(pointing_direction)\n", + "delays = tb.HBAT_delays(pointing_direction)\n", "print(delays)\n", "print(96*16==len(delays))" ] @@ -239,7 +239,7 @@ "source": [ "# Test whole main function\n", "print(recv.read_attribute('HBAT_BF_delays_RW').value)\n", - "beam.HBAT_set_pointing(pointing_direction)\n", + "tilebeam.HBAT_set_pointing(pointing_direction)\n", "print(recv.read_attribute('HBAT_BF_delays_RW').value)" ] }, @@ -254,9 +254,9 @@ ], "metadata": { "kernelspec": { - "display_name": "StationControl", + "display_name": "Python 3", "language": "python", - "name": "stationcontrol" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -268,7 +268,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/jupyter-notebooks/Tile Beam Steering.ipynb b/jupyter-notebooks/Tile Beam Steering.ipynb index 8041f6b99ed6431fc5a46044a1bc5d44ee39590d..956b8bb1572a9c17e65cc35fce56b51ced89a496 100644 --- a/jupyter-notebooks/Tile Beam Steering.ipynb +++ b/jupyter-notebooks/Tile Beam Steering.ipynb @@ -32,7 +32,7 @@ ], "source": [ "# point all tiles at the horizon at 24°\n", - "beam.HBAT_pointing_direction_RW=[(\"AZELGEO\",\"24deg\",\"0deg\")]*96\n", + "tilebeam.HBAT_pointing_direction_RW=[(\"AZELGEO\",\"24deg\",\"0deg\")]*96\n", " \n", "# wait for pointing to be applied\n", "time.sleep(0.5)\n", @@ -94,7 +94,7 @@ "source": [ "for angle, direction in zip(angles, directions):\n", " # set angles\n", - " beam.HBAT_pointing_direction_RW=[(\"AZELGEO\",f\"{angle}deg\",\"0deg\")]*96\n", + " 
tilebeam.HBAT_pointing_direction_RW=[(\"AZELGEO\",f\"{angle}deg\",\"0deg\")]*96\n", " \n", " # obtain delays\n", " time.sleep(0.5)\n", @@ -143,7 +143,7 @@ "\n", "for idx, (angle, direction) in enumerate(zip(angles, directions)):\n", " # set angles\n", - " beam.HBAT_pointing_direction_RW=[(\"AZELGEO\",f\"{angle}deg\",\"0deg\")]*96\n", + " tilebeam.HBAT_pointing_direction_RW=[(\"AZELGEO\",f\"{angle}deg\",\"0deg\")]*96\n", " \n", " # obtain delays\n", " time.sleep(0.5)\n", @@ -172,9 +172,9 @@ ], "metadata": { "kernelspec": { - "display_name": "StationControl", + "display_name": "Python 3", "language": "python", - "name": "stationcontrol" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -186,7 +186,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.8.10" } }, "nbformat": 4, diff --git a/sbin/run_integration_test.sh b/sbin/run_integration_test.sh index 17fa54803986d93886138a0bc3a1815a04f330e0..c3351457189b36495fb5f1738098e9b947e41c1b 100755 --- a/sbin/run_integration_test.sh +++ b/sbin/run_integration_test.sh @@ -14,13 +14,13 @@ cd "$LOFAR20_DIR/docker-compose" || exit 1 # Build only the required images, please do not build everything that makes CI # take really long to finish, especially grafana / jupyter / prometheus. # jupyter is physically large > 2.5gb and overlayfs is really slow. 
-make build device-sdp device-recv device-sst device-unb2 device-xst device-beamlet device-beam +make build device-sdp device-recv device-sst device-unb2 device-xst device-beamlet device-tilebeam make build sdptr-sim recv-sim unb2-sim apsct-sim apspu-sim make build databaseds dsconfig elk integration-test make build archiver-timescale hdbppts-cm hdbppts-es # Start and stop sequence -make stop device-boot device-docker device-apsct device-apspu device-sdp device-recv device-sst device-unb2 device-xst device-beamlet device-beam sdptr-sim recv-sim unb2-sim apsct-sim apspu-sim hdbppts-es hdbppts-cm archiver-timescale +make stop device-boot device-docker device-apsct device-apspu device-sdp device-recv device-sst device-unb2 device-xst device-beamlet device-tilebeam sdptr-sim recv-sim unb2-sim apsct-sim apspu-sim hdbppts-es hdbppts-cm archiver-timescale make start databaseds dsconfig elk # Give dsconfig and databaseds time to start @@ -38,7 +38,7 @@ make start sdptr-sim recv-sim unb2-sim apsct-sim apspu-sim # Give the simulators time to start sleep 5 -make start device-boot device-apsct device-apspu device-sdp device-recv device-sst device-unb2 device-xst device-beam device-beamlet +make start device-boot device-apsct device-apspu device-sdp device-recv device-sst device-unb2 device-xst device-tilebeam device-beamlet # Archive devices -> starting order is important make start archiver-timescale hdbppts-cm hdbppts-es diff --git a/tangostationcontrol/docs/source/beam_tracking.rst b/tangostationcontrol/docs/source/beam_tracking.rst index a689739a5b471fed20655f52a1219bd267892993..a9860420af79ffa6ddc9e348abfb122911a8efc9 100644 --- a/tangostationcontrol/docs/source/beam_tracking.rst +++ b/tangostationcontrol/docs/source/beam_tracking.rst @@ -34,7 +34,7 @@ Coordinates in a celestial coordinate system result in a different direction dep Positions -------------------- -The positions of the antennas are required in ITRF, a carthesian geocentric coordinate system ("XYZ" with its 
origin at the center of the Earth). These coordinates provide an absolute 3D position on Earth and form the basis for most delay computations. These positions are not stationary however, due to the tectonic plate movement. Instead, we use source coordinates in ETRS89 (also carthesian geocentric), and convert those to ITRF using updated and extrapolated tectonic models (see ``tangostationcontrol.beam.geo.ETRS_to_ITRF``). +The positions of the antennas are required in ITRF, a carthesian geocentric coordinate system ("XYZ" with its origin at the center of the Earth). These coordinates provide an absolute 3D position on Earth and form the basis for most delay computations. These positions are not stationary however, due to the tectonic plate movement. Instead, we use source coordinates in ETRS89 (also carthesian geocentric), and convert those to ITRF using updated and extrapolated tectonic models (see ``tangostationcontrol.tilebeam.geo.ETRS_to_ITRF``). :recv.HBAT_reference_ETRS: (property) The reference position of each HBA tile, in ETRS89. @@ -65,7 +65,7 @@ The positions of the elements within an HBA tile are handled differently. Instea - The R axis is roughly down, - The HBA tiles on a station all lie on the same PQ plane, so R == 0. -These facts allow us to use the following information to calculate the absolute position of each tile element. The conversion takes the relative offsets of the elements within a tile, rotates them in PQR space, rotates those into relative ETRS offsets, and finally into absolute positions in ETRS. See ``tangostationcontrol.beam.hba_tile`` for these computations. +These facts allow us to use the following information to calculate the absolute position of each tile element. The conversion takes the relative offsets of the elements within a tile, rotates them in PQR space, rotates those into relative ETRS offsets, and finally into absolute positions in ETRS. See ``tangostationcontrol.tilebeam.hba_tile`` for these computations. 
:recv.HBAT_base_antenna_offsets: (property) The relative offsets of each element within a tile, with respect to the tile's reference position (the center of the ground plane). diff --git a/tangostationcontrol/docs/source/devices/beam.rst b/tangostationcontrol/docs/source/devices/tilebeam.rst similarity index 86% rename from tangostationcontrol/docs/source/devices/beam.rst rename to tangostationcontrol/docs/source/devices/tilebeam.rst index 8075cde9903c8818d7752a2d8675f9f8e9397a3b..e0c4ce810434da3347b0ec9804937c9d69d9ec66 100644 --- a/tangostationcontrol/docs/source/devices/beam.rst +++ b/tangostationcontrol/docs/source/devices/tilebeam.rst @@ -1,7 +1,7 @@ Beam ==================== -The ``beam == DeviceProxy("STAT/Beam/1")`` device sets up the beamforming on the station. It configures the HBA tile beam former, which adds the signals of its 16 elements within the tile. These element signals can be delayed a configurable amount of time, allowing their sum to become more sensitive in a certain direction. Each tile can have their own pointing direction configured. +The ``tilebeam == DeviceProxy("STAT/TileBeam/1")`` device sets up the beamforming on the station. It configures the HBA tile beam former, which adds the signals of its 16 elements within the tile. These element signals can be delayed a configurable amount of time, allowing their sum to become more sensitive in a certain direction. Each tile can have their own pointing direction configured. Beam Tracking -------------------- @@ -81,6 +81,6 @@ We use `python-casacore <https://casacore.github.io/python-casacore/index.html>` :returns: ``str`` -:use_measures(dir): Activate the measures tables in the provided directory. This necessitates turning off and restarting the Beam device, so the command will always appear to fail. Turn the device back and the selected measures tables will be active. +:use_measures(dir): Activate the measures tables in the provided directory. 
This necessitates turning off and restarting the TileBeam device, so the command will always appear to fail. Turn the device back and the selected measures tables will be active. :returns: ``(does not return)`` diff --git a/tangostationcontrol/docs/source/index.rst b/tangostationcontrol/docs/source/index.rst index f72e468ddd068cf5be6901373ad420919de9500e..0fd199ad8648ca1783d0565cd9f9c336ac4f0ab3 100644 --- a/tangostationcontrol/docs/source/index.rst +++ b/tangostationcontrol/docs/source/index.rst @@ -19,7 +19,7 @@ Even without having access to any LOFAR2.0 hardware, you can install the full st installation interfaces/overview devices/using - devices/beam + devices/tilebeam devices/beamlet devices/boot devices/docker diff --git a/tangostationcontrol/requirements.txt b/tangostationcontrol/requirements.txt index a93c35d3d5a643afaf0d78f53cdd2c36be65f8f2..0cb186f10ceb7b41693b948a4abd81b17c61c053 100644 --- a/tangostationcontrol/requirements.txt +++ b/tangostationcontrol/requirements.txt @@ -11,5 +11,5 @@ h5py >= 3.1.0 # BSD psutil >= 5.8.0 # BSD docker >= 5.0.3 # Apache 2 python-logstash-async >= 2.3.0 # MIT -python-casacore >= 3.3.1 # GPL2 +python-casacore >= 3.3.1 # LGPLv3 etrs-itrs@git+https://github.com/brentjens/etrs-itrs # license pending diff --git a/tangostationcontrol/setup.cfg b/tangostationcontrol/setup.cfg index 44e21e97a1b61338773a49d2fcd3601a2d4f9d8c..cfc0a249b75a93c5fc80b4dc892434f779581e8e 100644 --- a/tangostationcontrol/setup.cfg +++ b/tangostationcontrol/setup.cfg @@ -35,7 +35,7 @@ where=./ console_scripts = l2ss-apsct = tangostationcontrol.devices.apsct:main l2ss-apspu = tangostationcontrol.devices.apspu:main - l2ss-beam = tangostationcontrol.devices.beam:main + l2ss-tilebeam = tangostationcontrol.devices.tilebeam:main l2ss-beamlet = tangostationcontrol.devices.sdp.beamlet:main l2ss-boot = tangostationcontrol.devices.boot:main l2ss-docker-device = tangostationcontrol.devices.docker_device:main diff --git 
a/tangostationcontrol/tangostationcontrol/devices/boot.py b/tangostationcontrol/tangostationcontrol/devices/boot.py index 3ae76d7f8c3e2254962f794f8159ec307a2c0d17..ba9279092ba0fc3685eb3401b78e630f9ea5cd2a 100644 --- a/tangostationcontrol/tangostationcontrol/devices/boot.py +++ b/tangostationcontrol/tangostationcontrol/devices/boot.py @@ -218,7 +218,7 @@ class Boot(lofar_device): DeviceProxy_Time_Out = device_property( dtype='DevDouble', mandatory=False, - default_value=10.0, + default_value=30.0, ) # Initialise the hardware when initialising a station. Can end badly when using simulators. @@ -241,7 +241,7 @@ class Boot(lofar_device): "STAT/SST/1", "STAT/XST/1", "STAT/Beamlet/1", - "STAT/Beam/1", # Accesses RECV and Beamlet + "STAT/TileBeam/1", # Accesses RECV and Beamlet ], ) diff --git a/tangostationcontrol/tangostationcontrol/devices/lofar_device.py b/tangostationcontrol/tangostationcontrol/devices/lofar_device.py index 6943f86d4bde44010662cbcea45cb350ca271cea..067f09538d4cb972c0b1a186e726792b69132fbd 100644 --- a/tangostationcontrol/tangostationcontrol/devices/lofar_device.py +++ b/tangostationcontrol/tangostationcontrol/devices/lofar_device.py @@ -13,7 +13,7 @@ # PyTango imports from tango.server import attribute, command, Device, DeviceMeta -from tango import AttrWriteType, DevState, DebugIt, Attribute, DeviceProxy, AttrDataFormat +from tango import AttrWriteType, DevState, DebugIt, Attribute, DeviceProxy, AttrDataFormat, DevSource import time import math import numpy @@ -83,26 +83,6 @@ class lofar_device(Device, metaclass=DeviceMeta): return self.get_state() in [DevState.STANDBY, DevState.ON, DevState.ALARM] - def clear_poll_cache(self): - """ Remove all attributes from the poll cache, to remove stale entries - once we know the values can be read. """ - - # we use proxy.attribute_query_list(), as proxy.get_attribute_list() will throw if we - # call it too soon when starting everything (database, device) from scratch. 
- attr_names = [config.name for config in self.proxy.attribute_list_query()] - - for attr_name in attr_names: - if self.proxy.is_attribute_polled(attr_name): - # save poll period - poll_period = self.proxy.get_attribute_poll_period(attr_name) - - try: - # stop polling to remove cache entry - self.proxy.stop_poll_attribute(attr_name) - finally: - # start polling again - self.proxy.poll_attribute(attr_name, poll_period) - @log_exceptions() def init_device(self): """ Instantiates the device in the OFF state. """ @@ -123,6 +103,7 @@ class lofar_device(Device, metaclass=DeviceMeta): # we cannot write directly to our attribute, as that would not # trigger a write_{name} call. See https://www.tango-controls.org/community/forum/c/development/c/accessing-own-deviceproxy-class/?page=1#post-2021 self.proxy = DeviceProxy(self.get_name()) + self.proxy.set_source(DevSource.DEV) @log_exceptions() def delete_device(self): @@ -163,8 +144,14 @@ class lofar_device(Device, metaclass=DeviceMeta): self.configure_for_initialise() - # any values read so far are stale. clear the polling cache. - self.clear_poll_cache() + # WARNING: any values read so far are stale. + # Proxies either need to wait for the next poll round, or + # use proxy.set_source(DevSource.DEV) to avoid the cache + # alltogether. + # + # Actually clearing the polling cache runs into performance + # problems if a lot of attributes are polled, and into race + # conditions with the polling thread itself. 
self.set_state(DevState.STANDBY) self.set_status("Device is in the STANDBY state.") diff --git a/tangostationcontrol/tangostationcontrol/devices/recv.py b/tangostationcontrol/tangostationcontrol/devices/recv.py index 2eb1de134e70586d45eaae65647a0fe0529b5d61..eaf0f189b53ca3f64606d196298f6c9dc27708a1 100644 --- a/tangostationcontrol/tangostationcontrol/devices/recv.py +++ b/tangostationcontrol/tangostationcontrol/devices/recv.py @@ -92,7 +92,7 @@ class RECV(opcua_device): doc='Maximum amount of time to wait after turning dithering on or off', dtype='DevFloat', mandatory=False, - default_value=10.0 + default_value=20.0 ) # ----- Calibration values diff --git a/tangostationcontrol/tangostationcontrol/devices/sdp/sdp.py b/tangostationcontrol/tangostationcontrol/devices/sdp/sdp.py index c84fe448cf2841af39068cef90b09909bb056650..bedae17dac39d06b39635d2ee9a2fed508fac5f4 100644 --- a/tangostationcontrol/tangostationcontrol/devices/sdp/sdp.py +++ b/tangostationcontrol/tangostationcontrol/devices/sdp/sdp.py @@ -194,12 +194,17 @@ class SDP(opcua_device): # -------- def _prepare_hardware(self): + # FPGA firmware loading disabled, as it causes SDPTR to crash, + # see https://support.astron.nl/jira/browse/L2SDP-670 + """ # FPGAs need the correct firmware loaded self.FPGA_boot_image_RW = [1] * self.N_pn # wait for the firmware to be loaded (ignoring masked out elements) mask = self.proxy.TR_fpga_mask_RW self.wait_attribute("FPGA_boot_image_R", lambda attr: ((attr == 1) | ~mask).all(), 10) + """ + pass # -------- # Commands diff --git a/tangostationcontrol/tangostationcontrol/devices/beam.py b/tangostationcontrol/tangostationcontrol/devices/tilebeam.py similarity index 99% rename from tangostationcontrol/tangostationcontrol/devices/beam.py rename to tangostationcontrol/tangostationcontrol/devices/tilebeam.py index afcdf78871e321a2489339f747f61351643790b2..948a3657279bf489039bd372b1a096052d3556ec 100644 --- a/tangostationcontrol/tangostationcontrol/devices/beam.py +++ 
b/tangostationcontrol/tangostationcontrol/devices/tilebeam.py @@ -3,7 +3,7 @@ # Distributed under the terms of the APACHE license. # See LICENSE.txt for more info. -""" Beam Device Server for LOFAR2.0 +""" TileBeam Device Server for LOFAR2.0 """ @@ -28,10 +28,10 @@ logger = logging.getLogger() -__all__ = ["Beam", "main", "BeamTracker"] +__all__ = ["TileBeam", "main", "BeamTracker"] @device_logging_to_python() -class Beam(lofar_device): +class TileBeam(lofar_device): # ----------------- # Device Properties @@ -320,7 +320,7 @@ class Beam(lofar_device): # ---------- def main(**kwargs): """Main function of the ObservationControl module.""" - return entry(Beam, **kwargs) + return entry(TileBeam, **kwargs) # ---------- # Beam Tracker diff --git a/tangostationcontrol/tangostationcontrol/integration_test/default/devices/test_device_beam.py b/tangostationcontrol/tangostationcontrol/integration_test/default/devices/test_device_tilebeam.py similarity index 97% rename from tangostationcontrol/tangostationcontrol/integration_test/default/devices/test_device_beam.py rename to tangostationcontrol/tangostationcontrol/integration_test/default/devices/test_device_tilebeam.py index 381678118b123793767aff753e4461b7916fe5bd..4948dd7f70a1d1c122dcd3a3c50cda6184bb44d3 100644 --- a/tangostationcontrol/tangostationcontrol/integration_test/default/devices/test_device_beam.py +++ b/tangostationcontrol/tangostationcontrol/integration_test/default/devices/test_device_tilebeam.py @@ -24,12 +24,12 @@ class NumpyEncoder(json.JSONEncoder): return json.JSONEncoder.default(self, obj) -class TestDeviceBeam(AbstractTestBases.TestDeviceBase): +class TestDeviceTileBeam(AbstractTestBases.TestDeviceBase): pointing_direction = numpy.array([["J2000","0deg","0deg"]] * 96).flatten() def setUp(self): - super().setUp("STAT/Beam/1") + super().setUp("STAT/TileBeam/1") def setup_recv_proxy(self): # setup RECV @@ -146,7 +146,7 @@ class TestDeviceBeam(AbstractTestBases.TestDeviceBase): 
numpy.testing.assert_equal(calculated_HBAT_delay_steps[0], expected_HBAT_delay_steps) numpy.testing.assert_equal(calculated_HBAT_delay_steps[48], expected_HBAT_delay_steps) - def test_beam_tracking(self): + def test_tilebeam_tracking(self): # setup RECV as well recv_proxy = self.setup_recv_proxy() diff --git a/tangostationcontrol/tangostationcontrol/test/devices/test_beam_device.py b/tangostationcontrol/tangostationcontrol/test/devices/test_beam_device.py index 48ce76bf15d0c445b58320d223285153b7d57259..905c5a3b12f43a85562c32f1e509fe5e3a1e7baf 100644 --- a/tangostationcontrol/tangostationcontrol/test/devices/test_beam_device.py +++ b/tangostationcontrol/tangostationcontrol/test/devices/test_beam_device.py @@ -9,7 +9,7 @@ from tango.test_context import DeviceTestContext -from tangostationcontrol.devices import beam, lofar_device +from tangostationcontrol.devices import tilebeam, lofar_device import mock @@ -22,7 +22,7 @@ class TestBeamDevice(base.TestCase): # Patch DeviceProxy to allow making the proxies during initialisation # that we otherwise avoid using - for device in [beam, lofar_device]: + for device in [tilebeam, lofar_device]: proxy_patcher = mock.patch.object( device, 'DeviceProxy') proxy_patcher.start() @@ -30,14 +30,14 @@ class TestBeamDevice(base.TestCase): def test_get_pointing_directions(self): """Verify can read pointings attribute and length matches without err""" - with DeviceTestContext(beam.Beam, process=True, timeout=10) as proxy: + with DeviceTestContext(tilebeam.TileBeam, process=True, timeout=10) as proxy: proxy.initialise() self.assertEqual(96, len(proxy.read_attribute( "HBAT_pointing_direction_R").value)) def test_get_pointing_timestamps(self): """Verify can read timestamps attribute and length matches without err""" - with DeviceTestContext(beam.Beam, process=True, timeout=10) as proxy: + with DeviceTestContext(tilebeam.TileBeam, process=True, timeout=10) as proxy: proxy.initialise() self.assertEqual(96, len(proxy.read_attribute( 
"HBAT_pointing_timestamp_R").value))