diff --git a/.gitmodules b/.gitmodules
index 77e67527b8c9f1a7edd847ad3dfd9de6d675f0ac..1c9e69fc593c305a941f8d35e16f2efb531cefb5 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,3 @@
 [submodule "tangostationcontrol/tangostationcontrol/toolkit/libhdbpp-python"]
     path = tangostationcontrol/tangostationcontrol/toolkit/libhdbpp-python
     url = https://gitlab.com/tango-controls/hdbpp/libhdbpp-python.git
-[submodule "docker-compose/grafana/dashboards"]
-    path = docker-compose/grafana/dashboards
-    url = https://git.astron.nl/lofar2.0/grafana-station-dashboards.git
diff --git a/CDB/integrations/recvcluster_ConfigDb.json b/CDB/integrations/recvcluster_ConfigDb.json
index 23659cb18c4dea5badc2c5c4ff5201aba94cde36..71f8d94745d2d44616742c9776587697e060d008 100644
--- a/CDB/integrations/recvcluster_ConfigDb.json
+++ b/CDB/integrations/recvcluster_ConfigDb.json
@@ -117,7 +117,7 @@
             "24",
             "24"
         ],
-        "HBAT_PQR_to_ETRS_rotation_matrix": [
+        "PQR_to_ETRS_rotation_matrix": [
             "-0.1195951054", "-0.7919544517", "0.5987530018",
             " 0.9928227484", "-0.0954186800", "0.0720990002",
             " 0.0000330969", " 0.6030782884", "0.7976820024"
@@ -229,7 +229,7 @@
             "24",
             "24"
         ],
-        "HBAT_PQR_to_ETRS_rotation_matrix": [
+        "PQR_to_ETRS_rotation_matrix": [
             "-0.1195951054", "-0.7919544517", "0.5987530018",
             " 0.9928227484", "-0.0954186800", "0.0720990002",
             " 0.0000330969", " 0.6030782884", "0.7976820024"
@@ -341,7 +341,7 @@
             "24",
             "24"
         ],
-        "HBAT_PQR_to_ETRS_rotation_matrix": [
+        "PQR_to_ETRS_rotation_matrix": [
            "-0.1195951054", "-0.7919544517", "0.5987530018",
             " 0.9928227484", "-0.0954186800", "0.0720990002",
             " 0.0000330969", " 0.6030782884", "0.7976820024"
@@ -453,7 +453,7 @@
             "24",
             "24"
         ],
-        "HBAT_PQR_to_ETRS_rotation_matrix": [
+        "PQR_to_ETRS_rotation_matrix": [
            "-0.1195951054", "-0.7919544517", "0.5987530018",
             " 0.9928227484", "-0.0954186800", "0.0720990002",
             " 0.0000330969", " 0.6030782884", "0.7976820024"
diff --git a/CDB/stations/CS001_ConfigDb.json b/CDB/stations/CS001_ConfigDb.json
index 2af80f6f751b9dacf6546ef24125370005a78e42..12411a45994cd0cd9697190204cf1a13d37f3e6d 100644
--- a/CDB/stations/CS001_ConfigDb.json
+++ b/CDB/stations/CS001_ConfigDb.json
@@ -188,10 +188,20 @@
             "H36", "H37", "H38", "H39", "H40", "H41",
             "H42", "H43", "H44", "H45", "H46", "H47"
         ],
+        "Antenna_Cables": [
+            "115m", "115m", "115m", "115m", "115m", "115m",
+            "115m", "115m", "115m", "115m", "115m", "115m",
+            "115m", "115m", "115m", "115m", "115m", "115m",
+            "115m", "115m", "115m", "115m", "115m", "115m",
+            "115m", "115m", "115m", "115m", "115m", "115m",
+            "115m", "115m", "115m", "115m", "115m", "115m",
+            "115m", "115m", "115m", "115m", "115m", "115m",
+            "115m", "115m", "115m", "115m", "115m", "115m"
+        ],
         "Control_to_RECV_mapping": [
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "-1", "0", "-1", "1", "25", "1", "27", "1", "29", "0", "-1",
+            "0", "-1", "0", "-1", "1", "31", "1", "33", "1", "35", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
@@ -200,8 +210,8 @@
         ],
         "Power_to_RECV_mapping": [
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "-1", "0", "-1", "1", "24", "1", "26", "1", "28", "0", "-1",
+            "0", "-1", "0", "-1", "1", "30", "1", "32", "1", "34", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
@@ -210,8 +220,8 @@
         ],
         "Antenna_to_SDP_Mapping": [
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "-1", "0", "-1", "3", "0", "3", "1", "3", "2", "0", "-1",
+            "0", "-1", "0", "-1", "3", "3", "3", "4", "3", "5", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
@@ -314,59 +324,77 @@
             "L84", "L85", "L86", "L87", "L88", "L89",
             "L90", "L91", "L92", "L93", "L94", "L95"
         ],
+        "Antenna_Cables": [
+            "80m", "80m", "80m", "80m", "80m", "80m",
+            "80m", "80m", "80m", "80m", "80m", "80m",
+            "115m", "80m", "80m", "80m", "80m", "80m",
+            "80m", "80m", "80m", "80m", "80m", "80m",
+            "115m", "115m", "115m", "80m", "80m", "80m",
+            "80m", "80m", "80m", "80m", "80m", "80m",
+            "80m", "80m", "80m", "80m", "115m", "115m",
+            "115m", "80m", "80m", "80m", "80m", "80m",
+            "80m", "80m", "115m", "50m", "50m", "80m",
+            "80m", "80m", "115m", "115m", "115m", "115m",
+            "80m", "80m", "80m", "50m", "50m", "50m",
+            "80m", "80m", "80m", "115m", "115m", "115m",
+            "115m", "115m", "80m", "80m", "50m", "50m",
+            "50m", "50m", "80m", "80m", "115m", "115m",
+            "115m", "115m", "115m", "115m", "115m", "80m",
+            "50m", "50m", "80m", "80m", "115m", "115m"
+        ],
         "Control_to_RECV_mapping": [
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "-1", "0", "-1", "0", "-1", "1", "3", "1", "5", "0", "-1",
+            "1", "7", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "1", "9", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "11",
+            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "13", "1", "15",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1"
+            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "17", "0", "-1"
         ],
         "Power_to_RECV_mapping": [
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "0", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "-1", "0", "-1", "0", "-1", "1", "2", "1", "4", "0", "-1",
+            "1", "6", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "1", "8", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "10",
+            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "12", "1", "14",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1"
+            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "16", "0", "-1"
         ],
         "Antenna_to_SDP_Mapping": [
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "0", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "-1", "0", "-1", "0", "-1", "0", "1", "0", "2", "0", "-1",
+            "0", "3", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
+            "0", "4", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "5",
+            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "6", "1", "7",
             "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1",
-            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1", "0", "-1"
+            "0", "-1", "0", "-1", "0", "-1", "0", "-1", "1", "8", "0", "-1"
         ],
         "Antenna_Field_Reference_ETRS": [
             "3826923.942", "460915.117", "5064643.229"
diff --git a/CDB/stations/DTS_ConfigDb.json b/CDB/stations/DTS_ConfigDb.json
index 84836d8e1c33e9619ff0b6a89ffa07a28f3b5bd3..21941beaa1056f2a28360b9f055e91504bedd3ef 100644
--- a/CDB/stations/DTS_ConfigDb.json
+++ b/CDB/stations/DTS_ConfigDb.json
@@ -181,7 +181,7 @@
             "45.73",
             "54.40"
         ],
-        "HBAT_PQR_to_ETRS_rotation_matrix": [
+        "PQR_to_ETRS_rotation_matrix": [
             "-0.11660087", "-0.79095632", "0.60065992",
             " 0.99317077", "-0.09529842", "0.06730545",
             " 0.00400627", " 0.60440575", "0.79666658"
diff --git a/CDB/stations/DTS_Outside_ConfigDb.json b/CDB/stations/DTS_Outside_ConfigDb.json
index 07ab30786c114b735b9b3660eac74a1d71080da1..c2ed04867ab540dc1ca913aac6680f8bd2019dbf 100644
--- a/CDB/stations/DTS_Outside_ConfigDb.json
+++ b/CDB/stations/DTS_Outside_ConfigDb.json
@@ -213,7 +213,7 @@
             "45.73",
             "54.40"
         ],
-        "HBAT_PQR_to_ETRS_rotation_matrix": [
+        "PQR_to_ETRS_rotation_matrix": [
             "-0.11660087","-0.79095632","0.60065992",
             " 0.99317077","-0.09529842","0.06730545",
             " 0.00400627"," 0.60440575","0.79666658"
diff --git a/CDB/stations/dummy_positions_ConfigDb.json b/CDB/stations/dummy_positions_ConfigDb.json
index 44f80657f8bd28a5dd9a885a7808c262d31b0d1c..7403bc8bb43ae1c4b9495b12bba38f8bf6e99a9d 100644
--- a/CDB/stations/dummy_positions_ConfigDb.json
+++ b/CDB/stations/dummy_positions_ConfigDb.json
@@ -208,7 +208,7 @@
             "24",
             "24"
         ],
-        "HBAT_PQR_to_ETRS_rotation_matrix": [
+        "PQR_to_ETRS_rotation_matrix": [
            "-0.1195951054", "-0.7919544517", "0.5987530018",
             " 0.9928227484", "-0.0954186800", "0.0720990002",
             " 0.0000330969", " 0.6030782884", "0.7976820024"
diff --git a/docker-compose/grafana/Dockerfile b/docker-compose/grafana/Dockerfile
index 72da4bebc7539a5c903c613b9cb1022baaa4f162..1d13c0b4c6f524089139fd4e779c0d3be4491f49 100644
--- a/docker-compose/grafana/Dockerfile
+++ b/docker-compose/grafana/Dockerfile
@@ -1,19 +1,3 @@
-FROM grafana/grafana
+FROM git.astron.nl:5000/lofar2.0/grafana-station-dashboards:latest
 
-# Install some plugins
-RUN grafana-cli plugins install briangann-datatable-panel
-RUN grafana-cli plugins install ae3e-plotly-panel
-RUN grafana-cli plugins install yesoreyeram-infinity-datasource
-RUN grafana-cli plugins install aceiot-svg-panel
-RUN grafana-cli plugins install yesoreyeram-boomtable-panel
-RUN grafana-cli plugins install orchestracities-map-panel
-
-RUN wget https://algenty.github.io/flowcharting-repository/archives/agenty-flowcharting-panel-1.0.0b-SNAPSHOT.zip -O /tmp/agenty-flowcharting-panel.zip
-RUN cd /var/lib/grafana/plugins/ && unzip /tmp/agenty-flowcharting-panel.zip && mv grafana-flowcharting agenty-flowcharting-panel
-
-COPY grafana.ini /etc/grafana/
-
-# Add default configuration through provisioning (see https://grafana.com/docs/grafana/latest/administration/provisioning)
-COPY datasources /etc/grafana/provisioning/datasources/
-COPY dashboards /var/lib/grafana/dashboards/
 COPY stationcontrol-dashboards.yaml /etc/grafana/provisioning/dashboards/
diff --git a/docker-compose/grafana/alerting.json b/docker-compose/grafana/alerting.json
deleted file mode 100644
index bc5c76e7f8870efa52e60e21bf621ae0f1cd8418..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/alerting.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "template_files": {},
-  "alertmanager_config": {
-    "route": {
-      "receiver": "Alerta",
-      "repeat_interval": "10m"
-    },
-    "templates": null,
-    "receivers": [
-      {
-        "name": "Alerta",
-        "grafana_managed_receiver_configs": [
-          {
-            "name": "Alerta",
-            "type": "webhook",
-            "disableResolveMessage": false,
-            "settings": {
-              "url": "http://alerta-server:8080/api/webhooks/prometheus?api-key=demo-key"
-            },
-            "secureFields": {}
-          }
-        ]
-      }
-    ]
-  }
-}
diff --git a/docker-compose/grafana/dashboards b/docker-compose/grafana/dashboards
deleted file mode 160000
index faf5cbb2fc7981ca4430a9e341145ce66d304851..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/dashboards
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit faf5cbb2fc7981ca4430a9e341145ce66d304851
diff --git a/docker-compose/grafana/datasources/alertaui.yaml b/docker-compose/grafana/datasources/alertaui.yaml
deleted file mode 100644
index 7a3b62425a71ddf39642fa5f0fd515f7032170f7..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/datasources/alertaui.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-apiVersion: 1
-
-datasources:
-  # <string, required> name of the datasource. Required
-  - name: Alerta UI
-    # <string, required> datasource type. Required
-    type: yesoreyeram-infinity-datasource
-    # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
-    access: proxy
-    # <int> org id. will default to orgId 1 if not specified
-    orgId: 1
-    # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically
-    uid: alertaui
-    # <string> url
-    url: http://alerta-server:8080/api
-    # <string> Deprecated, use secureJsonData.password
-    password:
-    # <string> database user, if used
-    user: postgres
-    # <string> database name, if used
-    database: hdb
-    # <bool> enable/disable basic auth
-    basicAuth: false
-    # <string> basic auth username
-    basicAuthUser:
-    # <string> Deprecated, use secureJsonData.basicAuthPassword
-    basicAuthPassword:
-    # <bool> enable/disable with credentials headers
-    withCredentials:
-    # <bool> mark as default datasource. Max one per org
-    isDefault: false
-    # <map> fields that will be converted to json and stored in jsonData
-    jsonData:
-      secureQueryName1: "api-key"
-    # <string> json object of data that will be encrypted.
-    secureJsonData:
-      secureQueryValue1: "demo-key"
-    version: 1
-    # <bool> allow users to edit datasources from the UI.
-    editable: false
-
diff --git a/docker-compose/grafana/datasources/grafanaapi.yaml b/docker-compose/grafana/datasources/grafanaapi.yaml
deleted file mode 100644
index a2310cdf2f4432c09581b1f60bbf9ec16a573606..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/datasources/grafanaapi.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-apiVersion: 1
-
-datasources:
-  # <string, required> name of the datasource. Required
-  - name: Grafana API
-    # <string, required> datasource type. Required
-    type: yesoreyeram-infinity-datasource
-    # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
-    access: proxy
-    # <int> org id. will default to orgId 1 if not specified
-    orgId: 1
-    # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically
-    uid: grafanaapi
-    # <string> url
-    url: http://localhost:3000/api
-    # <string> Deprecated, use secureJsonData.password
-    password:
-    # <string> database user, if used
-    user: postgres
-    # <string> database name, if used
-    database: hdb
-    # <bool> enable/disable basic auth
-    basicAuth: false
-    # <string> basic auth username
-    basicAuthUser:
-    # <string> Deprecated, use secureJsonData.basicAuthPassword
-    basicAuthPassword:
-    # <bool> enable/disable with credentials headers
-    withCredentials:
-    # <bool> mark as default datasource. Max one per org
-    isDefault: false
-    # <map> fields that will be converted to json and stored in jsonData
-    version: 1
-    # <bool> allow users to edit datasources from the UI.
-    editable: false
-
diff --git a/docker-compose/grafana/datasources/prometheus.yaml b/docker-compose/grafana/datasources/prometheus.yaml
deleted file mode 100644
index efea8bd474db6eb6c9865c1731be5e4a46c42fcc..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/datasources/prometheus.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-apiVersion: 1
-
-datasources:
-  # <string, required> name of the datasource. Required
-  - name: Prometheus
-    # <string, required> datasource type. Required
-    type: prometheus
-    # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
-    access: proxy
-    # <int> org id. will default to orgId 1 if not specified
-    orgId: 1
-    # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically
-    uid: prometheus
-    # <string> url
-    url: http://prometheus:9090
-    # <string> Deprecated, use secureJsonData.password
-    password:
-    # <string> database user, if used
-    user:
-    # <string> database name, if used
-    database:
-    # <bool> enable/disable basic auth
-    basicAuth: false
-    # <string> basic auth username
-    basicAuthUser:
-    # <string> Deprecated, use secureJsonData.basicAuthPassword
-    basicAuthPassword:
-    # <bool> enable/disable with credentials headers
-    withCredentials:
-    # <bool> mark as default datasource. Max one per org
-    isDefault: true
-    # <map> fields that will be converted to json and stored in jsonData
-    jsonData:
-      httpMethod: POST
-    # <string> json object of data that will be encrypted.
-    secureJsonData:
-    version: 1
-    # <bool> allow users to edit datasources from the UI.
-    editable: false
diff --git a/docker-compose/grafana/grafana.ini b/docker-compose/grafana/grafana.ini
deleted file mode 100644
index acfabe0f10190c2b07ae579d21bd1abfc1891ff3..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/grafana.ini
+++ /dev/null
@@ -1,1008 +0,0 @@
-##################### Grafana Configuration Example #####################
-#
-# Everything has defaults so you only need to uncomment things you want to
-# change
-
-# possible values : production, development
-;app_mode = production
-
-# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
-;instance_name = ${HOSTNAME}
-
-#################################### Paths ####################################
-[paths]
-# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
-;data = /var/lib/grafana
-
-# Temporary files in `data` directory older than given duration will be removed
-;temp_data_lifetime = 24h
-
-# Directory where grafana can store logs
-;logs = /var/log/grafana
-
-# Directory where grafana will automatically scan and look for plugins
-;plugins = /var/lib/grafana/plugins
-
-# folder that contains provisioning config files that grafana will apply on startup and while running.
-;provisioning = conf/provisioning
-
-#################################### Server ####################################
-[server]
-# Protocol (http, https, h2, socket)
-;protocol = http
-
-# The ip address to bind to, empty will bind to all interfaces
-;http_addr =
-
-# The http port to use
-;http_port = 3000
-
-# The public facing domain name used to access grafana from a browser
-;domain = localhost
-
-# Redirect to correct domain if host header does not match domain
-# Prevents DNS rebinding attacks
-;enforce_domain = false
-
-# The full public facing url you use in browser, used for redirects and emails
-# If you use reverse proxy and sub path specify full url (with sub path)
-;root_url = %(protocol)s://%(domain)s:%(http_port)s/
-
-# Serve Grafana from subpath specified in `root_url` setting. By default it is set to `false` for compatibility reasons.
-;serve_from_sub_path = false
-
-# Log web requests
-;router_logging = false
-
-# the path relative working path
-;static_root_path = public
-
-# enable gzip
-enable_gzip = true
-
-# https certs & key file
-;cert_file =
-;cert_key =
-
-# Unix socket path
-;socket =
-
-# CDN Url
-;cdn_url =
-
-# Sets the maximum time using a duration format (5s/5m/5ms) before timing out read of an incoming request and closing idle connections.
-# `0` means there is no timeout for reading the request.
-;read_timeout = 0
-
-#################################### Database ####################################
-[database]
-# You can configure the database connection by specifying type, host, name, user and password
-# as separate properties or as on string using the url properties.
-
-# Either "mysql", "postgres" or "sqlite3", it's your choice
-;type = sqlite3
-;host = 127.0.0.1:3306
-;name = grafana
-;user = root
-# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
-;password =
-
-# Use either URL or the previous fields to configure the database
-# Example: mysql://user:secret@host:port/database
-;url =
-
-# For "postgres" only, either "disable", "require" or "verify-full"
-;ssl_mode = disable
-
-# Database drivers may support different transaction isolation levels.
-# Currently, only "mysql" driver supports isolation levels.
-# If the value is empty - driver's default isolation level is applied.
-# For "mysql" use "READ-UNCOMMITTED", "READ-COMMITTED", "REPEATABLE-READ" or "SERIALIZABLE".
-;isolation_level =
-
-;ca_cert_path =
-;client_key_path =
-;client_cert_path =
-;server_cert_name =
-
-# For "sqlite3" only, path relative to data_path setting
-;path = grafana.db
-
-# Max idle conn setting default is 2
-;max_idle_conn = 2
-
-# Max conn setting default is 0 (mean not set)
-;max_open_conn =
-
-# Connection Max Lifetime default is 14400 (means 14400 seconds or 4 hours)
-;conn_max_lifetime = 14400
-
-# Set to true to log the sql calls and execution times.
-;log_queries =
-
-# For "sqlite3" only. cache mode setting used for connecting to the database. (private, shared)
-;cache_mode = private
-
-################################### Data sources #########################
-[datasources]
-# Upper limit of data sources that Grafana will return. This limit is a temporary configuration and it will be deprecated when pagination will be introduced on the list data sources API.
-;datasource_limit = 5000
-
-#################################### Cache server #############################
-[remote_cache]
-# Either "redis", "memcached" or "database" default is "database"
-;type = database
-
-# cache connectionstring options
-# database: will use Grafana primary database.
-# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=0,ssl=false`. Only addr is required. ssl may be 'true', 'false', or 'insecure'.
-# memcache: 127.0.0.1:11211
-;connstr =
-
-#################################### Data proxy ###########################
-[dataproxy]
-
-# This enables data proxy logging, default is false
-;logging = false
-
-# How long the data proxy waits to read the headers of the response before timing out, default is 30 seconds.
-# This setting also applies to core backend HTTP data sources where query requests use an HTTP client with timeout set.
-;timeout = 30
-
-# How long the data proxy waits to establish a TCP connection before timing out, default is 10 seconds.
-;dialTimeout = 10
-
-# How many seconds the data proxy waits before sending a keepalive probe request.
-;keep_alive_seconds = 30
-
-# How many seconds the data proxy waits for a successful TLS Handshake before timing out.
-;tls_handshake_timeout_seconds = 10
-
-# How many seconds the data proxy will wait for a server's first response headers after
-# fully writing the request headers if the request has an "Expect: 100-continue"
-# header. A value of 0 will result in the body being sent immediately, without
-# waiting for the server to approve.
-;expect_continue_timeout_seconds = 1
-
-# Optionally limits the total number of connections per host, including connections in the dialing,
-# active, and idle states. On limit violation, dials will block.
-# A value of zero (0) means no limit.
-;max_conns_per_host = 0
-
-# The maximum number of idle connections that Grafana will keep alive.
-;max_idle_connections = 100
-
-# How many seconds the data proxy keeps an idle connection open before timing out.
-;idle_conn_timeout_seconds = 90
-
-# If enabled and user is not anonymous, data proxy will add X-Grafana-User header with username into the request, default is false.
-;send_user_header = false
-
-#################################### Analytics ####################################
-[analytics]
-# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
-# No ip addresses are being tracked, only simple counters to track
-# running instances, dashboard and error counts. It is very helpful to us.
-# Change this option to false to disable reporting.
-;reporting_enabled = true
-
-# The name of the distributor of the Grafana instance. Ex hosted-grafana, grafana-labs
-;reporting_distributor = grafana-labs
-
-# Set to false to disable all checks to https://grafana.net
-# for new versions (grafana itself and plugins), check is used
-# in some UI views to notify that grafana or plugin update exists
-# This option does not cause any auto updates, nor send any information
-# only a GET request to http://grafana.com to get latest versions
-;check_for_updates = true
-
-# Google Analytics universal tracking code, only enabled if you specify an id here
-;google_analytics_ua_id =
-
-# Google Tag Manager ID, only enabled if you specify an id here
-;google_tag_manager_id =
-
-#################################### Security ####################################
-[security]
-# disable creation of admin user on first start of grafana
-;disable_initial_admin_creation = false
-
-# default admin user, created on startup
-;admin_user = admin
-
-# default admin password, can be changed before first start of grafana, or in profile settings
-;admin_password = admin
-
-# used for signing
-;secret_key = SW2YcwTIb9zpOOhoPsMm
-
-# disable gravatar profile images
-;disable_gravatar = false
-
-# data source proxy whitelist (ip_or_domain:port separated by spaces)
-;data_source_proxy_whitelist =
-
-# disable protection against brute force login attempts
-;disable_brute_force_login_protection = false
-
-# set to true if you host Grafana behind HTTPS. default is false.
-;cookie_secure = false
-
-# set cookie SameSite attribute. defaults to `lax`. can be set to "lax", "strict", "none" and "disabled"
-;cookie_samesite = lax
-
-# set to true if you want to allow browsers to render Grafana in a <frame>, <iframe>, <embed> or <object>. default is false.
-;allow_embedding = false
-
-# Set to true if you want to enable http strict transport security (HSTS) response header.
-# This is only sent when HTTPS is enabled in this configuration.
-# HSTS tells browsers that the site should only be accessed using HTTPS.
-;strict_transport_security = false
-
-# Sets how long a browser should cache HSTS. Only applied if strict_transport_security is enabled.
-;strict_transport_security_max_age_seconds = 86400
-
-# Set to true if to enable HSTS preloading option. Only applied if strict_transport_security is enabled.
-;strict_transport_security_preload = false
-
-# Set to true if to enable the HSTS includeSubDomains option. Only applied if strict_transport_security is enabled.
-;strict_transport_security_subdomains = false
-
-# Set to true to enable the X-Content-Type-Options response header.
-# The X-Content-Type-Options response HTTP header is a marker used by the server to indicate that the MIME types advertised
-# in the Content-Type headers should not be changed and be followed.
-;x_content_type_options = true
-
-# Set to true to enable the X-XSS-Protection header, which tells browsers to stop pages from loading
-# when they detect reflected cross-site scripting (XSS) attacks.
-;x_xss_protection = true
-
-# Enable adding the Content-Security-Policy header to your requests.
-# CSP allows to control resources the user agent is allowed to load and helps prevent XSS attacks.
-;content_security_policy = false
-
-# Set Content Security Policy template used when adding the Content-Security-Policy header to your requests.
-# $NONCE in the template includes a random nonce.
-# $ROOT_PATH is server.root_url without the protocol.
-;content_security_policy_template = """script-src 'self' 'unsafe-eval' 'unsafe-inline' 'strict-dynamic' $NONCE;object-src 'none';font-src 'self';style-src 'self' 'unsafe-inline' blob:;img-src * data:;base-uri 'self';connect-src 'self' grafana.com ws://$ROOT_PATH wss://$ROOT_PATH;manifest-src 'self';media-src 'none';form-action 'self';"""
-
-#################################### Snapshots ###########################
-[snapshots]
-# snapshot sharing options
-;external_enabled = true
-;external_snapshot_url = https://snapshots-origin.raintank.io
-;external_snapshot_name = Publish to snapshot.raintank.io
-
-# Set to true to enable this Grafana instance act as an external snapshot server and allow unauthenticated requests for
-# creating and deleting snapshots.
-;public_mode = false
-
-# remove expired snapshot
-;snapshot_remove_expired = true
-
-#################################### Dashboards History ##################
-[dashboards]
-# Number dashboard versions to keep (per dashboard). Default: 20, Minimum: 1
-;versions_to_keep = 20
-
-# Minimum dashboard refresh interval. When set, this will restrict users to set the refresh interval of a dashboard lower than given interval. Per default this is 5 seconds.
-# The interval string is a possibly signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s or 1m.
-;min_refresh_interval = 5s
-
-# Path to the default home dashboard. If this value is empty, then Grafana uses StaticRootPath + "dashboards/home.json"
-default_home_dashboard_path = /var/lib/grafana/dashboards/home.json
-
-#################################### Users ###############################
-[users]
-# disable user signup / registration
-;allow_sign_up = true
-
-# Allow non admin users to create organizations
-;allow_org_create = true
-
-# Set to true to automatically assign new users to the default organization (id 1)
-;auto_assign_org = true
-
-# Set this value to automatically add new users to the provided organization (if auto_assign_org above is set to true)
-;auto_assign_org_id = 1
-
-# Default role new users will be automatically assigned (if disabled above is set to true)
-;auto_assign_org_role = Viewer
-
-# Require email validation before sign up completes
-;verify_email_enabled = false
-
-# Background text for the user field on the login page
-;login_hint = email or username
-;password_hint = password
-
-# Default UI theme ("dark" or "light")
-;default_theme = dark
-
-# Path to a custom home page. Users are only redirected to this if the default home dashboard is used. It should match a frontend route and contain a leading slash.
-; home_page =
-
-# External user management, these options affect the organization users view
-;external_manage_link_url =
-;external_manage_link_name =
-;external_manage_info =
-
-# Viewers can edit/inspect dashboard settings in the browser. But not save the dashboard.
-;viewers_can_edit = false
-
-# Editors can administrate dashboard, folders and teams they create
-;editors_can_admin = false
-
-# The duration in time a user invitation remains valid before expiring. This setting should be expressed as a duration. Examples: 6h (hours), 2d (days), 1w (week). Default is 24h (24 hours). The minimum supported duration is 15m (15 minutes).
-;user_invite_max_lifetime_duration = 24h
-
-# Enter a comma-separated list of users login to hide them in the Grafana UI. These users are shown to Grafana admins and themselves.
-; hidden_users =
-
-[auth]
-# Login cookie name
-;login_cookie_name = grafana_session
-
-# The maximum lifetime (duration) an authenticated user can be inactive before being required to login at next visit. Default is 7 days (7d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month). The lifetime resets at each successful token rotation.
-;login_maximum_inactive_lifetime_duration =
-
-# The maximum lifetime (duration) an authenticated user can be logged in since login time before being required to login. Default is 30 days (30d). This setting should be expressed as a duration, e.g. 5m (minutes), 6h (hours), 10d (days), 2w (weeks), 1M (month).
-;login_maximum_lifetime_duration =
-
-# How often should auth tokens be rotated for authenticated users when being active. The default is each 10 minutes.
-;token_rotation_interval_minutes = 10
-
-# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false
-;disable_login_form = false
-
-# Set to true to disable the sign out link in the side menu. Useful if you use auth.proxy or auth.jwt, defaults to false
-;disable_signout_menu = false
-
-# URL to redirect the user to after sign out
-;signout_redirect_url =
-
-# Set to true to attempt login with OAuth automatically, skipping the login screen.
-# This setting is ignored if multiple OAuth providers are configured.
-;oauth_auto_login = false
-
-# OAuth state max age cookie duration in seconds. Defaults to 600 seconds.
-;oauth_state_cookie_max_age = 600
-
-# limit of api_key seconds to live before expiration
-;api_key_max_seconds_to_live = -1
-
-# Set to true to enable SigV4 authentication option for HTTP-based datasources.
-;sigv4_auth_enabled = false
-
-#################################### Anonymous Auth ######################
-[auth.anonymous]
-# enable anonymous access
-enabled = true
-
-# specify organization name that should be used for unauthenticated users
-;org_name = Main Org.
-
-# specify role for unauthenticated users
-;org_role = Viewer
-
-# mask the Grafana version number for unauthenticated users
-;hide_version = false
-
-#################################### GitHub Auth ##########################
-[auth.github]
-;enabled = false
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = user:email,read:org
-;auth_url = https://github.com/login/oauth/authorize
-;token_url = https://github.com/login/oauth/access_token
-;api_url = https://api.github.com/user
-;allowed_domains =
-;team_ids =
-;allowed_organizations =
-
-#################################### GitLab Auth #########################
-[auth.gitlab]
-;enabled = false
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = api
-;auth_url = https://gitlab.com/oauth/authorize
-;token_url = https://gitlab.com/oauth/token
-;api_url = https://gitlab.com/api/v4
-;allowed_domains =
-;allowed_groups =
-
-#################################### Google Auth ##########################
-[auth.google]
-;enabled = false
-;allow_sign_up = true
-;client_id = some_client_id
-;client_secret = some_client_secret
-;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
-;auth_url = https://accounts.google.com/o/oauth2/auth
-;token_url = https://accounts.google.com/o/oauth2/token
-;api_url = https://www.googleapis.com/oauth2/v1/userinfo
-;allowed_domains =
-;hosted_domain =
-
-#################################### Grafana.com Auth ####################
-[auth.grafana_com]
-;enabled = false
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = user:email
-;allowed_organizations =
-
-#################################### Azure AD OAuth #######################
-[auth.azuread]
-;name = Azure AD
-;enabled = false
-;allow_sign_up = true
-;client_id = some_client_id
-;client_secret = some_client_secret
-;scopes = openid email profile
-;auth_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/authorize
-;token_url = https://login.microsoftonline.com/<tenant-id>/oauth2/v2.0/token
-;allowed_domains =
-;allowed_groups =
-
-#################################### Okta OAuth #######################
-[auth.okta]
-;name = Okta
-;enabled = false
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = openid profile email groups
-;auth_url = https://<tenant-id>.okta.com/oauth2/v1/authorize
-;token_url = https://<tenant-id>.okta.com/oauth2/v1/token
-;api_url = https://<tenant-id>.okta.com/oauth2/v1/userinfo
-;allowed_domains =
-;allowed_groups =
-;role_attribute_path =
-;role_attribute_strict = false
-
-#################################### Generic OAuth ##########################
-[auth.generic_oauth]
-;enabled = false
-;name = OAuth
-;allow_sign_up = true
-;client_id = some_id
-;client_secret = some_secret
-;scopes = user:email,read:org
-;empty_scopes = false
-;email_attribute_name = email:primary
-;email_attribute_path =
-;login_attribute_path =
-;name_attribute_path =
-;id_token_attribute_name =
-;auth_url = https://foo.bar/login/oauth/authorize
-;token_url = https://foo.bar/login/oauth/access_token
-;api_url = https://foo.bar/user
-;allowed_domains =
-;team_ids =
-;allowed_organizations =
-;role_attribute_path =
-;role_attribute_strict = false
-;groups_attribute_path =
-;tls_skip_verify_insecure = false
-;tls_client_cert =
-;tls_client_key =
-;tls_client_ca =
-
-#################################### Basic Auth ##########################
-[auth.basic]
-;enabled = true
-
-#################################### Auth Proxy ##########################
-[auth.proxy]
-;enabled = false
-;header_name = X-WEBAUTH-USER
-;header_property = username
-;auto_sign_up = true
-;sync_ttl = 60
-;whitelist = 192.168.1.1, 192.168.2.1
-;headers = Email:X-User-Email, Name:X-User-Name
-# Read the auth proxy docs for details on what the setting below enables
-;enable_login_token = false
-
-#################################### Auth JWT ##########################
-[auth.jwt]
-;enabled = true
-;header_name = X-JWT-Assertion
-;email_claim = sub
-;username_claim = sub
-;jwk_set_url = https://foo.bar/.well-known/jwks.json
-;jwk_set_file = /path/to/jwks.json
-;cache_ttl = 60m
-;expected_claims = {"aud": ["foo", "bar"]}
-;key_file = /path/to/key/file
-
-#################################### Auth LDAP ##########################
-[auth.ldap]
-;enabled = false
-;config_file = /etc/grafana/ldap.toml
-;allow_sign_up = true
-
-# LDAP background sync (Enterprise only)
-# At 1 am every day
-;sync_cron = "0 0 1 * * *"
-;active_sync_enabled = true
-
-#################################### AWS ###########################
-[aws]
-# Enter a comma-separated list of allowed AWS authentication providers.
-# Options are: default (AWS SDK Default), keys (Access && secret key), credentials (Credentials field), ec2_iam_role (EC2 IAM Role)
-; allowed_auth_providers = default,keys,credentials
-
-# Allow AWS users to assume a role using temporary security credentials.
-# If true, assume role will be enabled for all AWS authentication providers that are specified in aws_auth_providers
-; assume_role_enabled = true
-
-#################################### Azure ###############################
-[azure]
-# Azure cloud environment where Grafana is hosted
-# Possible values are AzureCloud, AzureChinaCloud, AzureUSGovernment and AzureGermanCloud
-# Default value is AzureCloud (i.e. public cloud)
-;cloud = AzureCloud
-
-# Specifies whether Grafana hosted in Azure service with Managed Identity configured (e.g. Azure Virtual Machines instance)
-# If enabled, the managed identity can be used for authentication of Grafana in Azure services
-# Disabled by default, needs to be explicitly enabled
-;managed_identity_enabled = false
-
-# Client ID to use for user-assigned managed identity
-# Should be set for user-assigned identity and should be empty for system-assigned identity
-;managed_identity_client_id =
-
-#################################### SMTP / Emailing ##########################
-[smtp]
-;enabled = false
-;host = localhost:25
-;user =
-# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
Ex """#password;""" -;password = -;cert_file = -;key_file = -;skip_verify = false -;from_address = admin@grafana.localhost -;from_name = Grafana -# EHLO identity in SMTP dialog (defaults to instance_name) -;ehlo_identity = dashboard.example.com -# SMTP startTLS policy (defaults to 'OpportunisticStartTLS') -;startTLS_policy = NoStartTLS - -[emails] -;welcome_email_on_sign_up = false -;templates_pattern = emails/*.html, emails/*.txt -;content_types = text/html - -#################################### Logging ########################## -[log] -# Either "console", "file", "syslog". Default is console and file -# Use space to separate multiple modes, e.g. "console file" -;mode = console file - -# Either "debug", "info", "warn", "error", "critical", default is "info" -;level = info - -# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug -;filters = - -# For "console" mode only -[log.console] -;level = - -# log line format, valid options are text, console and json -;format = console - -# For "file" mode only -[log.file] -;level = - -# log line format, valid options are text, console and json -;format = text - -# This enables automated log rotate(switch of following options), default is true -;log_rotate = true - -# Max line number of single file, default is 1000000 -;max_lines = 1000000 - -# Max size shift of single file, default is 28 means 1 << 28, 256MB -;max_size_shift = 28 - -# Segment log daily, default is true -;daily_rotate = true - -# Expired days of log file(delete after max days), default is 7 -;max_days = 7 - -[log.syslog] -;level = - -# log line format, valid options are text, console and json -;format = text - -# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. -;network = -;address = - -# Syslog facility. user, daemon and local0 through local7 are valid. -;facility = - -# Syslog tag. By default, the process' argv[0] is used. -;tag = - -[log.frontend] -# Should Sentry javascript agent be initialized -;enabled = false - -# Sentry DSN if you want to send events to Sentry. -;sentry_dsn = - -# Custom HTTP endpoint to send events captured by the Sentry agent to. Default will log the events to stdout. -;custom_endpoint = /log - -# Rate of events to be reported between 0 (none) and 1 (all), float -;sample_rate = 1.0 - -# Requests per second limit enforced an extended period, for Grafana backend log ingestion endpoint (/log). -;log_endpoint_requests_per_second_limit = 3 - -# Max requests accepted per short interval of time for Grafana backend log ingestion endpoint (/log). -;log_endpoint_burst_limit = 15 - -#################################### Usage Quotas ######################## -[quota] -; enabled = false - -#### set quotas to -1 to make unlimited. #### -# limit number of users per Org. -; org_user = 10 - -# limit number of dashboards per Org. -; org_dashboard = 100 - -# limit number of data_sources per Org. -; org_data_source = 10 - -# limit number of api_keys per Org. -; org_api_key = 10 - -# limit number of alerts per Org. -;org_alert_rule = 100 - -# limit number of orgs a user can create. -; user_org = 10 - -# Global limit of users. -; global_user = -1 - -# global limit of orgs. -; global_org = -1 - -# global limit of dashboards -; global_dashboard = -1 - -# global limit of api_keys -; global_api_key = -1 - -# global limit on number of logged in users. 
-; global_session = -1 - -# global limit of alerts -;global_alert_rule = -1 - -#################################### Alerting ############################ -[alerting] -# Disable alerting engine & UI features -;enabled = true -# Makes it possible to turn off alert rule execution but alerting UI is visible -;execute_alerts = true - -# Default setting for new alert rules. Defaults to categorize error and timeouts as alerting. (alerting, keep_state) -;error_or_timeout = alerting - -# Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok) -;nodata_or_nullvalues = no_data - -# Alert notifications can include images, but rendering many images at the same time can overload the server -# This limit will protect the server from render overloading and make sure notifications are sent out quickly -;concurrent_render_limit = 5 - - -# Default setting for alert calculation timeout. Default value is 30 -;evaluation_timeout_seconds = 30 - -# Default setting for alert notification timeout. Default value is 30 -;notification_timeout_seconds = 30 - -# Default setting for max attempts to sending alert notifications. Default value is 3 -;max_attempts = 3 - -# Makes it possible to enforce a minimal interval between evaluations, to reduce load on the backend -;min_interval_seconds = 1 - -# Configures for how long alert annotations are stored. Default is 0, which keeps them forever. -# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month). -;max_annotation_age = - -# Configures max number of alert annotations that Grafana stores. Default value is 0, which keeps all alert annotations. -;max_annotations_to_keep = - -#################################### Annotations ######################### -[annotations] -# Configures the batch size for the annotation clean-up job. This setting is used for dashboard, API, and alert annotations. -;cleanupjob_batchsize = 100 - -[annotations.dashboard] -# Dashboard annotations means that annotations are associated with the dashboard they are created on. - -# Configures how long dashboard annotations are stored. Default is 0, which keeps them forever. -# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month). -;max_age = - -# Configures max number of dashboard annotations that Grafana stores. Default value is 0, which keeps all dashboard annotations. -;max_annotations_to_keep = - -[annotations.api] -# API annotations means that the annotations have been created using the API without any -# association with a dashboard. - -# Configures how long Grafana stores API annotations. Default is 0, which keeps them forever. -# This setting should be expressed as a duration. Examples: 6h (hours), 10d (days), 2w (weeks), 1M (month). -;max_age = - -# Configures max number of API annotations that Grafana keeps. Default value is 0, which keeps all API annotations. -;max_annotations_to_keep = - -#################################### Explore ############################# -[explore] -# Enable the Explore section -;enabled = true - -#################################### Internal Grafana Metrics ########################## -# Metrics available at HTTP API Url /metrics -[metrics] -# Disable / Enable internal metrics -;enabled = true -# Graphite Publish interval -;interval_seconds = 10 -# Disable total stats (stat_totals_*) metrics to be generated -;disable_total_stats = false - -#If both are set, basic auth will be required for the metrics endpoint. 
-; basic_auth_username = -; basic_auth_password = - -# Metrics environment info adds dimensions to the `grafana_environment_info` metric, which -# can expose more information about the Grafana instance. -[metrics.environment_info] -#exampleLabel1 = exampleValue1 -#exampleLabel2 = exampleValue2 - -# Send internal metrics to Graphite -[metrics.graphite] -# Enable by setting the address setting (ex localhost:2003) -;address = -;prefix = prod.grafana.%(instance_name)s. - -#################################### Grafana.com integration ########################## -# Url used to import dashboards directly from Grafana.com -[grafana_com] -;url = https://grafana.com - -#################################### Distributed tracing ############ -[tracing.jaeger] -# Enable by setting the address sending traces to jaeger (ex localhost:6831) -;address = localhost:6831 -# Tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2) -;always_included_tag = tag1:value1 -# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote -;sampler_type = const -# jaeger samplerconfig param -# for "const" sampler, 0 or 1 for always false/true respectively -# for "probabilistic" sampler, a probability between 0 and 1 -# for "rateLimiting" sampler, the number of spans per second -# for "remote" sampler, param is the same as for "probabilistic" -# and indicates the initial sampling rate before the actual one -# is received from the mothership -;sampler_param = 1 -# sampling_server_url is the URL of a sampling manager providing a sampling strategy. -;sampling_server_url = -# Whether or not to use Zipkin propagation (x-b3- HTTP headers). -;zipkin_propagation = false -# Setting this to true disables shared RPC spans. -# Not disabling is the most common setting when using Zipkin elsewhere in your infrastructure. -;disable_shared_zipkin_spans = false - -#################################### External image storage ########################## -[external_image_storage] -# Used for uploading images to public servers so they can be included in slack/email messages. -# you can choose between (s3, webdav, gcs, azure_blob, local) -;provider = - -[external_image_storage.s3] -;endpoint = -;path_style_access = -;bucket = -;region = -;path = -;access_key = -;secret_key = - -[external_image_storage.webdav] -;url = -;public_url = -;username = -;password = - -[external_image_storage.gcs] -;key_file = -;bucket = -;path = - -[external_image_storage.azure_blob] -;account_name = -;account_key = -;container_name = - -[external_image_storage.local] -# does not require any configuration - -[rendering] -# Options to configure a remote HTTP image rendering service, e.g. using https://github.com/grafana/grafana-image-renderer. -# URL to a remote HTTP image renderer service, e.g. http://localhost:8081/render, will enable Grafana to render panels and dashboards to PNG-images using HTTP requests to an external service. -;server_url = -# If the remote HTTP image renderer service runs on a different server than the Grafana server you may have to configure this to a URL where Grafana is reachable, e.g. http://grafana.domain/. -;callback_url = -# Concurrent render request limit affects when the /render HTTP endpoint is used. Rendering many images at the same time can overload the server, -# which this setting can help protect against by only allowing a certain amount of concurrent requests. -;concurrent_render_request_limit = 30 - -[panels] -# If set to true Grafana will allow script tags in text panels. 
-
-# enable this to allow us to create mash ups with other pages
-disable_sanitize_html = true
-
-[plugins]
-;enable_alpha = false
-;app_tls_skip_verify_insecure = false
-# Enter a comma-separated list of plugin identifiers to identify plugins to load even if they are unsigned. Plugins with modified signatures are never loaded.
-;allow_loading_unsigned_plugins =
-# Enable or disable installing plugins directly from within Grafana.
-;plugin_admin_enabled = false
-;plugin_admin_external_manage_enabled = false
-;plugin_catalog_url = https://grafana.com/grafana/plugins/
-
-#################################### Grafana Live ##########################################
-[live]
-# max_connections to Grafana Live WebSocket endpoint per Grafana server instance. See Grafana Live docs
-# if you are planning to make it higher than default 100 since this can require some OS and infrastructure
-# tuning. 0 disables Live, -1 means unlimited connections.
-;max_connections = 100
-
-# allowed_origins is a comma-separated list of origins that can establish connection with Grafana Live.
-# If not set then origin will be matched over root_url. Supports wildcard symbol "*".
-;allowed_origins =
-
-# engine defines an HA (high availability) engine to use for Grafana Live. By default no engine used - in
-# this case Live features work only on a single Grafana server. Available options: "redis".
-# Setting ha_engine is an EXPERIMENTAL feature.
-;ha_engine =
-
-# ha_engine_address sets a connection address for Live HA engine. Depending on engine type address format can differ.
-# For now we only support Redis connection address in "host:port" format.
-# This option is EXPERIMENTAL.
-;ha_engine_address = "127.0.0.1:6379"
-
-#################################### Grafana Image Renderer Plugin ##########################
-[plugin.grafana-image-renderer]
-# Instruct headless browser instance to use a default timezone when not provided by Grafana, e.g. when rendering panel image of alert.
-# See ICU’s metaZones.txt (https://cs.chromium.org/chromium/src/third_party/icu/source/data/misc/metaZones.txt) for a list of supported
-# timezone IDs. Fallbacks to TZ environment variable if not set.
-;rendering_timezone =
-
-# Instruct headless browser instance to use a default language when not provided by Grafana, e.g. when rendering panel image of alert.
-# Please refer to the HTTP header Accept-Language to understand how to format this value, e.g. 'fr-CH, fr;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5'.
-;rendering_language =
-
-# Instruct headless browser instance to use a default device scale factor when not provided by Grafana, e.g. when rendering panel image of alert.
-# Default is 1. Using a higher value will produce more detailed images (higher DPI), but will require more disk space to store an image.
-;rendering_viewport_device_scale_factor =
-
-# Instruct headless browser instance whether to ignore HTTPS errors during navigation. Per default HTTPS errors are not ignored. Due to
-# the security risk it's not recommended to ignore HTTPS errors.
-;rendering_ignore_https_errors =
-
-# Instruct headless browser instance whether to capture and log verbose information when rendering an image. Default is false and will
-# only capture and log error messages. When enabled, debug messages are captured and logged as well.
-# For the verbose information to be included in the Grafana server log you have to adjust the rendering log level to debug, configure
-# [log].filter = rendering:debug.
-;rendering_verbose_logging =
-
-# Instruct headless browser instance whether to output its debug and error messages into running process of remote rendering service.
-# Default is false. This can be useful to enable (true) when troubleshooting.
-;rendering_dumpio =
-
-# Additional arguments to pass to the headless browser instance. Default is --no-sandbox. The list of Chromium flags can be found
-# here (https://peter.sh/experiments/chromium-command-line-switches/). Multiple arguments is separated with comma-character.
-;rendering_args =
-
-# You can configure the plugin to use a different browser binary instead of the pre-packaged version of Chromium.
-# Please note that this is not recommended, since you may encounter problems if the installed version of Chrome/Chromium is not
-# compatible with the plugin.
-;rendering_chrome_bin =
-
-# Instruct how headless browser instances are created. Default is 'default' and will create a new browser instance on each request.
-# Mode 'clustered' will make sure that only a maximum of browsers/incognito pages can execute concurrently.
-# Mode 'reusable' will have one browser instance and will create a new incognito page on each request.
-;rendering_mode =
-
-# When rendering_mode = clustered you can instruct how many browsers or incognito pages can execute concurrently. Default is 'browser'
-# and will cluster using browser instances.
-# Mode 'context' will cluster using incognito pages.
-;rendering_clustering_mode =
-# When rendering_mode = clustered you can define maximum number of browser instances/incognito pages that can execute concurrently..
-;rendering_clustering_max_concurrency =
-
-# Limit the maximum viewport width, height and device scale factor that can be requested.
-;rendering_viewport_max_width =
-;rendering_viewport_max_height =
-;rendering_viewport_max_device_scale_factor =
-
-# Change the listening host and port of the gRPC server. Default host is 127.0.0.1 and default port is 0 and will automatically assign
-# a port not in use.
-;grpc_host =
-;grpc_port =
-
-[enterprise]
-# Path to a valid Grafana Enterprise license.jwt file
-;license_path =
-
-[feature_toggles]
-# enable features, separated by spaces
-enable = ngalert
-
-[date_formats]
-# For information on what formatting patterns that are supported https://momentjs.com/docs/#/displaying/
-
-# Default system date format used in time range picker and other places where full time is displayed
-;full_date = YYYY-MM-DD HH:mm:ss
-
-# Used by graph and other places where we only show small intervals
-;interval_second = HH:mm:ss
-;interval_minute = HH:mm
-;interval_hour = MM/DD HH:mm
-;interval_day = MM/DD
-;interval_month = YYYY-MM
-;interval_year = YYYY
-
-# Experimental feature
-;use_browser_locale = false
-
-# Default timezone for user preferences. Options are 'browser' for the browser local timezone or a timezone name from IANA Time Zone database, e.g. 'UTC' or 'Europe/Amsterdam' etc.
-;default_timezone = browser
-
-[expressions]
-# Enable or disable the expressions functionality.
-;enabled = true - -[geomap] -# Set the JSON configuration for the default basemap -;default_baselayer_config = `{ -; "type": "xyz", -; "config": { -; "attribution": "Open street map", -; "url": "https://tile.openstreetmap.org/{z}/{x}/{y}.png" -; } -;}` - -# Enable or disable loading other base map layers -;enable_custom_baselayers = true diff --git a/docker-compose/grafana/import-rules.py b/docker-compose/grafana/import-rules.py deleted file mode 100755 index 340215ce1e53744aef3a2722f69c3ecdfd28ca82..0000000000000000000000000000000000000000 --- a/docker-compose/grafana/import-rules.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/python3 -import json -import os -import argparse - -parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, - description= -""" -Generate rule import files and script for Grafana. - -This script expands a given rules.json file into individual rules and -prints the bash commands to import them in Grafana. - -To export rules from Grafana, use - curl <grafana>/api/ruler/grafana/api/v1/rules > rules.json -""") -parser.add_argument( - '-c', '--alert-config-file', type=str, required=False, help="Input alertmanager configuration JSON to parse, output of 'curl <grafana>/api/ruler/grafana/api/v1/rules' [%(default)s]") -parser.add_argument( - '-r', '--rules-file', type=str, required=True, help="Input rules JSON to parse, output of 'curl <grafana>/api/ruler/grafana/api/v1/rules' [%(default)s]") -parser.add_argument( - '-o', '--output-dir', type=str, default="rules", help="Directory to store the output [%(default)s]") -parser.add_argument( - '-B', '--authorization-bearer', type=str, default="abcdefghijklmnopqrstuvwxyz", help="Authorization bearer from the Grafana 'editor' API key [%(default)s]") -parser.add_argument( - '-g', '--grafana_url', type=str, default="http://localhost:3000", help="Base URL of Grafana [%(default)s]") -parser.add_argument( - '-u', '--update', default=False, action='store_true', help="Update existing alerts, instead of creating new ones [%(default)s]") - -args = parser.parse_args() - -if args.alert_config_file: - print(f"echo Importing alert configuration file {args.alert_config_file}") - print(f"curl -X POST {args.grafana_url}/api/alertmanager/grafana/config/api/v1/alerts -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'Authorization: Bearer {args.authorization_bearer}' -d '@{args.alert_config_file}'") - print(f"echo ''") - -with open(args.rules_file) as f: - data=json.load(f) - - try: - os.mkdir(args.output_dir) - except FileExistsError as e: - pass - - # the rules are of format {"folder": [{alert}, {alert}] } - for folder, rules in data.items(): - try: - os.mkdir(f"{args.output_dir}/{folder}") - except FileExistsError as e: - pass - - # print command to create folder - payload = json.dumps({"title": folder}) - print(f"echo Creating folder {folder}") - print(f"curl -X POST {args.grafana_url}/api/folders -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'Authorization: Bearer {args.authorization_bearer}' -d '{payload}'") - print(f"echo ''") - - for rule in rules: - rule_filename = f"{args.output_dir}/{folder}/{rule['name']}.json" - - if not args.update: - # strip rule UIDs - for subrule in rule["rules"]: - del subrule["grafana_alert"]["uid"] - - # dump this rule - with open(rule_filename, "w") as rule_file: - json.dump(rule, rule_file) - - # print import statement for this rule - print(f"echo Processing rule {folder}/{rule['name']}") - print(f"curl -X POST 
{args.grafana_url}/api/ruler/grafana/api/v1/rules/{folder} -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'Authorization: Bearer {args.authorization_bearer}' -d '@{rule_filename}'") - print(f"echo ''") diff --git a/docker-compose/grafana/rules.json b/docker-compose/grafana/rules.json deleted file mode 100644 index b870d4cbc2b8e5708213fd4e4f3d1c5501d4467d..0000000000000000000000000000000000000000 --- a/docker-compose/grafana/rules.json +++ /dev/null @@ -1 +0,0 @@ -{"station":[{"name":"FPGA processing error","interval":"10s","rules":[{"expr":"","for":"20s","labels":{"severity":"major"},"annotations":{"__dashboardUid__":"nC8N_kO7k","__panelId__":"9","summary":"One or more FPGAs are unusable."},"grafana_alert":{"id":1,"orgId":1,"title":"FPGA processing error","condition":"B","data":[{"refId":"A","queryType":"","relativeTimeRange":{"from":600,"to":0},"datasourceUid":"timescaledb","model":{"format":"time_series","group":[],"hide":false,"intervalMs":1000,"maxDataPoints":43200,"metricColumn":"none","rawQuery":true,"rawSql":"SELECT\n $__timeGroup(data_time, $__interval),\n x::text,\n device,\n name,\n value\nFROM lofar_array_boolean\nWHERE\n $__timeFilter(data_time) AND\n name = 'fpga_error_r'\nORDER BY 1,2","refId":"A","select":[[{"params":["value_r"],"type":"column"}]],"table":"att_scalar_devdouble","timeColumn":"data_time","timeColumnType":"timestamp","where":[{"name":"$__timeFilter","params":[],"type":"macro"}]}},{"refId":"B","queryType":"","relativeTimeRange":{"from":0,"to":0},"datasourceUid":"-100","model":{"conditions":[{"evaluator":{"params":[0],"type":"gt"},"operator":{"type":"and"},"query":{"params":["A"]},"reducer":{"params":[],"type":"last"},"type":"query"}],"datasource":{"type":"__expr__","uid":"-100"},"expression":"A","hide":false,"intervalMs":1000,"maxDataPoints":43200,"reducer":"last","refId":"B","settings":{"mode":"dropNN"},"type":"reduce"}}],"updated":"2022-04-04T18:01:53Z","intervalSeconds":10,"version":3,"uid":"kujybCynk","namespace_uid":"R_jsbCynz","namespace_id":6,"rule_group":"FPGA processing error","no_data_state":"NoData","exec_err_state":"Alerting"}}]}]} \ No newline at end of file diff --git a/docker-compose/jupyter-lab.yml b/docker-compose/jupyter-lab.yml new file mode 100644 index 0000000000000000000000000000000000000000..2c5984dc6eac7327f0af29a9251e4834a7f9c2ff --- /dev/null +++ b/docker-compose/jupyter-lab.yml @@ -0,0 +1,42 @@ +# +# Docker compose file that launches Jupyter Lab for interactive iTango sessions over HTTP. 
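+# +# To bring the service up by hand (a sketch, assuming the stock docker-compose CLI is run from within the docker-compose/ directory; the repository may wrap this in its own Makefile targets): +# +#   docker-compose -f jupyter-lab.yml build jupyter-lab +#   docker-compose -f jupyter-lab.yml up -d jupyter-lab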
+# +# Connect by surfing to http://localhost:8889/ +# View logs through 'docker logs -f -t jupyter-lab' +# +# Defines: +# - jupyter-lab: Jupyter Lab with iTango support +# + +version: '2.1' + +services: + jupyter-lab: + build: + context: jupyterlab + args: + CONTAINER_EXECUTION_UID: ${CONTAINER_EXECUTION_UID} + SOURCE_IMAGE: ${LOCAL_DOCKER_REGISTRY_HOST}/${LOCAL_DOCKER_REGISTRY_USER}/tango-itango:${TANGO_ITANGO_VERSION} + container_name: ${CONTAINER_NAME_PREFIX}jupyter-lab + logging: + driver: "json-file" + options: + max-size: "100m" + max-file: "10" + networks: + - control + volumes: + - ..:/opt/lofar/tango:rw + - ../jupyter-notebooks:/jupyter-notebooks:rw + - ${HOME}:/hosthome + - ${SCRATCH}:/scratch:rw + environment: + - TANGO_HOST=${TANGO_HOST} + ports: + - "8889:8889" + user: ${CONTAINER_EXECUTION_UID} + working_dir: /jupyter-notebooks + entrypoint: + - /opt/lofar/tango/bin/start-ds.sh + - jupyter lab --port=8889 --no-browser --ip=0.0.0.0 --allow-root --NotebookApp.token= --NotebookApp.password= + restart: unless-stopped diff --git a/docker-compose/jupyterlab/Dockerfile b/docker-compose/jupyterlab/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..448d9a7928ed879b285baf75e02a2c1604a433da --- /dev/null +++ b/docker-compose/jupyterlab/Dockerfile @@ -0,0 +1,61 @@ +ARG SOURCE_IMAGE +FROM ${SOURCE_IMAGE} + +# UID of the user that this container will run under. This is needed to give directories +# that are needed for temporary storage the proper owner and access rights. +ARG CONTAINER_EXECUTION_UID=1000 + +# Create a new user with this UID, but only if the UID is not already in use +RUN sudo adduser --disabled-password --system --uid ${CONTAINER_EXECUTION_UID} --no-create-home --home ${HOME} user || exit 0 +RUN sudo chown ${CONTAINER_EXECUTION_UID} -R ${HOME} + +# Add compilers to install python packages which come with C++ code +RUN sudo apt-get update -y +RUN sudo apt-get install -y g++ gcc python3-dev + +# Install git to install pip requirements from git +RUN sudo apt-get install -y git + +# Install dependencies of our scripts (bin/start-ds.sh) +RUN sudo apt-get install -y rsync + +COPY requirements.txt ./ +RUN sudo pip3 install -r requirements.txt + +# Install some version of the casacore measures tables, to allow basic delay computation analysis in the notebooks +RUN sudo apt-get install -y casacore-data + +# see https://github.com/jupyter/nbconvert/issues/1434 +RUN sudo bash -c "echo DEFAULT_ARGS += [\\\"--no-sandbox\\\"] >> /usr/local/lib/python3.7/dist-packages/pyppeteer/launcher.py" +RUN sudo apt-get update -y +RUN sudo apt-get install -y git gconf-service libasound2 libatk1.0-0 libatk-bridge2.0-0 libc6 libcairo2 libcups2 libdbus-1-3 libexpat1 libfontconfig1 libgcc1 libgconf-2-4 libgdk-pixbuf2.0-0 libglib2.0-0 libgtk-3-0 libnspr4 libpango-1.0-0 libpangocairo-1.0-0 libstdc++6 libx11-6 libx11-xcb1 libxcb1 libxcomposite1 libxcursor1 libxdamage1 libxext6 libxfixes3 libxi6 libxrandr2 libxrender1 libxss1 libxtst6 ca-certificates fonts-liberation libappindicator1 libnss3 lsb-release xdg-utils wget libcairo-gobject2 libxinerama1 libgtk2.0-0 libpangoft2-1.0-0 libthai0 libpixman-1-0 libxcb-render0 libharfbuzz0b libdatrie1 libgraphite2-3 libgbm1 + +# Allow Download as -> PDF via LaTeX +RUN sudo apt-get install -y texlive-xetex texlive-fonts-recommended texlive-latex-recommended cm-super + +# Configure jupyter_bokeh +RUN sudo mkdir -p /usr/share/jupyter /usr/etc +RUN sudo chmod a+rwx /usr/share/jupyter /usr/etc +RUN sudo jupyter nbextension install --sys-prefix --symlink --py
jupyter_bokeh +RUN sudo jupyter nbextension enable jupyter_bokeh --py --sys-prefix + +# Install profiles for ipython & jupyter +COPY ipython-profiles /opt/ipython-profiles/ +RUN sudo chown ${CONTAINER_EXECUTION_UID} -R /opt/ipython-profiles +COPY jupyter-kernels /usr/local/share/jupyter/kernels/ + +# Install patched jupyter executable +COPY jupyter-notebook /usr/local/bin/jupyter-notebook + +# Add Tini. Tini operates as a process subreaper for jupyter. This prevents kernel crashes. +ENV TINI_VERSION v0.6.0 +ENV JUPYTER_RUNTIME_DIR=/tmp +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /usr/bin/tini +RUN sudo chmod +x /usr/bin/tini + +USER ${CONTAINER_EXECUTION_UID} +# pyppeteer-install installs in the homedir, so run it as the user that will execute the notebook +RUN pyppeteer-install + +# Enable Jupyter lab +ENV JUPYTER_ENABLE_LAB=yes diff --git a/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/ipython_config.py b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/ipython_config.py new file mode 100644 index 0000000000000000000000000000000000000000..91b04aaa3a20232b60e5ced00a99648891955ce5 --- /dev/null +++ b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/ipython_config.py @@ -0,0 +1,578 @@ +# Configuration file for ipython. + +#------------------------------------------------------------------------------ +# InteractiveShellApp(Configurable) configuration +#------------------------------------------------------------------------------ + +## A Mixin for applications that start InteractiveShell instances. +# +# Provides configurables for loading extensions and executing files as part of +# configuring a Shell environment. +# +# The following methods should be called by the :meth:`initialize` method of the +# subclass: +# +# - :meth:`init_path` +# - :meth:`init_shell` (to be implemented by the subclass) +# - :meth:`init_gui_pylab` +# - :meth:`init_extensions` +# - :meth:`init_code` + +## Execute the given command string. +#c.InteractiveShellApp.code_to_run = '' + +## Run the file referenced by the PYTHONSTARTUP environment variable at IPython +# startup. +#c.InteractiveShellApp.exec_PYTHONSTARTUP = True + +## List of files to run at IPython startup. +#c.InteractiveShellApp.exec_files = [] + +## lines of code to run at IPython startup. +#c.InteractiveShellApp.exec_lines = [] + +## A list of dotted module names of IPython extensions to load. +#c.InteractiveShellApp.extensions = [] + +## dotted module name of an IPython extension to load. +#c.InteractiveShellApp.extra_extension = '' + +## A file to be run +#c.InteractiveShellApp.file_to_run = '' + +## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3', +# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4'). +#c.InteractiveShellApp.gui = None + +## Should variables loaded at startup (by startup files, exec_lines, etc.) be +# hidden from tools like %who? +#c.InteractiveShellApp.hide_initial_ns = True + +## Configure matplotlib for interactive use with the default matplotlib backend. +#c.InteractiveShellApp.matplotlib = None + +## Run the module as a script. +#c.InteractiveShellApp.module_to_run = '' + +## Pre-load matplotlib and numpy for interactive use, selecting a particular +# matplotlib backend and loop integration. +#c.InteractiveShellApp.pylab = None + +## If true, IPython will populate the user namespace with numpy, pylab, etc. and +# an ``import *`` is done from numpy and pylab, when using pylab mode. 
+# +# When False, pylab mode should not import any names into the user namespace. +#c.InteractiveShellApp.pylab_import_all = True + +## Reraise exceptions encountered loading IPython extensions? +#c.InteractiveShellApp.reraise_ipython_extension_failures = False + +#------------------------------------------------------------------------------ +# Application(SingletonConfigurable) configuration +#------------------------------------------------------------------------------ + +## This is an application. + +## The date format used by logging formatters for %(asctime)s +#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S' + +## The Logging format template +#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s' + +## Set the log level by value or name. +#c.Application.log_level = 30 + +#------------------------------------------------------------------------------ +# BaseIPythonApplication(Application) configuration +#------------------------------------------------------------------------------ + +## IPython: an enhanced interactive Python shell. + +## Whether to create profile dir if it doesn't exist +#c.BaseIPythonApplication.auto_create = False + +## Whether to install the default config files into the profile dir. If a new +# profile is being created, and IPython contains config files for that profile, +# then they will be staged into the new directory. Otherwise, default config +# files will be automatically generated. +#c.BaseIPythonApplication.copy_config_files = False + +## Path to an extra config file to load. +# +# If specified, load this config file in addition to any other IPython config. +#c.BaseIPythonApplication.extra_config_file = '' + +## The name of the IPython directory. This directory is used for logging +# configuration (through profiles), history storage, etc. The default is usually +# $HOME/.ipython. This option can also be specified through the environment +# variable IPYTHONDIR. +#c.BaseIPythonApplication.ipython_dir = '' + +## Whether to overwrite existing config files when copying +#c.BaseIPythonApplication.overwrite = False + +## The IPython profile to use. +#c.BaseIPythonApplication.profile = 'default' + +## Create a massive crash report when IPython encounters what may be an internal +# error. The default is to append a short message to the usual traceback +#c.BaseIPythonApplication.verbose_crash = False + +#------------------------------------------------------------------------------ +# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration +#------------------------------------------------------------------------------ + +## Whether to display a banner upon starting IPython. +#c.TerminalIPythonApp.display_banner = True + +## If a command or file is given via the command-line, e.g. 'ipython foo.py', +# start an interactive shell after executing the file or command. +#c.TerminalIPythonApp.force_interact = False + +## Class to use to instantiate the TerminalInteractiveShell object. Useful for +# custom Frontends +#c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell' + +## Start IPython quickly by skipping the loading of config files. +#c.TerminalIPythonApp.quick = False + +#------------------------------------------------------------------------------ +# InteractiveShell(SingletonConfigurable) configuration +#------------------------------------------------------------------------------ + +## An enhanced, interactive shell for Python. 
+ +## 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run +# interactively (displaying output from expressions). +#c.InteractiveShell.ast_node_interactivity = 'last_expr' + +## A list of ast.NodeTransformer subclass instances, which will be applied to +# user input before code is run. +#c.InteractiveShell.ast_transformers = [] + +## Make IPython automatically call any callable object even if you didn't type +# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically. +# The value can be '0' to disable the feature, '1' for 'smart' autocall, where +# it is not applied if there are no more arguments on the line, and '2' for +# 'full' autocall, where all callable objects are automatically called (even if +# no arguments are present). +#c.InteractiveShell.autocall = 0 + +## Autoindent IPython code entered interactively. +#c.InteractiveShell.autoindent = True + +## Enable magic commands to be called without the leading %. +#c.InteractiveShell.automagic = True + +## The part of the banner to be printed before the profile +#c.InteractiveShell.banner1 = 'Python 3.7.3 (default, Jul 25 2020, 13:03:44) \nType "copyright", "credits" or "license" for more information.\n\nIPython 5.8.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n' + +## The part of the banner to be printed after the profile +#c.InteractiveShell.banner2 = '' + +## Set the size of the output cache. The default is 1000, you can change it +# permanently in your config file. Setting it to 0 completely disables the +# caching system, and the minimum value accepted is 20 (if you provide a value +# less than 20, it is reset to 0 and a warning is issued). This limit is +# defined because otherwise you'll spend more time re-flushing a too small cache +# than working +#c.InteractiveShell.cache_size = 1000 + +## Use colors for displaying information about objects. Because this information +# is passed through a pager (like 'less'), and some pagers get confused with +# color codes, this capability can be turned off. +#c.InteractiveShell.color_info = True + +## Set the color scheme (NoColor, Neutral, Linux, or LightBG). +#c.InteractiveShell.colors = 'Neutral' + +## +#c.InteractiveShell.debug = False + +## **Deprecated** +# +# Will be removed in IPython 6.0 +# +# Enable deep (recursive) reloading by default. IPython can use the deep_reload +# module which reloads changes in modules recursively (it replaces the reload() +# function, so you don't need to change anything to use it). `deep_reload` +# forces a full reload of modules whose code may have changed, which the default +# reload() function does not. When deep_reload is off, IPython will use the +# normal reload(), but deep_reload will still be available as dreload(). +#c.InteractiveShell.deep_reload = False + +## Don't call post-execute functions that have failed in the past. +#c.InteractiveShell.disable_failing_post_execute = False + +## If True, anything that would be passed to the pager will be displayed as +# regular output instead. +#c.InteractiveShell.display_page = False + +## (Provisional API) enables html representation in mime bundles sent to pagers. 
+#c.InteractiveShell.enable_html_pager = False + +## Total length of command history +#c.InteractiveShell.history_length = 10000 + +## The number of saved history entries to be loaded into the history buffer at +# startup. +#c.InteractiveShell.history_load_length = 1000 + +## +#c.InteractiveShell.ipython_dir = '' + +## Start logging to the given file in append mode. Use `logfile` to specify a log +# file to **overwrite** logs to. +#c.InteractiveShell.logappend = '' + +## The name of the logfile to use. +#c.InteractiveShell.logfile = '' + +## Start logging to the default log file in overwrite mode. Use `logappend` to +# specify a log file to **append** logs to. +#c.InteractiveShell.logstart = False + +## +#c.InteractiveShell.object_info_string_level = 0 + +## Automatically call the pdb debugger after every exception. +#c.InteractiveShell.pdb = False + +## Deprecated since IPython 4.0 and ignored since 5.0, set +# TerminalInteractiveShell.prompts object directly. +#c.InteractiveShell.prompt_in1 = 'In [\\#]: ' + +## Deprecated since IPython 4.0 and ignored since 5.0, set +# TerminalInteractiveShell.prompts object directly. +#c.InteractiveShell.prompt_in2 = ' .\\D.: ' + +## Deprecated since IPython 4.0 and ignored since 5.0, set +# TerminalInteractiveShell.prompts object directly. +#c.InteractiveShell.prompt_out = 'Out[\\#]: ' + +## Deprecated since IPython 4.0 and ignored since 5.0, set +# TerminalInteractiveShell.prompts object directly. +#c.InteractiveShell.prompts_pad_left = True + +## +#c.InteractiveShell.quiet = False + +## +#c.InteractiveShell.separate_in = '\n' + +## +#c.InteractiveShell.separate_out = '' + +## +#c.InteractiveShell.separate_out2 = '' + +## Show rewritten input, e.g. for autocall. +#c.InteractiveShell.show_rewritten_input = True + +## Enables rich html representation of docstrings. (This requires the docrepr +# module). +#c.InteractiveShell.sphinxify_docstring = False + +## +#c.InteractiveShell.wildcards_case_sensitive = True + +## +#c.InteractiveShell.xmode = 'Context' + +#------------------------------------------------------------------------------ +# TerminalInteractiveShell(InteractiveShell) configuration +#------------------------------------------------------------------------------ + +## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix, +# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a +# direct exit without any confirmation. +#c.TerminalInteractiveShell.confirm_exit = True + +## Options for displaying tab completions, 'column', 'multicolumn', and +# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit` +# documentation for more information. +#c.TerminalInteractiveShell.display_completions = 'multicolumn' + +## Shortcut style to use at the prompt. 'vi' or 'emacs'. +#c.TerminalInteractiveShell.editing_mode = 'emacs' + +## Set the editor used by IPython (defaults to $EDITOR/vi/notepad). +#c.TerminalInteractiveShell.editor = 'vi' + +## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is +# in addition to the F2 binding, which is always enabled. +#c.TerminalInteractiveShell.extra_open_editor_shortcuts = False + +## Highlight matching brackets. +#c.TerminalInteractiveShell.highlight_matching_brackets = True + +## The name or class of a Pygments style to use for syntax highlighting. To see +# available styles, run `pygmentize -L styles`.
+#c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined + +## Override highlighting format for specific tokens +#c.TerminalInteractiveShell.highlighting_style_overrides = {} + +## Enable mouse support in the prompt +#c.TerminalInteractiveShell.mouse_support = False + +## Class used to generate Prompt token for prompt_toolkit +#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts' + +## Use `raw_input` for the REPL, without completion and prompt colors. +# +# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. +# Known usages are: IPython's own testing machinery, and emacs inferior-shell +# integration through elpy. +# +# This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment +# variable is set, or the current terminal is not a tty. +#c.TerminalInteractiveShell.simple_prompt = False + +## Number of lines at the bottom of the screen to reserve for the completion menu +#c.TerminalInteractiveShell.space_for_menu = 6 + +## Automatically set the terminal title +#c.TerminalInteractiveShell.term_title = True + +## Use 24bit colors instead of 256 colors in prompt highlighting. If your +# terminal supports true color, the following command should print 'TRUECOLOR' +# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n" +#c.TerminalInteractiveShell.true_color = False + +#------------------------------------------------------------------------------ +# HistoryAccessor(HistoryAccessorBase) configuration +#------------------------------------------------------------------------------ + +## Access the history database without adding to it. +# +# This is intended for use by standalone history tools. IPython shells use +# HistoryManager, below, which is a subclass of this. + +## Options for configuring the SQLite connection +# +# These options are passed as keyword args to sqlite3.connect when establishing +# database connections. +#c.HistoryAccessor.connection_options = {} + +## enable the SQLite history +# +# set enabled=False to disable the SQLite history, in which case there will be +# no stored history, no SQLite connection, and no background saving thread. +# This may be necessary in some threaded environments where IPython is embedded. +#c.HistoryAccessor.enabled = True + +## Path to file to use for SQLite history database. +# +# By default, IPython will put the history database in the IPython profile +# directory. If you would rather share one history among profiles, you can set +# this value in each, so that they are consistent. +# +# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts. +# If you see IPython hanging, try setting this to something on a local disk, +# e.g:: +# +# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite +# +# you can also use the specific value `:memory:` (including the colons at both +# ends but not the backticks), to avoid creating a history file. +#c.HistoryAccessor.hist_file = '' + +#------------------------------------------------------------------------------ +# HistoryManager(HistoryAccessor) configuration +#------------------------------------------------------------------------------ + +## A class to organize all history-related functionality in one place. + +## Write to database every x commands (higher values save disk access & power). +# Values of 1 or less effectively disable caching. +#c.HistoryManager.db_cache_size = 0 + +## Should the history database include output?
(default: no) +#c.HistoryManager.db_log_output = False + +#------------------------------------------------------------------------------ +# ProfileDir(LoggingConfigurable) configuration +#------------------------------------------------------------------------------ + +## An object to manage the profile directory and its resources. +# +# The profile directory is used by all IPython applications, to manage +# configuration, logging and security. +# +# This object knows how to find, create and manage these directories. This +# should be used by any code that wants to handle profiles. + +## Set the profile location directly. This overrides the logic used by the +# `profile` option. +#c.ProfileDir.location = '' + +#------------------------------------------------------------------------------ +# BaseFormatter(Configurable) configuration +#------------------------------------------------------------------------------ + +## A base formatter class that is configurable. +# +# This formatter should usually be used as the base class of all formatters. It +# is a traited :class:`Configurable` class and includes an extensible API for +# users to determine how their objects are formatted. The following logic is +# used to find a function to format a given object. +# +# 1. The object is introspected to see if it has a method with the name +# :attr:`print_method`. If it does, that object is passed to that method +# for formatting. +# 2. If no print method is found, three internal dictionaries are consulted +# to find a print method: :attr:`singleton_printers`, :attr:`type_printers` +# and :attr:`deferred_printers`. +# +# Users should use these dictionaries to register functions that will be used to +# compute the format data for their objects (if those objects don't have the +# special print methods). The easiest way of using these dictionaries is through +# the :meth:`for_type` and :meth:`for_type_by_name` methods. +# +# If no function/callable is found to compute the format data, ``None`` is +# returned and this format type is not used. + +## +#c.BaseFormatter.deferred_printers = {} + +## +#c.BaseFormatter.enabled = True + +## +#c.BaseFormatter.singleton_printers = {} + +## +#c.BaseFormatter.type_printers = {} + +#------------------------------------------------------------------------------ +# PlainTextFormatter(BaseFormatter) configuration +#------------------------------------------------------------------------------ + +## The default pretty-printer. +# +# This uses :mod:`IPython.lib.pretty` to compute the format data of the object. +# If the object cannot be pretty printed, :func:`repr` is used. See the +# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty +# printers. Here is a simple example:: +# +# def dtype_pprinter(obj, p, cycle): +# if cycle: +# return p.text('dtype(...)') +# if hasattr(obj, 'fields'): +# if obj.fields is None: +# p.text(repr(obj)) +# else: +# p.begin_group(7, 'dtype([') +# for i, field in enumerate(obj.descr): +# if i > 0: +# p.text(',') +# p.breakable() +# p.pretty(field) +# p.end_group(7, '])') + +## +#c.PlainTextFormatter.float_precision = '' + +## Truncate large collections (lists, dicts, tuples, sets) to this size. +# +# Set to 0 to disable truncation.
+#c.PlainTextFormatter.max_seq_length = 1000 + +## +#c.PlainTextFormatter.max_width = 79 + +## +#c.PlainTextFormatter.newline = '\n' + +## +#c.PlainTextFormatter.pprint = True + +## +#c.PlainTextFormatter.verbose = False + +#------------------------------------------------------------------------------ +# Completer(Configurable) configuration +#------------------------------------------------------------------------------ + +## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex +# commands, unicode names, and expanding unicode characters back to latex +# commands. +#c.Completer.backslash_combining_completions = True + +## Activate greedy completion. PENDING DEPRECATION; this is now mostly taken care +# of with Jedi. +# +# This will enable completion on elements of lists, results of function calls, +# etc., but can be unsafe because the code is actually evaluated on TAB. +#c.Completer.greedy = False + +#------------------------------------------------------------------------------ +# IPCompleter(Completer) configuration +#------------------------------------------------------------------------------ + +## Extension of the completer class with IPython-specific features + +## DEPRECATED as of version 5.0. +# +# Instruct the completer to use __all__ for the completion +# +# Specifically, when completing on ``object.<tab>``. +# +# When True: only those names in obj.__all__ will be included. +# +# When False [default]: the __all__ attribute is ignored +#c.IPCompleter.limit_to__all__ = False + +## Whether to merge completion results into a single list +# +# If False, only the completion results from the first non-empty completer will +# be returned. +#c.IPCompleter.merge_completions = True + +## Instruct the completer to omit private method names +# +# Specifically, when completing on ``object.<tab>``. +# +# When 2 [default]: all names that start with '_' will be excluded. +# +# When 1: all 'magic' names (``__foo__``) will be excluded. +# +# When 0: nothing will be excluded. +#c.IPCompleter.omit__names = 2 + +#------------------------------------------------------------------------------ +# ScriptMagics(Magics) configuration +#------------------------------------------------------------------------------ + +## Magics for talking to scripts +# +# This defines a base `%%script` cell magic for running a cell with a program in +# a subprocess, and registers a few top-level magics that call %%script with +# common interpreters. + +## Extra script cell magics to define +# +# This generates simple wrappers of `%%script foo` as `%%foo`. +# +# If you want to add script magics that aren't on your path, specify them in +# script_paths +#c.ScriptMagics.script_magics = [] + +## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby' +# +# Only necessary for items in script_magics where the default path will not find +# the right interpreter. +#c.ScriptMagics.script_paths = {} + +#------------------------------------------------------------------------------ +# StoreMagics(Magics) configuration +#------------------------------------------------------------------------------ + +## Lightweight persistence for python variables. +# +# Provides the %store magic. + +## If True, any %store-d variables will be automatically restored when IPython +# starts.
+#c.StoreMagics.autorestore = False diff --git a/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/00-tango.py b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/00-tango.py new file mode 100644 index 0000000000000000000000000000000000000000..38fcb84c3417c6b19d89527be6f8122bd0249765 --- /dev/null +++ b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/00-tango.py @@ -0,0 +1 @@ +from tango import * diff --git a/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py new file mode 100644 index 0000000000000000000000000000000000000000..350ecb1e87f4829ddd60698831bbf75d941782a9 --- /dev/null +++ b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py @@ -0,0 +1,21 @@ +# Create shortcuts for our devices +apsct = DeviceProxy("STAT/APSCT/1") +ccd = DeviceProxy("STAT/CCD/1") +apspu = DeviceProxy("STAT/APSPU/1") +recv = DeviceProxy("STAT/RECV/1") +sdp = DeviceProxy("STAT/SDP/1") +bst = DeviceProxy("STAT/BST/1") +sst = DeviceProxy("STAT/SST/1") +xst = DeviceProxy("STAT/XST/1") +unb2 = DeviceProxy("STAT/UNB2/1") +boot = DeviceProxy("STAT/Boot/1") +tilebeam = DeviceProxy("STAT/TileBeam/1") +psoc = DeviceProxy("STAT/PSOC/1") +beamlet = DeviceProxy("STAT/Beamlet/1") +digitalbeam = DeviceProxy("STAT/DigitalBeam/1") +antennafield = DeviceProxy("STAT/AntennaField/1") +docker = DeviceProxy("STAT/Docker/1") +temperaturemanager = DeviceProxy("STAT/TemperatureManager/1") + +# Put them in a list in case one wants to iterate +devices = [apsct, ccd, apspu, recv, sdp, bst, sst, xst, unb2, boot, tilebeam, psoc, beamlet, digitalbeam, antennafield, temperaturemanager, docker] diff --git a/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/02-stationcontrol.py b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/02-stationcontrol.py new file mode 100644 index 0000000000000000000000000000000000000000..d21ed1cf013d73b700cbc72e3d89ef9541efcacc --- /dev/null +++ b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/02-stationcontrol.py @@ -0,0 +1 @@ +import tangostationcontrol diff --git a/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/README.md b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/README.md new file mode 100644 index 0000000000000000000000000000000000000000..61d470004218ae459ce7bfdc974f7c86e0790486 --- /dev/null +++ b/docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/README.md @@ -0,0 +1,11 @@ +This is the IPython startup directory + +.py and .ipy files in this directory will be run *prior* to any code or files specified +via the exec_lines or exec_files configurables whenever you load this profile.
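+ +For example, a minimal startup file (a hypothetical `10-extra-imports.py`; the actual files in this directory import tango and create DeviceProxy shortcuts for the station devices) could contain:: + +    # 10-extra-imports.py: make numpy available in every session of this profile +    import numpy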
+ +Files will be run in lexicographical order, so you can control the execution order of files +with a prefix, e.g.:: + + 00-first.py + 50-middle.py + 99-last.ipy diff --git a/docker-compose/jupyterlab/jupyter-kernels/stationcontrol/kernel.json b/docker-compose/jupyterlab/jupyter-kernels/stationcontrol/kernel.json new file mode 100644 index 0000000000000000000000000000000000000000..ff6d4a1a01d0f7bd6eda3a40886eae74b451a5a4 --- /dev/null +++ b/docker-compose/jupyterlab/jupyter-kernels/stationcontrol/kernel.json @@ -0,0 +1,13 @@ + { + "argv": [ + "python", + "-m", + "ipykernel", + "-f", + "{connection_file}", + "--profile-dir", + "/opt/ipython-profiles/stationcontrol-jupyter/" + ], + "language": "python", + "display_name": "StationControl" +} diff --git a/docker-compose/jupyterlab/jupyter-notebook b/docker-compose/jupyterlab/jupyter-notebook new file mode 100755 index 0000000000000000000000000000000000000000..59613a137cc1bb5c86b4cd7c82f3a2cb1f9abde3 --- /dev/null +++ b/docker-compose/jupyterlab/jupyter-notebook @@ -0,0 +1,28 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# An adjustment of the `jupyter-notebook' executable patched to: +# - log to the ELK stack +# +# We go straight for the notebook executable here, as the "jupyter" command +# execvp's into the requested notebook subcommand, erasing all configuration +# we set here. +import re +import sys + +from notebook.notebookapp import main + +from logstash_async.handler import AsynchronousLogstashHandler, LogstashFormatter +import logging + +if __name__ == '__main__': + # log to the tcp_input of logstash in our ELK stack + handler = AsynchronousLogstashHandler("elk", 5959, database_path='/tmp/pending_log_messages.db') + + # add to logger of Jupyter traitlets Application. As that logger is configured not to propagate + # messages upward, we need to configure it directly. + logger = logging.getLogger("NotebookApp") + logger.addHandler(handler) + logger.setLevel(logging.DEBUG) + + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])  # strip the setuptools wrapper suffix from argv[0], as a generated console script would + sys.exit(main()) diff --git a/docker-compose/jupyterlab/requirements.txt b/docker-compose/jupyterlab/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..109f0280211f7c89959fd79da7421db17f2b91af --- /dev/null +++ b/docker-compose/jupyterlab/requirements.txt @@ -0,0 +1,25 @@ +ipython >=7.27.0,!=7.28.0 # BSD +jupyter +jupyterlab +ipykernel +jupyter_bokeh +matplotlib +jupyterplot +nbconvert +notebook-as-pdf +python-logstash-async +PyMySQL[rsa] +psycopg2-binary >= 2.9.2 #LGPL +sqlalchemy +pyvisa +pyvisa-py +opcua +lofarantpos >= 0.5.0 # Apache 2 +python-geohash >= 0.8.5 # Apache 2 / MIT + +numpy +scipy + +pabeam@git+https://git.astron.nl/mevius/grate # Apache2 +lofar-station-client@git+https://git.astron.nl/lofar2.0/lofar-station-client # Apache2 +etrs-itrs@git+https://github.com/brentjens/etrs-itrs # Apache 2 diff --git a/tangostationcontrol/docs/source/devices/antennafield.rst b/tangostationcontrol/docs/source/devices/antennafield.rst index faede195aec38828f2c1d1d6b71d2bdb928ef6eb..abc7cc8fe8889056e5a94dff32c3b84b6ed8fad3 100644 --- a/tangostationcontrol/docs/source/devices/antennafield.rst +++ b/tangostationcontrol/docs/source/devices/antennafield.rst @@ -99,7 +99,7 @@ These facts allow us to use the following information to calculate the absolute :type: ``float[96]`` -:recv.HBAT_PQR_to_ETRS_rotation_matrix: (property) The 3D rotation matrix to convert PQR coordinates into relative ETRS coordinates.
+:recv.PQR_to_ETRS_rotation_matrix: (property) The 3D rotation matrix to convert PQR coordinates into relative ETRS coordinates. :type: ``float[3][3]`` diff --git a/tangostationcontrol/tangostationcontrol/clients/tcp_replicator.py b/tangostationcontrol/tangostationcontrol/clients/tcp_replicator.py index fd44bcc7247313a606fe20e8e105da4335780338..5bec83cebb8a91a0cf145d392cde815996e673e3 100644 --- a/tangostationcontrol/tangostationcontrol/clients/tcp_replicator.py +++ b/tangostationcontrol/tangostationcontrol/clients/tcp_replicator.py @@ -1,4 +1,4 @@ - +import atexit from threading import Condition from threading import Semaphore from threading import Thread @@ -54,7 +54,7 @@ class TCPReplicator(Thread, StatisticsClientThread): } def __init__(self, options: dict = None, queuesize=0): - super().__init__() + super().__init__(daemon=True) self.queuesize = queuesize @@ -162,7 +162,8 @@ class TCPReplicator(Thread, StatisticsClientThread): self._loop = asyncio.new_event_loop() # Create the input queue - self.queue = asyncio.Queue(maxsize=self.queuesize, loop=self._loop) + asyncio.set_event_loop(self._loop) + self.queue = asyncio.Queue(maxsize=self.queuesize) # When wanting to debug event loop behavior, uncomment this # self._loop.set_debug(True) @@ -178,6 +179,10 @@ class TCPReplicator(Thread, StatisticsClientThread): # call self._loop.stop() server_task.add_done_callback(self._server_start_callback) + # Register _clean_shutdown to be executed at termination to make + # sure all TCP connections are cleaned up. + atexit.register(self._clean_shutdown) + # Keep running event loop until self._loop.stop() is called. # Calling this will lose control flow to the event loop # indefinitely, upon self._loop.stop() control flow is returned @@ -325,6 +330,10 @@ class TCPReplicator(Thread, StatisticsClientThread): def _clean_shutdown(self): """Disconnect clients, stop the event loop and wait for it to close""" + # Unregister _clean_shutdown to prevent double execution and make + # sure the thread gets cleaned up on stop/join + atexit.unregister(self._clean_shutdown) + # The event loop is not running anymore, we can't send tasks to shut # it down further. if not self._loop.is_running(): diff --git a/tangostationcontrol/tangostationcontrol/devices/README.md b/tangostationcontrol/tangostationcontrol/devices/README.md index 4b7923faf4804b6fc0e33ab78d4765f16860e253..64390631beb1222e68c536757766e50fba64bcbc 100644 --- a/tangostationcontrol/tangostationcontrol/devices/README.md +++ b/tangostationcontrol/tangostationcontrol/devices/README.md @@ -7,7 +7,7 @@ This directory contains the sources for our custom Tango devices. If a new device is added, it will (likely) need to be referenced in several places. Adjust or add the following files (referenced from the repository root), following the pattern shown by the devices already there: - Adjust `CDB/LOFAR_ConfigDb.json` to create the device in the Tango device database, -- Adjust `docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py` to make an alias for it available in Jupyter, +- Adjust `docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py` and `docker-compose/jupyterlab/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py` to make an alias for it available in Jupyter and Jupyter-Lab, - Adjust `tangostationcontrol/tangostationcontrol/devices/boot.py` to add the device to the station initialisation sequence, - Add a YAML file to `docker-compose/` to start the device in a Docker container.
NOTE: it needs a unique 57xx port assigned (current _unused_ port value: 5722), a unique 58xx port for ZMQ events, and a unique 59xx port for ZMQ heartbeat - Adjust `tangostationcontrol/setup.cfg` to add an entry point for the device in the package installation, diff --git a/tangostationcontrol/tangostationcontrol/devices/antennafield.py b/tangostationcontrol/tangostationcontrol/devices/antennafield.py index b0a48fe25bf083bf7024fed5d9fcf0babedb49dd..7d0b6da6e94774981d88d6ab6adc21d5a799d866 100644 --- a/tangostationcontrol/tangostationcontrol/devices/antennafield.py +++ b/tangostationcontrol/tangostationcontrol/devices/antennafield.py @@ -170,7 +170,7 @@ class AntennaField(lofar_device): default_value = [0.0] * MAX_NUMBER_OF_HBAT ) - HBAT_PQR_to_ETRS_rotation_matrix = device_property( + PQR_to_ETRS_rotation_matrix = device_property( doc="Field-specific rotation matrix to convert PQR offsets to ETRS/ITRF offsets.", dtype='DevVarFloatArray', mandatory=False, @@ -336,7 +336,7 @@ class AntennaField(lofar_device): This takes the relative offsets between the elements in the tiles as described in HBAT_base_antenna_offsets. These offsets are in PQR space, which is the plane of the station. The tiles are rotated locally in this space according to the HBAT_PQR_rotation_angles_deg, - and finally translated into global ETRS coordinates using the HBAT_PQR_to_ETRS_rotation_matrix. + and finally translated into global ETRS coordinates using the PQR_to_ETRS_rotation_matrix. The relative ITRF offsets are the same as relative ETRS offsets. @@ -346,7 +346,7 @@ class AntennaField(lofar_device): # the relative offsets between the elements is fixed in HBAT_base_antenna_offsets base_antenna_offsets = numpy.array(self.HBAT_base_antenna_offsets).reshape(NUMBER_OF_ELEMENTS_PER_TILE,3) - PQR_to_ETRS_rotation_matrix = numpy.array(self.HBAT_PQR_to_ETRS_rotation_matrix).reshape(3,3) + PQR_to_ETRS_rotation_matrix = numpy.array(self.PQR_to_ETRS_rotation_matrix).reshape(3,3) # each tile has its own rotation angle, resulting in different offsets per tile all_offsets = numpy.array(