Skip to content
Snippets Groups Projects
Commit f3c0626c authored by Taya Snijder's avatar Taya Snijder
Browse files

Merge branch 'master' of https://git.astron.nl/lofar2.0/tango into...

Merge branch 'master' of https://git.astron.nl/lofar2.0/tango into L2SS-446-Extend-SNMP-client-to-support-MIB-files
parents d14b9344 49629243
No related branches found
No related tags found
1 merge request!288Resolve L2SS-446 "Extend snmp client to support mib files"
Showing
with 2760 additions and 315 deletions
......@@ -27,3 +27,4 @@ tangostationcontrol/docs/build
**/pending_log_messages.db
**/.eggs
docker-compose/alerta-web/alerta-secrets.json
......@@ -103,6 +103,7 @@ docker_build_image_all:
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-boot latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-docker latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-observation_control latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-pdu latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-recv latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-sdp latest
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-sst latest
......@@ -253,6 +254,17 @@ docker_build_image_device_apspu:
script:
# Do not remove 'bash' or statement will be ignored by primitive docker shell
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-apspu $tag
docker_build_image_device_pdu:
extends: .base_docker_images_except
only:
refs:
- merge_requests
changes:
- docker-compose/device-pdu.yml
- docker-compose/lofar-device-base/*
script:
# Do not remove 'bash' or statement will be ignored by primitive docker shell
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh device-pdu $tag
docker_build_image_device_tilebeam:
extends: .base_docker_images_except
only:
......@@ -447,6 +459,10 @@ integration_test_docker:
- name: docker:dind
variables:
DOCKER_TLS_CERTDIR: "/certs"
artifacts:
when: always
paths:
- log/
before_script:
- |
if [[ "$CI_COMMIT_BRANCH" == "$CI_DEFAULT_BRANCH" && -z "$CI_COMMIT_TAG" ]]; then
......@@ -458,6 +474,7 @@ integration_test_docker:
fi
- apk add --update make bash docker-compose
- apk add --update bind-tools
- apk add --update postgresql14-client gzip
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
- touch /root/.Xauthority
......@@ -465,20 +482,24 @@ integration_test_docker:
- export BASH_SOURCE=$(pwd)/bootstrap/etc/lofar20rc.sh
# Hack HOSTNAME env variable into host.docker.internal, set in docker-compose
- export HOSTNAME=host.docker.internal
# - export HOSTNAME=$(hostname -i)
# - export HOSTNAME=$(cat /run/systemd/netif/leases/2 | grep ^ADDRESS= | awk -F'=' '{print $2}')
# source the lofarrc file and mask its non zero exit code
- . bootstrap/etc/lofar20rc.sh || true
# TANGO_HOST must be unset our databaseds will be unreachable
- unset TANGO_HOST
## Allow docker image script to execute
# - chmod u+x $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh
# Do not remove 'bash' or statement will be ignored by primitive docker shell
- bash $CI_PROJECT_DIR/sbin/tag_and_push_docker_image.sh pull $tag
## Allow integration test to execute
# - chmod u+x $CI_PROJECT_DIR/sbin/run_integration_test.sh
# Do not remove 'bash' or statement will be ignored by primitive docker shell
- bash $CI_PROJECT_DIR/sbin/run_integration_test.sh
- bash -e $CI_PROJECT_DIR/sbin/run_integration_test.sh
after_script:
# Collect output of all containers
- |
mkdir -p log
for container in $(docker ps -a --format "{{.Names}}")
do
echo "Saving log for container $container"
docker logs "${container}" >& "log/${container}.log"
done
PGPASSWORD=password pg_dump --host=docker --username=postgres hdb 2>log/archiver-timescale-dump.log | gzip > log/archiver-timescale-dump.txt.gz
wheel_packaging:
stage: packaging
artifacts:
......
......@@ -14,6 +14,13 @@
}
}
},
"PDU": {
"STAT": {
"PDU": {
"STAT/PDU/1": {}
}
}
},
"TileBeam": {
"STAT": {
"TileBeam": {
......
{
"servers": {
"APSCT": {
"STAT": {
"APSCT": {
"STAT/APSCT/1": {
"properties": {
"OPC_Server_Name": [
"apsct-sim"
],
"OPC_Server_Port": [
"4843"
],
"OPC_Time_Out": [
"5.0"
]
}
}
}
}
},
"APSPU": {
"STAT": {
"APSPU": {
"STAT/APSPU/1": {
"properties": {
"OPC_Server_Name": [
"apspu-sim"
],
"OPC_Server_Port": [
"4843"
],
"OPC_Time_Out": [
"5.0"
]
}
}
}
}
},
"RECV": {
"STAT": {
"RECV": {
"STAT/RECV/1": {
"properties": {
"OPC_Server_Name": [
"recv-sim"
],
"OPC_Server_Port": [
"4840"
],
"OPC_Time_Out": [
"5.0"
]
}
}
}
}
},
"SDP": {
"STAT": {
"SDP": {
"STAT/SDP/1": {
"properties": {
"OPC_Server_Name": [
"sdptr-sim"
],
"OPC_Server_Port": [
"4840"
],
"OPC_Time_Out": [
"5.0"
],
"FPGA_sdp_info_station_id_RW_default": [
"901",
"901",
"901",
"901",
"901",
"901",
"901",
"901",
"901",
"901",
"901",
"901",
"901",
"901",
"901",
"901"
],
"polled_attr": [
"fpga_temp_r",
"1000",
"state",
"1000",
"status",
"1000",
"fpga_mask_rw",
"1000",
"fpga_scrap_r",
"1000",
"fpga_scrap_rw",
"1000",
"fpga_status_r",
"1000",
"fpga_version_r",
"1000",
"fpga_weights_r",
"1000",
"fpga_weights_rw",
"1000",
"tr_busy_r",
"1000",
"tr_reload_rw",
"1000",
"tr_tod_r",
"1000",
"tr_uptime_r",
"1000"
]
}
}
}
}
},
"SST": {
"STAT": {
"SST": {
"STAT/SST/1": {
"properties": {
"Statistics_Client_UDP_Port": [
"5001"
],
"Statistics_Client_TCP_Port": [
"5101"
],
"OPC_Server_Name": [
"sdptr-sim"
],
"OPC_Server_Port": [
"4840"
],
"OPC_Time_Out": [
"5.0"
],
"FPGA_sst_offload_hdr_eth_destination_mac_RW_default": [
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd"
],
"FPGA_sst_offload_hdr_ip_destination_address_RW_default": [
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250"
],
"FPGA_sst_offload_hdr_udp_destination_port_RW_default": [
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001",
"5001"
]
}
}
}
}
},
"XST": {
"STAT": {
"XST": {
"STAT/XST/1": {
"properties": {
"Statistics_Client_UDP_Port": [
"5002"
],
"Statistics_Client_TCP_Port": [
"5102"
],
"OPC_Server_Name": [
"sdptr-sim"
],
"OPC_Server_Port": [
"4840"
],
"OPC_Time_Out": [
"5.0"
],
"FPGA_xst_offload_hdr_eth_destination_mac_RW_default": [
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd",
"6c:2b:59:97:be:dd"
],
"FPGA_xst_offload_hdr_ip_destination_address_RW_default": [
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250",
"10.99.250.250"
],
"FPGA_xst_offload_hdr_udp_destination_port_RW_default": [
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002",
"5002"
]
}
}
}
}
},
"UNB2": {
"STAT": {
"UNB2": {
"STAT/UNB2/1": {
"properties": {
"OPC_Server_Name": [
"unb2-sim"
],
"OPC_Server_Port": [
"4841"
],
"OPC_Time_Out": [
"5.0"
]
}
}
}
}
}
}
}
This diff is collapsed.
......@@ -357,6 +357,24 @@
"902",
"902",
"902"
],
"TR_fpga_mask_RW_default": [
"True",
"True",
"True",
"True",
"False",
"False",
"False",
"False",
"False",
"False",
"False",
"False",
"False",
"False",
"False",
"False"
]
}
}
......
......@@ -5,6 +5,9 @@
"RECV": {
"STAT/RECV/1": {
"properties": {
"Antenna_Field_Reference_ETRS": [
"3826896.631", "460979.131", "5064657.943"
],
"HBAT_reference_ETRS": [
"3826886.142", "460980.772", "5064665.668",
"3826887.237", "460985.643", "5064664.406",
......
......@@ -35,7 +35,7 @@ else
mkdir -p /tmp/tangostationcontrol
python3 setup.py build --build-base /tmp/tangostationcontrol egg_info --egg-base /tmp/tangostationcontrol bdist_wheel --dist-dir /tmp/tangostationcontrol || exit 1
# shellcheck disable=SC2012
sudo pip install "$(ls -Art /tmp/tangostationcontrol/*.whl | tail -n 1)"
pip install "$(ls -Art /tmp/tangostationcontrol/*.whl | tail -n 1)"
fi
# Return to the stored the directory, this preserves the working_dir argument in
......
#!/usr/bin/env bash
# shellcheck disable=SC2086,SC2064,SC2206,SC2124
# Use this script to test if a given TCP host/port are available
#
# Source: https://github.com/vishnubob/wait-for-it
# License: MIT
# Basename of this script, used as a prefix in all diagnostic messages.
cmdname=$(basename $0)
# Print all arguments to stderr, unless --quiet (-q) was given.
echoerr() { if [[ $QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }
# Print usage information to stderr and exit with a non-zero status.
usage()
{
cat << USAGE >&2
Usage:
$cmdname host:port [-s] [-t timeout] [-- command args]
-h HOST | --host=HOST Host or IP under test
-p PORT | --port=PORT TCP port under test
Alternatively, you specify the host and port as host:port
-s | --strict Only execute subcommand if the test succeeds
-q | --quiet Don't output any status messages
-t TIMEOUT | --timeout=TIMEOUT
Timeout in seconds, zero for no timeout
-- COMMAND ARGS Execute command with args after the test finishes
USAGE
exit 1
}
# Poll $HOST:$PORT once a second until a TCP connection succeeds.
# Uses bash's built-in /dev/tcp pseudo-device, so no netcat is required.
# NOTE(review): this loop itself never times out; the caller
# (wait_for_wrapper) enforces $TIMEOUT via timeout(1).
wait_for()
{
if [[ $TIMEOUT -gt 0 ]]; then
echoerr "$cmdname: waiting $TIMEOUT seconds for $HOST:$PORT"
else
echoerr "$cmdname: waiting for $HOST:$PORT without a timeout"
fi
start_ts=$(date +%s)
while :
do
# Attempt a TCP connection; discard all output, keep only the exit status.
(echo > /dev/tcp/$HOST/$PORT) >/dev/null 2>&1
result=$?
if [[ $result -eq 0 ]]; then
end_ts=$(date +%s)
echoerr "$cmdname: $HOST:$PORT is available after $((end_ts - start_ts)) seconds"
break
fi
sleep 1
done
return $result
}
# Enforce $TIMEOUT by re-invoking this script as a --child under timeout(1),
# then waiting on it. Returns the child's exit status.
wait_for_wrapper()
{
# In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
if [[ $QUIET -eq 1 ]]; then
timeout $TIMEOUT $0 --quiet --child --host=$HOST --port=$PORT --timeout=$TIMEOUT &
else
timeout $TIMEOUT $0 --child --host=$HOST --port=$PORT --timeout=$TIMEOUT &
fi
PID=$!
# Forward Ctrl-C to the child's process group so the wait below returns.
trap "kill -INT -$PID" INT
wait $PID
RESULT=$?
if [[ $RESULT -ne 0 ]]; then
echoerr "$cmdname: timeout occurred after waiting $TIMEOUT seconds for $HOST:$PORT"
fi
return $RESULT
}
# process arguments
while [[ $# -gt 0 ]]
do
case "$1" in
*:* )
# host:port shorthand — split on ':' into HOST and PORT.
hostport=(${1//:/ })
HOST=${hostport[0]}
PORT=${hostport[1]}
shift 1
;;
--child)
# Internal flag: this invocation is the timeout-wrapped child process.
CHILD=1
shift 1
;;
-q | --quiet)
QUIET=1
shift 1
;;
-s | --strict)
STRICT=1
shift 1
;;
-h)
HOST="$2"
if [[ $HOST == "" ]]; then break; fi
shift 2
;;
--host=*)
HOST="${1#*=}"
shift 1
;;
-p)
PORT="$2"
if [[ $PORT == "" ]]; then break; fi
shift 2
;;
--port=*)
PORT="${1#*=}"
shift 1
;;
-t)
TIMEOUT="$2"
if [[ $TIMEOUT == "" ]]; then break; fi
shift 2
;;
--timeout=*)
TIMEOUT="${1#*=}"
shift 1
;;
--)
# Everything after '--' is the command to exec once the port is up.
shift
CLI="$@"
break
;;
--help)
usage
;;
*)
echoerr "Unknown argument: $1"
usage
;;
esac
done
if [[ "$HOST" == "" || "$PORT" == "" ]]; then
echoerr "Error: you need to provide a host and port to test."
usage
fi
# Defaults: 15 s timeout, non-strict, top-level (non-child), verbose.
TIMEOUT=${TIMEOUT:-15}
STRICT=${STRICT:-0}
CHILD=${CHILD:-0}
QUIET=${QUIET:-0}
if [[ $CHILD -gt 0 ]]; then
# Child mode: just poll; the parent's timeout(1) bounds our lifetime.
wait_for
RESULT=$?
exit $RESULT
else
if [[ $TIMEOUT -gt 0 ]]; then
wait_for_wrapper
RESULT=$?
else
# Zero timeout: poll directly, potentially forever.
wait_for
RESULT=$?
fi
fi
if [[ $CLI != "" ]]; then
# In strict mode, refuse to run the subcommand if the port never came up.
if [[ $RESULT -ne 0 && $STRICT -eq 1 ]]; then
echoerr "$cmdname: strict mode, refusing to execute subprocess"
exit $RESULT
fi
exec $CLI
else
exit $RESULT
fi
......@@ -114,7 +114,9 @@ ifeq ($(NETWORK_MODE),host)
MYSQL_HOST := $(shell hostname):3306
else
ifeq ($(TANGO_HOST),)
TANGO_HOST := $(CONTAINER_NAME_PREFIX)databaseds:10000
# Use FQDN for TANGO_HOST to avoid confusion in the archiver, which also
# adds the domain.
TANGO_HOST := $(CONTAINER_NAME_PREFIX)databaseds.$(NETWORK_MODE):10000
else
TANGO_HOST := $(TANGO_HOST)
endif
......@@ -181,6 +183,7 @@ bootstrap: pull build # first start, initialise from scratch
$(MAKE) start dsconfig # boot up containers to load configurations
sleep 5 # wait for dsconfig container to come up
../sbin/update_ConfigDb.sh ../CDB/LOFAR_ConfigDb.json # load default configuration
../sbin/update_ConfigDb.sh ../CDB/tango-archiver-data/archiver-devices.json # load default archive configuration
../sbin/update_ConfigDb.sh ../CDB/stations/simulators_ConfigDb.json # by default, use simulators
start: up ## start a service (usage: make start <servicename>)
......
FROM alerta/alerta-web

# Install the extra Alerta plugins inside the image's virtualenv, the same
# environment the base image runs alerta-web from. The previous plain
# "pip install" of the slack plugin (outside the venv) was removed: it
# duplicated the venv install below and installed into an interpreter the
# application does not use.
RUN bash -c 'source /venv/bin/activate; pip install git+https://github.com/alerta/alerta-contrib.git#subdirectory=plugins/slack'
RUN bash -c 'source /venv/bin/activate; pip install git+https://github.com/alerta/alerta-contrib.git#subdirectory=plugins/jira'

# Locally maintained plugins, copied in and installed from source.
COPY grafana-plugin /tmp/grafana-plugin
RUN bash -c 'source /venv/bin/activate; pip install /tmp/grafana-plugin'

COPY lofar-plugin /tmp/lofar-plugin
RUN bash -c 'source /venv/bin/activate; pip install /tmp/lofar-plugin'

# Application configuration: server (alertad), CLI (alerta), and web UI.
COPY alertad.conf /app/alertad.conf
COPY alerta.conf /app/alerta.conf
COPY config.json /web/config.json
You need:
* Your own Slack App:
* Give it channel write rights
* Get the OAuth token
* Install the app in your Slack workspace
* Invite the app into your channel
* Feed the OAuth token to the config
* Add it to alerta-secrets.json
* Grafana:
* By default, Grafana resends alarms every 4h; when testing, configure the notification settings to resend deleted alarms faster
* Add alerts by hand
* add "Summary" as alert text
* add label "severity": "major"/"minor"/etc (see https://docs.alerta.io/webui/configuration.html#severity-colors)
* Create alerta-secrets.json in this directory:
Example alerta-secrets.json:
{
"SLACK_TOKEN": "xoxb-...",
"SLACK_CHANNEL": "#lofar20-alerta"
}
{
"SLACK_TOKEN": "xoxb-get-this-from-your-slack-app",
"SLACK_CHANNEL": "#your-channel"
}
# Alerta CLI client configuration.
[DEFAULT]
# NOTE(review): TLS verification is disabled and an API key is committed to
# VCS — confirm this configuration is only used for local development.
sslverify = no
output = presto
endpoint = http://localhost:8080/api
timezone = Europe/London
key = NpzX0z_fX8TVKZtXpzop-pi2MhaGnLawKVqbJBoA
debug = yes
# Alerta server (alertad) configuration; evaluated as Python by the server.
DEBUG = True
# NOTE(review): hard-coded session secret committed to VCS — consider loading
# it from the mounted secrets file like the Slack credentials below.
SECRET = "T=&7xvF2S&x7w_JAcq$h1x5ocfA)8H2i"
# Allow non-admin views
CUSTOMER_VIEWS = True
# Never timeout alerts
ALERT_TIMEOUT = 0
# Auto unack after a day
ACK_TIMEOUT = 24 * 3600
# Auto unshelve after 2 hours
SHELVE_TIMEOUT = 2 * 3600
# Use custom date formats
DATE_FORMAT_MEDIUM_DATE = "dd DD/MM HH:mm"
DATE_FORMAT_LONG_DATE = "yyyy-MM-DD HH:mm:ss.sss"
# Default overview settings
COLUMNS = ['severity', 'status', 'createTime', 'lastReceiveTime', 'resource', 'grafanaDashboardHtml', 'grafanaPanelHtml', 'event', 'text']
DEFAULT_FILTER = {'status': ['open']}
SORT_LIST_BY = "createTime"
AUTO_REFRESH_INTERVAL = 5000 # ms
# ------------------------------------
# Plugin configuration
# ------------------------------------
# 'grafana' and 'lofar' are the locally maintained plugins installed by the
# Dockerfile; 'slack' comes from alerta-contrib.
PLUGINS = ['reject', 'blackout', 'acked_by', 'enhance', 'grafana', 'lofar', 'slack']
# Slack plugin settings, see https://github.com/alerta/alerta-contrib/tree/master/plugins/slack
import os, json
# Slack credentials are provided at runtime via a mounted secrets file.
with open("/run/secrets/alerta-secrets") as secrets_file:
secrets = json.load(secrets_file)
SLACK_WEBHOOK_URL = 'https://slack.com/api/chat.postMessage'
SLACK_TOKEN = secrets["SLACK_TOKEN"]
SLACK_CHANNEL = secrets["SLACK_CHANNEL"]
SLACK_ATTACHMENTS = True
BASE_URL = os.environ.get("BASE_URL", "")
# for the Slack message configuration syntax, see https://api.slack.com/methods/chat.postMessage
# and https://app.slack.com/block-kit-builder
SLACK_PAYLOAD = {
"channel": "{{ channel }}",
"emoji": ":fire:",
"text": "*{{ alert.severity|capitalize }}* :: *{{ alert.resource }}* :: _{{ alert.event }}_\n\n```{{ alert.text }}```",
"attachments": [{
"color": "{{ color }}",
"fields": [
{"title": "Device", "value": "{{ alert.attributes.lofarDevice }}", "short": True },
{"title": "Attribute", "value": "{{ alert.attributes.lofarAttribute }}", "short": True },
{"title": "Resource", "value": "{{ alert.resource }}", "short": True },
{"title": "Status", "value": "{{ status|capitalize }}", "short": True },
{"title": "Dashboards", "value": "<{{ config.BASE_URL }}/#/alert/{{ alert.id }}|Alerta>\nGrafana <{{ alert.attributes.grafanaDashboardUrl }}|Dashboard> <{{ alert.attributes.grafanaPanelUrl }}|Panel>", "short": True },
{"title": "Configure", "value": "Grafana <{{ alert.attributes.grafanaAlertUrl }}|View> <{{ alert.attributes.grafanaSilenceUrl }}|Silence>", "short": True },
],
}]
}
{"endpoint": "/api"}
import os
import json
import logging
from alerta.plugins import PluginBase
LOG = logging.getLogger()
class EnhanceGrafana(PluginBase):
    """
    Plugin for parsing alerts coming from Grafana
    """

    def pre_receive(self, alert, **kwargs):
        """Enrich a Grafana webhook alert before it is stored.

        Reads optional URL fields from ``alert.raw_data`` and, for each one
        present, stores both the raw URL and an HTML anchor version in
        ``alert.attributes``. Returns the (mutated) alert.
        """
        # Parse Grafana-specific fields
        alert.attributes['grafanaStatus'] = alert.raw_data.get('status', '')

        def htmlify(link: str, desc: str) -> str:
            # Render a URL as an HTML link that opens in a new tab.
            return f'<a href="{link}" target="_blank">{desc}</a>'

        # User-specified "Panel ID" annotation
        panelURL = alert.raw_data.get('panelURL', '')
        if panelURL:
            alert.attributes['grafanaPanelUrl'] = panelURL
            alert.attributes['grafanaPanelHtml'] = htmlify(panelURL, "Grafana Panel")

        # User-specified "Dashboard UID" annotation
        dashboardURL = alert.raw_data.get('dashboardURL', '')
        if dashboardURL:
            alert.attributes['grafanaDashboardUrl'] = dashboardURL
            alert.attributes['grafanaDashboardHtml'] = htmlify(dashboardURL, "Grafana Dashboard")

        alertURL = alert.raw_data.get('generatorURL', '')
        if alertURL:
            # expose alert view URL, as user may not have edit rights
            # Convert from
            #   http://host:3000/alerting/kujybCynk/edit
            # to
            #   http://host:3000/alerting/grafana/kujybCynk/view
            alertURL = alertURL.replace("/alerting/", "/alerting/grafana/").replace("/edit", "/view")
            alert.attributes['grafanaAlertUrl'] = alertURL
            alert.attributes['grafanaAlertHtml'] = htmlify(alertURL, "Grafana Alert")

        silenceURL = alert.raw_data.get('silenceURL', '')
        if silenceURL:
            alert.attributes['grafanaSilenceUrl'] = silenceURL
            alert.attributes['grafanaSilenceHtml'] = htmlify(silenceURL, "Grafana Silence Alert")

        return alert

    def post_receive(self, alert, **kwargs):
        # Nothing to do after storage; enrichment happens in pre_receive.
        return

    def status_change(self, alert, status, text, **kwargs):
        # Status changes require no extra handling.
        return

    def take_action(self, alert, action, text, **kwargs):
        # Custom actions are not supported by this plugin.
        raise NotImplementedError
from setuptools import setup, find_packages

# Package version for the alerta-grafana plugin.
version = '1.0.0'

setup(
    name="alerta-grafana",
    version=version,
    description='Alerta plugin for enhancing Grafana alerts',
    url='https://git.astron.nl/lofar2.0/tango',
    license='Apache License 2.0',
    author='Jan David Mol',
    author_email='mol@astron.nl',
    packages=find_packages(),
    py_modules=['alerta_grafana'],
    include_package_data=True,
    zip_safe=True,
    # Register the plugin under the 'alerta.plugins' entry-point group so the
    # Alerta server can discover it (enabled via the PLUGINS setting).
    entry_points={
        'alerta.plugins': [
            'grafana = alerta_grafana:EnhanceGrafana'
        ]
    },
    python_requires='>=3.5'
)
import os
import json
import logging
from alerta.plugins import PluginBase
LOG = logging.getLogger()
class EnhanceLOFAR(PluginBase):
    """
    Plugin for enhancing alerts with LOFAR-specific information
    """

    def pre_receive(self, alert, **kwargs):
        """Copy LOFAR-specific ``key=value`` tags onto the alert.

        Recognised keys: ``device`` and ``name`` are stored as the
        ``lofarDevice`` / ``lofarAttribute`` attributes; ``station``
        overrides the alert's resource. Tags without an ``=`` separator
        are ignored. Returns the (mutated) alert.
        """
        for tag in alert.tags:
            key, sep, value = tag.partition("=")
            if not sep:
                # Not a key=value tag; skip it.
                continue

            if key == "device":
                alert.attributes['lofarDevice'] = value
            elif key == "name":
                alert.attributes['lofarAttribute'] = value
            elif key == "station":
                alert.resource = value

        return alert

    def post_receive(self, alert, **kwargs):
        # No post-storage processing needed.
        return

    def status_change(self, alert, status, text, **kwargs):
        # Status changes require no extra handling.
        return

    def take_action(self, alert, action, text, **kwargs):
        # Custom actions are not supported by this plugin.
        raise NotImplementedError
from setuptools import setup, find_packages

# Package version for the alerta-lofar plugin.
version = '1.0.0'

setup(
    name="alerta-lofar",
    version=version,
    description='Alerta plugin for enhancing LOFAR alerts',
    url='https://git.astron.nl/lofar2.0/tango',
    license='Apache License 2.0',
    author='Jan David Mol',
    author_email='mol@astron.nl',
    packages=find_packages(),
    py_modules=['alerta_lofar'],
    include_package_data=True,
    zip_safe=True,
    # Register the plugin under the 'alerta.plugins' entry-point group so the
    # Alerta server can discover it (enabled via the PLUGINS setting).
    entry_points={
        'alerta.plugins': [
            'lofar = alerta_lofar:EnhanceLOFAR'
        ]
    },
    python_requires='>=3.5'
)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment