Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • RD/DP3
  • mancini/DP3
2 results
Show changes
Commits on Source (308)
......@@ -25,6 +25,7 @@ stages:
- linting
- test
- publish
- scan
- pages
include: .gitlab-ci.wheels.yml
......@@ -39,6 +40,7 @@ versioning:
- git fetch --unshallow
- echo BASE_IMAGE_2004=${CI_REGISTRY_IMAGE}/base_ubuntu20:$(git log -n 1 --pretty=format:%H -- docker/ubuntu_20_04_base) > versions.env
- echo BASE_IMAGE_2204=${CI_REGISTRY_IMAGE}/base_ubuntu22:$(git log -n 1 --pretty=format:%H -- docker/ubuntu_22_04_base) >> versions.env
- echo BASE_IMAGE_2404=${CI_REGISTRY_IMAGE}/base_ubuntu24:$(git log -n 1 --pretty=format:%H -- docker/ubuntu_24_04_base) >> versions.env
- cat versions.env
artifacts:
reports:
......@@ -74,81 +76,61 @@ versioning:
# - $DOCKER_FILE
# Create and push the base image to the gitlab registry, if it does not exist.
prepare-base-2004:
extends: .prepare
variables:
DOCKER_IMAGE: $BASE_IMAGE_2004
DOCKER_FILE: ./docker/ubuntu_20_04_base
prepare-base-2204:
extends: .prepare
variables:
DOCKER_IMAGE: $BASE_IMAGE_2204
DOCKER_FILE: ./docker/ubuntu_22_04_base
# Template for jobs that depend on the optional prepare-base job.
.needs-base-2204:
needs:
- job: versioning
- job: prepare-base-2204
optional: true
image: $BASE_IMAGE_2204
prepare-2004:
prepare-base-2404:
extends: .prepare
variables:
DOCKER_IMAGE: $BASE_IMAGE_2004
DOCKER_FILE: ./docker/ubuntu_20_04_base
DOCKER_IMAGE: $BASE_IMAGE_2404
DOCKER_FILE: ./docker/ubuntu_24_04_base
build-2004:
stage: build
# Template for basic build jobs.
.build-basic:
extends: .failable
needs:
- job: versioning
- job: prepare-2004
optional: true
image: $BASE_IMAGE_2004
stage: build
script:
# Build and run DP3
- mkdir build
- cd build
- cmake -G Ninja -DBUILD_TESTING=On -DBUILD_DP3_BENCHMARKS=ON ..
- ninja install
- mkdir build && cd build
- cmake ${CMAKE_FLAGS} -G Ninja ..
- ninja -j4 install
- DP3
build-no-idg-2204:
stage: build
extends: [".failable",".needs-base-2204"]
before_script:
- rm -r /usr/lib/cmake/*idg*
- rm -r /usr/lib/cmake/*IDGAPITargets*
- rm -r /usr/lib/*idg*
- rm -rf /usr/include/idg-*
script:
- mkdir build
- cd build
- cmake -G Ninja ..
- ninja
# Build (and run) DP3 on Ubuntu 20, ensuring backward compatibility.
# This build does not include IDG, since IDG no longer supports Ubuntu 20.
# This build also tests building DP3 with the TARGET_CPU option.
build-2004-no-idg-target-cpu:
extends: .build-basic
needs: ["versioning", "prepare-base-2004"]
image: $BASE_IMAGE_2004
variables:
CMAKE_FLAGS: -DTARGET_CPU=haswell
# Build debug has no needs entry because it can have either a base
# image from build-base or build-base-ska which is determined on runtime
build-debug-2204:
stage: build
extends: [".failable",".needs-base-2204"]
script:
- mkdir build && cd build
- cmake -G Ninja -DCMAKE_BUILD_TYPE=Debug -DBUILD_TESTING=On -DBUILD_DP3_BENCHMARKS=ON -DCMAKE_CXX_FLAGS="-coverage" -DCMAKE_EXE_LINKER_FLAGS="-coverage" ..
- ninja
- ninja install
artifacts:
paths:
- build
# Template for jobs that depend on prepare-base-2204.
.needs-base-2204:
needs: ["versioning", "prepare-base-2204"]
image: $BASE_IMAGE_2204
build-2204:
stage: build
extends: [".failable",".needs-base-2204"]
script:
- mkdir build && cd build
- cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=On -DBUILD_DP3_BENCHMARKS=ON ..
- ninja
- ninja install
artifacts:
paths:
- build
# Template for jobs that depend on prepare-base-2404.
.needs-base-2404:
needs: ["versioning", "prepare-base-2404"]
image: $BASE_IMAGE_2404
# Build (and run) DP3 on Ubuntu 24, ensuring compatibility with new systems.
# This build also tests building DP3 with the PORTABLE option.
build-2404-portable:
extends: [".needs-base-2404", ".build-basic"]
variables:
# Enable tests and thereby check if they also compile on new systems.
CMAKE_FLAGS: -DBUILD_TESTING=On -DPORTABLE=On
build-doc-2204:
stage: build
......@@ -156,7 +138,8 @@ build-doc-2204:
before_script:
# Install here since pytest and these items aren't compatible
- pip3 install autosemver jsonschema2rst sphinx sphinx-rtd-theme myst_parser
- patch -p0 /usr/local/lib/python3.10/dist-packages/jsonschema2rst/parser.py < docs/parser.py.patch
# Patch to make 'doc' settings not be parsed as code. Make sure to apply this patch also in .readthedocs.yml!
- patch -p0 /usr/local/lib/python3.10/dist-packages/jsonschema2rst/rst_utils.py < docs/rst_utils.py.patch
script:
- mkdir build && cd build
- cmake -G Ninja ../docs
......@@ -193,25 +176,15 @@ linting-2204:
script:
- ./scripts/run-format.sh
.unit-test-2204:
test-2204:
stage: test
extends: .failable
needs: ["versioning","build-debug-2204"]
image: $BASE_IMAGE_2204
extends: [".failable",".needs-base-2204"]
script:
- cd build
# There is an issue running cmake with existing build artifacts. This may
# cause the pytest collection to segfault. Therefore clear the old artifacts
# before re-running cmake.
- ninja clean
- echo Configuring with CMake flags \"$CMAKE_FLAGS\" and CXX flags \"$CXX_FLAGS\"
- cmake $CMAKE_FLAGS -DCMAKE_CXX_FLAGS="$CXX_FLAGS" .
- ninja
- ctest --output-on-failure -j$(nproc) $CTEST_FLAGS |& tee ctest.out
# Check if ctest found any tests. ctest >= 3.18 has a --no-tests=error
# option. Older versions require a manual check.
- if grep -q 'No tests were found' ctest.out; then exit 1; fi
- export PYTHONPATH=${PYTHONPATH}:/usr/local/lib/python3.10/dist-packages
- mkdir build && cd build
- cmake -G Ninja -DCMAKE_BUILD_TYPE=Debug -DBUILD_TESTING=On ..
- ninja -j4
- ctest --output-on-failure --no-tests=error -j$(nproc)
artifacts:
paths:
- build/unittest*.xml
......@@ -221,22 +194,31 @@ linting-2204:
- build/unittest*.xml
- build/pytest_*.xml
# Common parts for unit test jobs that report coverage.
.unit-test-coverage:
after_script:
- cd build
- mkdir coverage-unit
# Collect coverage in json, xml and html formats.
- gcovr -j$(nproc) -r ../ -e '.*/external/.*' -e '_deps/.*' -e '.*/test/.*' -e '.*/CompilerIdCXX/.*' --json run-unit.json --xml coverage.xml --html-details coverage-unit/coverage.html
# Read json coverage and output in text format.
- gcovr --add-tracefile run-unit.json
- tar cfz coverage-unit.tar.gz coverage-unit/
# Use a separate job for collecting code coverage, which uses a Release build,
# since collecting code coverage in a Debug build results in low performance.
# Especially -fprofile-update=atomic makes the tests slow.
# (A single Debug+coverage job takes longer than the total runtime of
# the current test-2204 and coverage-2204 jobs. It may even time out.)
# The code coverage figures include both unit and integration tests.
coverage-2204:
stage: test
extends: [".failable",".needs-base-2204"]
script:
- export PYTHONPATH=${PYTHONPATH}:/usr/local/lib/python3.10/dist-packages
- mkdir build && cd build
- cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DBUILD_TESTING=On -DCMAKE_CXX_FLAGS="-coverage -fprofile-update=atomic" -DCMAKE_EXE_LINKER_FLAGS="-coverage" ..
- ninja -j4
- ctest --output-on-failure --no-tests=error -j$(nproc)
# Collect coverage in text, xml and json formats.
- gcovr -j$(nproc) -r ../ -e '.*/external/.*' -e '_deps/.*' -e '.*/test/.*' -e '.*/CompilerIdCXX/.*' --txt --xml coverage.xml --json coverage.json .
coverage: /^TOTAL.*\s+(\d+\%)$/
artifacts:
paths:
- build/run-unit.json
# The pages and collect-metrics jobs need coverage files. See .gitlab-ci.ska.yml.
- build/coverage.xml
- build/coverage.json
- build/unittest*.xml
- build/pytest_*.xml
- build/coverage-unit.tar.gz
reports:
junit:
- build/unittest*.xml
......@@ -245,96 +227,34 @@ linting-2204:
coverage_format: cobertura
path: build/coverage.xml
unit-test-2204:
extends: [".unit-test-2204", ".unit-test-coverage"]
variables:
CMAKE_FLAGS: -DPORTABLE=OFF
CXX_FLAGS: -coverage
CTEST_FLAGS: -L unit
unit-test-portable-2204:
extends: [".unit-test-2204", ".unit-test-coverage"]
variables:
CMAKE_FLAGS: -DPORTABLE=ON
CXX_FLAGS: -coverage
CTEST_FLAGS: -L unit
unit-test-portable-no-avx2-2204:
extends: [".unit-test-2204", ".unit-test-coverage"]
variables:
CMAKE_FLAGS: -DPORTABLE=ON
CXX_FLAGS: -mno-avx2 -coverage
CTEST_FLAGS: -L unit
unit-test-address-sanitizer-2204:
extends: .unit-test-2204
.sanitizer-2404:
stage: test
extends: [".failable",".needs-base-2404"]
script:
- mkdir build && cd build
- cmake -G Ninja -DCMAKE_BUILD_TYPE=Debug -DBUILD_TESTING=On -DPORTABLE=Off -DCMAKE_CXX_FLAGS="${CXX_FLAGS}" ..
- ninja -j4
# Don't run slow tests. The overhead of the sanitizer causes time outs.
- ctest --output-on-failure --no-tests=error -j$(nproc) -L unit -LE slow
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: always
allow_failure: false
- when: manual
allow_failure: true
unit-test-address-sanitizer-2404:
extends: [".sanitizer-2404",".dind"]
variables:
CMAKE_FLAGS: -DPORTABLE=OFF
CXX_FLAGS: -fsanitize=address
# The overhead of the sanitizer causes slow tests to time out.
CTEST_FLAGS: -L unit -LE slow
# Ignore the leaks in third-party libraries.
LSAN_OPTIONS: suppressions=../ci/address_sanitizer_suppressions.txt
unit-test-undefined-behaviour-sanitizer-2204:
extends: .unit-test-2204
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: always
allow_failure: false
- when: manual
allow_failure: true
# There are NULL pointer issues in Casacore which block testing on CI.
unit-test-undefined-behaviour-sanitizer-2404:
extends: .sanitizer-2404
variables:
CMAKE_FLAGS: -DPORTABLE=OFF
# There are NULL pointer issues in Casacore which blocks us from testing in
# the CI.
CXX_FLAGS: -fsanitize=undefined -fno-sanitize=null -fno-sanitize-recover
# The overhead of the sanitizer causes slow tests to time out.
CTEST_FLAGS: -L unit -LE slow
integration-test-2204:
stage: test
extends: .failable
needs: ["versioning","build-debug-2204"]
image: $BASE_IMAGE_2204
script:
- cd build
- ninja # Needed when run on different containers with different timestamps
- ctest --output-on-failure -j$(($(nproc)/2 > 0 ? $(nproc)/2:1)) -L integration |& tee ctest.out
# Check if ctest found any tests. ctest >= 3.18 has a --no-tests=error
# option. Older versions require a manual check.
- if grep -q 'No tests were found' ctest.out; then exit 1; fi
# The json only needs to be built on the SKA repository
- if [[ $CI_SERVER_HOST != "git.astron.nl" ]] ; then gcovr -j$(($(nproc)/2 > 0 ? $(nproc)/2:1)) -r ../ -e '.*/external/.*' -e '.*/test/.*' -e '.*/CompilerIdCXX/.*' -e '.*/Common/.*' --json -o run-integration.json; fi
artifacts:
paths:
# This will give a warning and ERROR: No files to upload, which is ok
- build/run-integration.json
- build/pytest_*.xml
reports:
junit: build/pytest_*.xml
benchmark-2204:
stage: test
extends: .failable
needs: ["versioning","build-2204"]
image: $BASE_IMAGE_2204
script:
- cd build
# Needed when run on different containers with different timestamps.
# The build may fail when compilers differ between containers for unknown
# reasons. Rebuild everything in that case.
- if ! ninja; then ninja clean; ninja; fi
- ctest -V -L benchmark
artifacts:
paths:
- build/benchmarks/*
deploy-package-2204:
stage: publish
......
......@@ -6,31 +6,46 @@
services:
- docker:20.10-dind
include: .gitlab-ci.common.yml
default:
tags:
- ska-default # Use SKAO runners instead of standard gitlab.com runners.
pages-2204:
include:
- .gitlab-ci.common.yml
# Collects metrics and creates badges.
- project: "ska-telescope/templates-repository"
file: "gitlab-ci/includes/finaliser.gitlab-ci.yml"
pages:
stage: pages
needs: ["versioning","build-debug-2204","unit-test-2204","integration-test-2204"]
needs: ["versioning","coverage-2204"]
image: $BASE_IMAGE_2204
before_script:
- apt-get update
- apt-get -y install curl
variables:
OUTPUT: public/$CI_COMMIT_REF_SLUG
script:
- mkdir -p .public/build/reports
- cd .public
- gcovr -j$(($(nproc)/2 > 0 ? $(nproc)/2:1)) -r ../ -a ../build/run-integration.json -a ../build/run-unit.json --xml -o build/reports/code-coverage.xml
- gcovr -j$(($(nproc)/2 > 0 ? $(nproc)/2:1)) -r ../ -a ../build/run-integration.json -a ../build/run-unit.json --html --html-details -o index.html
- cp ../build/unittests.xml build/reports/unit-tests.xml
# Create and upload GitLab badges
- chmod -R 700 ../ci
- python3 ../ci/.produce-ci-metrics.py build/reports > ci-metrics.json
- sh ../ci/ci-badges-func.sh
- cd ..
- mv .public public
- echo Deploying GitLab pages to $CI_PAGES_URL/$CI_COMMIT_REF_SLUG
- mkdir -p $OUTPUT/coverage
- gcovr -e 'build/.*' -a build/coverage.json --html-details $OUTPUT/index.html
artifacts:
name: $CI_COMMIT_REF_SLUG
paths:
- public
reports:
coverage_report:
coverage_format: cobertura
path: public/build/reports/code-coverage.xml
\ No newline at end of file
expire_in: 1 week
# Puts xml files in the proper directories for the SKAO finaliser job.
# See https://developer.skatelescope.org/en/latest/tools/ci-cd/continuous-integration.html#automated-collection-of-ci-health-metrics-as-part-of-the-ci-pipeline
collect-metrics:
stage: pages
needs: ["coverage-2204"]
before_script:
- apt-get update
- apt-get install -y -qq python3
script:
- mkdir -p build/reports
- cp build/coverage.xml build/reports/code-coverage.xml
# Combine unit test xml files into one large file.
- python3 scripts/junit-merge.py build/reports/unit-tests.xml build/*test*.xml
artifacts:
paths:
- build/reports/*.xml
......@@ -17,8 +17,15 @@
artifacts:
paths:
- output-*/*
when: manual
allow_failure: true
rules:
- if: $CI_PIPELINE_SOURCE == "schedule"
when: always
allow_failure: false
- if: $CI_COMMIT_TAG
when: always
allow_failure: false
- when: manual
allow_failure: true
build-wheel-37:
extends: .build-wheel
......@@ -56,8 +63,12 @@ build-wheel-312:
script:
- pip install twine
- TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token python -m twine upload --repository-url ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi output-*/*.whl
when: manual
allow_failure: true
rules:
- if: $CI_COMMIT_TAG
when: always
allow_failure: false
- when: manual
allow_failure: true
deploy-wheel-37:
extends: .deploy-wheel
......
[settings]
known_first_party=dp3
......@@ -16,9 +16,9 @@ build:
- ninja-build
jobs:
pre_build:
# Fix https://github.com/inspirehep/jsonschema2rst/issues/13
- echo Patching $(find $HOME -path '*/jsonschema2rst/parser.py')...
- patch -p0 $(find $HOME -path '*/jsonschema2rst/parser.py') < docs/parser.py.patch
# Make 'doc' render as text, not inline (make sure to apply this patch also in local CI)
- echo Patching $(find $HOME -path '*/jsonschema2rst/rst_utils.py')...
- patch -p0 $(find $HOME -path '*/jsonschema2rst/rst_utils.py') < docs/rst_utils.py.patch
- mkdir build
# CMake converts conf.py.in into conf.py.
- cmake -S docs/ -B build -G Ninja
......
# DP3 Changelog
## Next release
## Upcoming release
## [6.3] - 2025-01-28
## New features
- Add flagtransfer step, which transfers flags from a low-resolution MS.
- Support new Casacore Stokes I storage manager.
- Add `msout.scalarflags` option, for compressing flags.
## Improvements
- The `elementmodel` parset key of the ApplyBeam and Predict step is now parsed
by EveryBeam making all element models in that library available.
The default value is changed from "hamaker" to "default" meaning that
EveryBeam selects the default element model for the telescope in
the measurement set. For a LOFAR MS that will still be "hamaker".
- Support EveryBeam 0.7.x.
- Apply per-direction weights in constraints.
- Use C++20. DP3 now requires at least GCC-10.
## Bug fixes
- Fix flag counting in UVWFlagger when using BDA.
## [6.2.2] - 2024-11-08
### Improvements
- Remove the executable `__DP3_from_pip__` from python binary wheel, replace it
with a python command-line interface DP3.py which behaves like DP3.
## [6.2.1] - 2024-11-06
### New features
- Allow wildcards in DDECal directions.
### Improvements
- Many internal quality improvements to the input step.
- Optimize threading in solvers.
- Reduce beam evaluations.
### Bug fixes
- Fix the `__DP3_from_pip__` application in binary wheels.
## [6.2] - 2024-08-29
### New features
- Support reading extra data columns from the input Measurement Set(s).
- Support extra (model) data columns in applybeam step.
- Add wgridderpredict step, which uses ducc wgridder for image based prediction.
- Add rotationdiagonalmode setting to ddecal step.
- Add smoothnessspectralexponent setting to ddecal step.
- Add usedualvisibilites setting to ddecal step, for using XX/YY visibilities only.
- Support a restricted LBFGS solution range in ddecal and demixer.
- Add skipstations setting to applybeam step.
- Support DD intervals in bda ddecal step.
- Add wscleanwriter step which writes data in WSClean reordered format.
### Improvements
- DP3 now requires EveryBeam v0.5.4
- Interface change: Remove start channel from DPInfo constructor, also in Python.
- Format python files using isort.
- Use Logger for all output. Do not allow std::cout / std::cerr anymore.
### Bug fixes
- Fix using msin.startchan / filter.startchan when updating a Measurement Set.
## [6.1] - 2024-06-18
- Support EveryBeam 0.6 (in addition to EveryBeam 0.5.8)
- Faster beam predict by using cached time directions
- Add support for dish telescopes like SKA-mid
- Add preliminary clipper task for VLBI processing
- Reduce memory in predict step
- Read sky model only once to speed up processing
- Various AARTFAAC processing improvements
- More use of XTensor
- Some use of AVX(2) instructions to speed up tasks
- Improve threading and use of threadpool
- Fix wrong h5 time axis when solint larger than number
- Fix prediction for source models with only negative polarized sources
- Fix compilation on platforms where size_t != 64 bit
- Fix multiple baseline selection (!1255)
- Fix ApplyCal with full-Jones correction
- Various small bugs and code improvements
## [6.0] - 2023-08-11
......
......@@ -3,7 +3,8 @@
# FindHDF5 uses NATIVE_COMMAND in separate_arguments, which requires CMake 3.9.
# The RESULTS_VARIABLE argument of execute_process() requires CMake 3.10.
cmake_minimum_required(VERSION 3.11)
# 'add_compile_definitions' in FetchXTensor.cmake requires CMake 3.12.
cmake_minimum_required(VERSION 3.12)
# CMake >= 3.19 gives warnings when these policies are not 'NEW'.
if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.19")
......@@ -12,7 +13,7 @@ if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.19")
endif()
# Set version number and project name.
set(DP3_VERSION 6.0.0)
set(DP3_VERSION 6.3.0) # Please keep in sync with the version in setup.py.
if(DP3_VERSION MATCHES "^([0-9]+)\\.([0-9]+)\\.([0-9]+)")
set(DP3_VERSION_MAJOR "${CMAKE_MATCH_1}")
set(DP3_VERSION_MINOR "${CMAKE_MATCH_2}")
......@@ -23,7 +24,7 @@ endif()
# Get the latest abbreviated commit hash of the working branch
execute_process(
COMMAND git describe
COMMAND git describe --tags
WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
OUTPUT_VARIABLE DP3_GIT_HASH
OUTPUT_STRIP_TRAILING_WHITESPACE)
......@@ -71,6 +72,11 @@ option(BUILD_TESTING "Include tests in the build" OFF)
set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/CMake)
# An investigation was done to determine if the --ffast-math
# compiler option was possible in DP3. The conclusion was that
# it was not advisable because there was a considerable
# difference in the handling of NaN. Learn the details at
# https://jira.skatelescope.org/browse/AST-1502.
add_compile_options(
-Wall
-Wnon-virtual-dtor
......@@ -91,29 +97,7 @@ if(NOT CMAKE_BUILD_TYPE STREQUAL "Release")
add_compile_options(-O3)
endif()
option(PORTABLE "Generate portable code" OFF)
if(PORTABLE)
if(DEFINED TARGET_CPU)
message(WARNING "You have selected to build PORTABLE binaries. "
"TARGET_CPU settings will be ignored.")
unset(TARGET_CPU CACHE)
endif()
add_compile_definitions(PORTABLE_BUILD)
else()
if(DEFINED TARGET_CPU)
add_compile_options(-march=${TARGET_CPU})
else()
check_cxx_compiler_flag("-march=native" COMPILER_HAS_MARCH_NATIVE)
if(COMPILER_HAS_MARCH_NATIVE)
add_compile_options(-march=native)
else()
message(
WARNING "The compiler doesn't support -march=native for your CPU.")
endif()
endif()
endif()
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED YES)
set(CMAKE_CXX_EXTENSIONS OFF)
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
......@@ -226,7 +210,7 @@ endif()
# Prevent accidentally finding old BoostConfig.cmake file from casapy
set(Boost_NO_BOOST_CMAKE ON)
unset(BOOST_MINIMUM_VERSION)
set(BOOST_COMPONENTS "filesystem;program_options;system")
set(BOOST_COMPONENTS "date_time;filesystem;program_options;system")
if(BUILD_TESTING)
# Boost 1.59 introduced BOOST_TEST. Many tests use this feature.
set(BOOST_MINIMUM_VERSION 1.59)
......@@ -249,15 +233,14 @@ include_directories(${AOFLAGGER_INCLUDE_DIR})
# make it somewhat more explicit
find_package(EveryBeam NO_MODULE)
if(${EVERYBEAM_FOUND})
if(${EVERYBEAM_VERSION} VERSION_LESS "0.5.4" OR ${EVERYBEAM_VERSION}
VERSION_GREATER_EQUAL "0.6.0")
if(${EVERYBEAM_VERSION} VERSION_LESS "0.5.8" OR ${EVERYBEAM_VERSION}
VERSION_GREATER_EQUAL "0.8.0")
message(
FATAL_ERROR
"DP3 needs EveryBeam version 0.5.x - with x >= 4 - but found version ${EveryBeam_VERSION}"
"DP3 needs EveryBeam version >= 0.5.8 and < 0.8.0 - but found version ${EveryBeam_VERSION}"
)
endif()
# TODO(AST-1336): Remove SYSTEM when EveryBeam no longer includes XTensor headers.
include_directories(SYSTEM ${EVERYBEAM_INCLUDE_DIR})
include_directories(${EVERYBEAM_INCLUDE_DIR})
else(${EVERYBEAM_FOUND})
message(
FATAL_ERROR
......@@ -277,8 +260,7 @@ if(IDGAPI_FOUND)
endif()
endif()
if(IDGAPI_LIBRARIES AND IDGAPI_INCLUDE_DIRS)
# TODO(AST-1335): Remove SYSTEM when IDG no longer includes XTensor headers.
include_directories(SYSTEM ${IDGAPI_INCLUDE_DIRS})
include_directories(${IDGAPI_INCLUDE_DIRS})
set(HAVE_IDG TRUE)
add_definitions(-DHAVE_IDG)
message(STATUS "Image domain gridder API libraries found.")
......@@ -325,22 +307,30 @@ if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git")
endif()
endif()
# Include aocommon
include_directories(${CMAKE_SOURCE_DIR}/external/aocommon/include/)
# User may optionally set `TARGET_CPU` if `PORTABLE=OFF`
option(PORTABLE "Build portable binaries (with slightly decreased performance)"
OFF)
include(external/aocommon/CMake/SetTargetCPU.cmake)
# Include XTensor libraries. DP3 must include FetchXTensor directly (not
# indirectly, e.g., via schaapcommon) since it uses 'add_compile_definitions'.
set(XTENSOR_LIBRARIES xtl xsimd xtensor xtensor-blas)
include(external/aocommon/CMake/FetchXTensor.cmake)
add_compile_definitions(XTENSOR_USE_XSIMD)
include(${CMAKE_SOURCE_DIR}/external/aocommon/CMake/FetchXTensor.cmake)
# Include aocommon.
include_directories(${CMAKE_SOURCE_DIR}/external/aocommon/include/)
# Include schaapcommon, which should happen after including XTensor.
set(SCHAAPCOMMON_MODULES facets h5parm ducc0 reordering)
add_subdirectory(${CMAKE_SOURCE_DIR}/external/schaapcommon)
include_directories(${CMAKE_SOURCE_DIR}/external/schaapcommon/include)
include_directories(${CMAKE_SOURCE_DIR}/external/schaapcommon/external)
# Include pybind11
set(PYTHON_EXECUTABLE "${Python3_EXECUTABLE}")
add_subdirectory("${CMAKE_SOURCE_DIR}/external/pybind11")
set(PYTHON_EXECUTABLE ${Python3_EXECUTABLE})
add_subdirectory(${CMAKE_SOURCE_DIR}/external/pybind11)
include_directories(${pybind11_INCLUDE_DIR})
# Include schaapcommon
set(SCHAAPCOMMON_MODULES facets h5parm)
add_subdirectory("${CMAKE_SOURCE_DIR}/external/schaapcommon")
include_directories(SYSTEM "${CMAKE_SOURCE_DIR}/external/schaapcommon/include")
# Add cmake information to headers
configure_file(base/Version.h.in base/Version.h)
include_directories(${CMAKE_CURRENT_BINARY_DIR}/base)
......@@ -378,7 +368,6 @@ add_library(
# 'Common' files are included in all executables and libraries.
add_library(
Common OBJECT
common/BaselineSelect.cc
common/ClusterDesc.cc
common/DataConvert.cc
common/Fields.cc
......@@ -463,10 +452,9 @@ endif()
add_library(
DP3_OBJ OBJECT
antennaflagger/Flagger.cc
base/AartfaacSubtableWriter.cc
base/Apply.cc
base/BaselineSelection.cc
base/BDABuffer.cc
base/BdaBuffer.cc
base/CalType.cc
base/DPBuffer.cc
base/DPInfo.cc
......@@ -476,8 +464,6 @@ add_library(
base/FlagCounter.cc
base/GainCalAlgorithm.cc
base/GaussianSource.cc
base/ModelComponent.cc
base/ModelComponentVisitor.cc
base/MS.cc
base/Patch.cc
base/PhaseFitter.cc
......@@ -485,6 +471,7 @@ add_library(
base/ProgressMeter.cc
base/Simulate.cc
base/Simulator.cc
base/SubtableWriter.cc
base/ComponentInfo.cc
base/SourceDBUtil.cc
base/Stokes.cc
......@@ -497,13 +484,14 @@ add_library(
steps/ApplyCal.cc
steps/Averager.cc
steps/BDAAverager.cc
steps/BDAExpander.cc
steps/BdaExpander.cc
steps/BdaGroupPredict.cc
steps/Clipper.cc
steps/Counter.cc
steps/Demixer.cc
steps/DummyStep.cc
steps/Filter.cc
steps/FlagTransfer.cc
steps/GainCal.cc
steps/H5ParmPredict.cc
steps/IDGImager.cc
......@@ -514,10 +502,11 @@ add_library(
steps/MSBDAReader.cc
steps/MSBDAWriter.cc
steps/MsColumnReader.cc
steps/MSReader.cc
steps/MsReader.cc
steps/MSUpdater.cc
steps/MSWriter.cc
steps/MultiMSReader.cc
steps/WSCleanWriter.cc
steps/MultiMsReader.cc
steps/MultiResultStep.cc
steps/OneApplyCal.cc
steps/OnePredict.cc
......@@ -532,6 +521,7 @@ add_library(
steps/Step.cc
steps/Upsample.cc
steps/UVWFlagger.cc
steps/WGridderPredict.cc
steps/ApplyBeam.cc
steps/NullStokes.cc
steps/SagecalPredict.cc)
......@@ -576,7 +566,7 @@ if(NOT "${LIBDIRAC_PREFIX}" STREQUAL "")
find_package(PkgConfig REQUIRED)
# Set search path to install directory of libdirac
set(ENV{PKG_CONFIG_PATH} "${LIBDIRAC_PREFIX}/lib/pkgconfig")
pkg_search_module(LIBDIRAC libdirac>=0.8.1)
pkg_search_module(LIBDIRAC libdirac 0.8.4...<0.8.6)
endif()
if(LIBDIRAC_FOUND)
message(STATUS "Found libdirac: ${LIBDIRAC_INCLUDE_DIRS}")
......@@ -598,7 +588,6 @@ endif()
set(DP3_LIBRARIES
${AOFLAGGER_LIB}
${ARMADILLO_LIBRARIES}
${Boost_LIBRARIES}
${CASACORE_LIBRARIES}
${CFITSIO_LIBRARY}
${EVERYBEAM_LIB}
......@@ -607,7 +596,8 @@ set(DP3_LIBRARIES
${Python3_LIBRARIES}
schaapcommon
Threads::Threads
pybind11::embed)
pybind11::embed
${Boost_LIBRARIES})
if(BUILD_WITH_CUDA)
list(APPEND DP3_LIBRARIES CudaSolvers)
......@@ -615,15 +605,13 @@ endif()
# If libdirac is found, use it
if(LIBDIRAC_FOUND)
# add link flags
set(DP3_LIBRARIES ${DP3_LIBRARIES} ${LIBDIRAC_LINK_LIBRARIES})
if(HAVE_CUDA)
# if we use libdirac with CUDA support, we enable a different preprocessor def
# so as not to conflict with CPU only version of libdirac
add_definitions(-DHAVE_LIBDIRAC_CUDA)
add_definitions(-DHAVE_CUDA)
# add link flags
set(DP3_LIBRARIES ${DP3_LIBRARIES} ${LIBDIRAC_LINK_LIBRARIES})
# add preprocessor def
add_definitions(-DHAVE_CUDA)
set(DP3_LIBRARIES
${DP3_LIBRARIES}
${CUDA_LIBRARIES}
......@@ -635,8 +623,6 @@ if(LIBDIRAC_FOUND)
else()
# add preprocessor def
add_definitions(-DHAVE_LIBDIRAC)
# add link flags
set(DP3_LIBRARIES ${DP3_LIBRARIES} ${LIBDIRAC_LINK_LIBRARIES})
endif()
endif()
......@@ -673,10 +659,10 @@ if(NOT CMAKE_SYSTEM_NAME STREQUAL "Darwin")
endif()
set(SOURCEDB_LIBRARIES Blob Common ParmDB ${CASACORE_LIBRARIES}
${Boost_SYSTEM_LIBRARY})
${Boost_LIBRARIES})
add_executable(DP3 base/Main.cc)
target_link_libraries(DP3 LIBDP3 ${HDF5_LIBRARIES})
target_link_libraries(DP3 LIBDP3 ${HDF5_LIBRARIES} ${Boost_LIBRARIES})
add_executable(makesourcedb parmdb/makesourcedb.cc)
target_link_libraries(makesourcedb ${SOURCEDB_LIBRARIES})
......@@ -685,7 +671,7 @@ add_executable(showsourcedb parmdb/showsourcedb.cc)
target_link_libraries(showsourcedb ${SOURCEDB_LIBRARIES})
add_executable(msoverview base/msoverview.cc base/MS.cc)
target_link_libraries(msoverview ${CASACORE_LIBRARIES})
target_link_libraries(msoverview ${CASACORE_LIBRARIES} ${Boost_LIBRARIES})
install(TARGETS DP3 makesourcedb showsourcedb msoverview DESTINATION bin)
......@@ -697,6 +683,25 @@ install(
add_subdirectory(docs)
option(ENABLE_TRACY_PROFILING "Enables compilation with the Tracy profiler" OFF)
if(ENABLE_TRACY_PROFILING)
option(TRACY_STATIC "" OFF)
option(TRACY_ENABLE "" ON)
option(TRACY_ON_DEMAND "" ON)
FetchContent_Declare(
tracy
GIT_REPOSITORY https://github.com/wolfpld/tracy.git
GIT_TAG v0.11.1
GIT_SHALLOW TRUE
GIT_PROGRESS TRUE)
FetchContent_MakeAvailable(tracy)
target_link_libraries(DP3_OBJ TracyClient)
endif(ENABLE_TRACY_PROFILING)
if(BUILD_TESTING)
include(CTest)
......@@ -704,9 +709,8 @@ if(BUILD_TESTING)
aartfaacreader/test/unit/tAntennaConfig.cc
antennaflagger/test/unit/tFlagger.cc
base/test/runtests.cc
base/test/unit/tAartfaacSubtableWriter.cc
base/test/unit/tBaselineSelection.cc
base/test/unit/tBDABuffer.cc
base/test/unit/tBdaBuffer.cc
base/test/unit/tDP3.cc
base/test/unit/tDPBuffer.cc
base/test/unit/tDPInfo.cc
......@@ -717,6 +721,7 @@ if(BUILD_TESTING)
base/test/unit/tSimulate.cc
base/test/unit/tSimulator.cc
base/test/unit/tSourceDBUtil.cc
base/test/unit/tSubtableWriter.cc
base/test/unit/tTelescope.cc
base/test/unit/tUvwCalculator.cc
common/test/unit/fixtures/fSkymodel.cc
......@@ -735,13 +740,16 @@ if(BUILD_TESTING)
ddecal/test/unit/tLinearSolvers.cc
ddecal/test/unit/tLLSSolver.cc
ddecal/test/unit/tRotationConstraint.cc
ddecal/test/unit/tRotationAndDiagonalConstraint.cc
ddecal/test/unit/tSmoothnessConstraint.cc
ddecal/test/unit/tSettings.cc
ddecal/test/unit/tSolutionResampler.cc
ddecal/test/unit/tSolveData.cc
ddecal/test/unit/tSolverBaseMatrix.cc
ddecal/test/unit/tSolverFactory.cc
ddecal/test/unit/tSolvers.cc
ddecal/test/unit/tSolverTools.cc
ddecal/test/unit/tSolutionWriter.cc
ddecal/test/unit/tTECConstraint.cc
parmdb/test/unit/tSkymodelToSourceDB.cc
parmdb/test/unit/tSourceDB.cc
......@@ -759,11 +767,13 @@ if(BUILD_TESTING)
steps/test/unit/tBdaExpander.cc
steps/test/unit/tBdaGroupPredict.cc
steps/test/unit/tBDAResultStep.cc
steps/test/unit/tClipper.cc
steps/test/unit/tCounter.cc
steps/test/unit/tDDECal.cc
steps/test/unit/tDemixer.cc
steps/test/unit/tDummyStep.cc
steps/test/unit/tFilter.cc
steps/test/unit/tFlagTransfer.cc
steps/test/unit/tGainCal.cc
steps/test/unit/tH5ParmPredict.cc
steps/test/unit/tIDGImager.cc
......@@ -773,6 +783,7 @@ if(BUILD_TESTING)
steps/test/unit/tMSBDAReader.cc
steps/test/unit/tMSBDAWriter.cc
steps/test/unit/tMsColumnReader.cc
steps/test/unit/tMSReader.cc
steps/test/unit/tMSUpdater.cc
steps/test/unit/tMSWriter.cc
steps/test/unit/tNullStokes.cc
......@@ -787,7 +798,8 @@ if(BUILD_TESTING)
steps/test/unit/tStationAdder.cc
steps/test/unit/tStepCommon.cc
steps/test/unit/tUpsample.cc
steps/test/unit/tUVWFlagger.cc)
steps/test/unit/tUVWFlagger.cc
steps/test/unit/tWGridderPredict.cc)
if(HAVE_IDG)
list(APPEND TEST_FILENAMES steps/test/unit/tIDGPredict.cc)
endif()
......@@ -871,9 +883,6 @@ if(BUILD_TESTING)
set(SLOW_TESTS
idgpredict/process
idgpredict/process_beam
msbdawriter/process_simple
msbdawriter/create_default_subtables
msbdawriter/different_bda_intervals
solvers/scalar
solvers/scalar_normaleq
solvers/diagonal
......@@ -882,16 +891,26 @@ if(BUILD_TESTING)
solvers/full_jones
solvers/iterative_scalar
solvers/iterative_scalar_dd_intervals
solvers/iterative_uni_scalar_dd_intervals
solvers/iterative_duo_scalar_dd_intervals
solvers/iterative_diagonal
solvers/iterative_diagonal_dd_intervals
solvers/iterative_duo_diagonal_dd_intervals
solvers/iterative_full_jones
solvers/iterative_full_jones_dd_intervals
solvers/hybrid
solvers/min_iterations)
if(LIBDIRAC_FOUND AND NOT HAVE_CUDA)
list(APPEND SLOW_TESTS solvers/lbfgs_scalar solvers/lbfgs_diagonal
solvers/lbfgs_full_jones)
list(
APPEND
SLOW_TESTS
solvers/lbfgs_scalar
solvers/lbfgs_diagonal
solvers/lbfgs_full_jones
solvers/lbfgs_bounded_scalar
solvers/lbfgs_bounded_diagonal
solvers/lbfgs_bounded_full_jones)
endif()
foreach(TEST ${SLOW_TESTS})
......@@ -942,7 +961,7 @@ if(BUILD_TESTING)
configure_file(${CMAKE_SOURCE_DIR}/scripts/test/testconfig.py.in
testconfig.py)
configure_file(${CMAKE_SOURCE_DIR}/scripts/test/utils.py.in utils.py)
configure_file(${CMAKE_SOURCE_DIR}/scripts/test/utils.py utils.py COPYONLY)
# The 'source' symbolic link simplifies running the tests manually inside
# ${CMAKE_CURRENT_BINARY_DIR}: It allows using 'source/tApplyBeam.sh' instead
......
......@@ -45,7 +45,7 @@ set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT)
set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "amd64")
set(CPACK_DEBIAN_PACKAGE_DEPENDS
"aoflagger (>= 3.0.1),\
everybeam (>= 0.5.4), everybeam (<< 0.6.0),\
everybeam (>= 0.5.8), everybeam (<< 0.7.0),\
idg-api (>= 0.8)")
set(CPACK_DEBIAN_PACKAGE_MAINTAINER "deb-packages@astron.nl")
set(CPACK_DEBIAN_PACKAGE_SECTION "science")
......
......@@ -6,7 +6,7 @@ The DP3 documentation can be found at: https://dp3.readthedocs.org
This repository is a continuation of the one at svn.astron.nl/LOFAR. In particular, it has branched off at LOFAR Release 3.2 (Sept 2018). The version of DP3 that is in the ASTRON repository is no longer maintained.
## Installation
Some non-standard dependencies of this project are: armadillo, boost, boost-python, casacore, hdf5, aoflagger, and EveryBeam. See the Dockerfiles [`docker/ubuntu_20_04_base`](docker/ubuntu_20_04_base) and/or [`docker/ubuntu_22_04_base`](docker/ubuntu_22_04_base) as examples.
Some non-standard dependencies of this project are: armadillo, boost, boost-python, casacore, hdf5, aoflagger, and EveryBeam. See the Dockerfiles [`docker/ubuntu_22_04_base`](docker/ubuntu_22_04_base) and/or [`docker/ubuntu_24_04_base`](docker/ubuntu_24_04_base) as examples.
Typical installation commands:
```
......
......@@ -6,22 +6,34 @@
#include "BaselineSelection.h"
#include <vector>
#include <boost/algorithm/string.hpp>
#include <casacore/casa/Utilities/Regex.h>
#include <casacore/casa/Arrays/Matrix.h>
#include <casacore/casa/Utilities/Regex.h>
#include <casacore/casa/version.h>
#include <casacore/ms/MeasurementSets/MeasurementSet.h>
#include <casacore/ms/MeasurementSets/MSAntenna.h>
#include <casacore/ms/MeasurementSets/MSAntennaColumns.h>
#include <casacore/ms/MSSel/MSAntennaGram.h>
#include <casacore/tables/Tables/ScaColDesc.h>
#include <casacore/tables/Tables/SetupNewTab.h>
#include <casacore/tables/Tables/Table.h>
#include <aocommon/logger.h>
#include "../common/BaselineSelect.h"
#include "../common/ParameterSet.h"
#include "../common/ParameterValue.h"
#include "../common/StreamUtil.h"
#include <vector>
#include <aocommon/logger.h>
using casacore::IPosition;
using casacore::Matrix;
using casacore::MS;
using casacore::MSAntenna;
using casacore::MSAntennaParse;
using casacore::SetupNewTable;
using casacore::Table;
using dp3::common::operator<<;
using aocommon::Logger;
......@@ -29,12 +41,43 @@ using aocommon::Logger;
namespace dp3 {
namespace base {
// Constructor: temporarily replaces casacore's global antenna-parse error
// handler (MSAntennaParse::thisMSAErrorHandler). The previous handler is
// saved in old_handler_ so the destructor can restore it (RAII override).
LogAntennaParseErrors::LogAntennaParseErrors()
    : old_handler_(MSAntennaParse::thisMSAErrorHandler) {
  // The new handler logs all errors as warnings and does not throw exceptions.
  class ErrorHandler : public casacore::MSSelectionErrorHandler {
   public:
    ErrorHandler() = default;
    ~ErrorHandler() override = default;
    // Forward every parse error to the logger as a warning.
    // NOTE(review): casacore's interface passes 'message' by value here.
    void reportError(const char* token,
                     const casacore::String message) override {
      Logger::Warn << message << token << '\n';
    }
  };

  // This syntax works both when ErrorHandlerPointer is a raw pointer
  // (casacore < 3.1.2) and when it's a smart pointer.
  MSAntennaParse::thisMSAErrorHandler = ErrorHandlerPointer(new ErrorHandler());
}
// Destructor: restores the handler that was active before construction.
// For old casacore versions the temporary handler must be deleted manually,
// since thisMSAErrorHandler is a raw owning pointer there.
LogAntennaParseErrors::~LogAntennaParseErrors() {
#if CASACORE_MAJOR_VERSION < 3 || \
    (CASACORE_MAJOR_VERSION == 3 && \
     (CASACORE_MINOR_VERSION == 0 || \
      (CASACORE_MINOR_VERSION == 1 && CASACORE_PATCH_VERSION < 2)))
  // In casacore < 3.1.2 thisMSAErrorHandler is a raw pointer,
  // From casacore 3.1.2. it's a CountedPtr or another smart pointer.
  delete MSAntennaParse::thisMSAErrorHandler;
#endif
  MSAntennaParse::thisMSAErrorHandler = old_handler_;
}
/// Default constructor: creates an empty selection (no baseline, correlation
/// type or length range set). Using '= default' instead of an empty body is
/// the idiomatic modern-C++ form and is behaviorally identical here.
BaselineSelection::BaselineSelection() = default;
BaselineSelection::BaselineSelection(const common::ParameterSet& parset,
const string& prefix, bool minmax,
const string& defaultCorrType,
const string& defaultBaseline)
const std::string& prefix, bool minmax,
const std::string& defaultCorrType,
const std::string& defaultBaseline)
: itsStrBL(parset.getString(prefix + "baseline", defaultBaseline)),
itsCorrType(parset.getString(prefix + "corrtype", defaultCorrType)),
itsRangeBL(
......@@ -71,19 +114,19 @@ void BaselineSelection::show(std::ostream& os,
Matrix<bool> BaselineSelection::apply(const DPInfo& info) const {
// Size and initialize the selection matrix.
int nant = info.antennaNames().size();
Matrix<bool> selectBL(nant, nant, true);
const std::size_t n_antennas = info.antennaNames().size();
Matrix<bool> selection(n_antennas, n_antennas, true);
// Apply the various parts if given.
if (!itsStrBL.empty() && itsStrBL != "[]") {
handleBL(selectBL, info);
handleBL(selection, info);
}
if (!itsCorrType.empty()) {
handleCorrType(selectBL);
handleCorrType(selection);
}
if (!itsRangeBL.empty()) {
handleLength(selectBL, info);
handleLength(selection, info);
}
return selectBL;
return selection;
}
casacore::Vector<bool> BaselineSelection::applyVec(const DPInfo& info) const {
......@@ -108,14 +151,14 @@ void BaselineSelection::handleBL(Matrix<bool>& selectBL,
bool mssel = true;
if (itsStrBL[0] == '[') {
std::string::size_type rb = itsStrBL.find(']');
if (rb == string::npos)
if (rb == std::string::npos)
throw std::runtime_error("Baseline selection " + itsStrBL +
" has no ending ]");
if (rb == itsStrBL.size() - 1) {
mssel = false;
} else {
std::string::size_type lb = itsStrBL.find('[', 1);
mssel = (lb == string::npos || lb > rb);
mssel = (lb == std::string::npos || lb > rb);
}
}
if (!mssel) {
......@@ -125,25 +168,79 @@ void BaselineSelection::handleBL(Matrix<bool>& selectBL,
info.antennaNames()));
} else {
// Specified in casacore's MSSelection format.
string msName = info.msName();
if (msName.empty()) throw std::runtime_error("Empty measurement set name");
std::ostringstream os;
Matrix<bool> sel(common::BaselineSelect::convert(msName, itsStrBL, os));
// Show possible messages about unknown stations.
if (!os.str().empty()) {
Logger::Warn << os.str();
}
// The resulting matrix can be smaller because new stations might have
// been added that are not present in the MS's ANTENNA table.
if (sel.nrow() == selectBL.nrow()) {
selectBL = selectBL && sel;
} else {
// Only and the subset.
Matrix<bool> selBL =
selectBL(IPosition(2, 0), IPosition(2, sel.nrow() - 1));
selBL = selBL && sel;
}
selectBL = selectBL && HandleMsSelection(info);
}
}
// Parse itsStrBL as a casacore MSSelection antenna/baseline expression and
// return a square boolean matrix (n_antennas x n_antennas) that is true for
// every baseline matched by the expression. Parsing happens on temporary
// in-memory tables built from 'info', so no measurement set on disk is
// needed.
Matrix<bool> BaselineSelection::HandleMsSelection(const DPInfo& info) const {
  const casacore::String& antenna1_string = MS::columnName(MS::ANTENNA1);
  const casacore::String& antenna2_string = MS::columnName(MS::ANTENNA2);

  // Create a temporary MSAntenna table in memory for parsing purposes.
  // It holds the name and position of every antenna known to 'info'.
  SetupNewTable antenna_setup(casacore::String(),
                              MSAntenna::requiredTableDesc(), Table::New);
  Table antenna_table(antenna_setup, Table::Memory, info.antennaNames().size());
  MSAntenna ms_antenna(antenna_table);
  casacore::MSAntennaColumns ms_antenna_columns(ms_antenna);
  for (size_t i = 0; i < info.antennaNames().size(); ++i) {
    ms_antenna_columns.name().put(i, info.antennaNames()[i]);
    ms_antenna_columns.positionMeas().put(i, info.antennaPos()[i]);
  }

  // Create a temporary table holding the antenna numbers of the baselines.
  casacore::TableDesc table_description;
  table_description.addColumn(casacore::ScalarColumnDesc<int>(antenna1_string));
  table_description.addColumn(casacore::ScalarColumnDesc<int>(antenna2_string));
  SetupNewTable setup(casacore::String(), table_description, Table::New);
  Table table(setup, Table::Memory, info.nbaselines());
  casacore::ScalarColumn<int> column1(table, antenna1_string);
  casacore::ScalarColumn<int> column2(table, antenna2_string);
  for (size_t i = 0; i < info.nbaselines(); ++i) {
    column1.put(i, info.getAnt1()[i]);
    column2.put(i, info.getAnt2()[i]);
  }

  // Do the selection using the temporary tables.
  casacore::TableExprNode antenna_node1 = table.col(antenna1_string);
  casacore::TableExprNode antenna_node2 = table.col(antenna2_string);
  casacore::Vector<int> selected_antennas1;
  casacore::Vector<int> selected_antennas2;
  {
    // Overwrite the error handler to ignore errors for unknown antennas.
    // (Restored automatically when this scope ends.)
    dp3::base::LogAntennaParseErrors ignore_antenna_errors;

    // Parse the selection.
    // 'selected_antennas[12]' will contain the selected antenna indices.
    // 'selected_baselines' becomes an n x 2 matrix with the antenna indices
    // for each selected baseline.
    Matrix<int> selected_baselines;
    casacore::TableExprNode selection_node =
        casacore::msAntennaGramParseCommand(
            antenna_table, antenna_node1, antenna_node2, itsStrBL,
            selected_antennas1, selected_antennas2, selected_baselines);

    // msAntennaGramParseCommand may put negative indices (with unknown
    // semantics) into 'selected_antennas[12]' and 'selected_baselines'.
    // -> Apply 'selection_node' and extract the correct antenna indices.
    Table selection_table = table(selection_node);
    selected_antennas1 =
        casacore::ScalarColumn<int>(selection_table, antenna1_string)
            .getColumn();
    selected_antennas2 =
        casacore::ScalarColumn<int>(selection_table, antenna2_string)
            .getColumn();
  }

  // Convert selected_antennas[12] to a selection matrix.
  // Each pair is marked symmetrically: selecting (a1,a2) also selects (a2,a1).
  Matrix<bool> selection(info.nantenna(), info.nantenna(), false);
  for (size_t bl = 0; bl < selected_antennas1.size(); ++bl) {
    const int a1 = selected_antennas1[bl];
    const int a2 = selected_antennas2[bl];
    selection(a1, a2) = true;
    selection(a2, a1) = true;
  }
  return selection;
}
Matrix<bool> BaselineSelection::handleBLVector(
......@@ -162,7 +259,7 @@ Matrix<bool> BaselineSelection::handleBLVector(
"it's more clear to use [[ant1],[ant2]]\n";
}
for (unsigned int i = 0; i < pairs.size(); ++i) {
std::vector<string> bl = pairs[i].getStringVector();
std::vector<std::string> bl = pairs[i].getStringVector();
if (bl.size() == 1) {
// Turn the given antenna name pattern into a regex.
casacore::Regex regex(casacore::Regex::fromPattern(bl[0]));
......
// BaselineSelection.h: Class to handle the baseline selection
// Copyright (C) 2020 ASTRON (Netherlands Institute for Radio Astronomy)
// Copyright (C) 2024 ASTRON (Netherlands Institute for Radio Astronomy)
// SPDX-License-Identifier: GPL-3.0-or-later
/// @file
/// @brief Class to handle the baseline selection
/// @author Ger van Diepen
#ifndef DPPP_BASELINESELECTION_H
#define DPPP_BASELINESELECTION_H
#ifndef DP3_BASELINESELECTION_H_
#define DP3_BASELINESELECTION_H_
#include <dp3/base/DPInfo.h>
#include <casacore/casa/Arrays/Vector.h>
#include <casacore/casa/Arrays/Matrix.h>
#include <casacore/ms/MSSel/MSAntennaParse.h>
#include <casacore/ms/MSSel/MSSelectionErrorHandler.h>
namespace dp3 {
namespace common {
......@@ -22,6 +24,24 @@ class ParameterValue;
namespace base {
/// RAII object for temporarily overriding MSAntennaParse::thisMSAErrorHandler.
class LogAntennaParseErrors {
public:
/// Constructor. Set MSAntennaParse::thisMSAErrorHandler to a handler
/// which forwards all errors as warnings to the Logger.
LogAntennaParseErrors();
/// Destructor. Restores the original MSAntennaParse::thisMSAErrorHandler
/// and cleans up the temporary installed handler.
~LogAntennaParseErrors();
private:
// Different casacore versions use different (smart) pointer types.
using ErrorHandlerPointer =
decltype(casacore::MSAntennaParse::thisMSAErrorHandler);
ErrorHandlerPointer old_handler_;
};
/// \brief Class containing a few static functions to parse a baseline selection
/// string.
class BaselineSelection {
......@@ -38,17 +58,17 @@ class BaselineSelection {
/// <li> minbl: minimum baseline length (in m); only if minmax=true
/// <li> maxbl: maximum baseline length (in m); only if minmax=true
/// </ul>
BaselineSelection(const common::ParameterSet&, const string& prefix,
BaselineSelection(const common::ParameterSet&, const std::string& prefix,
bool minmax = false,
const string& defaultCorrType = string(),
const string& defaultBaseline = string());
const std::string& defaultCorrType = std::string(),
const std::string& defaultBaseline = std::string());
/// Is there any selection?
bool hasSelection() const;
/// Show the parameters.
/// Optional extra blanks can be put before the value.
void show(std::ostream& os, const std::string& blanks = string()) const;
void show(std::ostream& os, const std::string& blanks = std::string()) const;
/// Form the selection matrix telling for each baseline if it is selected.
/// If no selection is made, all values in the matrix are true.
......@@ -62,6 +82,9 @@ class BaselineSelection {
/// Convert the baseline selection string.
void handleBL(casacore::Matrix<bool>& selectBL, const DPInfo& info) const;
/// Handle an MSSelection string.
casacore::Matrix<bool> HandleMsSelection(const DPInfo& info) const;
/// Handle a vector of baseline specifications.
casacore::Matrix<bool> handleBLVector(
const common::ParameterValue& pvBL,
......@@ -73,8 +96,8 @@ class BaselineSelection {
/// Handle the baseline length selection.
void handleLength(casacore::Matrix<bool>& selectBL, const DPInfo& info) const;
string itsStrBL;
string itsCorrType;
std::string itsStrBL;
std::string itsCorrType;
std::vector<double> itsRangeBL;
};
......
// Copyright (C) 2020 ASTRON (Netherlands Institute for Radio Astronomy)
// SPDX-License-Identifier: GPL-3.0-or-later
#include <dp3/base/BDABuffer.h>
#include <dp3/base/BdaBuffer.h>
#include <algorithm>
#include <cassert>
#include <limits>
namespace dp3 {
namespace base {
BDABuffer::Row::Row(double _time, double _interval, double _exposure,
BdaBuffer::Row::Row(double _time, double _interval, double _exposure,
common::rownr_t _row_nr, std::size_t _baseline_nr,
std::size_t _n_channels, std::size_t _n_correlations,
std::complex<float>* _data, bool* _flags, float* _weights,
bool* _full_res_flags, const double* const _uvw)
std::size_t _offset, const double* const _uvw)
: time(_time),
interval(_interval),
exposure(_exposure),
......@@ -21,145 +21,77 @@ BDABuffer::Row::Row(double _time, double _interval, double _exposure,
baseline_nr(_baseline_nr),
n_channels(_n_channels),
n_correlations(_n_correlations),
data(_data),
flags(_flags),
weights(_weights),
full_res_flags(_full_res_flags),
offset(_offset),
uvw{_uvw ? _uvw[0] : std::numeric_limits<double>::quiet_NaN(),
_uvw ? _uvw[1] : std::numeric_limits<double>::quiet_NaN(),
_uvw ? _uvw[2] : std::numeric_limits<double>::quiet_NaN()} {}
BDABuffer::BDABuffer(const std::size_t pool_size, const Fields& fields)
BdaBuffer::BdaBuffer(const std::size_t pool_size, const common::Fields& fields)
: data_(),
flags_(),
weights_(),
full_res_flags_(),
rows_(),
original_capacity_(pool_size),
remaining_capacity_(pool_size) {
if (fields.data) {
data_.reserve(remaining_capacity_);
if (fields.Data()) {
data_[""].reserve(remaining_capacity_);
}
if (fields.flags) {
if (fields.Flags()) {
flags_.reserve(remaining_capacity_);
}
if (fields.weights) {
if (fields.Weights()) {
weights_.reserve(remaining_capacity_);
}
if (fields.full_res_flags) {
full_res_flags_.reserve(remaining_capacity_);
}
}
// When copying the memory pools in this copy-constructor, the capacity
// of the new memory pools will equal their size. There is therefore
// no remaining capacity in the new copy.
BDABuffer::BDABuffer(const BDABuffer& other, const Fields& fields,
const Fields& copy_fields)
BdaBuffer::BdaBuffer(const BdaBuffer& other, const common::Fields& fields)
: data_(),
flags_(),
weights_(),
full_res_flags_(),
rows_(),
rows_(other.rows_),
original_capacity_(other.original_capacity_ - other.remaining_capacity_),
remaining_capacity_(0) {
if (fields.data) {
if (copy_fields.data) data_ = other.data_;
data_.resize(original_capacity_);
// aocommon::UVector ensures there is no remaining capacity in a vector copy.
// The assertions below check that aocommon::UVector still has this behavior.
if (fields.Data()) {
data_ = other.data_; // Copy all data buffers.
data_[""]; // Add main data buffer if absent.
// Resize all data buffers.
for (auto it = data_.begin(); it != data_.end(); ++it) {
it->second.resize(original_capacity_);
assert(it->second.capacity() == it->second.size());
}
}
if (fields.flags) {
if (copy_fields.flags) flags_ = other.flags_;
if (fields.Flags()) {
flags_ = other.flags_;
flags_.resize(original_capacity_);
assert(flags_.capacity() == flags_.size());
}
if (fields.weights) {
if (copy_fields.weights) weights_ = other.weights_;
if (fields.Weights()) {
weights_ = other.weights_;
weights_.resize(original_capacity_);
assert(weights_.capacity() == weights_.size());
}
if (fields.full_res_flags) {
if (copy_fields.full_res_flags) full_res_flags_ = other.full_res_flags_;
full_res_flags_.resize(original_capacity_);
}
CopyRows(other.rows_);
}
void BDABuffer::CopyRows(const std::vector<BDABuffer::Row>& existing_rows) {
std::complex<float>* row_data = data_.empty() ? nullptr : data_.data();
bool* row_flags = flags_.empty() ? nullptr : flags_.data();
float* row_weights = weights_.empty() ? nullptr : weights_.data();
bool* row_full_res_flags =
full_res_flags_.empty() ? nullptr : full_res_flags_.data();
// Note: 'existing_rows' can reference 'rows_' !
std::vector<Row> new_rows;
new_rows.reserve(existing_rows.size());
for (const Row& row : existing_rows) {
new_rows.emplace_back(row.time, row.interval, row.exposure, row.row_nr,
row.baseline_nr, row.n_channels, row.n_correlations,
row_data, row_flags, row_weights, row_full_res_flags,
row.uvw);
const std::size_t row_size = row.GetDataSize();
if (row_data) row_data += row_size;
if (row_flags) row_flags += row_size;
if (row_weights) row_weights += row_size;
if (row_full_res_flags) row_full_res_flags += row_size;
}
rows_ = std::move(new_rows);
}
void BDABuffer::SetFields(const Fields& fields) {
if ((fields.data == !data_.empty()) && (fields.flags == !flags_.empty()) &&
(fields.weights == !weights_.empty()) &&
(fields.full_res_flags == !full_res_flags_.empty())) {
return;
}
if (fields.data) {
data_.resize(original_capacity_);
} else {
data_.clear();
data_.shrink_to_fit();
void BdaBuffer::Clear() {
for (auto data_it = data_.begin(); data_it != data_.end(); ++data_it) {
data_it->second.clear();
}
if (fields.flags) {
flags_.resize(original_capacity_);
} else {
flags_.clear();
flags_.shrink_to_fit();
}
if (fields.weights) {
weights_.resize(original_capacity_);
} else {
weights_.clear();
weights_.shrink_to_fit();
}
if (fields.full_res_flags) {
full_res_flags_.resize(original_capacity_);
} else {
full_res_flags_.clear();
full_res_flags_.shrink_to_fit();
}
CopyRows(rows_);
}
void BDABuffer::Clear() {
data_.clear();
flags_.clear();
weights_.clear();
full_res_flags_.clear();
rows_.clear();
remaining_capacity_ = original_capacity_;
}
std::size_t BDABuffer::GetNumberOfElements() const {
return original_capacity_ - remaining_capacity_;
}
bool BDABuffer::AddRow(double time, double interval, double exposure,
bool BdaBuffer::AddRow(double time, double interval, double exposure,
std::size_t baseline_nr, std::size_t n_channels,
std::size_t n_correlations,
const std::complex<float>* const data,
const bool* const flags, const float* const weights,
const bool* const full_res_flags,
const double* const uvw) {
if (!rows_.empty() &&
TimeIsLessEqual(time + interval / 2,
......@@ -171,21 +103,22 @@ bool BDABuffer::AddRow(double time, double interval, double exposure,
return false;
}
remaining_capacity_ -= n_elements;
std::complex<float>* row_data = nullptr;
bool* row_flags = nullptr;
float* row_weights = nullptr;
bool* row_full_res_flags = nullptr;
if (data_.capacity() > 0) {
row_data = data_.end();
if (data) {
data_.insert(data_.end(), data, data + n_elements);
std::size_t offset = 0;
for (auto data_it = data_.begin(); data_it != data_.end(); ++data_it) {
offset = data_it->second.size();
aocommon::UVector<std::complex<float>>& data_vector = data_it->second;
if (data_it->first == "" && data) {
data_vector.insert(data_vector.end(), data, data + n_elements);
} else {
const float kNaN = std::numeric_limits<float>::quiet_NaN();
data_.insert(data_.end(), n_elements, {kNaN, kNaN});
data_vector.insert(data_vector.end(), n_elements, {0.0, 0.0});
}
}
if (flags_.capacity() > 0) {
row_flags = flags_.end();
if (offset == 0) {
offset = flags_.size();
} else {
assert(offset == flags_.size());
}
if (flags) {
flags_.insert(flags_.end(), flags, flags + n_elements);
} else {
......@@ -193,56 +126,79 @@ bool BDABuffer::AddRow(double time, double interval, double exposure,
}
}
if (weights_.capacity() > 0) {
row_weights = weights_.end();
if (weights) {
weights_.insert(weights_.end(), weights, weights + n_elements);
if (offset == 0) {
offset = weights_.size();
} else {
const float kNaN = std::numeric_limits<float>::quiet_NaN();
weights_.insert(weights_.end(), n_elements, kNaN);
assert(offset == weights_.size());
}
}
if (full_res_flags_.capacity() > 0) {
row_full_res_flags = full_res_flags_.end();
if (full_res_flags) {
full_res_flags_.insert(full_res_flags_.end(), full_res_flags,
full_res_flags + n_elements);
if (weights) {
weights_.insert(weights_.end(), weights, weights + n_elements);
} else {
full_res_flags_.insert(full_res_flags_.end(), n_elements, false);
weights_.insert(weights_.end(), n_elements, 0.0);
}
}
const common::rownr_t row_nr = rows_.empty() ? 0 : rows_.back().row_nr + 1;
rows_.emplace_back(time, interval, exposure, row_nr, baseline_nr, n_channels,
n_correlations, row_data, row_flags, row_weights,
row_full_res_flags, uvw);
n_correlations, offset, uvw);
return true;
}
void BDABuffer::SetBaseRowNr(common::rownr_t row_nr) {
void BdaBuffer::SetBaseRowNr(common::rownr_t row_nr) {
for (Row& row : rows_) {
row.row_nr = row_nr;
++row_nr;
}
}
bool BDABuffer::Row::IsMetadataEqual(const BDABuffer::Row& other) const {
/// Return a pointer to the start of the (read-only) data buffer with the
/// given name, or nullptr when that buffer is absent or empty.
const std::complex<float>* BdaBuffer::GetData(const std::string& name) const {
  const auto entry = data_.find(name);
  if (entry == data_.end() || entry->second.empty()) return nullptr;
  return entry->second.data();
}
/// Return a pointer to the start of the (mutable) data buffer with the
/// given name, or nullptr when that buffer is absent or empty.
std::complex<float>* BdaBuffer::GetData(const std::string& name) {
  const auto entry = data_.find(name);
  if (entry == data_.end() || entry->second.empty()) return nullptr;
  return entry->second.data();
}
/// Return a read-only pointer to the data of the given row inside the named
/// buffer, or nullptr when that buffer is absent or empty.
const std::complex<float>* BdaBuffer::GetData(std::size_t row,
                                              const std::string& name) const {
  if (const std::complex<float>* buffer = GetData(name)) {
    return buffer + rows_[row].offset;
  }
  return nullptr;
}
/// Return a mutable pointer to the data of the given row inside the named
/// buffer, or nullptr when that buffer is absent or empty.
std::complex<float>* BdaBuffer::GetData(std::size_t row,
                                        const std::string& name) {
  if (std::complex<float>* buffer = GetData(name)) {
    return buffer + rows_[row].offset;
  }
  return nullptr;
}
bool BdaBuffer::Row::IsMetadataEqual(const BdaBuffer::Row& other) const {
for (std::size_t i = 0; i < 3; ++i) {
if (std::isnan(uvw[i])) {
if (!std::isnan(other.uvw[i])) return false;
} else {
if (!BDABuffer::TimeIsEqual(uvw[i], other.uvw[i])) return false;
if (!BdaBuffer::TimeIsEqual(uvw[i], other.uvw[i])) return false;
}
}
return ((BDABuffer::TimeIsEqual(time, other.time)) &&
(BDABuffer::TimeIsEqual(interval, other.interval)) &&
(BDABuffer::TimeIsEqual(exposure, other.exposure)) &&
return ((BdaBuffer::TimeIsEqual(time, other.time)) &&
(BdaBuffer::TimeIsEqual(interval, other.interval)) &&
(BdaBuffer::TimeIsEqual(exposure, other.exposure)) &&
(row_nr == other.row_nr) && (baseline_nr == other.baseline_nr) &&
(n_channels == other.n_channels) &&
(n_correlations == other.n_correlations));
}
bool BDABuffer::IsMetadataEqual(const BDABuffer& other) const {
bool BdaBuffer::IsMetadataEqual(const BdaBuffer& other) const {
if (GetRows().size() == other.GetRows().size()) {
auto this_row = GetRows().begin();
auto other_row = other.GetRows().begin();
......
......@@ -30,7 +30,28 @@ CalType StringToCalType(const std::string& mode);
/// Convert CalType to a string
std::string ToString(CalType caltype);
/// Number of polarization values per solution for a calibration type:
/// 1 for the scalar and TEC modes, 2 for the diagonal modes and 4 for the
/// full-Jones and rotation modes. Returns 0 for an out-of-range enum value.
constexpr size_t GetNPolarizations(CalType cal_type) {
  switch (cal_type) {
    case CalType::kScalar:
    case CalType::kScalarAmplitude:
    case CalType::kScalarPhase:
    case CalType::kTecAndPhase:
    case CalType::kTec:
    case CalType::kTecScreen:
      return 1;
    case CalType::kDiagonal:
    case CalType::kDiagonalPhase:
    case CalType::kDiagonalAmplitude:
      return 2;
    case CalType::kFullJones:
    case CalType::kRotationAndDiagonal:
    case CalType::kRotation:
      return 4;
  }
  // Unreachable for valid enum values; keeps the function well-defined
  // (and silences compiler warnings) for invalid ones.
  return 0;
}
} // namespace base
} // namespace dp3
#endif // DP3_CALTYPE_H
\ No newline at end of file
#endif // DP3_CALTYPE_H
......@@ -6,6 +6,7 @@
#include <dp3/base/DP3.h>
#include <aocommon/checkblas.h>
#include <aocommon/logger.h>
#include <aocommon/threadpool.h>
......@@ -15,6 +16,7 @@
#include <dp3/base/DPInfo.h>
#include "ProgressMeter.h"
#include "SkyModelCache.h"
#include "Version.h"
#include "../steps/AntennaFlagger.h"
#include "../steps/AOFlaggerStep.h"
......@@ -22,7 +24,7 @@
#include "../steps/ApplyCal.h"
#include "../steps/Averager.h"
#include "../steps/BDAAverager.h"
#include "../steps/BDAExpander.h"
#include "../steps/BdaExpander.h"
#include "../steps/BdaGroupPredict.h"
#include "../steps/Clipper.h"
#include "../steps/Counter.h"
......@@ -40,6 +42,7 @@
#include "../steps/MsColumnReader.h"
#include "../steps/MSUpdater.h"
#include "../steps/MSWriter.h"
#include "../steps/WSCleanWriter.h"
#include "../steps/NullStep.h"
#include "../steps/NullStokes.h"
#include "../steps/PhaseShift.h"
......@@ -52,6 +55,8 @@
#include "../steps/StationAdder.h"
#include "../steps/UVWFlagger.h"
#include "../steps/Upsample.h"
#include "../steps/WGridderPredict.h"
#include "../steps/FlagTransfer.h"
#include "../pythondp3/PyStep.h"
......@@ -157,9 +162,9 @@ std::shared_ptr<Step> MakeSingleStep(const std::string& type,
} else if (type == "averager" || type == "average" || type == "squash") {
step = std::make_shared<steps::Averager>(parset, prefix);
} else if (type == "bdaaverage" || type == "bdaaverager") {
step = std::make_shared<steps::BDAAverager>(parset, prefix);
step = std::make_shared<steps::BdaAverager>(parset, prefix);
} else if (type == "bdaexpander") {
step = std::make_shared<steps::BDAExpander>(prefix);
step = std::make_shared<steps::BdaExpander>(prefix);
} else if (type == "madflagger" || type == "madflag") {
step = std::make_shared<steps::MadFlagger>(parset, prefix);
} else if (type == "preflagger" || type == "preflag") {
......@@ -194,6 +199,8 @@ std::shared_ptr<Step> MakeSingleStep(const std::string& type,
step = std::make_shared<steps::NullStokes>(parset, prefix);
} else if (type == "predict") {
step = std::make_shared<steps::Predict>(parset, prefix, inputType);
} else if (type == "wgridderpredict") {
step = std::make_shared<steps::WGridderPredict>(parset, prefix);
} else if (type == "idgpredict") {
step = std::make_shared<steps::IDGPredict>(parset, prefix);
} else if (type == "idgimager") {
......@@ -222,6 +229,10 @@ std::shared_ptr<Step> MakeSingleStep(const std::string& type,
}
} else if (type == "null") {
step = std::make_shared<steps::NullStep>();
} else if (type == "wscleanwriter") {
step = std::make_shared<steps::WSCleanWriter>(parset, prefix);
} else if (type == "flagtransfer") {
step = std::make_shared<steps::FlagTransfer>(parset, prefix);
}
return step;
}
......@@ -258,7 +269,7 @@ dp3::common::Fields SetChainProvidedFields(std::shared_ptr<Step> first_step,
return provided_fields;
}
void Execute(const string& parsetName, int argc, char* argv[]) {
void Execute(const std::string& parsetName, int argc, char* argv[]) {
casacore::Timer timer;
common::NSTimer nstimer;
nstimer.start();
......@@ -271,8 +282,10 @@ void Execute(const string& parsetName, int argc, char* argv[]) {
// Immediately initialize logger such that output will follow requested
// verbosity
aocommon::Logger::SetVerbosity(aocommon::StringToLogVerbosityLevel(
parset.getString("verbosity", "normal")));
if (parset.isDefined("verbosity")) {
aocommon::Logger::SetVerbosity(aocommon::StringToLogVerbosityLevel(
parset.getString("verbosity", "normal")));
}
aocommon::Logger::SetLogTime(parset.getBool("time_logging", false));
aocommon::Logger::SetLogMemory(parset.getBool("memory_logging", false));
......@@ -320,8 +333,9 @@ void Execute(const string& parsetName, int argc, char* argv[]) {
aocommon::Logger::Warn
<< "\n*** WARNING: the following parset keywords were not used ***"
<< "\n maybe they are misspelled"
<< "\n ";
for (const std::string& s : unused) aocommon::Logger::Warn << s;
<< "\n";
for (const std::string& s : unused)
aocommon::Logger::Warn << " - " << s << '\n';
aocommon::Logger::Warn << '\n';
if (checkparset != 0)
throw std::runtime_error("Unused parset keywords found");
......@@ -387,6 +401,60 @@ void Execute(const string& parsetName, int argc, char* argv[]) {
// The destructors are called automatically at this point.
}
/// Print command-line usage information for the DP3 executable.
void ShowUsage() {
  // The full help text, emitted in a single stream operation.
  static const char kUsage[] =
      "Usage: DP3 [-v] [parsetfile] [parsetkeys...]\n"
      " parsetfile: a file containing one parset key=value pair per line\n"
      " parsetkeys: any number of parset key=value pairs, e.g. msin=my.MS\n\n"
      "If both a file and command-line keys are specified, the keys on the "
      "command\n"
      "line override those in the file.\n"
      "If no arguments are specified, the program tries to read "
      "\"DP3.parset\",\n"
      "\"NDPPP.parset\" or \"DPPP.parset\" as a default.\n"
      "-v will show version info and exit.\n"
      "Documentation is at: https://dp3.readthedocs.io\n";
  aocommon::Logger::Info << kUsage;
}
/// Entry point for running DP3 from the command line.
/// Handles the help/version flags, determines the parset file name (either
/// the first command-line argument or one of the default file names) and
/// then runs the step chain via Execute().
/// Consistency fix: use std::string (matching the rest of the file) instead
/// of the unqualified 'string', and convert argv[1] only once.
void ExecuteFromCommandLine(int argc, char* argv[]) {
  check_openblas_multithreading();

  // The first command-line argument, or empty when none was given.
  const std::string first_arg = (argc > 1) ? argv[1] : "";

  // Handle flags that bypass normal processing.
  if (first_arg == "--help" || first_arg == "-help" || first_arg == "-h" ||
      first_arg == "--usage" || first_arg == "-usage") {
    ShowUsage();
    return;
  }
  if (first_arg == "-v" || first_arg == "--version") {
    aocommon::Logger::Info << DP3Version::AsString(true) << '\n';
    return;
  }

  std::string parsetName;
  if (argc > 1 && first_arg.find('=') == std::string::npos) {
    // First argument is parset name (except if it's a key-value pair).
    parsetName = first_arg;
  } else if (argc == 1) {
    // No arguments given: try to load [N]DPPP.parset
    if (std::filesystem::exists("DP3.parset")) {
      parsetName = "DP3.parset";
    } else if (std::filesystem::exists("DPPP.parset")) {
      parsetName = "DPPP.parset";
    } else if (std::filesystem::exists("NDPPP.parset")) {
      parsetName = "NDPPP.parset";
    } else {  // No default file, show usage and exit
      ShowUsage();
      return;
    }
  }

  // Execute the parset file.
  Execute(parsetName, argc, argv);
}
std::shared_ptr<InputStep> MakeMainSteps(const common::ParameterSet& parset) {
std::shared_ptr<InputStep> input_step = InputStep::CreateReader(parset);
std::shared_ptr<Step> last_step = input_step;
......@@ -446,7 +514,7 @@ std::shared_ptr<Step> MakeStepsFromParset(const common::ParameterSet& parset,
bool terminateChain,
Step::MsType initial_step_output) {
std::string msName = input_ms_name;
const std::vector<string> stepNames =
const std::vector<std::string> stepNames =
parset.getStringVector(prefix + step_names_key);
std::shared_ptr<Step> firstStep;
......
......@@ -8,6 +8,7 @@
#include <algorithm>
#include <cassert>
#include <regex>
#include <casacore/casa/version.h>
#include <casacore/casa/BasicSL/Complexfwd.h>
......
// DPInfo.cc: General info about DPPP data processing attributes like averaging
// Copyright (C) 2020 ASTRON (Netherlands Institute for Radio Astronomy)
// SPDX-License-Identifier: GPL-3.0-or-later
//
......@@ -31,7 +30,7 @@ namespace dp3 {
namespace base {
DPInfo::DPInfo(unsigned int n_correlations, unsigned int original_n_channels,
unsigned int start_channel, std::string antenna_set)
std::string antenna_set)
: meta_changed_(false),
ms_name_(),
data_column_name_(MS::columnName(MS::DATA)),
......@@ -39,7 +38,7 @@ DPInfo::DPInfo(unsigned int n_correlations, unsigned int original_n_channels,
weight_column_name_(MS::columnName(MS::WEIGHT_SPECTRUM)),
antenna_set_(std::move(antenna_set)),
n_correlations_(n_correlations),
start_channel_(start_channel),
start_channel_(0),
original_n_channels_(original_n_channels),
n_channels_(original_n_channels),
channel_averaging_factor_(1),
......@@ -54,7 +53,8 @@ DPInfo::DPInfo(unsigned int n_correlations, unsigned int original_n_channels,
resolutions_(1), // can retrieve a first list.
effective_bandwidth_(1),
total_bandwidth_(0.0),
spectral_window_(0) {}
spectral_window_(0),
polarizations_() {}
void DPInfo::setTimes(double first_time, double last_time,
double time_interval) {
......@@ -354,9 +354,10 @@ void DPInfo::update(std::vector<unsigned int>&& timeAvg) {
time_averaging_factors_ = std::move(timeAvg);
}
void DPInfo::update(unsigned int startChan, unsigned int nchan,
const std::vector<unsigned int>& baselines,
bool removeAnt) {
void DPInfo::SelectChannels(unsigned int start_channel,
unsigned int n_channels) {
if (start_channel == 0 && n_channels == n_channels_) return;
if (channel_frequencies_.size() != 1) {
throw std::runtime_error("Channel selection does not support BDA");
}
......@@ -372,45 +373,46 @@ void DPInfo::update(unsigned int startChan, unsigned int nchan,
effective_bandwidth_.front().size() &&
"The number of elements of the channel frequencies and effective "
"bandwidths should be equal.");
if (startChan + nchan > channel_frequencies_.front().size()) {
if (start_channel + n_channels > channel_frequencies_.front().size()) {
throw std::invalid_argument("Channel range is out of bounds.");
}
start_channel_ = startChan;
auto freqs_begin = channel_frequencies_.front().begin() + startChan;
auto widths_begin = channel_widths_.front().begin() + startChan;
auto resol_begin = resolutions_.front().begin() + startChan;
auto effbw_begin = effective_bandwidth_.front().begin() + startChan;
auto freqs_begin = channel_frequencies_.front().begin() + start_channel;
auto widths_begin = channel_widths_.front().begin() + start_channel;
auto resol_begin = resolutions_.front().begin() + start_channel;
auto effbw_begin = effective_bandwidth_.front().begin() + start_channel;
channel_frequencies_.front() =
std::vector<double>(freqs_begin, freqs_begin + nchan);
std::vector<double>(freqs_begin, freqs_begin + n_channels);
channel_widths_.front() =
std::vector<double>(widths_begin, widths_begin + nchan);
resolutions_.front() = std::vector<double>(resol_begin, resol_begin + nchan);
std::vector<double>(widths_begin, widths_begin + n_channels);
resolutions_.front() =
std::vector<double>(resol_begin, resol_begin + n_channels);
effective_bandwidth_.front() =
std::vector<double>(effbw_begin, effbw_begin + nchan);
n_channels_ = nchan;
// Keep only selected baselines.
if (!baselines.empty()) {
std::vector<int> ant1(baselines.size());
std::vector<int> ant2(baselines.size());
for (unsigned int i = 0; i < baselines.size(); ++i) {
ant1[i] = antenna1_[baselines[i]];
ant2[i] = antenna2_[baselines[i]];
}
antenna1_ = std::move(ant1);
antenna2_ = std::move(ant2);
// Clear; they'll be recalculated if needed.
baseline_lengths_.clear();
auto_correlation_indices_.clear();
std::vector<double>(effbw_begin, effbw_begin + n_channels);
// Add the new start channel to an existing start_channel, so MSUpdater can
// still update the correct channel(s) in the original input MS.
start_channel_ += start_channel;
n_channels_ = n_channels;
}
void DPInfo::SelectBaselines(const std::vector<unsigned int>& baselines) {
std::vector<int> ant1(baselines.size());
std::vector<int> ant2(baselines.size());
for (unsigned int i = 0; i < baselines.size(); ++i) {
ant1[i] = antenna1_[baselines[i]];
ant2[i] = antenna2_[baselines[i]];
}
antenna1_ = std::move(ant1);
antenna2_ = std::move(ant2);
// Clear; they'll be recalculated if needed.
baseline_lengths_.clear();
auto_correlation_indices_.clear();
setAntUsed();
// If needed, remove the stations and renumber the baselines.
if (removeAnt) {
removeUnusedAnt();
}
}
void DPInfo::removeUnusedAnt() {
void DPInfo::RemoveUnusedAntennas() {
if (antennas_used_.size() < antenna_map_.size()) {
// First remove stations.
std::vector<std::string> names(antennas_used_.size());
......@@ -431,6 +433,9 @@ void DPInfo::removeUnusedAnt() {
antenna1_[i] = antenna_map_[antenna1_[i]];
antenna2_[i] = antenna_map_[antenna2_[i]];
}
setMetaChanged();
// Now fill the antennas_used_ and antenna_map_ vectors again.
setAntUsed();
// Clear; they'll be recalculated if needed.
......@@ -439,6 +444,15 @@ void DPInfo::removeUnusedAnt() {
}
}
const std::vector<std::string> DPInfo::GetUsedAntennaNames() const {
std::vector<std::string> used_antenna_names;
used_antenna_names.reserve(antennas_used_.size());
for (size_t used_antenna : antennas_used_) {
used_antenna_names.emplace_back(antenna_names_[used_antenna]);
}
return used_antenna_names;
}
const std::vector<double>& DPInfo::getBaselineLengths() const {
// Calculate the baseline lengths if not done yet.
if (baseline_lengths_.empty()) {
......
......@@ -70,6 +70,8 @@ bool estimate(size_t nDirection, size_t nStation, size_t nBaseline,
/// \param[in] robust_nu
/// Robust noise model degrees of freedom, when >30, it is almost Gaussian
/// The remaining input variables are similar to estimate() method above.
/// \param[in] sol_min, sol_max: if a valid range is given, restrict the
/// solutions to fit this range using bound constrained LBFGS
bool estimate(std::size_t n_direction, std::size_t n_station,
std::size_t n_baseline, std::size_t n_channel,
const_cursor<Baseline> baselines,
......@@ -77,8 +79,8 @@ bool estimate(std::size_t n_direction, std::size_t n_station,
std::vector<const_cursor<std::complex<double>>> model,
const_cursor<bool> flag, const_cursor<float> weight,
const_cursor<std::complex<double>> mix, double* unknowns,
std::size_t lbfgs_mem, double robust_nu,
std::size_t max_iter = 50);
std::size_t lbfgs_mem, double robust_nu, const double sol_min,
const double sol_max, std::size_t max_iter = 50);
#endif /* HAVE_LIBDIRAC */
/// Compute a map that contains the index of the unknowns related to the
......
......@@ -463,16 +463,27 @@ bool estimate(std::size_t n_direction, std::size_t n_station,
std::vector<const_cursor<std::complex<double>>> model,
const_cursor<bool> flag, const_cursor<float> weight,
const_cursor<std::complex<double>> mix, double *unknowns,
std::size_t lbfgs_mem, double robust_nu, std::size_t max_iter) {
std::size_t lbfgs_mem, double robust_nu, double sol_min,
double sol_max, std::size_t max_iter) {
LBFGSData t(n_direction, n_station, n_baseline, n_channel, baselines, data,
model, flag, weight, mix, robust_nu);
/* for full batch mode, last argument is NULL */
int retval = 0;
/* LBFGS memory size lbfgs_mem */
const int retval =
lbfgs_fit(cost_func, grad_func, unknowns, n_direction * n_station * 4 * 2,
max_iter, lbfgs_mem, (void *)&t, nullptr);
if (sol_min < sol_max) {
/* check to see if we have a valid range [sol_min,sol_max] to
* restrict solutions */
std::vector<double> lower_bound(n_direction * n_station * 4 * 2, sol_min);
std::vector<double> upper_bound(n_direction * n_station * 4 * 2, sol_max);
retval = lbfgsb_fit(cost_func, grad_func, unknowns, lower_bound.data(),
upper_bound.data(), n_direction * n_station * 4 * 2,
max_iter, lbfgs_mem, (void *)&t, nullptr);
} else {
retval = lbfgs_fit(cost_func, grad_func, unknowns,
n_direction * n_station * 4 * 2, max_iter, lbfgs_mem,
(void *)&t, nullptr);
}
return retval == 0;
}
#endif /* HAVE_LIBDIRAC */
......
......@@ -37,14 +37,14 @@ static std::string MakeSaveFilename(std::string path,
const std::string& ms_name,
std::string suffix) {
// Use the step name (without dot) as a name suffix.
string::size_type pos = suffix.find('.');
if (pos != string::npos) {
std::string::size_type pos = suffix.find('.');
if (pos != std::string::npos) {
suffix.resize(pos);
}
// If no path is given, use the path of the name (use . if no path).
pos = ms_name.rfind('/');
if (path.empty()) {
if (pos == string::npos) {
if (pos == std::string::npos) {
path = '.';
} else {
path = ms_name.substr(0, pos);
......@@ -52,14 +52,14 @@ static std::string MakeSaveFilename(std::string path,
}
std::string name = ms_name.substr(pos + 1);
pos = name.find('.');
if (pos != string::npos) {
if (pos != std::string::npos) {
name = name.substr(0, pos);
}
return path + '/' + name + '_' + suffix + ".flag";
}
FlagCounter::FlagCounter(const common::ParameterSet& parset,
const string& prefix)
const std::string& prefix)
: warning_percentage_(parset.getDouble(prefix + "warnperc", 0)),
show_fully_flagged_(parset.getBool(prefix + "showfullyflagged", false)),
save_(parset.getBool(prefix + "save", false)),
......
......@@ -6,8 +6,8 @@
/// @brief Class to keep counts of nr of flagged points
/// @author Ger van Diepen
#ifndef DPPP_FLAGCOUNTER_H
#define DPPP_FLAGCOUNTER_H
#ifndef DP3_FLAGCOUNTER_H_
#define DP3_FLAGCOUNTER_H_
#include <casacore/casa/Arrays/Vector.h>
......@@ -24,9 +24,9 @@ class DPInfo;
/// @brief Class to keep counts of nr of flagged points
/// This class contains counts the number of flags.
/// This class contains counts of the number of flags.
/// The flags can be counted per baseline, channel, and correlation.
/// Once the counting is completed, they can be printed using the 'show'
/// Once counting completes, the counts can be printed using the 'show'
/// functions. When printing, the baselines counts are shown per antenna.
/// Optionally the flagging percentages can be saved in a table.
/// The name of the table is the MS name suffixed by the step name and
......