From ca6dea64be6a9648b3e9832c1d92e5fe9f4f5d13 Mon Sep 17 00:00:00 2001
From: Jan David Mol <mol@astron.nl>
Date: Thu, 11 Jun 2020 11:28:52 +0000
Subject: [PATCH] Removed imaging pipelines that were based on the deprecated
 AWImager.

---
 CEP/Pipeline/recipes/sip/bin/CMakeLists.txt   |   3 -
 .../recipes/sip/bin/imaging_pipeline.py       | 609 ------------
 .../recipes/sip/bin/msss_imager_pipeline.py   | 619 -------------
 .../sip/bin/selfcal_imager_pipeline.py        | 871 ------------------
 .../recipes/sip/master/imager_awimager.py     | 203 ----
 CEP/Pipeline/recipes/sip/master/imager_bbs.py | 160 ----
 .../recipes/sip/master/imager_create_dbs.py   | 297 ------
 .../recipes/sip/master/imager_finalize.py     | 167 ----
 .../recipes/sip/master/imager_prepare.py      | 352 -------
 .../sip/master/imager_source_finding.py       | 164 ----
 .../recipes/sip/master/selfcal_awimager.py    | 206 -----
 .../recipes/sip/master/selfcal_bbs.py         | 170 ----
 .../recipes/sip/master/selfcal_finalize.py    | 207 -----
 .../recipes/sip/nodes/imager_awimager.py      | 569 ------------
 CEP/Pipeline/recipes/sip/nodes/imager_bbs.py  |  77 --
 .../recipes/sip/nodes/imager_create_dbs.py    | 536 -----------
 .../recipes/sip/nodes/imager_finalize.py      | 220 -----
 .../recipes/sip/nodes/imager_prepare.py       | 372 --------
 .../sip/nodes/imager_source_finding.py        | 275 ------
 .../recipes/sip/nodes/selfcal_awimager.py     | 820 -----------------
 CEP/Pipeline/recipes/sip/nodes/selfcal_bbs.py | 118 ---
 .../recipes/sip/nodes/selfcal_finalize.py     | 197 ----
 CEP/Pipeline/recipes/sip/tasks.cfg.CEP4.in    |  11 -
 CEP/Pipeline/recipes/sip/tasks.cfg.in         |  55 --
 24 files changed, 7278 deletions(-)
 delete mode 100755 CEP/Pipeline/recipes/sip/bin/imaging_pipeline.py
 delete mode 100755 CEP/Pipeline/recipes/sip/bin/msss_imager_pipeline.py
 delete mode 100644 CEP/Pipeline/recipes/sip/bin/selfcal_imager_pipeline.py
 delete mode 100644 CEP/Pipeline/recipes/sip/master/imager_awimager.py
 delete mode 100644 CEP/Pipeline/recipes/sip/master/imager_bbs.py
 delete mode 100644 CEP/Pipeline/recipes/sip/master/imager_create_dbs.py
 delete mode 100644 CEP/Pipeline/recipes/sip/master/imager_finalize.py
 delete mode 100644 CEP/Pipeline/recipes/sip/master/imager_prepare.py
 delete mode 100644 CEP/Pipeline/recipes/sip/master/imager_source_finding.py
 delete mode 100644 CEP/Pipeline/recipes/sip/master/selfcal_awimager.py
 delete mode 100644 CEP/Pipeline/recipes/sip/master/selfcal_bbs.py
 delete mode 100644 CEP/Pipeline/recipes/sip/master/selfcal_finalize.py
 delete mode 100644 CEP/Pipeline/recipes/sip/nodes/imager_awimager.py
 delete mode 100644 CEP/Pipeline/recipes/sip/nodes/imager_bbs.py
 delete mode 100644 CEP/Pipeline/recipes/sip/nodes/imager_create_dbs.py
 delete mode 100644 CEP/Pipeline/recipes/sip/nodes/imager_finalize.py
 delete mode 100644 CEP/Pipeline/recipes/sip/nodes/imager_prepare.py
 delete mode 100644 CEP/Pipeline/recipes/sip/nodes/imager_source_finding.py
 delete mode 100644 CEP/Pipeline/recipes/sip/nodes/selfcal_awimager.py
 delete mode 100644 CEP/Pipeline/recipes/sip/nodes/selfcal_bbs.py
 delete mode 100644 CEP/Pipeline/recipes/sip/nodes/selfcal_finalize.py

diff --git a/CEP/Pipeline/recipes/sip/bin/CMakeLists.txt b/CEP/Pipeline/recipes/sip/bin/CMakeLists.txt
index 53d89911854..87feab01b85 100644
--- a/CEP/Pipeline/recipes/sip/bin/CMakeLists.txt
+++ b/CEP/Pipeline/recipes/sip/bin/CMakeLists.txt
@@ -3,13 +3,10 @@
 lofar_add_bin_scripts(
   calibration_pipeline.py
   msss_calibrator_pipeline.py
-  msss_imager_pipeline.py
   msss_target_pipeline.py
   preprocessing_pipeline.py
-  imaging_pipeline.py
   pulsar_pipeline.py
   long_baseline_pipeline.py
-  selfcal_imager_pipeline.py
   runPipeline.sh
   startPython.sh
   startPythonVersion.sh
diff --git a/CEP/Pipeline/recipes/sip/bin/imaging_pipeline.py b/CEP/Pipeline/recipes/sip/bin/imaging_pipeline.py
deleted file mode 100755
index 55b9759342a..00000000000
--- a/CEP/Pipeline/recipes/sip/bin/imaging_pipeline.py
+++ /dev/null
@@ -1,609 +0,0 @@
-#!/usr/bin/env python3
-#                                                LOFAR STANDARD IMAGING PIPELINE
-#
-#                                                        Imager Pipeline recipe
-#                                                            Marcel Loose, 2012
-#                                                               loose@astron.nl
-#                                                            Wouter Klijn, 2012
-#                                                               klijn@astron.nl
-# -----------------------------------------------------------------------------
-import os
-import sys
-import copy
-
-from lofarpipe.support.control import control
-from lofarpipe.support.utilities import create_directory
-from lofarpipe.support.lofarexceptions import PipelineException
-from lofarpipe.support.data_map import DataMap, validate_data_maps, MultiDataMap
-from lofarpipe.support.utilities import patch_parset, get_parset
-from lofarpipe.support.loggingdecorators import xml_node, mail_log_on_exception
-from lofarpipe.support.feedback_version import VERSION as feedback_version
-
-from lofar.parameterset import parameterset
-
-class imaging_pipeline(control):
-    """
-    The imaging pipeline is used to generate images and to find sources in
-    the generated images. Generated images and lists of found sources are
-    complemented with meta data and are thus ready for consumption by the
-    Long Term Storage (LTA).
-
-    This pipeline differs from the MSSS imaging pipeline in two respects:
-    1. It does not, by default, perform any automated parameter
-    determination for the awimager.
-    2. It does not output images and source lists to the image server.
-
-    *subband groups*
-    The imager_pipeline is able to generate images over the frequency range
-    of LOFAR in parallel, combining the frequency subbands into so-called
-    subband groups. Each subband group results in an image and a source list
-    (typically 8, because ten subbands are combined per group).
-
-    *Time Slices*
-    Images are compiled from a number of so-called (time) slices. Each
-    slice comprises a short (approx. 10 min) observation of a field (an area on
-    the sky) containing typically 80 subbands. The number of slices will be
-    different for LBA observations (typically 9) and HBA observations
-    (typically 2), due to differences in sensitivity.
-
-    Each image will be compiled on a different cluster node to balance the
-    processing load. The input- and output- files and locations are determined
-    by the scheduler and specified in the parset-file.
-
-    **This pipeline performs the following operations:**
-
-    1. Prepare Phase. Copy the preprocessed MSs from the different compute
-       nodes to the nodes where the images will be compiled. Combine the
-       subbands into subband groups, concatenate the timeslices into a
-       single large measurement set, and perform flagging, RFI excision and
-       bad-station exclusion.
-    2. Create db. Generate a local sky model (LSM) from the global sky model
-       (GSM) for the sources that are in the field-of-view (FoV). The LSM
-       is stored as a sourcedb.
-       In step 3 the measurement sets are calibrated on these sources, and
-       in step 4 the LSM is used to create a mask for the awimager. The
-       calibration solutions are placed in an instrument table/db, which is
-       also created in this step.
-    3. BBS. Calibrate the measurement sets with the sourcedb from the GSM.
-       In later iterations, sources found in the created images are added
-       to this list, resulting in a self-calibration cycle.
-    4. Awimager. The combined measurement sets are now imaged. The imaging
-       is performed using a mask: the sources in the sourcedb are used to
-       create a CASA image masking known sources. Together with the
-       measurement set, an image is created.
-    5. Sourcefinding. The images created in step 4 are fed to pyBDSM to find
-       and describe sources. In multiple iterations, subtracting the found
-       sources, all sources are collected in a source list.
-       The sources found in step 5 are fed back into step 2. This allows
-       the measurement sets to be calibrated with the sources currently
-       found in the image. This loop continues until convergence (3 times
-       for the time being).
-    6. Finalize. Meta data regarding the input, the computations performed
-       and the results are collected and added to the CASA image. The
-       images created are converted from CASA to HDF5 format and copied to
-       the correct output location.
-    7. Export meta data: meta data is generated ready for consumption by
-       the LTA and/or the LOFAR framework.
-
-
-    **Per subband-group, the following output products will be delivered:**
-
-    a. An image
-    b. A source list
-    c. (Calibration solutions and corrected visibilities)
-
-    """
-    def __init__(self):
-        """
-        Initialize member variables and call superclass init function
-        """
-        control.__init__(self)
-        self.input_data = DataMap()
-        self.target_data = DataMap()
-        self.output_data = DataMap()
-        self.scratch_directory = None
-        self.parset_dir = None
-        self.mapfile_dir = None
-
-    @mail_log_on_exception
-    def pipeline_logic(self):
-        """
-        Define the individual tasks that comprise the current pipeline.
-        This method will be invoked by the base-class's `go()` method.
-        """
-        self.logger.info("Starting imager pipeline")
-
-        # Define scratch directory to be used by the compute nodes.
-        self.scratch_directory = os.path.join(
-            self.inputs['working_directory'], self.inputs['job_name'])
-        # Get input/output-data products specifications.
-        self._get_io_product_specs()
-
-        # strip the leading parset identifiers, leaving only the
-        # PythonControl subset
-        full_parset = self.parset
-        self.parset = self.parset.makeSubset(
-            self.parset.fullModuleName('PythonControl') + '.')  # remove this
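-        # after this, keys are addressed relative to the PythonControl
-        # module, e.g. a key ending in "PythonControl.Imaging.fov" is read
-        # below as "Imaging.fov"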
-
-        # Create directories to store communication and data files
-
-        job_dir = self.config.get("layout", "job_directory")
-
-        self.parset_dir = os.path.join(job_dir, "parsets")
-        create_directory(self.parset_dir)
-        self.mapfile_dir = os.path.join(job_dir, "mapfiles")
-        create_directory(self.mapfile_dir)
-
-        # *********************************************************************
-        # (INPUT) Get the input from external sources and create pipeline types
-        # Input measurement sets
-        input_mapfile = os.path.join(self.mapfile_dir, "uvdata.mapfile")
-        self.input_data.save(input_mapfile)
-        # storedata_map(input_mapfile, self.input_data)
-        self.logger.debug(
-            "Wrote input UV-data mapfile: {0}".format(input_mapfile))
-
-        # Provides location for the scratch directory and concat.ms location
-        target_mapfile = os.path.join(self.mapfile_dir, "target.mapfile")
-        self.target_data.save(target_mapfile)
-        self.logger.debug(
-            "Wrote target mapfile: {0}".format(target_mapfile))
-
-        # images datafiles
-        output_image_mapfile = os.path.join(self.mapfile_dir, "images.mapfile")
-        self.output_data.save(output_image_mapfile)
-        self.logger.debug(
-            "Wrote output sky-image mapfile: {0}".format(output_image_mapfile))
-
-        # ******************************************************************
-        # (1) prepare phase: copy and collect the ms
-        concat_ms_map_path, timeslice_map_path, ms_per_image_map_path, \
-            processed_ms_dir = self._prepare_phase(input_mapfile,
-                                    target_mapfile)
-
-        number_of_major_cycles = self.parset.getInt(
-                                    "Imaging.number_of_major_cycles")
-
-        # We start with an empty source_list map. It should contain n_output
-        # entries all set to empty strings
-        source_list_map_path = os.path.join(self.mapfile_dir,
-                                        "initial_sourcelist.mapfile")
-        source_list_map = DataMap.load(target_mapfile) # copy the output map
-        for item in source_list_map:
-            item.file = ""             # set all to empty string
-        source_list_map.save(source_list_map_path)
-
-        for idx_loop in range(number_of_major_cycles):
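-            # each major cycle runs calibration, imaging and source finding;
-            # the class docstring describes feeding the sources found back
-            # into the calibration of the next cycle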
-            # *****************************************************************
-            # (2) Create dbs and sky model
-            parmdbs_path, sourcedb_map_path = self._create_dbs(
-                        concat_ms_map_path, timeslice_map_path,
-                        source_list_map_path = source_list_map_path,
-                        skip_create_dbs = False)
-
-            # *****************************************************************
-            # (3)  bbs_imager recipe.
-            bbs_output = self._bbs(timeslice_map_path, parmdbs_path,
-                        sourcedb_map_path, skip = False)
-
-            # TODO: Extra recipe: concat timeslices using pyrap.concatms
-            # (see prepare)
-
-            # *****************************************************************
-            # (4) Get awimager parameters from the prepare parset and inputs
-            aw_image_mapfile, maxbaseline = self._aw_imager(concat_ms_map_path,
-                        idx_loop, sourcedb_map_path,
-                        skip = False)
-
-            # *****************************************************************
-            # (5) Source finding
-            sourcelist_map, found_sourcedb_path = self._source_finding(
-                    aw_image_mapfile, idx_loop, skip = False)
-            # should the output be a sourcedb? instead of a sourcelist
-
-        # TODO: minbaseline should be a parset value as is maxbaseline..
-        minbaseline = 0
-
-        # *********************************************************************
-        # (6) Finalize:
-        placed_data_image_map = self._finalize(aw_image_mapfile,
-            processed_ms_dir, ms_per_image_map_path, sourcelist_map,
-            minbaseline, maxbaseline, target_mapfile, output_image_mapfile,
-            found_sourcedb_path)
-
-        # *********************************************************************
-        # (7) Get metadata
-        # Create a parset containing the metadata for MAC/SAS
-        metadata_file = "%s_feedback_SkyImage" % (self.parset_file,)
-        self.run_task("get_metadata", placed_data_image_map,
-            parset_prefix = (
-                full_parset.getString('prefix') +
-                full_parset.fullModuleName('DataProducts')
-            ),
-            product_type = "SkyImage",
-            metadata_file = metadata_file)
-
-        self.send_feedback_processing(parameterset({'feedback_version': feedback_version}))
-        self.send_feedback_dataproducts(parameterset(metadata_file))
-
-        return 0
-
-    def _get_io_product_specs(self):
-        """
-        Get input- and output-data product specifications from the
-        parset-file, and do some sanity checks.
-        """
-        dps = self.parset.makeSubset(
-            self.parset.fullModuleName('DataProducts') + '.'
-        )
-        # convert input dataproducts from parset value to DataMap
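-        # each location has the form "host:directory"; joining it with the
-        # filename and splitting on ':' again yields the (host, path, skip)
-        # tuples that DataMap expects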
-        self.input_data = DataMap([
-            tuple(os.path.join(location, filename).split(':')) + (skip,)
-                for location, filename, skip in zip(
-                    dps.getStringVector('Input_Correlated.locations'),
-                    dps.getStringVector('Input_Correlated.filenames'),
-                    dps.getBoolVector('Input_Correlated.skip'))
-        ])
-        self.logger.debug("%d Input_Correlated data products specified" %
-                          len(self.input_data))
-
-        self.output_data = DataMap([
-            tuple(os.path.join(location, filename).split(':')) + (skip,)
-                for location, filename, skip in zip(
-                    dps.getStringVector('Output_SkyImage.locations'),
-                    dps.getStringVector('Output_SkyImage.filenames'),
-                    dps.getBoolVector('Output_SkyImage.skip'))
-        ])
-        self.logger.debug("%d Output_SkyImage data products specified" %
-                          len(self.output_data))
-
-        # # Sanity checks on input- and output data product specifications
-        # if not validate_data_maps(self.input_data, self.output_data):
-        #    raise PipelineException(
-        #        "Validation of input/output data product specification failed!"
-        #    )  # Turned off until DataMap is extended.
-
-        # Target data is basically scratch data, consisting of one concatenated
-        # MS per image. It must be stored on the same host as the final image.
-        self.target_data = copy.deepcopy(self.output_data)
-
-        for idx, item in enumerate(self.target_data):
-            item.file = os.path.join(self.scratch_directory, 'ms_per_image_%d' % idx, 'concat.ms')
-
-
-    @xml_node
-    def _finalize(self, awimager_output_map, processed_ms_dir,
-                  ms_per_image_map, sourcelist_map, minbaseline,
-                  maxbaseline, target_mapfile,
-                  output_image_mapfile, sourcedb_map, skip = False):
-        """
-        Perform the final step of the imager:
-        Convert the output image to hdf5 and copy to output location
-        Collect meta data and add to the image
-        """
-
-        placed_image_mapfile = self._write_datamap_to_file(None,
-             "placed_image")
-        self.logger.debug("Touched mapfile for correctly placed"
-                        " hdf images: {0}".format(placed_image_mapfile))
-
-        if skip:
-            return placed_image_mapfile
-        else:
-            # run the imager_finalize recipe
-            placed_image_mapfile = self.run_task("imager_finalize",
-                target_mapfile, awimager_output_map = awimager_output_map,
-                    ms_per_image_map = ms_per_image_map,
-                    sourcelist_map = sourcelist_map,
-                    sourcedb_map = sourcedb_map,
-                    minbaseline = minbaseline,
-                    maxbaseline = maxbaseline,
-                    target_mapfile = target_mapfile,
-                    output_image_mapfile = output_image_mapfile,
-                    processed_ms_dir = processed_ms_dir,
-                    placed_image_mapfile = placed_image_mapfile
-                    )["placed_image_mapfile"]
-
-        return placed_image_mapfile
-
-    @xml_node
-    def _source_finding(self, image_map_path, major_cycle, skip = True):
-        """
-        Perform the sourcefinding step
-        """
-        # Create the parsets for the different sourcefinder runs
-        bdsm_parset_pass_1 = self.parset.makeSubset("BDSM[0].")
-        parset_path_pass_1 = self._write_parset_to_file(bdsm_parset_pass_1,
-                "pybdsm_first_pass.par", "Sourcefinder first pass parset.")
-
-        bdsm_parset_pass_2 = self.parset.makeSubset("BDSM[1].")
-        parset_path_pass_2 = self._write_parset_to_file(bdsm_parset_pass_2,
-                "pybdsm_second_pass.par", "sourcefinder second pass parset")
-
-        # touch a mapfile to be filled with created sourcelists
-        source_list_map = self._write_datamap_to_file(None,
-             "source_finding_outputs",
-             "map to sourcefinding outputs (sourcelist)")
-        sourcedb_map_path = self._write_datamap_to_file(None,
-             "source_dbs_outputs", "Map to sourcedbs based in found sources")
-
-        # construct the location to save the output products of the
-        # sourcefinder
-        cycle_path = os.path.join(self.scratch_directory,
-                                  "awimage_cycle_{0}".format(major_cycle))
-        catalog_path = os.path.join(cycle_path, "bdsm_catalog")
-        sourcedb_path = os.path.join(cycle_path, "bdsm_sourcedb")
-
-        # Run the sourcefinder
-        if skip:
-            return source_list_map, sourcedb_map_path
-        else:
-            self.run_task("imager_source_finding",
-                        image_map_path,
-                        bdsm_parset_file_run1 = parset_path_pass_1,
-                        bdsm_parset_file_run2x = parset_path_pass_2,
-                        working_directory = self.scratch_directory,
-                        catalog_output_path = catalog_path,
-                        mapfile = source_list_map,
-                        sourcedb_target_path = sourcedb_path,
-                        sourcedb_map_path = sourcedb_map_path
-                         )
-
-            return source_list_map, sourcedb_map_path
-
-    @xml_node
-    def _bbs(self, timeslice_map_path, parmdbs_map_path, sourcedb_map_path,
-              skip = False):
-        """
-        Perform a calibration step. First with a set of sources from the
-        gsm and in later iterations also on the found sources
-        """
-        # create parset for bbs run
-        parset = self.parset.makeSubset("BBS.")
-        parset_path = self._write_parset_to_file(parset, "bbs",
-                        "Parset for calibration with a local sky model")
-
-        # create the output file path
-        output_mapfile = self._write_datamap_to_file(None, "bbs_output",
-                        "Mapfile with calibrated measurement sets.")
-
-        converted_sourcedb_map_path = self._write_datamap_to_file(None,
-                "source_db", "correctly shaped mapfile for input sourcedbs")
-
-        if skip:
-            return output_mapfile
-
-        # The create db step produces a mapfile with a single sourcelist for
-        # the different timeslices. Generate a mapfile with copies of the
-        # sourcelist location: this allows validation of the maps in
-        # combination. Get the original map data:
-        sourcedb_map = DataMap.load(sourcedb_map_path)
-        parmdbs_map = MultiDataMap.load(parmdbs_map_path)
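-        # a MultiDataMap entry holds a list of files per host (here one
-        # parmdb per timeslice), whereas a DataMap entry holds a single file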
-        converted_sourcedb_map = []
-
-        # sanity check for correct output from previous recipes
-        if not validate_data_maps(sourcedb_map, parmdbs_map):
-            self.logger.error("The input files for bbs do not contain "
-                                "matching host names for each entry content:")
-            self.logger.error(repr(sourcedb_map))
-            self.logger.error(repr(parmdbs_map))
-            raise PipelineException("Invalid input data for imager_bbs recipe")
-
-        self.run_task("imager_bbs",
-                      timeslice_map_path,
-                      parset = parset_path,
-                      instrument_mapfile = parmdbs_map_path,
-                      sourcedb_mapfile = sourcedb_map_path,
-                      mapfile = output_mapfile,
-                      working_directory = self.scratch_directory)
-
-        return output_mapfile
-
-    @xml_node
-    def _aw_imager(self, prepare_phase_output, major_cycle, sky_path,
-                   skip = False):
-        """
-        Create an image based on the calibrated, filtered and combined data.
-        """
-        # Create parset for the awimager recipe
-        parset = self.parset.makeSubset("AWimager.")
-        # Get maxbaseline from 'full' parset
-        max_baseline = self.parset.getInt("Imaging.maxbaseline")
-        patch_dictionary = {"maxbaseline": str(
-                                    max_baseline)}
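-        # patch_parset writes a patched copy of the parset to a temporary
-        # file; the finally clause below removes it again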
-        try:
-            temp_parset_filename = patch_parset(parset, patch_dictionary)
-            aw_image_parset = get_parset(temp_parset_filename)
-            aw_image_parset_path = self._write_parset_to_file(aw_image_parset,
-                "awimager_cycle_{0}".format(major_cycle),
-                "Awimager recipe parset")
-        finally:
-            # remove tempfile
-            os.remove(temp_parset_filename)
-
-        # Create path to write the awimage files
-        intermediate_image_path = os.path.join(self.scratch_directory,
-            "awimage_cycle_{0}".format(major_cycle), "image")
-
-        output_mapfile = self._write_datamap_to_file(None, "awimager",
-                                    "output map for awimager recipe")
-
-        mask_patch_size = self.parset.getInt("Imaging.mask_patch_size")
-        auto_imaging_specs = self.parset.getBool("Imaging.auto_imaging_specs")
-        fov = self.parset.getFloat("Imaging.fov")
-        specify_fov = self.parset.getBool("Imaging.specify_fov")
-        if skip:
-            pass
-        else:
-            # run the awimager recipe
-            self.run_task("imager_awimager", prepare_phase_output,
-                          parset = aw_image_parset_path,
-                          mapfile = output_mapfile,
-                          output_image = intermediate_image_path,
-                          mask_patch_size = mask_patch_size,
-                          sourcedb_path = sky_path,
-                          working_directory = self.scratch_directory,
-                          autogenerate_parameters = auto_imaging_specs,
-                          specify_fov = specify_fov,
-                          fov = fov)
-
-        return output_mapfile, max_baseline
-
-    @xml_node
-    def _prepare_phase(self, input_ms_map_path, target_mapfile):
-        """
-        Copy ms to correct location, combine the ms in slices and combine
-        the time slices into a large virtual measurement set
-        """
-        # Create the dir where found and processed ms are placed.
-        # ms_per_image_map_path contains all the original ms locations;
-        # this list may include missing files.
-        processed_ms_dir = os.path.join(self.scratch_directory, "subbands")
-
-        # get the parameters, create a subset for ndppp, save
-        ndppp_parset = self.parset.makeSubset("DPPP.")
-        ndppp_parset_path = self._write_parset_to_file(ndppp_parset,
-                    "prepare_imager_ndppp", "parset for ndpp recipe")
-
-        # create the output file paths
-        # [1] output -> prepare_output
-        output_mapfile = self._write_datamap_to_file(None, "prepare_output")
-        time_slices_mapfile = self._write_datamap_to_file(None,
-                                                    "prepare_time_slices")
-        ms_per_image_mapfile = self._write_datamap_to_file(None,
-                                                         "ms_per_image")
-
-        # get some parameters from the imaging pipeline parset:
-        slices_per_image = self.parset.getInt("Imaging.slices_per_image")
-        subbands_per_image = self.parset.getInt("Imaging.subbands_per_image")
-
-        outputs = self.run_task("imager_prepare", input_ms_map_path,
-                parset = ndppp_parset_path,
-                target_mapfile = target_mapfile,
-                slices_per_image = slices_per_image,
-                subbands_per_image = subbands_per_image,
-                mapfile = output_mapfile,
-                slices_mapfile = time_slices_mapfile,
-                ms_per_image_mapfile = ms_per_image_mapfile,
-                working_directory = self.scratch_directory,
-                processed_ms_dir = processed_ms_dir)
-
-        # validate that the prepare phase produced the correct data
-        output_keys = list(outputs.keys())
-        if 'mapfile' not in output_keys:
-            error_msg = "The imager_prepare master script did not "\
-                    "return correct data. missing: {0}".format('mapfile')
-            self.logger.error(error_msg)
-            raise PipelineException(error_msg)
-        if 'slices_mapfile' not in output_keys:
-            error_msg = "The imager_prepare master script did not "\
-                    "return correct data. missing: {0}".format(
-                                                        'slices_mapfile')
-            self.logger.error(error_msg)
-            raise PipelineException(error_msg)
-        if 'ms_per_image_mapfile' not in output_keys:
-            error_msg = "The imager_prepare master script did not "\
-                    "return correct data. missing: {0}".format(
-                                                'ms_per_image_mapfile')
-            self.logger.error(error_msg)
-            raise PipelineException(error_msg)
-
-        # Return the mapfiles paths with processed data
-        return output_mapfile, outputs["slices_mapfile"], ms_per_image_mapfile, \
-            processed_ms_dir
-
-    @xml_node
-    def _create_dbs(self, input_map_path, timeslice_map_path, source_list_map_path,
-                    skip_create_dbs = False):
-        """
-        Create for each of the concatenated input measurement sets
-        an instrument model and parmdb
-        """
-        # Create the parameter subset
-        parset = self.parset.makeSubset("GSM.")
-
-        # create the files that will contain the output of the recipe
-        parmdbs_map_path = self._write_datamap_to_file(None, "parmdbs",
-                    "parmdbs output mapfile")
-        sourcedb_map_path = self._write_datamap_to_file(None, "sky_files",
-                    "source db output mapfile")
-
-        # run the master script
-        if skip_create_dbs:
-            pass
-        else:
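-            # the monetdb_* settings describe the connection to the GSM
-            # database from which the local sky model is extracted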
-            self.run_task("imager_create_dbs", input_map_path,
-                        monetdb_hostname = parset.getString("monetdb_hostname"),
-                        monetdb_port = parset.getInt("monetdb_port"),
-                        monetdb_name = parset.getString("monetdb_name"),
-                        monetdb_user = parset.getString("monetdb_user"),
-                        monetdb_password = parset.getString("monetdb_password"),
-                        assoc_theta = parset.getString("assoc_theta"),
-                        sourcedb_suffix = ".sourcedb",
-                        slice_paths_mapfile = timeslice_map_path,
-                        parmdb_suffix = ".parmdb",
-                        parmdbs_map_path = parmdbs_map_path,
-                        sourcedb_map_path = sourcedb_map_path,
-                        source_list_map_path = source_list_map_path,
-                        working_directory = self.scratch_directory)
-
-        return parmdbs_map_path, sourcedb_map_path
-
-    # TODO: Move these helpers to the parent class
-    def _write_parset_to_file(self, parset, parset_name, message):
-        """
-        Write the supplied parameterset to the parset directory in the
-        job dir, using the filename supplied in parset_name.
-        Return the full path to the created file.
-        """
-        parset_dir = os.path.join(
-            self.config.get("layout", "job_directory"), "parsets")
-        # create the parset dir if it does not exist
-        create_directory(parset_dir)
-
-        # write the content to a new parset file
-        parset_path = os.path.join(parset_dir,
-                         "{0}.parset".format(parset_name))
-        parset.writeFile(parset_path)
-
-        # write a debug log entry with the path and message
-        self.logger.debug("Wrote parset to path <{0}> : {1}".format(
-                               parset_path, message))
-
-        return parset_path
-
-    def _write_datamap_to_file(self, datamap, mapfile_name, message = ""):
-        """
-        Write the supplied data map to the mapfile directory in the job
-        dir, using the filename supplied in mapfile_name.
-        Return the full path to the created file.
-        If the supplied data map is None, the file is touched if it does
-        not exist; existing files are kept as-is.
-        """
-
-        mapfile_dir = os.path.join(
-            self.config.get("layout", "job_directory"), "mapfiles")
-        # create the mapfile_dir if it does not exist
-        create_directory(mapfile_dir)
-
-        # construct the path of the new mapfile
-        mapfile_path = os.path.join(mapfile_dir,
-                         "{0}.map".format(mapfile_name))
-
-        # save the map (or touch the file) and log a debug entry
-        if datamap is not None:
-            datamap.save(mapfile_path)
-
-            self.logger.debug(
-            "Wrote mapfile <{0}>: {1}".format(mapfile_path, message))
-        else:
-            if not os.path.exists(mapfile_path):
-                DataMap().save(mapfile_path)
-
-                self.logger.debug(
-                    "Touched mapfile <{0}>: {1}".format(mapfile_path, message))
-
-        return mapfile_path
-
-
-if __name__ == '__main__':
-    sys.exit(imaging_pipeline().main())
diff --git a/CEP/Pipeline/recipes/sip/bin/msss_imager_pipeline.py b/CEP/Pipeline/recipes/sip/bin/msss_imager_pipeline.py
deleted file mode 100755
index 40b3c96c82a..00000000000
--- a/CEP/Pipeline/recipes/sip/bin/msss_imager_pipeline.py
+++ /dev/null
@@ -1,619 +0,0 @@
-#!/usr/bin/env python3
-#                                                        LOFAR IMAGING PIPELINE
-#
-#                                                        Imager Pipeline recipe
-#                                                            Marcel Loose, 2012
-#                                                               loose@astron.nl
-#                                                            Wouter Klijn, 2012
-#                                                               klijn@astron.nl
-# -----------------------------------------------------------------------------
-import os
-import sys
-import copy
-
-from lofarpipe.support.control import control
-from lofarpipe.support.utilities import create_directory
-from lofarpipe.support.lofarexceptions import PipelineException
-from lofarpipe.support.data_map import DataMap, validate_data_maps, MultiDataMap
-from lofarpipe.support.utilities import patch_parset, get_parset
-from lofarpipe.support.loggingdecorators import xml_node, mail_log_on_exception
-from lofarpipe.support.feedback_version import VERSION as feedback_version
-
-from lofar.parameterset import parameterset
-
-
-class msss_imager_pipeline(control):
-    """
-    The automatic MSSS imager pipeline is used to generate MSSS images and
-    to find sources in the generated images. Generated images and lists of
-    found sources are complemented with meta data and are thus ready for
-    consumption by the Long Term Storage (LTA).
-
-    *subband groups*
-    The imager_pipeline is able to generate images over the frequency range
-    of LOFAR in parallel, combining the frequency subbands into so-called
-    subband groups. Each subband group results in an image and a source list
-    (typically 8, because ten subbands are combined per group).
-
-    *Time Slices*
-    MSSS images are compiled from a number of so-called (time) slices. Each
-    slice comprises a short (approx. 10 min) observation of a field (an area on
-    the sky) containing typically 80 subbands. The number of slices will be
-    different for LBA observations (typically 9) and HBA observations
-    (typically 2), due to differences in sensitivity.
-
-    Each image will be compiled on a different cluster node to balance the
-    processing load. The input- and output- files and locations are determined
-    by the scheduler and specified in the parset-file.
-
-    **This pipeline performs the following operations:**
-
-    1. Prepare Phase. Copy the preprocessed MSs from the different compute
-       nodes to the nodes where the images will be compiled. Combine the
-       subbands into subband groups, concatenate the timeslices into a
-       single large measurement set, and perform flagging, RFI excision and
-       bad-station exclusion.
-    2. Create db. Generate a local sky model (LSM) from the global sky model
-       (GSM) for the sources that are in the field-of-view (FoV). The LSM
-       is stored as a sourcedb.
-       In step 3 the measurement sets are calibrated on these sources, and
-       in step 4 the LSM is used to create a mask for the awimager. The
-       calibration solutions are placed in an instrument table/db, which is
-       also created in this step.
-    3. BBS. Calibrate the measurement sets with the sourcedb from the GSM.
-       In later iterations, sources found in the created images are added
-       to this list, resulting in a self-calibration cycle.
-    4. Awimager. The combined measurement sets are now imaged. The imaging
-       is performed using a mask: the sources in the sourcedb are used to
-       create a CASA image masking known sources. Together with the
-       measurement set, an image is created.
-    5. Sourcefinding. The images created in step 4 are fed to pyBDSM to find
-       and describe sources. In multiple iterations, subtracting the found
-       sources, all sources are collected in a source list.
-       The sources found in step 5 are fed back into step 2. This allows
-       the measurement sets to be calibrated with the sources currently
-       found in the image. This loop continues until convergence (3 times
-       for the time being).
-    6. Finalize. Meta data regarding the input, the computations performed
-       and the results are collected and added to the CASA image. The
-       images created are converted from CASA to HDF5 format and copied to
-       the correct output location.
-    7. Export meta data: meta data is generated ready for consumption by
-       the LTA and/or the LOFAR framework.
-
-
-    **Per subband-group, the following output products will be delivered:**
-
-    a. An image
-    b. A source list
-    c. (Calibration solutions and corrected visibilities)
-
-    """
-    def __init__(self):
-        """
-        Initialize member variables and call superclass init function
-        """
-        control.__init__(self)
-        self.input_data = DataMap()
-        self.target_data = DataMap()
-        self.output_data = DataMap()
-        self.scratch_directory = None
-        self.parset_dir = None
-        self.mapfile_dir = None
-
-
-    @mail_log_on_exception
-    def pipeline_logic(self):
-        """
-        Define the individual tasks that comprise the current pipeline.
-        This method will be invoked by the base-class's `go()` method.
-        """
-        self.logger.info("Starting imager pipeline")
-
-        # Define scratch directory to be used by the compute nodes.
-        self.scratch_directory = os.path.join(
-            self.inputs['working_directory'], self.inputs['job_name'])
-        # Get input/output-data products specifications.
-        self._get_io_product_specs()
-
-        # strip the leading parset identifiers, leaving only the
-        # PythonControl subset
-        full_parset = self.parset
-        self.parset = self.parset.makeSubset(
-            self.parset.fullModuleName('PythonControl') + '.')  # remove this
-
-        # Create directories to store communication and data files
-
-        job_dir = self.config.get("layout", "job_directory")
-
-        self.parset_dir = os.path.join(job_dir, "parsets")
-        create_directory(self.parset_dir)
-        self.mapfile_dir = os.path.join(job_dir, "mapfiles")
-        create_directory(self.mapfile_dir)
-
-        # *********************************************************************
-        # (INPUT) Get the input from external sources and create pipeline types
-        # Input measurement sets
-        input_mapfile = os.path.join(self.mapfile_dir, "uvdata.mapfile")
-        self.input_data.save(input_mapfile)
-        # storedata_map(input_mapfile, self.input_data)
-        self.logger.debug(
-            "Wrote input UV-data mapfile: {0}".format(input_mapfile))
-
-        # Provides location for the scratch directory and concat.ms location
-        target_mapfile = os.path.join(self.mapfile_dir, "target.mapfile")
-        self.target_data.save(target_mapfile)
-        self.logger.debug(
-            "Wrote target mapfile: {0}".format(target_mapfile))
-
-        # images datafiles
-        output_image_mapfile = os.path.join(self.mapfile_dir, "images.mapfile")
-        self.output_data.save(output_image_mapfile)
-        self.logger.debug(
-            "Wrote output sky-image mapfile: {0}".format(output_image_mapfile))
-
-        # TODO: This is a backdoor option to manually add beam tables when
-        # these are missing from the provided MSs. There is NO use case for
-        # users of the pipeline.
-        add_beam_tables = self.parset.getBool(
-                                    "Imaging.addBeamTables", False)
-
-        # ******************************************************************
-        # (1) prepare phase: copy and collect the ms
-        concat_ms_map_path, timeslice_map_path, ms_per_image_map_path, \
-            processed_ms_dir = self._prepare_phase(input_mapfile,
-                                    target_mapfile, add_beam_tables)
-
-        number_of_major_cycles = self.parset.getInt(
-                                    "Imaging.number_of_major_cycles")
-
-        # We start with an empty source_list map. It should contain n_output
-        # entries all set to empty strings
-        source_list_map_path = os.path.join(self.mapfile_dir,
-                                        "initial_sourcelist.mapfile")
-        source_list_map = DataMap.load(target_mapfile) # copy the output map
-        for item in source_list_map:
-            item.file = ""             # set all to empty string
-        source_list_map.save(source_list_map_path)
-
-        for idx_loop in range(number_of_major_cycles):
-            # *****************************************************************
-            # (2) Create dbs and sky model
-            parmdbs_path, sourcedb_map_path = self._create_dbs(
-                        concat_ms_map_path, timeslice_map_path,
-                        source_list_map_path = source_list_map_path,
-                        skip_create_dbs = False)
-
-            # *****************************************************************
-            # (3)  bbs_imager recipe.
-            bbs_output = self._bbs(timeslice_map_path, parmdbs_path,
-                        sourcedb_map_path, skip = False)
-
-            # TODO: Extra recipe: concat timeslices using pyrap.concatms
-            # (see prepare)
-
-            # *****************************************************************
-            # (4) Get awimager parameters from the prepare parset and inputs
-            aw_image_mapfile, maxbaseline = self._aw_imager(concat_ms_map_path,
-                        idx_loop, sourcedb_map_path,
-                        skip = False)
-
-            # *****************************************************************
-            # (5) Source finding
-            sourcelist_map, found_sourcedb_path = self._source_finding(
-                    aw_image_mapfile, idx_loop, skip = False)
-            # should the output be a sourcedb? instead of a sourcelist
-
-        # TODO: minbaseline should be a parset value as is maxbaseline..
-        minbaseline = 0
-
-        # *********************************************************************
-        # (6) Finalize:
-        placed_data_image_map = self._finalize(aw_image_mapfile,
-            processed_ms_dir, ms_per_image_map_path, sourcelist_map,
-            minbaseline, maxbaseline, target_mapfile, output_image_mapfile,
-            found_sourcedb_path)
-
-        # *********************************************************************
-        # (7) Get metadata
-        # create a parset with information that is available at the top level
-        toplevel_meta_data = parameterset({'feedback_version': feedback_version})
-        toplevel_meta_data.replace("numberOfMajorCycles", 
-                                           str(number_of_major_cycles))
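-        # the number of major cycles is recorded in the toplevel feedback so
-        # that it reaches MAC/SAS alongside the per-dataproduct metadata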
-
-        # Create a parset containing the metadata for MAC/SAS at nodes
-        metadata_file = "%s_feedback_SkyImage" % (self.parset_file,)
-        self.run_task("get_metadata", placed_data_image_map,
-            parset_prefix = (
-                full_parset.getString('prefix') +
-                full_parset.fullModuleName('DataProducts')
-            ),
-            product_type = "SkyImage",
-            metadata_file = metadata_file)
-
-        self.send_feedback_processing(toplevel_meta_data)
-        self.send_feedback_dataproducts(parameterset(metadata_file))
-
-        return 0
-
-    def _get_io_product_specs(self):
-        """
-        Get input- and output-data product specifications from the
-        parset-file, and do some sanity checks.
-        """
-        dps = self.parset.makeSubset(
-            self.parset.fullModuleName('DataProducts') + '.'
-        )
-        # convert input dataproducts from parset value to DataMap
-        self.input_data = DataMap([
-            tuple(os.path.join(location, filename).split(':')) + (skip,)
-                for location, filename, skip in zip(
-                    dps.getStringVector('Input_Correlated.locations'),
-                    dps.getStringVector('Input_Correlated.filenames'),
-                    dps.getBoolVector('Input_Correlated.skip'))
-        ])
-        self.logger.debug("%d Input_Correlated data products specified" %
-                          len(self.input_data))
-
-        self.output_data = DataMap([
-            tuple(os.path.join(location, filename).split(':')) + (skip,)
-                for location, filename, skip in zip(
-                    dps.getStringVector('Output_SkyImage.locations'),
-                    dps.getStringVector('Output_SkyImage.filenames'),
-                    dps.getBoolVector('Output_SkyImage.skip'))
-        ])
-        self.logger.debug("%d Output_SkyImage data products specified" %
-                          len(self.output_data))
-
-        # # Sanity checks on input- and output data product specifications
-        # if not validate_data_maps(self.input_data, self.output_data):
-        #    raise PipelineException(
-        #        "Validation of input/output data product specification failed!"
-        #    )  # Turned off until DataMap is extended.
-
-        # Target data is basically scratch data, consisting of one concatenated
-        # MS per image. It must be stored on the same host as the final image.
-        self.target_data = copy.deepcopy(self.output_data)
-
-        for idx, item in enumerate(self.target_data):
-            item.file = os.path.join(self.scratch_directory, 'ms_per_image_%d' % idx, 'concat.ms')
-
-
-    @xml_node
-    def _finalize(self, awimager_output_map, processed_ms_dir,
-                  ms_per_image_map, sourcelist_map, minbaseline,
-                  maxbaseline, target_mapfile,
-                  output_image_mapfile, sourcedb_map, skip = False):
-        """
-        Perform the final step of the imager:
-        Convert the output image to hdf5 and copy to output location
-        Collect meta data and add to the image
-        """
-
-        placed_image_mapfile = self._write_datamap_to_file(None,
-             "placed_image")
-        self.logger.debug("Touched mapfile for correctly placed"
-                        " hdf images: {0}".format(placed_image_mapfile))
-
-        if skip:
-            return placed_image_mapfile
-        else:
-            # run the imager_finalize recipe
-            placed_image_mapfile = self.run_task("imager_finalize",
-                target_mapfile, awimager_output_map = awimager_output_map,
-                    ms_per_image_map = ms_per_image_map,
-                    sourcelist_map = sourcelist_map,
-                    sourcedb_map = sourcedb_map,
-                    minbaseline = minbaseline,
-                    maxbaseline = maxbaseline,
-                    target_mapfile = target_mapfile,
-                    output_image_mapfile = output_image_mapfile,
-                    processed_ms_dir = processed_ms_dir,
-                    placed_image_mapfile = placed_image_mapfile
-                    )["placed_image_mapfile"]
-
-        return placed_image_mapfile
-
-    @xml_node
-    def _source_finding(self, image_map_path, major_cycle, skip = True):
-        """
-        Perform the sourcefinding step
-        """
-        # Create the parsets for the different sourcefinder runs
-        bdsm_parset_pass_1 = self.parset.makeSubset("BDSM[0].")
-        parset_path_pass_1 = self._write_parset_to_file(bdsm_parset_pass_1,
-                "pybdsm_first_pass.par", "Sourcefinder first pass parset.")
-
-        bdsm_parset_pass_2 = self.parset.makeSubset("BDSM[1].")
-        parset_path_pass_2 = self._write_parset_to_file(bdsm_parset_pass_2,
-                "pybdsm_second_pass.par", "sourcefinder second pass parset")
-
-        # touch a mapfile to be filled with created sourcelists
-        source_list_map = self._write_datamap_to_file(None,
-             "source_finding_outputs",
-             "map to sourcefinding outputs (sourcelist)")
-        sourcedb_map_path = self._write_datamap_to_file(None,
-             "source_dbs_outputs", "Map to sourcedbs based in found sources")
-
-        # construct the location to save the output products of the
-        # sourcefinder
-        cycle_path = os.path.join(self.scratch_directory,
-                                  "awimage_cycle_{0}".format(major_cycle))
-        catalog_path = os.path.join(cycle_path, "bdsm_catalog")
-        sourcedb_path = os.path.join(cycle_path, "bdsm_sourcedb")
-
-        # Run the sourcefinder
-        if skip:
-            return source_list_map, sourcedb_map_path
-        else:
-            self.run_task("imager_source_finding",
-                        image_map_path,
-                        bdsm_parset_file_run1 = parset_path_pass_1,
-                        bdsm_parset_file_run2x = parset_path_pass_2,
-                        working_directory = self.scratch_directory,
-                        catalog_output_path = catalog_path,
-                        mapfile = source_list_map,
-                        sourcedb_target_path = sourcedb_path,
-                        sourcedb_map_path = sourcedb_map_path
-                         )
-
-            return source_list_map, sourcedb_map_path
-
-    @xml_node
-    def _bbs(self, timeslice_map_path, parmdbs_map_path, sourcedb_map_path,
-              skip = False):
-        """
-        Perform a calibration step. First with a set of sources from the
-        gsm and in later iterations also on the found sources
-        """
-        # create parset for bbs run
-        parset = self.parset.makeSubset("BBS.")
-        parset_path = self._write_parset_to_file(parset, "bbs",
-                        "Parset for calibration with a local sky model")
-
-        # create the output file path
-        output_mapfile = self._write_datamap_to_file(None, "bbs_output",
-                        "Mapfile with calibrated measurement sets.")
-
-        converted_sourcedb_map_path = self._write_datamap_to_file(None,
-                "source_db", "correctly shaped mapfile for input sourcedbs")
-
-        if skip:
-            return output_mapfile
-
-        # The create db step produces a mapfile with a single sourcelist for
-        # the different timeslices. Generate a mapfile with copies of the
-        # sourcelist location: this allows validation of the maps in
-        # combination. Get the original map data:
-        sourcedb_map = DataMap.load(sourcedb_map_path)
-        parmdbs_map = MultiDataMap.load(parmdbs_map_path)
-        converted_sourcedb_map = []
-
-        # sanity check for correct output from previous recipes
-        if not validate_data_maps(sourcedb_map, parmdbs_map):
-            self.logger.error("The input files for bbs do not contain "
-                                "matching host names for each entry content:")
-            self.logger.error(repr(sourcedb_map))
-            self.logger.error(repr(parmdbs_map))
-            raise PipelineException("Invalid input data for imager_bbs recipe")
-
-        self.run_task("imager_bbs",
-                      timeslice_map_path,
-                      parset = parset_path,
-                      instrument_mapfile = parmdbs_map_path,
-                      sourcedb_mapfile = sourcedb_map_path,
-                      mapfile = output_mapfile,
-                      working_directory = self.scratch_directory)
-
-        return output_mapfile
-
-    @xml_node
-    def _aw_imager(self, prepare_phase_output, major_cycle, sky_path,
-                   skip = False):
-        """
-        Create an image based on the calibrated, filtered and combined data.
-        """
-        # Create parset for the awimager recipe
-        parset = self.parset.makeSubset("AWimager.")
-        # Get maxbaseline from 'full' parset
-        max_baseline = self.parset.getInt("Imaging.maxbaseline")
-        patch_dictionary = {"maxbaseline": str(
-                                    max_baseline)}
-        try:
-            temp_parset_filename = patch_parset(parset, patch_dictionary)
-            aw_image_parset = get_parset(temp_parset_filename)
-            aw_image_parset_path = self._write_parset_to_file(aw_image_parset,
-                "awimager_cycle_{0}".format(major_cycle),
-                "Awimager recipe parset")
-        finally:
-            # remove tempfile
-            os.remove(temp_parset_filename)
-
-        # Create path to write the awimage files
-        intermediate_image_path = os.path.join(self.scratch_directory,
-            "awimage_cycle_{0}".format(major_cycle), "image")
-
-        output_mapfile = self._write_datamap_to_file(None, "awimager",
-                                    "output map for awimager recipe")
-
-        mask_patch_size = self.parset.getInt("Imaging.mask_patch_size")
-        autogenerate_parameters = self.parset.getBool(
-                                    "Imaging.auto_imaging_specs")
-        specify_fov = self.parset.getBool(
-                                    "Imaging.specify_fov")
-        if skip:
-            pass
-        else:
-            # run the awimager recipe
-            self.run_task("imager_awimager", prepare_phase_output,
-                          parset = aw_image_parset_path,
-                          mapfile = output_mapfile,
-                          output_image = intermediate_image_path,
-                          mask_patch_size = mask_patch_size,
-                          sourcedb_path = sky_path,
-                          working_directory = self.scratch_directory,
-                          autogenerate_parameters = autogenerate_parameters,
-                          specify_fov = specify_fov)
-
-        return output_mapfile, max_baseline
-
-    @xml_node
-    def _prepare_phase(self, input_ms_map_path, target_mapfile,
-        add_beam_tables):
-        """
-        Copy ms to correct location, combine the ms in slices and combine
-        the time slices into a large virtual measurement set
-        """
-        # Create the dir where found and processed ms are placed.
-        # ms_per_image_map_path contains all the original ms locations;
-        # this list may include missing files.
-        processed_ms_dir = os.path.join(self.scratch_directory, "subbands")
-
-        # get the parameters, create a subset for ndppp, save
-        ndppp_parset = self.parset.makeSubset("DPPP.")
-        ndppp_parset_path = self._write_parset_to_file(ndppp_parset,
-                    "prepare_imager_ndppp", "parset for ndpp recipe")
-
-        # create the output file paths
-        # [1] output -> prepare_output
-        output_mapfile = self._write_datamap_to_file(None, "prepare_output")
-        time_slices_mapfile = self._write_datamap_to_file(None,
-                                                    "prepare_time_slices")
-        ms_per_image_mapfile = self._write_datamap_to_file(None,
-                                                         "ms_per_image")
-
-        # get some parameters from the imaging pipeline parset:
-        slices_per_image = self.parset.getInt("Imaging.slices_per_image")
-        subbands_per_image = self.parset.getInt("Imaging.subbands_per_image")
-
-        outputs = self.run_task("imager_prepare", input_ms_map_path,
-                parset = ndppp_parset_path,
-                target_mapfile = target_mapfile,
-                slices_per_image = slices_per_image,
-                subbands_per_image = subbands_per_image,
-                mapfile = output_mapfile,
-                slices_mapfile = time_slices_mapfile,
-                ms_per_image_mapfile = ms_per_image_mapfile,
-                working_directory = self.scratch_directory,
-                processed_ms_dir = processed_ms_dir,
-                add_beam_tables = add_beam_tables)
-
-        # validate that the prepare phase produced the correct data
-        output_keys = list(outputs.keys())
-        for key in ('mapfile', 'slices_mapfile', 'ms_per_image_mapfile'):
-            if key not in output_keys:
-                error_msg = "The imager_prepare master script did not "\
-                            "return correct data. missing: {0}".format(key)
-                self.logger.error(error_msg)
-                raise PipelineException(error_msg)
-
-        # Return the mapfiles paths with processed data
-        return output_mapfile, outputs["slices_mapfile"], ms_per_image_mapfile, \
-            processed_ms_dir
-
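-    # How the grouping parameters above combine, sketched with hypothetical
-    # counts (the formula is implied by the parameter names; treat it as an
-    # illustration, not a spec):
-    #
-    #   slices_per_image   = 9    # e.g. a typical LBA setup
-    #   subbands_per_image = 10
-    #   # each concat.ms then consumes 9 * 10 = 90 input measurement sets
-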
-    @xml_node
-    def _create_dbs(self, input_map_path, timeslice_map_path, source_list_map_path,
-                    skip_create_dbs = False):
-        """
-        Create an instrument model (parmdb) and a sky model (sourcedb) for
-        each of the concatenated input measurement sets
-        """
-        # Create the parameters set
-        parset = self.parset.makeSubset("GSM.")
-
-        # create the files that will contain the output of the recipe
-        parmdbs_map_path = self._write_datamap_to_file(None, "parmdbs",
-                    "parmdbs output mapfile")
-        sourcedb_map_path = self._write_datamap_to_file(None, "sky_files",
-                    "source db output mapfile")
-
-        # run the master script
-        if skip_create_dbs:
-            pass
-        else:
-            self.run_task("imager_create_dbs", input_map_path,
-                        monetdb_hostname = parset.getString("monetdb_hostname"),
-                        monetdb_port = parset.getInt("monetdb_port"),
-                        monetdb_name = parset.getString("monetdb_name"),
-                        monetdb_user = parset.getString("monetdb_user"),
-                        monetdb_password = parset.getString("monetdb_password"),
-                        assoc_theta = parset.getString("assoc_theta"),
-                        sourcedb_suffix = ".sourcedb",
-                        slice_paths_mapfile = timeslice_map_path,
-                        parmdb_suffix = ".parmdb",
-                        parmdbs_map_path = parmdbs_map_path,
-                        sourcedb_map_path = sourcedb_map_path,
-                        source_list_map_path = source_list_map_path,
-                        working_directory = self.scratch_directory)
-
-        return parmdbs_map_path, sourcedb_map_path
-
-    # TODO: Move these helpers to the parent class
-    def _write_parset_to_file(self, parset, parset_name, message):
-        """
-        Write the supplied parameterset to the parset directory in the job
-        dir, using the filename supplied in parset_name.
-        Return the full path to the created file.
-        """
-        parset_dir = os.path.join(
-            self.config.get("layout", "job_directory"), "parsets")
-        # create the parset dir if it does not exist
-        create_directory(parset_dir)
-
-        # write the content to a new parset file
-        parset_path = os.path.join(parset_dir,
-                         "{0}.parset".format(parset_name))
-        parset.writeFile(parset_path)
-
-        # display a debug log entry with path and message
-        self.logger.debug("Wrote parset to path <{0}> : {1}".format(
-                               parset_path, message))
-
-        return parset_path
-
-    def _write_datamap_to_file(self, datamap, mapfile_name, message = ""):
-        """
-        Write the supplied map to a mapfile in the mapfile directory in the
-        job dir, using the filename supplied in mapfile_name.
-        Return the full path to the created file.
-        If the supplied datamap is None the file is touched if it does not
-        exist; existing files are kept as is.
-        """
-
-        mapfile_dir = os.path.join(
-            self.config.get("layout", "job_directory"), "mapfiles")
-        # create the mapfile_dir if it does not exist
-        create_directory(mapfile_dir)
-
-        # construct the path of the new mapfile
-        mapfile_path = os.path.join(mapfile_dir,
-                         "{0}.map".format(mapfile_name))
-
-        # save the datamap and log, or touch an empty placeholder mapfile
-        if datamap is not None:
-            datamap.save(mapfile_path)
-
-            self.logger.debug(
-            "Wrote mapfile <{0}>: {1}".format(mapfile_path, message))
-        else:
-            if not os.path.exists(mapfile_path):
-                DataMap().save(mapfile_path)
-
-                self.logger.debug(
-                    "Touched mapfile <{0}>: {1}".format(mapfile_path, message))
-
-        return mapfile_path
-
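-# Usage sketch for the _write_datamap_to_file helper above, given a pipeline
-# instance `pipe` (hosts and paths are made up for illustration):
-#
-#   from lofarpipe.support.data_map import DataMap
-#   dmap = DataMap([("locus001", "/data/scratch/obs.MS", False)])
-#   path = pipe._write_datamap_to_file(dmap, "example", "example map")
-#   # passing None only touches an empty placeholder mapfile; an existing
-#   # file at that path is left as is:
-#   path = pipe._write_datamap_to_file(None, "placeholder")
-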
-
-if __name__ == '__main__':
-    sys.exit(msss_imager_pipeline().main())
diff --git a/CEP/Pipeline/recipes/sip/bin/selfcal_imager_pipeline.py b/CEP/Pipeline/recipes/sip/bin/selfcal_imager_pipeline.py
deleted file mode 100644
index 99e4c9380c2..00000000000
--- a/CEP/Pipeline/recipes/sip/bin/selfcal_imager_pipeline.py
+++ /dev/null
@@ -1,871 +0,0 @@
-#!/usr/bin/env python3
-#                                                        LOFAR IMAGING PIPELINE
-#
-#                                                      selfcal Pipeline recipe
-#                                                            Marcel Loose, 2012
-#                                                               loose@astron.nl
-#                                                            Wouter Klijn, 2012
-#                                                               klijn@astron.nl
-#                                                         Nicolas Vilchez, 2014
-#                                                             vilchez@astron.nl
-# -----------------------------------------------------------------------------
-import os
-import sys
-import copy
-import shutil
-
-from lofarpipe.support.control import control
-from lofarpipe.support.utilities import create_directory
-from lofarpipe.support.lofarexceptions import PipelineException
-from lofarpipe.support.data_map import DataMap, validate_data_maps,\
-                                       MultiDataMap, align_data_maps
-from lofarpipe.support.utilities import patch_parset, get_parset
-from lofarpipe.support.loggingdecorators import xml_node, mail_log_on_exception
-from lofarpipe.support.feedback_version import VERSION as feedback_version
-
-from lofar.parameterset import parameterset
-
-
-class selfcal_imager_pipeline(control):
-    """
-    The self-calibration pipeline is used to generate images and find
-    sources in the generated images. Generated images and lists of found
-    sources are complemented with meta data and are thus ready for
-    consumption by the Long Term Archive (LTA).
-
-    *subband groups*
-    The imager_pipeline can generate images across the full frequency range
-    of LOFAR in parallel, by combining the frequency subbands into so-called
-    subband groups. Each subband group results in an image and a source list
-    (typically 8 groups, as ten subbands are combined per group).
-
-    *Time Slices*
-    Selfcal images are compiled from a number of so-called (time) slices. Each
-    slice comprises a short (approx. 10 min) observation of a field (an area on
-    the sky) containing typically 80 subbands. The number of slices will be
-    different for LBA observations (typically 9) and HBA observations
-    (typically 2), due to differences in sensitivity.
-
-    Each image will be compiled on a different cluster node to balance the
-    processing load. The input- and output- files and locations are determined
-    by the scheduler and specified in the parset-file.
-
-    **This pipeline performs the following operations:**
-
-    1. Prepare Phase. Copy the preprocessed MSes from the different compute
-       nodes to the nodes where the images will be compiled (the prepare
-       phase). Combine the subbands into subband groups, concatenate the
-       time slices into a single large measurement set, and perform
-       flagging, RFI removal and bad-station exclusion.
-    2. Create db. Generate a local sky model (LSM) from the global sky model
-       (GSM) for the sources that are in the field-of-view (FoV). The LSM
-       is stored as a sourcedb.
-       In step 3 the measurement sets are calibrated on these sources, and
-       in step 4 the LSM is used to create a mask for the awimager. The
-       calibration solutions are placed in an instrument table/db, also
-       created in this step.
-    3. BBS. Calibrate the measurement set with the sourcedb from the GSM.
-       In later iterations, sources found in the created images are added
-       to this list, resulting in a self-calibration cycle.
-    4. Awimager. The combined measurement sets are now imaged. The imaging
-       is performed using a mask: the sources in the sourcedb are used to
-       create a CASA image masking known sources. Together with the
-       measurement set, an image is created.
-    5. Sourcefinding. The images created in step 4 are fed to pyBDSM to find
-       and describe sources. In multiple iterations, subtracting the found
-       sources, all sources are collected in a source list.
-       Step I. The sources found in step 5 are fed back into step 2.
-       This allows the measurement sets to be calibrated with the sources
-       currently found in the image. This loop continues until convergence
-       (3 cycles for the time being).
-    6. Finalize. Meta data regarding the input, the computations performed
-       and the results are collected and added to the CASA image. The
-       created images are converted from CASA to HDF5 format and copied to
-       the correct output location.
-    7. Export meta data: an output file with meta data is generated, ready
-       for consumption by the LTA and/or the LOFAR framework.
-
-
-    **Per subband-group, the following output products will be delivered:**
-
-    a. An image
-    b. A source list
-    c. (Calibration solutions and corrected visibilities)
-
-    """
-    def __init__(self):
-        """
-        Initialize member variables and call superclass init function
-        """
-        control.__init__(self)
-        self.parset = parameterset()
-        self.input_data = DataMap()
-        self.target_data = DataMap()
-        self.output_data = DataMap()
-        self.output_correlated_data = DataMap()
-        self.scratch_directory = None
-        self.parset_feedback_file = None
-        self.parset_dir = None
-        self.mapfile_dir = None
-
-    def usage(self):
-        """
-        Display usage information
-        """
-        print("Usage: %s <parset-file>  [options]" % sys.argv[0], file=sys.stderr)
-        return 1
-
-    def go(self):
-        """
-        Read the parset-file that was given as input argument, and set the
-        jobname before calling the base-class's `go()` method.
-        """
-        try:
-            parset_file = os.path.abspath(self.inputs['args'][0])
-        except IndexError:
-            return self.usage()
-        self.parset.adoptFile(parset_file)
-        self.parset_feedback_file = parset_file + "_feedback"
-        # Set job-name to basename of parset-file w/o extension, if it's not
-        # set on the command-line with '-j' or '--job-name'
-        if 'job_name' not in self.inputs:
-            self.inputs['job_name'] = (
-                os.path.splitext(os.path.basename(parset_file))[0]
-            )
-        return super(selfcal_imager_pipeline, self).go()
-
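-    # Invocation sketch (the parset path and job name are hypothetical):
-    #
-    #   python3 selfcal_imager_pipeline.py /path/to/selfcal.parset \
-    #       --job-name my_selfcal_run
-    #
-    # Without -j/--job-name the job name defaults to the parset file's
-    # basename without extension, as implemented above.
-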
-    @mail_log_on_exception
-    def pipeline_logic(self):
-        """
-        Define the individual tasks that comprise the current pipeline.
-        This method will be invoked by the base-class's `go()` method.
-        """
-        self.logger.info("Starting imager pipeline")
-
-        # Define scratch directory to be used by the compute nodes.
-        self.scratch_directory = os.path.join(
-            self.inputs['working_directory'], self.inputs['job_name'])
-        # Get input/output-data products specifications.
-        self._get_io_product_specs()
-
-        # remove prepended parset identifiers, leave only PythonControl
-        full_parset = self.parset
-        self.parset = self.parset.makeSubset(
-            self.parset.fullModuleName('PythonControl') + '.')  # remove this
-
-        # Create directories to store communication and data files
-
-        job_dir = self.config.get("layout", "job_directory")
-
-        self.parset_dir = os.path.join(job_dir, "parsets")
-        create_directory(self.parset_dir)
-        self.mapfile_dir = os.path.join(job_dir, "mapfiles")
-        create_directory(self.mapfile_dir)
-
-        # *********************************************************************
-        # (INPUT) Get the input from external sources and create pipeline types
-        # Input measurement sets
-        input_mapfile = os.path.join(self.mapfile_dir, "uvdata.mapfile")
-        self.input_data.save(input_mapfile)
-        # storedata_map(input_mapfile, self.input_data)
-        self.logger.debug(
-            "Wrote input UV-data mapfile: {0}".format(input_mapfile))
-
-        # Provides location for the scratch directory and concat.ms location
-        target_mapfile = os.path.join(self.mapfile_dir, "target.mapfile")
-        self.target_data.save(target_mapfile)
-        self.logger.debug(
-            "Wrote target mapfile: {0}".format(target_mapfile))
-
-        # images datafiles
-        output_image_mapfile = os.path.join(self.mapfile_dir, "images.mapfile")
-        self.output_data.save(output_image_mapfile)
-        self.logger.debug(
-            "Wrote output sky-image mapfile: {0}".format(output_image_mapfile))
-
-        # Location of the output measurement set
-        output_correlated_mapfile = os.path.join(self.mapfile_dir, 
-                                                 "correlated.mapfile")
-        self.output_correlated_data.save(output_correlated_mapfile)
-        self.logger.debug(
-            "Wrote output correlated mapfile: {0}".format(output_correlated_mapfile))
-
-        # Get pipeline parameters from the toplevel recipe
-        # TODO: This is a backdoor option to manually add beamtables when these
-        # are missing on the provided ms. There is NO use case for users of the
-        # pipeline
-        add_beam_tables = self.parset.getBool(
-                                    "Imaging.addBeamTables", False)
-
-
-        number_of_major_cycles = self.parset.getInt(
-                                    "Imaging.number_of_major_cycles")
-
-        # Almost always a user prefers a partial success over a failed pipeline
-        output_result_of_last_succesfull_cycle = self.parset.getBool(
-                            "Imaging.output_on_error", True)
-
-
-        if number_of_major_cycles < 3:
-            self.logger.error(
-                "The number of major cycles must be 3 or higher, correct"
-                " the key: Imaging.number_of_major_cycles")
-            raise PipelineException(
-                     "Incorrect number_of_major_cycles in the parset")
-
-
-        # ******************************************************************
-        # (1) prepare phase: copy and collect the ms
-        concat_ms_map_path, timeslice_map_path, ms_per_image_map_path, \
-            processed_ms_dir = self._prepare_phase(input_mapfile,
-                                    target_mapfile, add_beam_tables)
-
-        # We start with an empty source_list map. It should contain n_output
-        # entries all set to empty strings
-        source_list_map_path = os.path.join(self.mapfile_dir,
-                                        "initial_sourcelist.mapfile")
-        source_list_map = DataMap.load(target_mapfile) # copy the output map
-        for item in source_list_map:
-            item.file = ""             # set all to empty string
-        source_list_map.save(source_list_map_path)
-
-        succesfull_cycle_mapfiles_dict = None
-        for idx_cycle in range(number_of_major_cycles):
-            try:
-                # *****************************************************************
-                # (2) Create dbs and sky model
-                parmdbs_path, sourcedb_map_path = self._create_dbs(
-                            concat_ms_map_path, timeslice_map_path, idx_cycle,
-                            source_list_map_path = source_list_map_path,
-                            skip_create_dbs = False)
-
-
-                # *****************************************************************
-                # (3)  bbs_imager recipe.
-                bbs_output = self._bbs(concat_ms_map_path, timeslice_map_path, 
-                        parmdbs_path, sourcedb_map_path, idx_cycle, skip = False)
-
-            
-                # TODO: Extra recipe: concat timeslices using pyrap.concatms
-                # (see prepare) redmine issue #6021
-                # Done in imager_bbs.py at the node level after calibration
-
-                # *****************************************************************
-                # (4) Get parameters awimager from the prepare_parset and inputs
-                aw_image_mapfile, maxbaseline = self._aw_imager(concat_ms_map_path,
-                            idx_cycle, sourcedb_map_path, number_of_major_cycles,
-                            skip = False)
-
-                # *****************************************************************
-                # (5) Source finding
-                source_list_map_path, found_sourcedb_path = self._source_finding(
-                        aw_image_mapfile, idx_cycle, skip = False)
-                # should the output be a sourcedb instead of a sourcelist?
-
-                # save the active mapfiles: locations and content
-                # Used to output the last successful cycle on error
-                mapfiles_to_save = {'aw_image_mapfile':aw_image_mapfile,
-                                    'source_list_map_path':source_list_map_path,
-                                    'found_sourcedb_path':found_sourcedb_path,
-                                    'concat_ms_map_path':concat_ms_map_path}
-                succesfull_cycle_mapfiles_dict = self._save_active_mapfiles(idx_cycle, 
-                                      self.mapfile_dir, mapfiles_to_save)
-
-            # On exception there is the option to output the results of the 
-            # last cycle without errors
-            except KeyboardInterrupt as ex:
-                raise ex
-
-            except Exception as ex:
-                self.logger.error("Encountered an fatal exception during self"
-                                  "calibration. Aborting processing and return"
-                                  " the last succesfull cycle results")
-                self.logger.error(str(ex))
-
-                # if we are in the first cycle always exit with exception
-                if idx_cycle == 0:
-                    raise ex
-
-                if not output_result_of_last_succesfull_cycle:
-                    raise ex
-                
-                # restore the mapfile variables
-                aw_image_mapfile = succesfull_cycle_mapfiles_dict['aw_image_mapfile']
-                source_list_map_path = succesfull_cycle_mapfiles_dict['source_list_map_path']
-                found_sourcedb_path = succesfull_cycle_mapfiles_dict['found_sourcedb_path']
-                concat_ms_map_path = succesfull_cycle_mapfiles_dict['concat_ms_map_path']
-
-                # set the number_of_major_cycles to the correct number
-                number_of_major_cycles = idx_cycle - 1
-                max_cycles_reached = False
-                break
-            else:
-                max_cycles_reached = True
-
-
-        # TODO: minbaseline should be a parset value, as maxbaseline is
-        minbaseline = 0
-
-        # *********************************************************************
-        # (6) Finalize:
-        placed_data_image_map, placed_correlated_map =  \
-                                        self._finalize(aw_image_mapfile, 
-            processed_ms_dir, ms_per_image_map_path, source_list_map_path,
-            minbaseline, maxbaseline, target_mapfile, output_image_mapfile,
-            found_sourcedb_path, concat_ms_map_path, output_correlated_mapfile)
-
-        # *********************************************************************
-        # (7) Get metadata
-        # create a parset with information that is available on the toplevel
-
-        self._get_meta_data(number_of_major_cycles, placed_data_image_map,
-                       placed_correlated_map, full_parset, 
-                       max_cycles_reached)
-
-
-        return 0
-
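-    # The cycle/recovery logic above, distilled into a generic sketch
-    # (run_cycle, save and checkpoint are hypothetical names, not the
-    # pipeline's API):
-    #
-    #   checkpoint = None
-    #   for idx in range(n_cycles):
-    #       try:
-    #           state = run_cycle(idx)
-    #           checkpoint = save(idx, state)  # keep the last good state
-    #       except KeyboardInterrupt:
-    #           raise                          # never swallow user aborts
-    #       except Exception:
-    #           if idx == 0 or not output_on_error:
-    #               raise                      # nothing usable to return
-    #           state = checkpoint             # fall back to last success
-    #           break
-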
-    def _save_active_mapfiles(self, cycle_idx, mapfile_dir, mapfiles = {}):
-        """
-        Receives a dict with the active mapfiles (variable name to path).
-        Each mapfile is copied to a separate per-cycle directory and saved.
-        This allows us to output the results of the last successful run.
-        """
-        # create a directory for storing the saved mapfiles, use cycle idx
-        mapfile_for_cycle_dir = os.path.join(mapfile_dir, "cycle_" + str(cycle_idx))
-        create_directory(mapfile_for_cycle_dir)
-
-        saved_mapfiles = {}
-        for (var_name,mapfile_path) in list(mapfiles.items()):
-            shutil.copy(mapfile_path, mapfile_for_cycle_dir)
-            # save the newly created file, get the filename, and append it
-            # to the directory name
-            saved_mapfiles[var_name] = os.path.join(mapfile_for_cycle_dir,
-                                          os.path.basename(mapfile_path))
-
-        return saved_mapfiles
-
-
-    def _get_meta_data(self, number_of_major_cycles, placed_data_image_map,
-                       placed_correlated_map, full_parset, max_cycles_reached):
-        """
-        Combine all the meta data collection steps of the processing.
-        """
-        parset_prefix = full_parset.getString('prefix') + \
-                full_parset.fullModuleName('DataProducts')
-                    
-        toplevel_meta_data = parameterset({'feedback_version': feedback_version})
-        toplevel_meta_data.replace(
-             parset_prefix + ".numberOfMajorCycles", 
-                                           str(number_of_major_cycles))
-        toplevel_meta_data_path = os.path.join(
-                self.parset_dir, "toplevel_meta_data.parset")
-
-        toplevel_meta_data.replace(parset_prefix + ".max_cycles_reached",
-                                  str(max_cycles_reached))
-
-        try:
-            toplevel_meta_data.writeFile(toplevel_meta_data_path)
-            self.logger.info("Wrote meta data to: " + 
-                    toplevel_meta_data_path)
-        except RuntimeError as err:
-            self.logger.error(
-                "Failed to write toplevel meta information parset %s: %s" % (
-                    toplevel_meta_data_path, str(err)))
-            return 1
-
-        skyimage_metadata = "%s_feedback_SkyImage" % (self.parset_file,)
-        correlated_metadata = "%s_feedback_Correlated" % (self.parset_file,)
-
-        # Create a parset-file containing the metadata for MAC/SAS at nodes
-        self.run_task("get_metadata", placed_data_image_map,           
-            parset_prefix = parset_prefix,
-            product_type = "SkyImage",
-            metadata_file = skyimage_metadata)
-
-        self.run_task("get_metadata", placed_correlated_map,
-            parset_prefix = parset_prefix,
-            product_type = "Correlated",
-            metadata_file = correlated_metadata)
-
-        self.send_feedback_processing(toplevel_meta_data)
-        self.send_feedback_dataproducts(parameterset(skyimage_metadata))
-        self.send_feedback_dataproducts(parameterset(correlated_metadata))
-
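-    # Sketch of the resulting toplevel meta data parset (<prefix> stands for
-    # whatever parset_prefix evaluates to; the values are examples):
-    #
-    #   feedback_version              = <feedback_version>
-    #   <prefix>.numberOfMajorCycles  = 3
-    #   <prefix>.max_cycles_reached   = True
-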
-    def _get_io_product_specs(self):
-        """
-        Get input- and output-data product specifications from the
-        parset-file, and do some sanity checks.
-        """
-        dps = self.parset.makeSubset(
-            self.parset.fullModuleName('DataProducts') + '.'
-        )
-        # convert input dataproducts from parset value to DataMap
-        self.input_data = DataMap([
-            tuple(os.path.join(location, filename).split(':')) + (skip,)
-                for location, filename, skip in zip(
-                    dps.getStringVector('Input_Correlated.locations'),
-                    dps.getStringVector('Input_Correlated.filenames'),
-                    dps.getBoolVector('Input_Correlated.skip'))
-        ])
-        self.logger.debug("%d Input_Correlated data products specified" %
-                          len(self.input_data))
-
-        self.output_data = DataMap([
-            tuple(os.path.join(location, filename).split(':')) + (skip,)
-                for location, filename, skip in zip(
-                    dps.getStringVector('Output_SkyImage.locations'),
-                    dps.getStringVector('Output_SkyImage.filenames'),
-                    dps.getBoolVector('Output_SkyImage.skip'))
-        ])
-        self.logger.debug("%d Output_SkyImage data products specified" %
-                          len(self.output_data))
-
-        self.output_correlated_data = DataMap([
-            tuple(os.path.join(location, filename).split(':')) + (skip,)
-                for location, filename, skip in zip(
-                    dps.getStringVector('Output_Correlated.locations'),
-                    dps.getStringVector('Output_Correlated.filenames'),
-                    dps.getBoolVector('Output_Correlated.skip'))
-        ])
-
-        # ensure that the two output maps contain the same skip fields
-        align_data_maps( self.output_data, self.output_correlated_data)
-
-        self.logger.debug("%d Output_Correlated data products specified" %
-                          len(self.output_correlated_data))
-
-        # # Sanity checks on input- and output data product specifications
-        # if not validate_data_maps(self.input_data, self.output_data):
-        #    raise PipelineException(
-        #        "Validation of input/output data product specification failed!"
-        #    )  # Turned off until DataMap is extended.
-
-        # Target data is basically scratch data, consisting of one concatenated
-        # MS per image. It must be stored on the same host as the final image.
-        self.target_data = copy.deepcopy(self.output_data)
-
-        for idx, item in enumerate(self.target_data):
-            item.file = os.path.join(self.scratch_directory, 'ms_per_image_%d' % idx, 'concat.ms')
-
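-    # Worked example of the "host:path" unpacking used above (values are
-    # hypothetical):
-    #
-    #   location = "locus001:/data/scratch"
-    #   filename = "L12345_SB000.MS"
-    #   os.path.join(location, filename)
-    #     -> "locus001:/data/scratch/L12345_SB000.MS"
-    #   tuple("locus001:/data/scratch/L12345_SB000.MS".split(':')) + (False,)
-    #     -> ("locus001", "/data/scratch/L12345_SB000.MS", False)
-    #
-    # i.e. every DataMap entry becomes a (host, file, skip) tuple.
-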
-
-    @xml_node
-    def _finalize(self, awimager_output_map, processed_ms_dir,
-                  ms_per_image_map, sourcelist_map, minbaseline,
-                  maxbaseline, target_mapfile,
-                  output_image_mapfile, sourcedb_map, concat_ms_map_path,
-                  output_correlated_mapfile, skip = False):
-        """
-        Perform the final step of the imager:
-        Convert the output image to hdf5 and copy to output location
-        Collect meta data and add to the image
-        """
-
-        placed_image_mapfile = self._write_datamap_to_file(None,
-             "placed_image")
-        self.logger.debug("Touched mapfile for correctly placed"
-                        " hdf images: {0}".format(placed_image_mapfile))
-
-        placed_correlated_mapfile = self._write_datamap_to_file(None,
-             "placed_correlated")
-        self.logger.debug("Touched mapfile for correctly placed"
-                        " correlated datasets: {0}".format(placed_correlated_mapfile))
-
-        if skip:
-            return placed_image_mapfile, placed_correlated_mapfile
-        else:
-            # run the finalize recipe
-            outputs = self.run_task("selfcal_finalize",
-                target_mapfile, awimager_output_map = awimager_output_map,
-                    ms_per_image_map = ms_per_image_map,
-                    sourcelist_map = sourcelist_map,
-                    sourcedb_map = sourcedb_map,
-                    minbaseline = minbaseline,
-                    maxbaseline = maxbaseline,
-                    target_mapfile = target_mapfile,
-                    output_image_mapfile = output_image_mapfile,
-                    processed_ms_dir = processed_ms_dir,
-                    placed_image_mapfile = placed_image_mapfile,
-                    placed_correlated_mapfile = placed_correlated_mapfile,
-                    concat_ms_map_path = concat_ms_map_path,
-                    output_correlated_mapfile = output_correlated_mapfile
-                    )
-
-        return outputs["placed_image_mapfile"], \
-                outputs["placed_correlated_mapfile"]
-
-    @xml_node
-    def _source_finding(self, image_map_path, major_cycle, skip = True):
-        """
-        Perform the sourcefinding step
-        """
-        # Create the parsets for the different sourcefinder runs
-        bdsm_parset_pass_1 = self.parset.makeSubset("BDSM[0].")
-
-        self._selfcal_modify_parset(bdsm_parset_pass_1, "pybdsm_first_pass.par")
-        parset_path_pass_1 = self._write_parset_to_file(bdsm_parset_pass_1,
-                "pybdsm_first_pass.par", "Sourcefinder first pass parset.")
-
-        bdsm_parset_pass_2 = self.parset.makeSubset("BDSM[1].")
-        self._selfcal_modify_parset(bdsm_parset_pass_2, "pybdsm_second_pass.par")
-        parset_path_pass_2 = self._write_parset_to_file(bdsm_parset_pass_2,
-                "pybdsm_second_pass.par", "sourcefinder second pass parset")
-
-        # touch a mapfile to be filled with created sourcelists
-        source_list_map = self._write_datamap_to_file(None,
-             "source_finding_outputs",
-             "map to sourcefinding outputs (sourcelist)")
-        sourcedb_map_path = self._write_datamap_to_file(None,
-             "source_dbs_outputs", "Map to sourcedbs based in found sources")
-
-        # construct the location to save the output products of the
-        # sourcefinder
-        cycle_path = os.path.join(self.scratch_directory,
-                                  "awimage_cycle_{0}".format(major_cycle))
-        catalog_path = os.path.join(cycle_path, "bdsm_catalog")
-        sourcedb_path = os.path.join(cycle_path, "bdsm_sourcedb")
-
-        # Run the sourcefinder
-        if skip:
-            return source_list_map, sourcedb_map_path
-        else:
-            self.run_task("imager_source_finding",
-                        image_map_path,
-                        bdsm_parset_file_run1 = parset_path_pass_1,
-                        bdsm_parset_file_run2x = parset_path_pass_2,
-                        working_directory = self.scratch_directory,
-                        catalog_output_path = catalog_path,
-                        mapfile = source_list_map,
-                        sourcedb_target_path = sourcedb_path,
-                        sourcedb_map_path = sourcedb_map_path
-                         )
-
-            return source_list_map, sourcedb_map_path
-
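-    # Sketch of the pyBDSM parset layout consumed above: the toplevel parset
-    # carries two key groups, BDSM[0].* for the first sourcefinder pass and
-    # BDSM[1].* for the second. The values below are the ones that
-    # _selfcal_modify_parset (at the end of this file) enforces:
-    #
-    #   BDSM[0].thresh_isl = 5
-    #   BDSM[0].thresh_pix = 5
-    #   BDSM[0].rms_box    = (80.0,15.0)
-    #   BDSM[1].thresh_isl = 5
-    #   BDSM[1].thresh_pix = 5
-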
-    @xml_node
-    def _bbs(self, concat_ms_map_path, timeslice_map_path, parmdbs_map_path, sourcedb_map_path,
-              major_cycle, skip = False):
-        """
-        Perform a calibration step: first with a set of sources from the
-        GSM, and in later iterations also with the newly found sources.
-        """
-        # create parset for bbs run
-        parset = self.parset.makeSubset("BBS.")
-        self._selfcal_modify_parset(parset, "bbs")
-        parset_path = self._write_parset_to_file(parset, "bbs",
-                        "Parset for calibration with a local sky model")
-
-        # create the output file path
-        output_mapfile = self._write_datamap_to_file(None, "bbs_output",
-                        "Mapfile with calibrated measurement sets.")
-
-        converted_sourcedb_map_path = self._write_datamap_to_file(None,
-                "source_db", "correctly shaped mapfile for input sourcedbs")
-
-        if skip:
-            return output_mapfile
-
-        # The create_dbs step produces a mapfile with a single sourcedb for
-        # the different timeslices. Generate a mapfile with copies of the
-        # sourcedb location: this allows validation of the maps in
-        # combination. First get the original map data:
-        sourcedb_map = DataMap.load(sourcedb_map_path)
-        parmdbs_map = MultiDataMap.load(parmdbs_map_path)
-        converted_sourcedb_map = []
-
-        # sanity check for correct output from previous recipes
-        if not validate_data_maps(sourcedb_map, parmdbs_map):
-            self.logger.error("The input files for bbs do not contain "
-                                "matching host names for each entry content:")
-            self.logger.error(repr(sourcedb_map))
-            self.logger.error(repr(parmdbs_map))
-            raise PipelineException("Invalid input data for imager_bbs recipe")
-
-        self.run_task("selfcal_bbs",
-                      timeslice_map_path,
-                      parset = parset_path,
-                      instrument_mapfile = parmdbs_map_path,
-                      sourcedb_mapfile = sourcedb_map_path,
-                      mapfile = output_mapfile,
-                      working_directory = self.scratch_directory,
-                      concat_ms_map_path=concat_ms_map_path,
-                      major_cycle=major_cycle)
-
-        return output_mapfile
-
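-    # Sketch of the host-matching check used above (hypothetical maps; the
-    # MultiDataMap constructor call mirrors how its entries are consumed
-    # elsewhere, with a list of files per entry):
-    #
-    #   a = DataMap([("locus001", "x.sourcedb", False)])
-    #   b = MultiDataMap([("locus001", ["t0.parmdb", "t1.parmdb"], False)])
-    #   validate_data_maps(a, b)   # True: entry hosts match
-    #   c = DataMap([("locus002", "y.sourcedb", False)])
-    #   validate_data_maps(c, b)   # False: host mismatch, bbs would abort
-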
-    @xml_node
-    def _aw_imager(self, prepare_phase_output, major_cycle, sky_path,
-                  number_of_major_cycles,   skip = False):
-        """
-        Create an image based on the calibrated, filtered and combined data.
-        """
-        # Create parset for the awimage recipe
-        parset = self.parset.makeSubset("AWimager.")
-        # Get maxbaseline from 'full' parset
-        max_baseline = self.parset.getInt("Imaging.maxbaseline")
-        patch_dictionary = {"maxbaseline": str(max_baseline)}
-        temp_parset_filename = patch_parset(parset, patch_dictionary)
-        try:
-            aw_image_parset = get_parset(temp_parset_filename)
-            aw_image_parset_path = self._write_parset_to_file(aw_image_parset,
-                "awimager_cycle_{0}".format(major_cycle),
-                "Awimager recipe parset")
-        finally:
-            # remove the temp file; patch_parset created it outside the try
-            # block, so it is guaranteed to exist here
-            os.remove(temp_parset_filename)
-
-        # Create path to write the awimage files
-        intermediate_image_path = os.path.join(self.scratch_directory,
-            "awimage_cycle_{0}".format(major_cycle), "image")
-
-        output_mapfile = self._write_datamap_to_file(None, "awimager",
-                                    "output map for awimager recipe")
-
-        mask_patch_size = self.parset.getInt("Imaging.mask_patch_size")
-        autogenerate_parameters = self.parset.getBool(
-                                    "Imaging.auto_imaging_specs")
-        specify_fov = self.parset.getBool(
-                                    "Imaging.specify_fov")
-        if skip:
-            pass
-        else:
-            # run the awimager recipe
-            self.run_task("selfcal_awimager", prepare_phase_output,
-                          parset = aw_image_parset_path,
-                          mapfile = output_mapfile,
-                          output_image = intermediate_image_path,
-                          mask_patch_size = mask_patch_size,
-                          sourcedb_path = sky_path,
-                          working_directory = self.scratch_directory,
-                          autogenerate_parameters = autogenerate_parameters,
-                          specify_fov = specify_fov, major_cycle = major_cycle,
-                          nr_cycles = number_of_major_cycles,
-                          perform_self_cal = True)
-
-        return output_mapfile, max_baseline
-
-    @xml_node
-    def _prepare_phase(self, input_ms_map_path, target_mapfile,
-        add_beam_tables):
-        """
-        Copy the MSes to their target location, combine them into time
-        slices, and concatenate the time slices into a single large virtual
-        measurement set.
-        """
-        # Create the dir where found and processed ms are placed
-        # ms_per_image_map_path contains all the original ms locations:
-        # this list contains possible missing files
-        processed_ms_dir = os.path.join(self.scratch_directory, "subbands")
-
-        # get the parameters, create a subset for ndppp, save
-        # Additional parameters are added at runtime on the node, based on
-        # the data
-        ndppp_parset = self.parset.makeSubset("DPPP.")
-        ndppp_parset_path = self._write_parset_to_file(ndppp_parset,
-                    "prepare_imager_ndppp", "parset for ndppp recipe")
-
-        # create the output file paths
-        # [1] output -> prepare_output
-        output_mapfile = self._write_datamap_to_file(None, "prepare_output")
-        time_slices_mapfile = self._write_datamap_to_file(None,
-                                                    "prepare_time_slices")
-        ms_per_image_mapfile = self._write_datamap_to_file(None,
-                                                         "ms_per_image")
-
-        # get some parameters from the imaging pipeline parset:
-        slices_per_image = self.parset.getInt("Imaging.slices_per_image")
-        subbands_per_image = self.parset.getInt("Imaging.subbands_per_image")
-
-        outputs = self.run_task("imager_prepare", input_ms_map_path,
-                parset = ndppp_parset_path,
-                target_mapfile = target_mapfile,
-                slices_per_image = slices_per_image,
-                subbands_per_image = subbands_per_image,
-                mapfile = output_mapfile,
-                slices_mapfile = time_slices_mapfile,
-                ms_per_image_mapfile = ms_per_image_mapfile,
-                working_directory = self.scratch_directory,
-                processed_ms_dir = processed_ms_dir,
-                add_beam_tables = add_beam_tables,
-                do_rficonsole = False)
-
-        # validate that the prepare phase produced the correct data
-        output_keys = list(outputs.keys())
-        for key in ('mapfile', 'slices_mapfile', 'ms_per_image_mapfile'):
-            if key not in output_keys:
-                error_msg = "The imager_prepare master script did not "\
-                            "return correct data. missing: {0}".format(key)
-                self.logger.error(error_msg)
-                raise PipelineException(error_msg)
-
-        # Return the mapfiles paths with processed data
-        return output_mapfile, outputs["slices_mapfile"], ms_per_image_mapfile, \
-            processed_ms_dir
-
-    @xml_node
-    def _create_dbs(self, input_map_path, timeslice_map_path, 
-                    major_cycle, source_list_map_path , 
-                    skip_create_dbs = False):
-        """
-        Create an instrument model (parmdb) and a sky model (sourcedb) for
-        each of the concatenated input measurement sets
-        """
-        # Create the parameters set
-        parset = self.parset.makeSubset("GSM.")
-
-        # create the files that will contain the output of the recipe
-        parmdbs_map_path = self._write_datamap_to_file(None, "parmdbs",
-                    "parmdbs output mapfile")
-        sourcedb_map_path = self._write_datamap_to_file(None, "sky_files",
-                    "source db output mapfile")
-
-        # run the master script
-        if skip_create_dbs:
-            pass
-        else:
-            self.run_task("imager_create_dbs", input_map_path,
-                        monetdb_hostname = parset.getString("monetdb_hostname"),
-                        monetdb_port = parset.getInt("monetdb_port"),
-                        monetdb_name = parset.getString("monetdb_name"),
-                        monetdb_user = parset.getString("monetdb_user"),
-                        monetdb_password = parset.getString("monetdb_password"),
-                        assoc_theta = parset.getString("assoc_theta"),
-                        sourcedb_suffix = ".sourcedb",
-                        slice_paths_mapfile = timeslice_map_path,
-                        parmdb_suffix = ".parmdb",
-                        parmdbs_map_path = parmdbs_map_path,
-                        sourcedb_map_path = sourcedb_map_path,
-                        source_list_map_path = source_list_map_path,
-                        working_directory = self.scratch_directory,
-                        major_cycle = major_cycle)
-
-        return parmdbs_map_path, sourcedb_map_path
-
-    # TODO: Move these helpers to the parent class
-    def _write_parset_to_file(self, parset, parset_name, message):
-        """
-        Write the supplied parameterset to the parset directory in the job
-        dir, using the filename supplied in parset_name.
-        Return the full path to the created file.
-        """
-        parset_dir = os.path.join(
-            self.config.get("layout", "job_directory"), "parsets")
-        # create the parset dir if it does not exist
-        create_directory(parset_dir)
-
-        # write the content to a new parset file
-        parset_path = os.path.join(parset_dir,
-                         "{0}.parset".format(parset_name))
-        parset.writeFile(parset_path)
-
-        # display a debug log entry with path and message
-        self.logger.debug("Wrote parset to path <{0}> : {1}".format(
-                               parset_path, message))
-
-        return parset_path
-
-    def _write_datamap_to_file(self, datamap, mapfile_name, message = ""):
-        """
-        Write the supplied map to a mapfile in the mapfile directory in the
-        job dir, using the filename supplied in mapfile_name.
-        Return the full path to the created file.
-        If the supplied datamap is None the file is touched if it does not
-        exist; existing files are kept as is.
-        """
-
-        mapfile_dir = os.path.join(
-            self.config.get("layout", "job_directory"), "mapfiles")
-        # create the mapfile_dir if it does not exist
-        create_directory(mapfile_dir)
-
-        # construct the path of the new mapfile
-        mapfile_path = os.path.join(mapfile_dir,
-                         "{0}.map".format(mapfile_name))
-
-        # save the datamap and log, or touch an empty placeholder mapfile
-        if datamap is not None:
-            datamap.save(mapfile_path)
-
-            self.logger.debug(
-            "Wrote mapfile <{0}>: {1}".format(mapfile_path, message))
-        else:
-            if not os.path.exists(mapfile_path):
-                DataMap().save(mapfile_path)
-
-                self.logger.debug(
-                    "Touched mapfile <{0}>: {1}".format(mapfile_path, message))
-
-        return mapfile_path
-
-
-    # This functionality should be moved outside into MOM / a default
-    # template. This is now static; we should be able to control this.
-    def _selfcal_modify_parset(self, parset, parset_name):
-        """
-        Modification of the BBS and pyBDSM parsets for the selfcal
-        implementation: add, remove and modify some values,
-        done by Nicolas Vilchez.
-        """
-        if parset_name == "bbs":
-            parset.replace('Step.solve.Model.Beam.UseChannelFreq', 'True')
-            parset.replace('Step.solve.Model.Ionosphere.Enable', 'F')
-            parset.replace('Step.solve.Model.TEC.Enable', 'F')
-            parset.replace('Step.correct.Model.Beam.UseChannelFreq', 'True')
-            parset.replace('Step.correct.Model.TEC.Enable', 'F')
-            parset.replace('Step.correct.Model.Phasors.Enable', 'T')
-            parset.replace('Step.correct.Output.WriteCovariance', 'T')
-
-            # must be erased; by default replace with the default value
-            parset.replace('Step.solve.Baselines', '*&')
-
-            parset.replace('Step.solve.Solve.Mode', 'COMPLEX')
-            parset.replace('Step.solve.Solve.CellChunkSize', '100')
-            parset.replace('Step.solve.Solve.PropagateSolutions', 'F')
-            parset.replace('Step.solve.Solve.Options.MaxIter', '100')
-
-        # both pyBDSM passes currently use identical settings
-        if parset_name in ("pybdsm_first_pass.par",
-                           "pybdsm_second_pass.par"):
-            parset.replace('advanced_opts', 'True')
-            parset.replace('atrous_do', 'True')
-            parset.replace('rms_box', '(80.0,15.0)')
-            parset.replace('thresh_isl', '5')
-            parset.replace('thresh_pix', '5')
-            parset.replace('adaptive_rms_box', 'True')
-            parset.replace('blank_limit', '1E-4')
-            parset.replace('ini_method', 'curvature')
-            parset.replace('thresh', 'hard')
-
-
-if __name__ == '__main__':
-    sys.exit(selfcal_imager_pipeline().main())
diff --git a/CEP/Pipeline/recipes/sip/master/imager_awimager.py b/CEP/Pipeline/recipes/sip/master/imager_awimager.py
deleted file mode 100644
index 2afebe04f23..00000000000
--- a/CEP/Pipeline/recipes/sip/master/imager_awimager.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#                                                         LOFAR IMAGING PIPELINE
-#
-#                                    Example recipe with simple job distribution
-#                                                          Wouter Klijn, 2010
-#                                                      swinbank@transientskp.org
-# ------------------------------------------------------------------------------
-import sys
-import os
-import copy
-import lofarpipe.support.lofaringredient as ingredient
-from lofarpipe.support.baserecipe import BaseRecipe
-from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
-from lofarpipe.support.remotecommand import ComputeJob
-from lofarpipe.support.data_map import DataMap, validate_data_maps
-
-class imager_awimager(BaseRecipe, RemoteCommandRecipeMixIn):
-    """
-    Master script for the awimager. Collects arguments from command line and
-    pipeline inputs.
-    
-    1. Load mapfiles and validate these
-    2. Run the awimage node scripts
-    3. Retrieve output. Construct output map file succesfull runs
-    
-    Details regarding the implementation of the imaging step can be found in 
-    the node recipe 
-    **CommandLine Arguments**
-    
-    A mapfile containing (node, datafile) pairs. The measurements set use as
-    input for awimager executable  
- 
-    """
-    inputs = {
-        'executable': ingredient.ExecField(
-            '--executable',
-            help = "The full path to the  awimager executable"
-        ),
-        'parset': ingredient.FileField(
-            '-p', '--parset',
-            help = "The full path to a awimager configuration parset."
-        ),
-        'nthreads': ingredient.IntField(
-            '--nthreads',
-            default=8,
-            help="Number of threads per process"
-        ),
-        'working_directory': ingredient.StringField(
-            '-w', '--working-directory',
-            help = "Working directory used on output nodes. Results location"
-        ),
-        'output_image': ingredient.StringField(
-            '--output-image',
-            help = "Path of the image to be create by the awimager"
-        ),
-        'mapfile': ingredient.StringField(
-            '--mapfile',
-            help = "Full path for output mapfile. A list of the"
-                 "successfully generated images will be written here"
-        ),
-        'sourcedb_path': ingredient.StringField(
-            '--sourcedb-path',
-            help = "Full path of sourcedb used to create a mask for known sources"
-        ),
-        'mask_patch_size': ingredient.FloatField(
-            '--mask-patch-size',
-            help = "Scale factor for patches in the awimager mask"
-        ),
-        'autogenerate_parameters': ingredient.BoolField(
-            '--autogenerate-parameters',
-            default = True,
-            help = "Turns on the autogeneration of: cellsize, image-size, fov."
-            " MSSS 'type' functionality"
-        ),
-        'specify_fov': ingredient.BoolField(
-            '--specify-fov',
-            default = False,
-            help = "Calculated image parameters are relative to fov; this"
-            " parameter is active when autogenerate_parameters is False"
-        ),
-        'fov': ingredient.FloatField(
-            '--fov',
-            default = 0.0,
-            help = "Calculated image parameters are relative to this"
-            " field of view in arcsec. This parameter is mandatory when"
-            " specify_fov is True"
-        )
-    }
-
-    outputs = {
-        'mapfile': ingredient.StringField(),
-    }
-
-    def go(self):
-        """
-        This member contains all the functionality of the imager_awimager.
-        Functionality is all located at the node side of the script.
-        """
-        super(imager_awimager, self).go()
-        self.logger.info("Starting imager_awimager run")
-
-        # *********************************************************************
-        # 1. collect the inputs and validate
-        input_map = DataMap.load(self.inputs['args'][0])
-        sourcedb_map = DataMap.load(self.inputs['sourcedb_path'])
-
-        if not validate_data_maps(input_map, sourcedb_map):
-            self.logger.error(
-                        "the supplied input_ms mapfile and sourcedb mapfile"
-                        " are incorrect. Aborting")
-            self.logger.error(repr(input_map))
-            self.logger.error(repr(sourcedb_map))
-            return 1
-
-        # *********************************************************************
-        # 2. Start the node side of the awimager recipe
-        # Compile the command to be executed on the remote machine
-        node_command = "python3 %s" % (self.__file__.replace("master", "nodes"))
-        jobs = []
-
-        output_map = copy.deepcopy(input_map)        
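-        # Align the skip fields: if any of the three maps skips an entry,
-        # mark it skipped in all of them so the SkipIterators stay in step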
-        for w, x, y in zip(input_map, output_map, sourcedb_map):
-            w.skip = x.skip = y.skip = (
-                w.skip or x.skip or y.skip
-            )
-
-        sourcedb_map.iterator = input_map.iterator = output_map.iterator = \
-            DataMap.SkipIterator
-
-        for idx, (measurement_item, source_item) in enumerate(zip(input_map, sourcedb_map)):
-            if measurement_item.skip or source_item.skip:
-                jobs.append(None)
-                continue
-            # both the sourcedb and the measurement are in a map
-            # unpack both
-            host, measurement_path = measurement_item.host, measurement_item.file
-            host2, sourcedb_path = source_item.host, source_item.file
-
-            # use unique working directories per job, to prevent interference between jobs on a global fs
-            working_dir = os.path.join(self.inputs['working_directory'], "imager_awimager_{0}".format(idx))
-
-            # construct and save the output name
-            arguments = [self.inputs['executable'],
-                         self.environment,
-                         self.inputs['parset'],
-                         working_dir,
-                         # put in unique dir, as node script wants to put private .par files next to it
-                         "%s_%s/image" % (self.inputs['output_image'], idx), 
-                         measurement_path,
-                         sourcedb_path,
-                         self.inputs['mask_patch_size'],
-                         self.inputs['autogenerate_parameters'],
-                         self.inputs['specify_fov'],
-                         self.inputs['fov'],
-                         ]
-
-            jobs.append(ComputeJob(host, node_command, arguments,
-                    resources={
-                        "cores": self.inputs['nthreads']
-                    }))
-        self._schedule_jobs(jobs)
-
-        # *********************************************************************
-        # 3. Check output of the node scripts
-
-        for job, output_item in zip(jobs, output_map):
-            # the SkipIterator above means jobs were only created for
-            # non-skipped entries, so jobs and output items align one-to-one
-            if "image" not in job.results:
-                output_item.file = "failed"
-                output_item.skip = True
-            else:
-                output_item.file = job.results["image"]
-                output_item.skip = False
-
-        # Check whether any runs finished successfully
-        successful_runs = any(not item.skip for item in output_map)
-
-        if not successful_runs:
-            self.logger.error(
-                    "None of the started awimager runs finished correctly")
-            self.logger.error(
-                    "No work left to be done: exiting with error status")
-            return 1
-
-        # If partial success
-        if self.error.isSet():
-            self.logger.warn("Failed awimager node run detected. Continuing"
-                             " with the successful tasks.")
-
-        self._store_data_map(self.inputs['mapfile'], output_map,
-                             "mapfile containing produces awimages")
-
-        self.outputs["mapfile"] = self.inputs['mapfile']
-        return 0
-
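-# The master-to-node contract, sketched: the node script (deleted alongside
-# this master) receives the arguments in the order they are appended in go()
-# above, i.e. (executable, environment, parset, working_dir, output_image,
-# measurement_path, sourcedb_path, mask_patch_size, autogenerate_parameters,
-# specify_fov, fov). This ordering is inferred from the arguments list; the
-# node-side signature itself is not shown in this patch.
-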
-
-if __name__ == "__main__":
-    sys.exit(imager_awimager().main())
-
diff --git a/CEP/Pipeline/recipes/sip/master/imager_bbs.py b/CEP/Pipeline/recipes/sip/master/imager_bbs.py
deleted file mode 100644
index 2d8558bc34a..00000000000
--- a/CEP/Pipeline/recipes/sip/master/imager_bbs.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# LOFAR IMAGING PIPELINE
-# imager_bbs BBS (BlackBoard Selfcal) recipe
-# Wouter Klijn
-# klijn@astron.nl
-# ------------------------------------------------------------------------------
-
-import sys
-import os
-
-from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
-from lofarpipe.support.baserecipe import BaseRecipe
-from lofarpipe.support.data_map import DataMap, MultiDataMap
-import lofarpipe.support.lofaringredient as ingredient
-from lofarpipe.support.remotecommand import ComputeJob
-
-class imager_bbs(BaseRecipe, RemoteCommandRecipeMixIn):
-    """
-    The imager_bbs master performs a BBS run based on the supplied parset; it
-    is a shallow wrapper around BBS. Additional functionality compared to the
-    default bbs recipe is the capability to add an id that allows multiple
-    runs to have different output files.
-
-    1. Loads and validates that the input mapfiles are correct
-    2. Starts the node script, using indexed path names for the
-       communication
-    3. Checks if all nodes succeeded; if so, returns a mapfile with the
-       calibrated ms
-
-    **Command line Arguments**
-
-    1. Path to a mapfile with measurement sets to calibrate
-
-    """
-    inputs = {
-        'parset': ingredient.FileField(
-            '-p', '--parset',
-            help = "BBS configuration parset"
-        ),
-        'nthreads': ingredient.IntField(
-            '--nthreads',
-            default = 8,
-            help = "Number of threads per process"
-        ),
-        'bbs_executable': ingredient.StringField(
-            '--bbs-executable',
-            help = "BBS standalone executable (bbs-reducer)"
-        ),
-        'instrument_mapfile': ingredient.FileField(
-            '--instrument-mapfile',
-            help = "Full path to the mapfile containing the names of the "
-                 "instrument model files generated by the `parmdb` recipe"
-        ),
-        'sourcedb_mapfile': ingredient.FileField(
-            '--sourcedb-mapfile',
-            help = "Full path to the mapfile containing the names of the "
-                 "sourcedbs generated by the `sourcedb` recipe"
-        ),
-        'id': ingredient.IntField(
-            '--id',
-            default = 0,
-            help = "Optional integer id for distinguishing multiple runs"
-        ),
-        'mapfile': ingredient.StringField(
-            '--mapfile',
-            help = "Full path to the file containing the output data products"
-        ),
-    }
-
-    outputs = {
-        'mapfile': ingredient.FileField(
-            help = "Full path to a mapfile describing the processed data"
-        )
-    }
-
-    def go(self):
-        """
-        imager_bbs functionality. Called by the framework, performing all the work
-        """
-        super(imager_bbs, self).go()
-        self.logger.info("Starting imager_bbs run")
-
-        # ********************************************************************
-        # 1. Load and validate the data
-
-        ms_map = MultiDataMap.load(self.inputs['args'][0])
-        parmdb_map = MultiDataMap.load(self.inputs['instrument_mapfile'])
-        sourcedb_map = DataMap.load(self.inputs['sourcedb_mapfile'])
-
-        # TODO: DataMap extension
-#        #Check if the input has equal length and on the same nodes
-#        if not validate_data_maps(ms_map, parmdb_map):
-#            self.logger.error("The combination of mapfiles failed validation:")
-#            self.logger.error("ms_map: \n{0}".format(ms_map))
-#            self.logger.error("parmdb_map: \n{0}".format(parmdb_map))
-#            return 1
-
-        # *********************************************************************
-        # 2. Start the node scripts
-        jobs = []
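-        # The node-side implementation has the same filename as this master
-        # but lives under .../nodes/ instead of .../master/, hence the
-        # string replace below.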
-        node_command = " python3 %s" % (self.__file__.replace("master", "nodes"))
-        map_dir = os.path.join(
-                        self.config.get("layout", "job_directory"), "mapfiles")
-        run_id = str(self.inputs.get("id"))
-
-        # Update the skip fields of the three maps. If 'skip' is True in any
-        # of these maps, then 'skip' must be set to True in all maps.
-        for w, x, y in zip(ms_map, parmdb_map, sourcedb_map):
-            w.skip = x.skip = y.skip = (
-                w.skip or x.skip or y.skip
-            )
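-        # This mirrors the skip synchronisation that align_data_maps() from
-        # lofarpipe.support.data_map performs in the other recipes, e.g.
-        # align_data_maps(ms_map, parmdb_map, sourcedb_map).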
-
-        ms_map.iterator = parmdb_map.iterator = sourcedb_map.iterator = \
-            DataMap.SkipIterator
-        for (idx, (ms, parmdb, sourcedb)) in enumerate(zip(ms_map, parmdb_map, sourcedb_map)):
-            # host is same for each entry (validate_data_maps)
-            host, ms_list = ms.host, ms.file
-
-            # Write data maps to MultiDataMaps
-            ms_list_path = os.path.join(
-                    map_dir, "%s-%s_map_%s.map" % (host, idx, run_id))
-            MultiDataMap([tuple([host, ms_list, False])]).save(ms_list_path)
-
-            parmdb_list_path = os.path.join(
-                    map_dir, "%s-%s_parmdb_%s.map" % (host, idx, run_id))
-            MultiDataMap(
-                [tuple([host, parmdb.file, False])]).save(parmdb_list_path)
-
-            sourcedb_list_path = os.path.join(
-                    map_dir, "%s-%s_sky_%s.map" % (host, idx, run_id))
-            MultiDataMap(
-                [tuple([host, [sourcedb.file], False])]).save(sourcedb_list_path)
-
-            arguments = [self.inputs['bbs_executable'],
-                         self.inputs['parset'],
-                         ms_list_path, parmdb_list_path, sourcedb_list_path]
-            jobs.append(ComputeJob(host, node_command, arguments,
-                    resources = {
-                        "cores": self.inputs['nthreads']
-                    }))
-
-        # start and wait till all are finished
-        self._schedule_jobs(jobs)
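-        # _schedule_jobs() blocks until all jobs have finished; if any node
-        # exited with a non-zero status, self.error is set (checked in step 3).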
-
-        # **********************************************************************
-        # 3. validate the node output and construct the output mapfile.
-        if self.error.isSet():    # if one of the nodes failed
-            self.logger.error("One of the nodes failed while performing"
-                              "a BBS run. Aborting: concat.ms corruption")
-            return 1
-
-        # return the output: the measurement sets that are calibrated;
-        # the calibrated data is placed in the ms sets
-        MultiDataMap(ms_map).save(self.inputs['mapfile'])
-        self.logger.info("Wrote file with calibrated data")
-
-        self.outputs['mapfile'] = self.inputs['mapfile']
-        return 0
-
-if __name__ == '__main__':
-    sys.exit(imager_bbs().main())
diff --git a/CEP/Pipeline/recipes/sip/master/imager_create_dbs.py b/CEP/Pipeline/recipes/sip/master/imager_create_dbs.py
deleted file mode 100644
index b2b0738d8f0..00000000000
--- a/CEP/Pipeline/recipes/sip/master/imager_create_dbs.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# LOFAR AUTOMATIC IMAGING PIPELINE
-# imager_create_dbs (master)
-# Wouter Klijn, 2012
-# klijn@astron.nl
-# ------------------------------------------------------------------------------
-import os
-import sys
-import copy
-
-import lofarpipe.support.lofaringredient as ingredient
-from lofarpipe.support.baserecipe import BaseRecipe
-from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
-from lofarpipe.support.remotecommand import ComputeJob
-from lofarpipe.support.data_map import DataMap, MultiDataMap, \
-                                       validate_data_maps, align_data_maps
-
-class imager_create_dbs(BaseRecipe, RemoteCommandRecipeMixIn):
-    """
-    Responsible for creating a number of databases needed by the imaging
-    pipeline:
-
-    1. Using the pointing extracted from the input measurement set, a database
-       of sources is created based on information in the global sky model
-       (GSM). One source db is created for each image/node:
-
-       a. The pointing is supplied to the GSM database, resulting in a
-          sourcelist
-       b. This sourcelist is converted into a source db
-       c. Possible additional sourcelists from external sources are added to
-          this source list
-    2. For each of the timeslices in the image a parmdb is created. Each
-       timeslice is recorded at a different time and needs its own calibration
-       and therefore instrument parameters.
-    """
-
-    inputs = {
-        'working_directory': ingredient.StringField(
-            '-w', '--working-directory',
-            help = "Working directory used on nodes. Results location"
-        ),
-        'sourcedb_suffix': ingredient.StringField(
-            '--sourcedb-suffix',
-            default = ".sky",
-            help = "suffix for created sourcedbs"
-        ),
-        'monetdb_hostname': ingredient.StringField(
-            '--monetdb-hostname',
-            help = "Hostname of monet database"
-        ),
-        'monetdb_port': ingredient.IntField(
-            '--monetdb-port',
-            help = "port for monet database"
-        ),
-        'monetdb_name': ingredient.StringField(
-            '--monetdb-name',
-            help = "db name of monet database"
-        ),
-        'monetdb_user': ingredient.StringField(
-            '--monetdb-user',
-            help = "user on the monet database"
-        ),
-        'monetdb_password': ingredient.StringField(
-            '--monetdb-password',
-            help = "password on monet database"
-        ),
-        'assoc_theta': ingredient.StringField(
-            '--assoc-theta',
-            default = "",
-            help = "assoc_theta is used in creating the skymodel, default == None"
-        ),
-        'parmdb_executable': ingredient.ExecField(
-            '--parmdbm-executable',
-            help = "Location of the parmdb executable"
-        ),
-        'slice_paths_mapfile': ingredient.FileField(
-            '--slice-paths-mapfile',
-            help = "Location of the mapfile containing the slice paths"
-        ),
-        'parmdb_suffix': ingredient.StringField(
-            '--parmdb-suffix',
-            help = "suffix of the to be created paramdbs"
-        ),
-        'makesourcedb_path': ingredient.ExecField(
-             '--makesourcedb-path',
-             help = "Path to makesourcedb executable."
-        ),
-        'source_list_map_path': ingredient.StringField(
-             '--source-list-map-path',
-             help = "Path to sourcelist map from external source (eg. bdsm) "\
-             "use an empty string for gsm generated data"
-        ),
-        'parmdbs_map_path': ingredient.StringField(
-            '--parmdbs-map-path',
-            help = "path to mapfile containing produced parmdb files"
-        ),
-        'sourcedb_map_path': ingredient.StringField(
-            '--sourcedb-map-path',
-            help = "path to mapfile containing produced sourcedb files"
-        ),
-        'major_cycle': ingredient.IntField(
-            '--major_cycle',
-            default = 0,
-            help = "The number of the current cycle"
-        ),
-    }
-
-    outputs = {
-        'sourcedb_map_path': ingredient.FileField(
-            help = "On succes contains path to mapfile containing produced "
-            "sourcedb files"),
-        'parmdbs_map_path': ingredient.FileField(
-            help = "On succes contains path to mapfile containing produced"
-            "parmdb files")
-    }
-
-    def __init__(self):
-        super(imager_create_dbs, self).__init__()
-
-    def go(self):
-        super(imager_create_dbs, self).go()
-
-        # get assoc_theta, convert from empty string if needed
-        assoc_theta = self.inputs["assoc_theta"]
-        if assoc_theta == "":
-            assoc_theta = None
-
-        # Load mapfile data from files
-        self.logger.info(self.inputs["slice_paths_mapfile"])
-        slice_paths_map = MultiDataMap.load(self.inputs["slice_paths_mapfile"])
-        input_map = DataMap.load(self.inputs['args'][0])
-        source_list_map = DataMap.load(self.inputs['source_list_map_path'])
-
-        if self._validate_input_data(input_map, slice_paths_map):
-            return 1
-
-        # Run the nodes with now collected inputs
-        jobs, output_map = self._run_create_dbs_node(
-                 input_map, slice_paths_map, assoc_theta,
-                 source_list_map)
-
-        # Collect the output of the node scripts write to (map) files
-        return self._collect_and_assign_outputs(jobs, output_map,
-                                    slice_paths_map)
-
-    def _validate_input_data(self, input_map, slice_paths_map):
-        """
-        Performs a validation of the supplied input_map and slice_paths_map
-        (matching the order in which go() passes them).
-        Displays an error message if this fails
-        """
-        validation_failed = None
-        error_received = None
-        try:
-            validation_failed = not validate_data_maps(slice_paths_map,
-                                                     input_map)
-        except AssertionError as exception:
-            validation_failed = True
-            error_received = str(exception)
-
-        if validation_failed:
-            self.logger.error(error_received)
-            self.logger.error("Incorrect mapfiles: {0} and {1}".format(
-                 self.inputs["slice_paths_mapfile"], self.inputs['args'][0]))
-            self.logger.error("content input_map: \n{0}".format(input_map))
-            self.logger.error("content slice_paths_map: \n{0}".format(
-                                                            slice_paths_map))
-            # return with failure
-            return 1
-
-        # return with zero (all is ok state)
-        return 0
-
-    def _run_create_dbs_node(self, input_map, slice_paths_map,
-             assoc_theta, source_list_map):
-        """
-        Decompose the input mapfiles into tasks for specific nodes and
-        distribute these to the node recipes. Wait for the jobs to finish and
-        return the list of created jobs.
-        """
-        # Compile the command to be executed on the remote machine
-        node_command = " python3 %s" % (self.__file__.replace("master", "nodes"))
-        # create jobs
-        jobs = []
-        output_map = copy.deepcopy(input_map)
-
-        # Update the skip fields of the four maps. If 'skip' is True in any of
-        # these maps, then 'skip' must be set to True in all maps.
-        align_data_maps(input_map, output_map, slice_paths_map,
-                        source_list_map)
-
-        source_list_map.iterator = slice_paths_map.iterator = \
-               input_map.iterator = DataMap.SkipIterator
-        for idx, (input_item, slice_item, source_list_item) in enumerate(zip(
-                                  input_map, slice_paths_map, source_list_map)):
-            host_ms, concat_ms = input_item.host, input_item.file
-            host_slice, slice_paths = slice_item.host, slice_item.file
-
-            # Create the parameters depending on the input_map
-            sourcedb_target_path = \
-                  concat_ms + self.inputs["sourcedb_suffix"]
-
-            # use unique working directories per job, to prevent interference between jobs on a global fs
-            working_dir = os.path.join(self.inputs['working_directory'], "imager_create_dbs_{0}".format(idx))
-
-            # The actual call for the node script
-            arguments = [concat_ms,
-                         sourcedb_target_path,
-                         self.inputs["monetdb_hostname"],
-                         self.inputs["monetdb_port"],
-                         self.inputs["monetdb_name"],
-                         self.inputs["monetdb_user"],
-                         self.inputs["monetdb_password"],
-                         assoc_theta,
-                         self.inputs["parmdb_executable"],
-                         slice_paths,
-                         self.inputs["parmdb_suffix"],
-                         self.environment,
-                         working_dir,
-                         self.inputs["makesourcedb_path"],
-                         source_list_item.file,
-                         self.inputs["major_cycle"]]
-
-            jobs.append(ComputeJob(host_ms, node_command, arguments))
-        # Wait for the nodes to finish
-        if len(jobs) > 0:
-            self._schedule_jobs(jobs)
-
-        return jobs, output_map
-
-    def _collect_and_assign_outputs(self, jobs, output_map, slice_paths_map):
-        """
-        Collect and combine the outputs of the individual create_dbs node
-        recipes. Combine into output mapfiles and save these at the supplied
-        path locations
-        """
-        # Create a container for the output parmdbs: same host and skip
-        # field as the output map, with an (initially) empty file list
-        output_map.iterator = DataMap.TupleIterator
-        parmdbs_list = []
-        # loop over the raw data including the skip file (use the data member)
-        for output_entry in output_map.data:
-            parms_tuple = tuple([output_entry.host, [],
-                                output_entry.skip])
-            parmdbs_list.append(parms_tuple)
-
-        parmdbs_map = MultiDataMap(parmdbs_list)
-
-        output_map.iterator = parmdbs_map.iterator = DataMap.SkipIterator    # The maps are synced
-        succesfull_run = False
-        for (output_item, parmdbs_item, job) in zip(
-                                                output_map, parmdbs_map, jobs):
-            node_succeeded = "parmdbs" in job.results and \
-                    "sourcedb" in job.results
-
-            host = output_item.host
-
-            # The current job has to be skipped (due to skip field)
-            # Or if the node failed:
-            if not node_succeeded:
-                self.logger.warn("Warning failed selfcalCreateDBs run "
-                    "detected: No sourcedb file created, {0} continue".format(
-                                                            host))
-                output_item.file = "failed"
-                output_item.skip = True
-                parmdbs_item.file = []
-                parmdbs_item.skip = True
-
-            # Else it succeeded and we can write the results
-            else:
-                succesfull_run = True
-                output_item.file = job.results["sourcedb"]
-                parmdbs_item.file = job.results["parmdbs"]
-
-                # we also need to manually set the skip for this new
-                # file list
-                parmdbs_item.file_skip = [False] * len(job.results["parmdbs"])
-
-        # Fail if none of the nodes returned all data
-        if not succesfull_run:
-            self.logger.error("The creation of dbs on the nodes failed:")
-            self.logger.error("Not a single node produces all needed data")
-            self.logger.error(
-                "products. sourcedb_files: {0}".format(output_map))
-            self.logger.error("parameter dbs: {0}".format(parmdbs_map))
-            return 1
-
-        # write the mapfiles
-        output_map.save(self.inputs["sourcedb_map_path"])
-        parmdbs_map.save(self.inputs["parmdbs_map_path"])
-        self.logger.debug("Wrote sourcedb dataproducts: {0} \n {1}".format(
-            self.inputs["sourcedb_map_path"], self.inputs["parmdbs_map_path"]))
-
-        # Set the outputs
-        self.outputs['sourcedb_map_path'] = self.inputs["sourcedb_map_path"]
-        self.outputs['parmdbs_map_path'] = self.inputs["parmdbs_map_path"]
-
-        return 0
-
-if __name__ == "__main__":
-    sys.exit(imager_create_dbs().main())
diff --git a/CEP/Pipeline/recipes/sip/master/imager_finalize.py b/CEP/Pipeline/recipes/sip/master/imager_finalize.py
deleted file mode 100644
index e0652234d5b..00000000000
--- a/CEP/Pipeline/recipes/sip/master/imager_finalize.py
+++ /dev/null
@@ -1,167 +0,0 @@
-
-import sys
-
-import lofarpipe.support.lofaringredient as ingredient
-from lofarpipe.support.baserecipe import BaseRecipe
-from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
-from lofarpipe.support.remotecommand import ComputeJob
-from lofarpipe.support.data_map import DataMap, validate_data_maps, \
-                                       align_data_maps
-
-class imager_finalize(BaseRecipe, RemoteCommandRecipeMixIn):
-    """
-    The imager_finalize recipe performs a number of steps needed for
-    integrating the msss_imager_pipeline in the LOFAR framework: it places the
-    image on the output location in the correct image type (hdf5).
-    It also adds some meta data collected from the individual measurement sets
-    and from the found data products.
-
-    This recipe does not have positional commandline arguments
-    """
-    inputs = {
-        'awimager_output_map': ingredient.FileField(
-            '--awimager-output-mapfile',
-            help = """Mapfile containing (host, path) pairs of created sky
-                   images """
-        ),
-        'ms_per_image_map': ingredient.FileField(
-            '--ms-per-image-map',
-            help = '''Mapfile containing (host, path) pairs of mapfiles used
-            to create the image on that node'''
-        ),
-        'sourcelist_map': ingredient.FileField(
-            '--sourcelist-map',
-            help = '''mapfile containing (host, path) pairs to a list of sources
-            found in the image'''
-        ),
-        'sourcedb_map': ingredient.FileField(
-            '--sourcedb_map',
-            help = '''mapfile containing (host, path) pairs to a db of sources
-            found in the image'''
-        ),
-        'target_mapfile': ingredient.FileField(
-            '--target-mapfile',
-            help = "Mapfile containing (host, path) pairs to the concatenated and"
-            "combined measurement set, the source for the actual sky image"
-        ),
-        'minbaseline': ingredient.FloatField(
-            '--minbaseline',
-            help = '''Minimum length of the baseline used for the images'''
-        ),
-        'maxbaseline': ingredient.FloatField(
-            '--maxbaseline',
-            help = '''Maximum length of the baseline used for the images'''
-        ),
-        'output_image_mapfile': ingredient.FileField(
-            '--output-image-mapfile',
-            help = '''mapfile containing (host, path) pairs with the final
-            output image (hdf5) location'''
-        ),
-        'processed_ms_dir': ingredient.StringField(
-            '--processed-ms-dir',
-            help = '''Path to directory for processed measurement sets'''
-        ),
-        'fillrootimagegroup_exec': ingredient.ExecField(
-            '--fillrootimagegroup_exec',
-            help = '''Full path to the fillRootImageGroup executable'''
-        ),
-        'placed_image_mapfile': ingredient.FileField(
-            '--placed-image-mapfile',
-            help = "location of mapfile with proced and correctly placed,"
-                " hdf5 images"
-        )
-    }
-
-    outputs = {
-        'placed_image_mapfile': ingredient.StringField()
-    }
-
-    def go(self):
-        """
-        Steps:
-
-        1. Load and validate the input datamaps
-        2. Run the node parts of the recipe
-        3. Validate node output and format the recipe output
-        """
-        super(imager_finalize, self).go()
-        # *********************************************************************
-        # 1. Load the datamaps
-        awimager_output_map = DataMap.load(
-                                self.inputs["awimager_output_map"])
-        ms_per_image_map = DataMap.load(
-                                    self.inputs["ms_per_image_map"])
-        sourcelist_map = DataMap.load(self.inputs["sourcelist_map"])
-        sourcedb_map = DataMap.load(self.inputs["sourcedb_map"])
-        target_mapfile = DataMap.load(self.inputs["target_mapfile"])
-        output_image_mapfile = DataMap.load(
-                                    self.inputs["output_image_mapfile"])
-        processed_ms_dir = self.inputs["processed_ms_dir"]
-        fillrootimagegroup_exec = self.inputs["fillrootimagegroup_exec"]
-
-        # Align the skip fields
-        align_data_maps(awimager_output_map, ms_per_image_map,
-                sourcelist_map, target_mapfile, output_image_mapfile,
-                sourcedb_map)
-
-        # Set the correct iterator
-        sourcelist_map.iterator = awimager_output_map.iterator = \
-            ms_per_image_map.iterator = target_mapfile.iterator = \
-            output_image_mapfile.iterator = sourcedb_map.iterator = \
-                DataMap.SkipIterator
-
-        # *********************************************************************
-        # 2. Run the node side of the recipe
-        command = " python3 %s" % (self.__file__.replace("master", "nodes"))
-        jobs = []
-        for  (awimager_output_item, ms_per_image_item, sourcelist_item,
-              target_item, output_image_item, sourcedb_item) in zip(
-                  awimager_output_map, ms_per_image_map, sourcelist_map,
-                  target_mapfile, output_image_mapfile, sourcedb_map):
-            # collect the files as argument
-            arguments = [awimager_output_item.file,
-                         ms_per_image_item.file,
-                         sourcelist_item.file,
-                         target_item.file,
-                         output_image_item.file,
-                         self.inputs["minbaseline"],
-                         self.inputs["maxbaseline"],
-                         processed_ms_dir,
-                         fillrootimagegroup_exec,
-                         self.environment,
-                         sourcedb_item.file]
-
-            self.logger.info(
-                "Starting finalize with the folowing args: {0}".format(
-                                                                    arguments))
-            jobs.append(ComputeJob(target_item.host, command, arguments))
-
-        self._schedule_jobs(jobs)
-
-        # *********************************************************************
-        # 3. Validate the performance of the node script and assign output
-        succesful_run = False
-        for (job, output_image_item) in zip(jobs, output_image_mapfile):
-            if "hdf5" not in job.results:
-                # If the output failed set the skip to True
-                output_image_item.skip = True
-            else:
-                succesful_run = True
-                # signal that we have at least a single run finished ok.
-                # No need to set skip in this case
-
-        if not succesful_run:
-            self.logger.warn("Failed finalizer node run detected")
-            return 1
-
-        output_image_mapfile.save(self.inputs['placed_image_mapfile'])
-        self.logger.debug(
-           "Wrote mapfile containing placed hdf5 images: {0}".format(
-                           self.inputs['placed_image_mapfile']))
-        self.outputs["placed_image_mapfile"] = self.inputs[
-                                                    'placed_image_mapfile']
-
-        return 0
-
-if __name__ == '__main__':
-    sys.exit(imager_finalize().main())
diff --git a/CEP/Pipeline/recipes/sip/master/imager_prepare.py b/CEP/Pipeline/recipes/sip/master/imager_prepare.py
deleted file mode 100644
index 4d83fe3cf26..00000000000
--- a/CEP/Pipeline/recipes/sip/master/imager_prepare.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# LOFAR IMAGING PIPELINE
-# Prepare phase master
-#
-# 1. Create input files for individual nodes based on the input mapfile
-# 2. Perform basic input parsing and input validation
-# 3. Call the node scripts with correct input
-# 4. Validate performance
-#
-# Wouter Klijn
-# 2012
-# klijn@astron.nl
-# ------------------------------------------------------------------------------
-
-import os
-import sys
-import copy
-import lofarpipe.support.lofaringredient as ingredient
-from lofarpipe.support.baserecipe import BaseRecipe
-from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
-from lofarpipe.support.remotecommand import ComputeJob
-from lofarpipe.support.data_map import DataMap, MultiDataMap
-
-class imager_prepare(BaseRecipe, RemoteCommandRecipeMixIn):
-    """
-    Prepare phase master:
-
-    1. Validate input
-    2. Create mapfiles with input for the work to be performed on the
-       individual nodes, based on the structured input mapfile. The input
-       mapfile contains a list of measurement sets.
-       Each node computes a single subband group but needs this for all
-       timeslices.
-    3. Call the node scripts with correct input
-    4. Validate performance
-       Only output the measurement sets of nodes that finished successfully
-
-    **Command Line arguments:**
-
-    The only command line argument is the path to a mapfile containing "all"
-    the measurement sets needed for creating the sky images, ordered first on
-    timeslice, then on subband group, and finally on index in the frequency
-    range.
-
-    **Arguments:**
-    """
-
-    inputs = {
-        'ndppp_exec': ingredient.ExecField(
-            '--ndppp-exec',
-            help = "The full path to the ndppp executable"
-        ),
-        'parset': ingredient.FileField(
-            '-p', '--parset',
-            help = "The full path to a prepare parset"
-        ),
-        'working_directory': ingredient.StringField(
-            '-w', '--working-directory',
-            help = "Working directory used by the nodes: local data"
-        ),
-        'nthreads': ingredient.IntField(
-            '--nthreads',
-            default = 8,
-            help = "Number of threads per process"
-        ),
-        'target_mapfile': ingredient.StringField(
-            '--target-mapfile',
-            help = "Contains the node and path to target files, defines"
-               " the number of nodes the script will start on."
-        ),
-        'slices_per_image': ingredient.IntField(
-            '--slices-per-image',
-            help = "The number of (time) slices for each output image"
-        ),
-        'subbands_per_image': ingredient.IntField(
-            '--subbands-per-image',
-            help = "The number of subbands to be collected in each output image"
-        ),
-        'asciistat_executable': ingredient.ExecField(
-            '--asciistat-executable',
-            help = "full path to the ascii stat executable"
-        ),
-        'statplot_executable': ingredient.ExecField(
-            '--statplot-executable',
-            help = "The full path to the statplot executable"
-        ),
-        'msselect_executable': ingredient.ExecField(
-            '--msselect-executable',
-            help = "The full path to the msselect executable "
-        ),
-        'rficonsole_executable': ingredient.ExecField(
-            '--rficonsole-executable',
-            help = "The full path to the rficonsole executable "
-        ),
-        'do_rficonsole': ingredient.BoolField(
-            '--do_rficonsole',
-            default = True,
-            help = "toggle the rficonsole step in preprocessing (default True)"
-        ),
-        'mapfile': ingredient.StringField(
-            '--mapfile',
-            help = "Full path of mapfile; contains a list of the "
-                 "successfully generated and concatenated sub-band groups"
-        ),
-        'slices_mapfile': ingredient.StringField(
-            '--slices-mapfile',
-            help = "Path to mapfile containing the produced subband groups"
-        ),
-        'ms_per_image_mapfile': ingredient.StringField(
-            '--ms-per-image-mapfile',
-            help = "Path to mapfile containing the ms for each produced"
-                "image"
-        ),
-        'processed_ms_dir': ingredient.StringField(
-            '--processed-ms-dir',
-            help = "Path to directory for processed measurment sets"
-        ),
-        'add_beam_tables': ingredient.BoolField(
-            '--add_beam_tables',
-            default = False,
-            help = "Developer option, adds beamtables to ms"
-        )
-    }
-
-    outputs = {
-        'mapfile': ingredient.FileField(
-            help = "path to a mapfile Which contains a list of the"
-                 "successfully generated and concatenated measurement set"
-            ),
-        'slices_mapfile': ingredient.FileField(
-            help = "Path to mapfile containing the produced subband groups"),
-
-        'ms_per_image_mapfile': ingredient.FileField(
-            help = "Path to mapfile containing the used ms for each produced"
-                "image")
-    }
-
-    def go(self):
-        """
-        Entry point for recipe: Called by the pipeline framework
-        """
-        super(imager_prepare, self).go()
-        self.logger.info("Starting imager_prepare run")
-        job_directory = self.config.get("layout", "job_directory")
-        # *********************************************************************
-        # input data
-        input_map = DataMap.load(self.inputs['args'][0])
-        output_map = DataMap.load(self.inputs['target_mapfile'])
-        slices_per_image = self.inputs['slices_per_image']
-        subbands_per_image = self.inputs['subbands_per_image']
-        # Validate input
-        if not self._validate_input_map(input_map, output_map, slices_per_image,
-                            subbands_per_image):
-            return 1
-
-        # outputs
-        output_ms_mapfile_path = self.inputs['mapfile']
-
-        # *********************************************************************
-        # schedule the actual work
-        # TODO: Refactor this function into: load data, perform work,
-        # create output
-        node_command = " python3 %s" % (self.__file__.replace("master", "nodes"))
-
-        jobs = []
-        paths_to_image_mapfiles = []
-        n_subband_groups = len(output_map)    # needed for subsets in sb list
-
-        globalfs = self.config.has_option("remote", "globalfs") and self.config.getboolean("remote", "globalfs")
-
-        for idx_sb_group, item in enumerate(output_map):
-            # create the input files for this node
-            self.logger.debug("Creating input data subset for processing"
-                              "on: {0}".format(item.host))
-            inputs_for_image_map = \
-                self._create_input_map_for_sbgroup(
-                                slices_per_image, n_subband_groups,
-                                subbands_per_image, idx_sb_group, input_map)
-
-            # Save the mapfile
-            inputs_for_image_mapfile_path = os.path.join(
-               job_directory, "mapfiles",
-               "ms_per_image_{0}.map".format(idx_sb_group))
-
-            self._store_data_map(inputs_for_image_mapfile_path,
-                                inputs_for_image_map, "inputmap for location")
-
-            # skip the current step if skip is set; we cannot use the
-            # SkipIterator here because enumerate depends on the index in
-            # the map
-            if item.skip:
-                # assure that the mapfile is correct
-                paths_to_image_mapfiles.append(
-                    tuple([item.host, [], True]))
-                continue
-
-            # save the (input) ms, as a list of  mapfiles
-            paths_to_image_mapfiles.append(
-                tuple([item.host, inputs_for_image_mapfile_path, False]))
-
-            # use unique working directories per job, to prevent interference between jobs on a global fs
-            working_dir = os.path.join(self.inputs['working_directory'], "imager_prepare_{0}".format(idx_sb_group))
-
-            arguments = [self.environment,
-                         self.inputs['parset'],
-                         working_dir,
-                         self.inputs['processed_ms_dir'],
-                         self.inputs['ndppp_exec'],
-                         item.file,
-                         slices_per_image,
-                         subbands_per_image,
-                         inputs_for_image_mapfile_path,
-                         self.inputs['asciistat_executable'],
-                         self.inputs['statplot_executable'],
-                         self.inputs['msselect_executable'],
-                         self.inputs['rficonsole_executable'],
-                         self.inputs['do_rficonsole'],
-                         self.inputs['add_beam_tables'],
-                         globalfs]
-
-            jobs.append(ComputeJob(item.host, node_command, arguments,
-                    resources = {
-                        "cores": self.inputs['nthreads']
-                    }))
-
-        # Hand over the job(s) to the pipeline scheduler
-        self._schedule_jobs(jobs)
-
-        # *********************************************************************
-        # validate the output, cleanup, return output
-        if self.error.isSet():    # if one of the nodes failed
-            self.logger.warn("Failed prepare_imager run detected: Generating "
-                             "new output_ms_mapfile_path without failed runs:"
-                             " {0}".format(output_ms_mapfile_path))
-
-        concat_ms = copy.deepcopy(output_map)
-        slices = []
-        finished_runs = 0
-        # scan the return dict for completed key
-        # loop over the potential jobs including the skipped
-        # If we have a skipped item, add the item to the slices with skip set
-        jobs_idx = 0
-        for item in concat_ms:
-            # If this is an item that is skipped via the skip parameter in
-            # the parset, append a skipped
-            if item.skip:
-                slices.append(tuple([item.host, [], True]))
-                continue
-
-            # we cannot use the skip iterator so we need to manually get the
-            # current job from the list
-            job = jobs[jobs_idx]
-
-            # only save the slices if the node has completed successfully
-            if job.results["returncode"] == 0:
-                finished_runs += 1
-                slices.append(tuple([item.host,
-                                 job.results["time_slices"], False]))
-            else:
-                # Set the dataproduct to skipped!!
-                item.skip = True
-                slices.append(tuple([item.host, [], True]))
-                msg = "Failed run on {0}. NOT Created: {1} ".format(
-                    item.host, item.file)
-                self.logger.warn(msg)
-
-            # we have a non skipped workitem, increase the job idx
-            jobs_idx += 1
-
-        if finished_runs == 0:
-            self.logger.error("None of the started compute node finished:"
-                "The current recipe produced no output, aborting")
-            return 1
-
-        # Write the output mapfiles:
-        # concat.ms paths:
-        self._store_data_map(output_ms_mapfile_path, concat_ms,
-                    "mapfile with concat.ms")
-
-        # timeslices
-        MultiDataMap(slices).save(self.inputs['slices_mapfile'])
-        self.logger.info(
-            "Wrote MultiMapfile with produces timeslice: {0}".format(
-                self.inputs['slices_mapfile']))
-
-        # map with actual input mss.
-        self._store_data_map(self.inputs["ms_per_image_mapfile"],
-            DataMap(paths_to_image_mapfiles),
-                "mapfile containing (used) input ms per image:")
-
-        # Set the return values
-        self.outputs['mapfile'] = output_ms_mapfile_path
-        self.outputs['slices_mapfile'] = self.inputs['slices_mapfile']
-        self.outputs['ms_per_image_mapfile'] = \
-            self.inputs["ms_per_image_mapfile"]
-        return 0
-
-    def _create_input_map_for_sbgroup(self, slices_per_image,
-            n_subband_groups, subbands_per_image, idx_sb_group, input_mapfile):
-        """
-        Creates an input mapfile:
-        This is a subset of the complete input_mapfile based on the subband
-        details supplied. The input_mapfile is structured: first all subbands
-        for a complete timeslice, then the next timeslice, and so on. The
-        result contains all the information needed for a single subband group
-        to be computed on a single compute node
-        """
-        inputs_for_image = []
-        # collect the inputs: first step over the time slices
-        for idx_slice in range(slices_per_image):
-            # calculate the first line for current time slice and subband group
-            line_idx_start = idx_slice * \
-                (n_subband_groups * subbands_per_image) + \
-                (idx_sb_group * subbands_per_image)
-            line_idx_end = line_idx_start + subbands_per_image
-
-            # extend inputs with the files for the current time slice
-            inputs_for_image.extend(input_mapfile[line_idx_start: line_idx_end])
-
-        return DataMap(inputs_for_image)
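-        # Worked example (hypothetical numbers) for the index arithmetic
-        # above: with slices_per_image = 2, n_subband_groups = 3 and
-        # subbands_per_image = 10, subband group idx_sb_group = 1 selects
-        # input_mapfile[10:20] from timeslice 0 and input_mapfile[40:50]
-        # from timeslice 1.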
-
-    def _validate_input_map(self, input_map, output_map, slices_per_image,
-                            subbands_per_image):
-        """
-        Return False if the inputs supplied are incorrect:
-        the number of inputs and outputs does not match.
-        Return True if correct.
-        The number of inputs is correct iff
-        len(input_map) ==
-        len(output_map) * slices_per_image * subbands_per_image
-        """
-        # The output_map contains a number of path/node pairs, the final
-        # dataproduct of the prepare phase. The 'input' for each of these pairs
-        # is a number of measurement sets: The number of time slices times
-        # the number of subbands collected into each of these time slices.
-        # The total length of the input map should match this.
-        if len(input_map) != len(output_map) * \
-                                   (slices_per_image * subbands_per_image):
-            self.logger.error(
-                "Incorrect number of input ms for supplied parameters:\n\t"
-                "len(input_map) = {0}\n\t"
-                "len(output_map) * slices_per_image * subbands_per_image = "
-                "{1} * {2} * {3} = {4}".format(
-                    len(input_map), len(output_map),
-                    slices_per_image, subbands_per_image,
-                    len(output_map) * slices_per_image * subbands_per_image
-                )
-            )
-            return False
-
-        return True
-
-if __name__ == "__main__":
-    sys.exit(imager_prepare().main())
diff --git a/CEP/Pipeline/recipes/sip/master/imager_source_finding.py b/CEP/Pipeline/recipes/sip/master/imager_source_finding.py
deleted file mode 100644
index bb5e24f6b0f..00000000000
--- a/CEP/Pipeline/recipes/sip/master/imager_source_finding.py
+++ /dev/null
@@ -1,164 +0,0 @@
-
-import os
-import sys
-import copy
-
-from lofarpipe.support.baserecipe import BaseRecipe
-import lofarpipe.support.lofaringredient as ingredient
-from lofarpipe.support.remotecommand import ComputeJob
-from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
-from lofarpipe.support.data_map import DataMap
-
-class imager_source_finding(BaseRecipe, RemoteCommandRecipeMixIn):
-    """
-    Master side of imager_source_finding. Collects arguments from command line
-    and pipeline inputs (for the implementation details see the node recipe):
-
-    1. Load mapfiles with input images and collect some parameters from
-       the input ingredients.
-    2. Call the node recipe.
-    3. Validate performance of the node recipe and construct the output value.
-
-    **CommandLine Arguments**
-
-    A mapfile containing (node, image_path) pairs: the images to search for
-    sources in.
-    """
-    inputs = {
-        'bdsm_parset_file_run1': ingredient.FileField(
-            '--bdsm-parset-file-run1',
-            help = "Path to bdsm parameter set for the first sourcefinding run"
-        ),
-        'bdsm_parset_file_run2x': ingredient.FileField(
-            '--bdsm-parset-file-run2x',
-            help = "Path to bdsm parameter set for the second and later" \
-                   " sourcefinding runs"
-        ),
-        'catalog_output_path': ingredient.StringField(
-            '--catalog-output-path',
-            help = "Path to write the catalog created by bdsm)"
-        ),
-        'mapfile': ingredient.StringField(
-            '--mapfile',
-            help = "Full path of mapfile; containing the succesfull generated"
-            "source list"
-        ),
-        'working_directory': ingredient.StringField(
-            '--working-directory',
-            help = "Working directory used by the nodes: local data"
-        ),
-        'sourcedb_target_path': ingredient.StringField(
-            '--sourcedb-target-path',
-            help = "Target path for the sourcedb created based on the"
-                 " found sources"
-        ),
-        'makesourcedb_path': ingredient.ExecField(
-             '--makesourcedb-path',
-             help = "Path to makesourcedb executable."
-        ),
-        'sourcedb_map_path': ingredient.StringField(
-            '--sourcedb-map-path',
-            help = "Full path of mapfile; containing the succesfull generated"
-            "sourcedbs"
-        ),
-
-    }
-
-    outputs = {
-        'mapfile': ingredient.StringField(
-        help = "Full path of mapfile; containing the successfully generated "
-            "source list"
-            ),
-        'sourcedb_map_path': ingredient.StringField(
-        help = "Full path of mapfile; containing the successfully generated "
-             "sourcedbs"
-            )
-    }
-
-    def go(self):
-        """
-        """
-        super(imager_source_finding, self).go()
-        self.logger.info("Starting imager_source_finding run")
-        # ********************************************************************
-        # 1. load mapfiles with input images and collect some parameters from
-        # the input ingredients
-        input_map = DataMap.load(self.inputs['args'][0])
-        catalog_output_path = self.inputs["catalog_output_path"]
-
-        # ********************************************************************
-        # 2. Start the node script
-        node_command = " python3 %s" % (self.__file__.replace("master", "nodes"))
-        jobs = []
-        input_map.iterator = DataMap.SkipIterator
-        for idx, item in enumerate(input_map):
-            # use unique working directories per job, to prevent interference between jobs on a global fs
-            working_dir = os.path.join(self.inputs['working_directory'], "imager_source_finding_{0}".format(idx))
-
-            arguments = [item.file,
-                         self.inputs["bdsm_parset_file_run1"],
-                         self.inputs["bdsm_parset_file_run2x"],
-                         "%s-%s" % (catalog_output_path, idx),
-                         os.path.join(
-                             self.inputs["working_directory"],
-                             "bdsm_output-%s.img" % (idx,)),
-                         "%s-%s" % (self.inputs['sourcedb_target_path'], idx),
-                         self.environment,
-                         working_dir,
-                         self.inputs['makesourcedb_path']
-                        ]
-
-            jobs.append(ComputeJob(item.host, node_command, arguments))
-
-        # Hand over the job(s) to the pipeline scheduler
-        self._schedule_jobs(jobs)
-
-        # ********************************************************************
-        # 3. Test for errors and return output
-        if self.error.isSet():
-            self.logger.warn("Failed imager_source_finding run detected")
-
-        # Collect the nodes that succeeded
-        source_dbs_from_nodes = copy.deepcopy(input_map)
-        catalog_output_path_from_nodes = copy.deepcopy(input_map)
-        source_dbs_from_nodes.iterator = \
-            catalog_output_path_from_nodes.iterator = DataMap.SkipIterator
-
-        # Job is successful if at least one source_db is found
-        succesfull_job = False
-
-        for job, sourcedb_item, catalog_item in zip(jobs,
-                                   source_dbs_from_nodes,
-                                   catalog_output_path_from_nodes):
-
-            if "source_db"  in job.results:
-                succesfull_job = True
-                sourcedb_item.file = job.results["source_db"]
-                catalog_item.file = job.results["catalog_output_path"]
-            else:
-                sourcedb_item.file = "failed"
-                sourcedb_item.skip = True
-                catalog_item.file = "failed"
-                catalog_item.skip = True
-                # We now also have catalog path
-
-        # Abort if none of the recipes succeeded
-        if not succesfull_job:
-            self.logger.error("None of the source finding recipes succeeded")
-            self.logger.error("Exiting with a failure status")
-            return 1
-
-        self._store_data_map(self.inputs['mapfile'],
-                 catalog_output_path_from_nodes,
-                "datamap with created sourcelists")
-        self._store_data_map(self.inputs['sourcedb_map_path'],
-                source_dbs_from_nodes,
-                 " datamap with created sourcedbs")
-
-        self.outputs["mapfile"] = self.inputs['mapfile']
-        self.outputs["sourcedb_map_path"] = self.inputs['sourcedb_map_path']
-
-        return 0
-
-if __name__ == '__main__':
-    sys.exit(imager_source_finding().main())
-
diff --git a/CEP/Pipeline/recipes/sip/master/selfcal_awimager.py b/CEP/Pipeline/recipes/sip/master/selfcal_awimager.py
deleted file mode 100644
index a7d8cf826d0..00000000000
--- a/CEP/Pipeline/recipes/sip/master/selfcal_awimager.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#                                                         LOFAR IMAGING PIPELINE
-#
-#                                    Example recipe with simple job distribution
-#                                                          Wouter Klijn, 2010
-#                                                      swinbank@transientskp.org
-#                                                         Nicolas Vilchez, 2014
-#                                                             vilchez@astron.nl
-# ------------------------------------------------------------------------------
-import sys
-import copy
-import lofarpipe.support.lofaringredient as ingredient
-from lofarpipe.support.baserecipe import BaseRecipe
-from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
-from lofarpipe.support.remotecommand import ComputeJob
-from lofarpipe.support.data_map import DataMap, validate_data_maps,\
-                                       align_data_maps
-
-class selfcal_awimager(BaseRecipe, RemoteCommandRecipeMixIn):
-    """
-    Master script for the awimager. Collects arguments from command line and
-    pipeline inputs.
-
-    1. Load mapfiles and validate these
-    2. Run the awimager node scripts
-    3. Retrieve output and construct an output mapfile of the successful runs
-
-    Details regarding the implementation of the imaging step can be found in
-    the node recipe.
-
-    **CommandLine Arguments**
-
-    A mapfile containing (node, datafile) pairs: the measurement sets used as
-    input for the awimager executable
-
-    """
-    inputs = {
-        'executable': ingredient.ExecField(
-            '--executable',
-            help = "The full path to the  awimager executable"
-        ),
-        'parset': ingredient.FileField(
-            '-p', '--parset',
-            help = "The full path to a awimager configuration parset."
-        ),
-        'working_directory': ingredient.StringField(
-            '-w', '--working-directory',
-            help = "Working directory used on output nodes. Results location"
-        ),
-        'output_image': ingredient.StringField(
-            '--output-image',
-            help = "Path of the image to be create by the awimager"
-        ),
-        'mapfile': ingredient.StringField(
-            '--mapfile',
-            help = "Full path for output mapfile. A list of the"
-                 "successfully generated images will be written here"
-        ),
-        'sourcedb_path': ingredient.StringField(
-            '--sourcedb-path',
-            help = "Full path of sourcedb used to create a mask for known sources"
-        ),
-        'mask_patch_size': ingredient.FloatField(
-            '--mask-patch-size',
-            help = "Scale factor for patches in the awimager mask"
-        ),
-        'autogenerate_parameters': ingredient.BoolField(
-            '--autogenerate-parameters',
-            default = True,
-            help = "Turns on the autogeneration of: cellsize, image-size, fov."
-            " MSSS 'type' functionality"
-        ),
-        'specify_fov': ingredient.BoolField(
-            '--specify-fov',
-            default = False,
-            help = "calculated Image parameters are relative to fov, parameter"
-            " is active when autogenerate_parameters is False"
-        ),
-        'fov': ingredient.FloatField(
-            '--fov',
-            default = 0.0,
-            help = "calculated Image parameters are relative to this"
-            " Field Of View in arcSec. This parameter is obligatory when"
-            " specify_fov is True"
-        ),
-        'major_cycle': ingredient.IntField(
-            '--major_cycle',
-            help = "The number of the current cycle to modify the parset."
-        ),
-        'nr_cycles': ingredient.IntField(
-            '--nr-cycles',
-            help = "The number major cycles."
-        ) ,
-        'perform_self_cal': ingredient.BoolField(
-            '--perform-self-cal',
-            default = False,
-            help = "Control the usage of the self-calibration functionality"
-        )
-    }
-
-    outputs = {
-        'mapfile': ingredient.StringField(),
-    }
-
-    def go(self):
-        """
-        This member contains all the functionality of the selfcal_awimager.
-        Functionality is all located at the node side of the script.
-        """
-        super(selfcal_awimager, self).go()
-        self.logger.info("Starting selfcal_awimager run")
-
-        # *********************************************************************
-        # 1. collect the inputs and validate
-        input_map = DataMap.load(self.inputs['args'][0])
-        sourcedb_map = DataMap.load(self.inputs['sourcedb_path'])
-
-        if not validate_data_maps(input_map, sourcedb_map):
-            self.logger.error(
-                        "the supplied input_ms mapfile and sourcedb mapfile"
-                        "are incorrect. Aborting")
-            self.logger.error(repr(input_map))
-            self.logger.error(repr(sourcedb_map))
-            return 1
-
-        # *********************************************************************
-        # 2. Start the node side of the awimager recipe
-        # Compile the command to be executed on the remote machine
-        node_command = "python3 %s" % (self.__file__.replace("master", "nodes"))
-        jobs = []
-
-        output_map = copy.deepcopy(input_map)        
-        align_data_maps(input_map, output_map, sourcedb_map)
-
-        sourcedb_map.iterator = input_map.iterator = output_map.iterator = \
-            DataMap.SkipIterator
-
-        for measurement_item, source_item in zip(input_map, sourcedb_map):
-            if measurement_item.skip or source_item.skip:
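-                # defensive: with SkipIterator set above, skipped items are
-                # not yielded here, so this branch should not normally trigger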
-                jobs.append(None)
-                continue
-            # both the sourcedb and the measurement are in a map
-            # unpack both
-            host, measurement_path = measurement_item.host, measurement_item.file
-            host2, sourcedb_path = source_item.host, source_item.file
-
-            # collect the arguments for the node script
-            arguments = [self.inputs['executable'],
-                         self.environment,
-                         self.inputs['parset'],
-                         self.inputs['working_directory'],
-                         self.inputs['output_image'],
-                         measurement_path,
-                         sourcedb_path,
-                         self.inputs['mask_patch_size'],
-                         self.inputs['autogenerate_parameters'],
-                         self.inputs['specify_fov'],
-                         self.inputs['fov'],
-                         self.inputs['major_cycle'],
-                         self.inputs['nr_cycles'],
-                         self.inputs['perform_self_cal']
-                         ]
-
-            jobs.append(ComputeJob(host, node_command, arguments))
-        self._schedule_jobs(jobs)
-
-        # *********************************************************************
-        # 3. Check output of the node scripts
-
-        for job, output_item in zip(jobs, output_map):
-            # job == None on skipped job
-            if "image" not in job.results:
-                output_item.file = "failed"
-                output_item.skip = True
-
-            else:
-                output_item.file = job.results["image"]
-                output_item.skip = False
-
-        # Check if there are finished runs
-        succesfull_runs = None
-        for item in output_map:
-            if not item.skip:
-                succesfull_runs = True
-                break
-
-        if not succesfull_runs:
-            self.logger.error(
-                    "None of the started awimager run finished correct")
-            self.logger.error(
-                    "No work left to be done: exiting with error status")
-            return 1
-
-        # If partial success
-        if self.error.isSet():
-            self.logger.error("Failed awimager node run detected. Continuing "
-                              "with successful tasks.")
-
-        self._store_data_map(self.inputs['mapfile'], output_map,
-                             "mapfile containing produces awimages")
-
-        self.outputs["mapfile"] = self.inputs['mapfile']
-        return 0
-
-
-if __name__ == "__main__":
-    sys.exit(selfcal_awimager().main())
-
diff --git a/CEP/Pipeline/recipes/sip/master/selfcal_bbs.py b/CEP/Pipeline/recipes/sip/master/selfcal_bbs.py
deleted file mode 100644
index 22d19700ae8..00000000000
--- a/CEP/Pipeline/recipes/sip/master/selfcal_bbs.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# LOFAR IMAGING PIPELINE
-# selfcal_bbs BBS (BlackBoard Selfcal) recipe
-# Wouter Klijn
-# klijn@astron.nl
-# ------------------------------------------------------------------------------
-
-import sys
-import os
-
-from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
-from lofarpipe.support.baserecipe import BaseRecipe
-from lofarpipe.support.data_map import DataMap, MultiDataMap, \
-                                      validate_data_maps, align_data_maps
-import lofarpipe.support.lofaringredient as ingredient
-from lofarpipe.support.remotecommand import ComputeJob
-
-class selfcal_bbs(BaseRecipe, RemoteCommandRecipeMixIn):
-    """
-    Imager_bbs master performs a bbs run based on the supplied parset. It is a
-    shallow wrapper around bbs. Additional functionality compared to the
-    default bbs recipe is the capability to add an id that allows multiple
-    runs to have different output files.
-
-    1. Load and validate that the input mapfiles are correct
-    2. Start the node scripts, using indexed path names for the
-       communication
-    3. Check if all nodes succeeded. If so, return a mapfile with the
-       calibrated ms
-
-    **Command line Arguments**
-
-    1. Path to a mapfile with measurement sets to calibrate
-
-    """
-    inputs = {
-        'parset': ingredient.FileField(
-            '-p', '--parset',
-            help = "BBS configuration parset"
-        ),
-        'bbs_executable': ingredient.StringField(
-            '--bbs-executable',
-            help = "BBS standalone executable (bbs-reducer)"
-        ),
-        'instrument_mapfile': ingredient.FileField(
-            '--instrument-mapfile',
-            help = "Full path to the mapfile containing the names of the "
-                 "instrument model files generated by the `parmdb` recipe"
-        ),
-        'sourcedb_mapfile': ingredient.FileField(
-            '--sourcedb-mapfile',
-            help = "Full path to the mapfile containing the names of the "
-                 "sourcedbs generated by the `sourcedb` recipe"
-        ),
-        'id': ingredient.IntField(
-            '--id',
-            default = 0,
-            help = "Optional integer id for distinguishing multiple runs"
-        ),
-        'mapfile': ingredient.StringField(
-            '--mapfile',
-            help = "Full path to the file containing the output data products"
-        ),
-        'concat_ms_map_path': ingredient.FileField(
-            '--concat-ms-map-path',
-            help = "Output of the concat MS file"
-        ),
-        'major_cycle': ingredient.IntField(
-            '--major_cycle',
-            help = "ID for the current major cycle"
-        )
-    }
-
-    outputs = {
-        'mapfile': ingredient.FileField(
-            help = "Full path to a mapfile describing the processed data"
-        )
-    }
-
-    def go(self):
-        """
-        selfcal_bbs functionality. Called by the framework; performs all the work
-        """
-        super(selfcal_bbs, self).go()
-        self.logger.info("Starting imager_bbs run")
-
-        # ********************************************************************
-        # 1. Load and validate the data
-        ms_map = MultiDataMap.load(self.inputs['args'][0])
-        parmdb_map = MultiDataMap.load(self.inputs['instrument_mapfile'])
-        sourcedb_map = DataMap.load(self.inputs['sourcedb_mapfile'])
-        concat_ms_map = DataMap.load(self.inputs['concat_ms_map_path'])
-
-        # *********************************************************************
-        # 2. Start the node scripts
-        jobs = []
-        node_command = " python3 %s" % (self.__file__.replace("master", "nodes"))
-        map_dir = os.path.join(
-                        self.config.get("layout", "job_directory"), "mapfiles")
-        run_id = str(self.inputs.get("id"))
-
-        # Update the skip fields of the four maps. If 'skip' is True in any of
-        # these maps, then 'skip' must be set to True in all maps.
-        align_data_maps(ms_map, parmdb_map, sourcedb_map, concat_ms_map)
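-        # A minimal sketch (not the lofarpipe implementation) of the
-        # alignment semantics described above, assuming DataMap-like items
-        # that expose a boolean 'skip' attribute:
-        #
-        #   def align_skip_fields(*maps):
-        #       for items in zip(*maps):
-        #           if any(item.skip for item in items):
-        #               for item in items:
-        #                   item.skip = True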
-
-        ms_map.iterator = parmdb_map.iterator = sourcedb_map.iterator = \
-            concat_ms_map.iterator = DataMap.SkipIterator
-
-        # *********************************************************************
-        for (ms, parmdb, sourcedb, concat_ms) in zip(ms_map, parmdb_map,
-                                                  sourcedb_map, concat_ms_map):
-            # host is same for each entry (validate_data_maps)
-            host, ms_list = ms.host, ms.file
-
-            # Write data maps to MultiDataMaps
-            ms_list_path = os.path.join(
-                    map_dir, host + "_ms_" + run_id + ".map")
-            MultiDataMap([tuple([host, ms_list, False])]).save(ms_list_path)
-
-            parmdb_list_path = os.path.join(
-                    map_dir, host + "_parmdb_" + run_id + ".map")
-            MultiDataMap(
-                [tuple([host, parmdb.file, False])]).save(parmdb_list_path)
-
-            sourcedb_list_path = os.path.join(
-                    map_dir, host + "_sky_" + run_id + ".map")
-            MultiDataMap(
-                [tuple([host, [sourcedb.file], False])]).save(sourcedb_list_path)
-
-            # The concat ms does not have to be written: it is already a
-            # singular item (it is the output of the reduce step),
-            # see redmine issue #6021
-            arguments = [self.inputs['bbs_executable'],
-                         self.inputs['parset'],
-                         ms_list_path,
-                         parmdb_list_path,
-                         sourcedb_list_path,
-                         concat_ms.file,
-                         self.inputs['major_cycle']]
-            jobs.append(ComputeJob(host, node_command, arguments))
-
-        # start and wait till all are finished
-        self._schedule_jobs(jobs)
-
-        # **********************************************************************
-        # 3. validate the node output and construct the output mapfile.
-        if self.error.isSet():    # if one of the nodes failed
-            self.logger.warn("Failed bbs node run detected, skipping work"
-                             "on this work item for further computations")
-
-        # find failed job and set the skip field
-        for (ms_item, concat_item, job) in zip(ms_map, concat_ms_map, jobs):
-            if job.results["returncode"] == 0:
-                continue
-            else:
-                ms_item.skip = True
-                concat_item.skip = True
-                self.logger.warn("bbs failed on item: {0}".format(ms_item.file))
-
-        # return the output: The measurement set that are calibrated:
-        # calibrated data is placed in the ms sets
-        MultiDataMap(ms_map).save(self.inputs['mapfile'])
-        # also save the concat_ms map with possible skips
-        DataMap(concat_ms_map).save(self.inputs['concat_ms_map_path'])
-        self.logger.info("Wrote file with  calibrated data")
-
-        self.outputs['mapfile'] = self.inputs['mapfile']
-        return 0
-
-if __name__ == '__main__':
-    sys.exit(selfcal_bbs().main())
diff --git a/CEP/Pipeline/recipes/sip/master/selfcal_finalize.py b/CEP/Pipeline/recipes/sip/master/selfcal_finalize.py
deleted file mode 100644
index 4690a446623..00000000000
--- a/CEP/Pipeline/recipes/sip/master/selfcal_finalize.py
+++ /dev/null
@@ -1,207 +0,0 @@
-
-import sys
-
-import lofarpipe.support.lofaringredient as ingredient
-from lofarpipe.support.baserecipe import BaseRecipe
-from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
-from lofarpipe.support.remotecommand import ComputeJob
-from lofarpipe.support.data_map import DataMap, validate_data_maps, \
-                                       align_data_maps
-
-class selfcal_finalize(BaseRecipe, RemoteCommandRecipeMixIn):
-    """
-    The Imager_finalizer performs a number of steps needed for integrating the
-    msss_imager_pipeline in the LOFAR framework: it places the image on the
-    output location in the correct image type (hdf5).
-    It also adds some metadata collected from the individual measurement sets
-    and the data found.
-
-    This recipe does not have positional commandline arguments
-    """
-    inputs = {
-        'awimager_output_map': ingredient.FileField(
-            '--awimager-output-mapfile',
-            help = """Mapfile containing (host, path) pairs of created sky
-                   images """
-        ),
-        'ms_per_image_map': ingredient.FileField(
-            '--ms-per-image-map',
-            help = '''Mapfile containing (host, path) pairs of mapfiles used
-            to create image on that node'''
-        ),
-        'sourcelist_map': ingredient.FileField(
-            '--sourcelist-map',
-            help = '''mapfile containing (host, path) pairs to a list of sources
-            found in the image'''
-        ),
-        'sourcedb_map': ingredient.FileField(
-            '--sourcedb_map',
-            help = '''mapfile containing (host, path) pairs to a db of sources
-            found in the image'''
-        ),
-        'target_mapfile': ingredient.FileField(
-            '--target-mapfile',
-            help = "Mapfile containing (host, path) pairs to the concatenated and"
-            "combined measurement set, the source for the actual sky image"
-        ),
-        'minbaseline': ingredient.FloatField(
-            '--minbaseline',
-            help = '''Minimum length of the baseline used for the images'''
-        ),
-        'maxbaseline': ingredient.FloatField(
-            '--maxbaseline',
-            help = '''Maximum length of the baseline used for the images'''
-        ),
-        'output_image_mapfile': ingredient.FileField(
-            '--output-image-mapfile',
-            help = '''mapfile containing (host, path) pairs with the final
-            output image (hdf5) location'''
-        ),
-        'processed_ms_dir': ingredient.StringField(
-            '--processed-ms-dir',
-            help = '''Path to directory for processed measurement sets'''
-        ),
-        'fillrootimagegroup_exec': ingredient.ExecField(
-            '--fillrootimagegroup_exec',
-            help = '''Full path to the fillRootImageGroup executable'''
-        ),
-        'placed_image_mapfile': ingredient.FileField(
-            '--placed-image-mapfile',
-            help = "location of mapfile with processed and correctly placed,"
-                " hdf5 images"
-        ),
-        'placed_correlated_mapfile': ingredient.FileField(
-            '--placed-correlated-mapfile',
-            help = "location of mapfile with processedd and correctly placed,"
-                " correlated ms"
-        ),
-        'concat_ms_map_path': ingredient.FileField(
-            '--concat-ms-map-path',
-            help = "Output of the concat MS file"
-        ),
-        'output_correlated_mapfile': ingredient.FileField(
-            '--output-correlated-mapfile',
-            help = "location of mapfile where output paths for mss are located"
-        ),
-        'msselect_executable': ingredient.ExecField(
-            '--msselect-executable',
-            help = "The full path to the msselect executable "
-        ),
-    }
-
-    outputs = {
-        'placed_image_mapfile': ingredient.StringField(),
-        'placed_correlated_mapfile': ingredient.StringField(),
-    }
-
-    def go(self):
-        """
-        Steps:
-
-        1. Load and validate the input datamaps
-        2. Run the node parts of the recipe
-        3. Validate node output and format the recipe output
-        """
-        super(selfcal_finalize, self).go()
-        # *********************************************************************
-        # 1. Load the datamaps
-        awimager_output_map = DataMap.load(
-                                self.inputs["awimager_output_map"])
-        ms_per_image_map = DataMap.load(
-                                    self.inputs["ms_per_image_map"])
-        sourcelist_map = DataMap.load(self.inputs["sourcelist_map"])
-        sourcedb_map = DataMap.load(self.inputs["sourcedb_map"])
-        target_mapfile = DataMap.load(self.inputs["target_mapfile"])
-        output_image_mapfile = DataMap.load(
-                                    self.inputs["output_image_mapfile"])
-        concat_ms_mapfile = DataMap.load(
-                                    self.inputs["concat_ms_map_path"])
-        output_correlated_map = DataMap.load(
-                                    self.inputs["output_correlated_mapfile"])
-        processed_ms_dir = self.inputs["processed_ms_dir"]
-        fillrootimagegroup_exec = self.inputs["fillrootimagegroup_exec"]
-
-        # Align the skip fields
-        align_data_maps(awimager_output_map, ms_per_image_map,
-                sourcelist_map, target_mapfile, output_image_mapfile,
-                sourcedb_map, concat_ms_mapfile, output_correlated_map)
-
-        # Set the correct iterator
-        sourcelist_map.iterator = awimager_output_map.iterator = \
-            ms_per_image_map.iterator = target_mapfile.iterator = \
-            output_image_mapfile.iterator = sourcedb_map.iterator = \
-            concat_ms_mapfile.iterator = output_correlated_map.iterator = \
-            DataMap.SkipIterator
-
-        # *********************************************************************
-        # 2. Run the node side of the recipe
-        command = " python3 %s" % (self.__file__.replace("master", "nodes"))
-        jobs = []
-        for (awimager_output_item, ms_per_image_item, sourcelist_item,
-              target_item, output_image_item, sourcedb_item,
-              concat_ms_item, correlated_item) in zip(
-                  awimager_output_map, ms_per_image_map, sourcelist_map,
-                  target_mapfile, output_image_mapfile, sourcedb_map,
-                  concat_ms_mapfile, output_correlated_map):
-            # collect the files as argument
-            arguments = [awimager_output_item.file,
-                         ms_per_image_item.file,
-                         sourcelist_item.file,
-                         target_item.file,
-                         output_image_item.file,
-                         self.inputs["minbaseline"],
-                         self.inputs["maxbaseline"],
-                         processed_ms_dir,
-                         fillrootimagegroup_exec,
-                         self.environment,
-                         sourcedb_item.file,
-                         concat_ms_item.file,
-                         correlated_item.file,
-                         self.inputs["msselect_executable"], ]
-
-            self.logger.info(
-                "Starting finalize with the folowing args: {0}".format(
-                                                                    arguments))
-            jobs.append(ComputeJob(target_item.host, command, arguments))
-
-        self._schedule_jobs(jobs)
-
-        # *********************************************************************
-        # 3. Validate the performance of the node script and assign output
-        successful_run = False
-        for (job, output_image_item, output_correlated_item) in zip(jobs,
-                                output_image_mapfile, output_correlated_map):
-            if "hdf5" not in job.results:
-                # If the output failed, set the skip to True
-                output_image_item.skip = True
-                output_correlated_item.skip = True
-            else:
-                successful_run = True
-                # signal that we have at least a single run finished ok.
-                # No need to set skip in this case
-
-        if not successful_run:
-            self.logger.warn("Not a single finalizer succeeded")
-            return 1
-
-        # Save the location of the output images
-        output_image_mapfile.save(self.inputs['placed_image_mapfile'])
-        self.logger.debug(
-           "Wrote mapfile containing placed hdf5 images: {0}".format(
-                           self.inputs['placed_image_mapfile']))
-
-        # save the location of measurements sets
-        output_correlated_map.save(self.inputs['placed_correlated_mapfile'])
-        self.logger.debug(
-           "Wrote mapfile containing placed mss: {0}".format(
-                           self.inputs['placed_correlated_mapfile']))
-
-        self.outputs["placed_image_mapfile"] = self.inputs[
-                                                    'placed_image_mapfile']
-        self.outputs["placed_correlated_mapfile"] = self.inputs[
-                                             'placed_correlated_mapfile']
-
-        return 0
-
-if __name__ == '__main__':
-    sys.exit(selfcal_finalize().main())
diff --git a/CEP/Pipeline/recipes/sip/nodes/imager_awimager.py b/CEP/Pipeline/recipes/sip/nodes/imager_awimager.py
deleted file mode 100644
index 6920f961a9a..00000000000
--- a/CEP/Pipeline/recipes/sip/nodes/imager_awimager.py
+++ /dev/null
@@ -1,569 +0,0 @@
-# LOFAR AUTOMATIC IMAGING PIPELINE
-# awimager
-# The awimager recipe creates an image of the field of view, based on
-# nine concatenated measurement sets, each spanning 10 subbands.
-# The recipe contains two parts: the call to awimager, and secondly some
-# functionality that calculates settings (for awimager) based on the
-# information present in the measurement set.
-# The calculated parameters are:
-#        1. The cellsize
-#        2. The number of pixels in each of the two dimensions of the image
-#        3. Which columns to use to determine the maximum baseline
-#        4. The number of projection planes
-# Wouter Klijn 2012
-# klijn@astron.nl
-# -----------------------------------------------------------------------------
-
-import sys
-import shutil
-import os.path
-import math
-
-from lofarpipe.support.lofarnode import LOFARnodeTCP
-from lofarpipe.support.pipelinelogging import CatchLog4CPlus
-from lofarpipe.support.pipelinelogging import log_time
-from lofarpipe.support.utilities import patch_parset
-from lofarpipe.support.utilities import get_parset
-from lofarpipe.support.utilities import catch_segfaults
-from lofarpipe.support.utilities import create_directory
-from lofarpipe.support.lofarexceptions import PipelineException
-import pyrap.tables as pt  # @UnresolvedImport
-from subprocess import CalledProcessError
-import pyrap.images as pim  # @UnresolvedImport
-from lofarpipe.support.parset import Parset
-import lofar.parmdb  # @UnresolvedImport
-import numpy as np
-
-
-class imager_awimager(LOFARnodeTCP):
-    def run(self, executable, environment, parset, working_directory,
-            output_image, concatenated_measurement_set, sourcedb_path,
-             mask_patch_size, autogenerate_parameters, specify_fov, fov):
-        """
-        :param executable: Path to the awimager executable
-        :param environment: environment for catch_segfaults (executable runner)
-        :param parset: parameters for the awimager
-        :param working_directory: directory to place temporary files in
-        :param output_image: location and filename to store the output images;
-          the multiple images are appended with type extensions
-        :param concatenated_measurement_set: Input measurement set
-        :param sourcedb_path: Path to the sourcedb used to create the image
-          mask
-        :param mask_patch_size: Scaling of the patch around the source in the
-          mask
-        :param autogenerate_parameters: Turns on the autogeneration of:
-           cellsize, npix, wprojplanes, wmax, fov
-        :param fov: if autogenerate_parameters is false, calculate the
-           image parameters (cellsize, npix, wprojplanes, wmax) relative
-           to this fov
-        :rtype: self.outputs["image"] The path to the output image
-        """
-        self.logger.info("Start imager_awimager node run:")
-        log4_cplus_name = "imager_awimager"
-        self.environment.update(environment)
-
-        with log_time(self.logger):
-            # Read the parameters as specified in the parset
-            parset_object = get_parset(parset)
-
-            #******************************************************************
-            # 0. Create the directories used in this recipe
-            create_directory(working_directory)
-
-            # *************************************************************
-            # 1. Calculate awimager parameters that depend on measurement set
-            # and the parset
-
-            cell_size, npix, w_max, w_proj_planes = \
-                    self._get_imaging_parameters(
-                            concatenated_measurement_set,
-                            parset,
-                            autogenerate_parameters,
-                            specify_fov,
-                            fov)
-
-            self.logger.info("Using autogenerated parameters; ")
-            self.logger.info(
-                 "Calculated parameters: cell_size: {0}, npix: {1}".format(
-                     cell_size, npix))
-
-            self.logger.info("w_max: {0}, w_proj_planes: {1} ".format(
-                        w_max, w_proj_planes))
-
-            # ****************************************************************
-            # 2. Get the target image location from the mapfile for the parset.
-            # Create the target dir if it does not exist
-            image_path_head = os.path.dirname(output_image)
-            create_directory(image_path_head)
-            self.logger.debug("Created directory to place awimager output"
-                              " files: {0}".format(image_path_head))
-
-            # ****************************************************************
-            # 3. Create the mask
-            mask_file_path = self._create_mask(npix, cell_size, output_image,
-                         concatenated_measurement_set, executable,
-                         working_directory, log4_cplus_name, sourcedb_path,
-                          mask_patch_size, image_path_head)
-
-            # *****************************************************************
-            # 4. Update the parset with calculated parameters, and output image
-            patch_dictionary = {'uselogger': 'True',  # enables log4cpluscd log
-                               'ms': str(concatenated_measurement_set),
-                               'cellsize': str(cell_size),
-                               'npix': str(npix),
-                               'wmax': str(w_max),
-                               'wprojplanes': str(w_proj_planes),
-                               'image': str(output_image),
-                               'maxsupport': str(npix),
-                               # 'mask':str(mask_file_path),  #TODO REINTRODUCE
-                               # MASK, excluded to speed up in this debug stage
-                               }
-
-            # save the parset at the target dir for the image
-            calculated_parset_path = os.path.join(image_path_head,
-                                                       "parset.par")
-
-            try:
-                temp_parset_filename = patch_parset(parset, patch_dictionary)
-                # Copy tmp file to the final location
-                shutil.copyfile(temp_parset_filename, calculated_parset_path)
-                self.logger.debug("Wrote parset for awimager run: {0}".format(
-                                                    calculated_parset_path))
-            finally:
-                # remove temp file
-                os.remove(temp_parset_filename)
-
-            # *****************************************************************
-            # 5. Run the awimager with the updated parameterset
-            cmd = [executable, calculated_parset_path]
-            try:
-                with CatchLog4CPlus(working_directory,
-                        self.logger.name + "." +
-                        os.path.basename(log4_cplus_name),
-                        os.path.basename(executable)
-                ) as logger:
-                    catch_segfaults(cmd, working_directory, self.environment,
-                                            logger, usageStats=self.resourceMonitor)
-
-            # Thrown by catch_segfault
-            except CalledProcessError as exception:
-                self.logger.error(str(exception))
-                return 1
-
-            except Exception as exception:
-                self.logger.error(str(exception))
-                return 1
-
-        # *********************************************************************
-        # 6. Return output
-        # Append the static .restored suffix: this might change, but probably
-        # not. The actual output image always has this extension; it is the
-        # awimager default.
-        self.outputs["image"] = output_image + ".restored"
-        return 0
-
-    def _get_imaging_parameters(self, measurement_set, parset,
-                autogenerate_parameters, specify_fov, fov):
-        """
-        (1) calculate and format some parameters that are determined at
-        runtime, based on values in the measurement set and the input
-        parameter (set):
-
-        a. <string> The cellsize
-        b. <int> The number of pixels in each of the two dimensions of
-           the image
-        c. <string> The largest baseline in the ms smaller than the
-           maxbaseline
-        d. <string> The number of projection planes
-
-        The calculation of these parameters is done in two steps:
-
-        1. Calculate intermediate results based on the ms.
-        2. Calculate the actual target values using the intermediate
-           results
-        """
-        # *********************************************************************
-        # 1. Get partial solutions from the parameter set
-        # Get the parset and a number of raw parameters from this parset
-        parset_object = get_parset(parset)
-        baseline_limit = parset_object.getInt('maxbaseline')
-
-        # Get the longest baseline
-        max_baseline = pt.taql(
-                        'CALC sqrt(max([select sumsqr(UVW[:2]) from ' + \
-            '{0} where sumsqr(UVW[:2]) <{1} giving as memory]))'.format(\
-            measurement_set, baseline_limit *
-            baseline_limit))[0]  # ask Ger van Diepen for details if necessary
-        # Calculate the wave_length
-        table_ms = pt.table(measurement_set)
-        table_spectral_window = pt.table(
-                                        table_ms.getkeyword("SPECTRAL_WINDOW"))
-        freq = table_spectral_window.getcell("REF_FREQUENCY", 0)
-
-        table_spectral_window.close()
-        wave_length = pt.taql('CALC C()') / freq
-        wave_length = wave_length[0]
-
-        # Calculate the cell_size from the ms
-        arc_sec_in_degree = 3600
-        arc_sec_in_rad = (180.0 / math.pi) * arc_sec_in_degree
-        cell_size = (1.0 / 3) * (wave_length / float(max_baseline))\
-             * arc_sec_in_rad
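-        # Worked example with illustrative values: at 150 MHz the wavelength
-        # is c / f ~= 2.0 m; with a longest baseline of 10 km this gives
-        #   cell_size = (1.0 / 3) * (2.0 / 10000.0) * 206264.8
-        #             ~= 13.75 arcsec per pixel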
-
-        # Calculate the number of pixels in x and y dim
-        #    fov and diameter depending on the antenna name
-        fov_from_ms, station_diameter = self._get_fov_and_station_diameter(
-                                                            measurement_set)
-
-        # use the fov to calculate a semi 'user'-specified npix and cellsize
-        # The npix thus depends on the ms cellsize and fov
-        # Do not use the supplied fov if autogenerating
-        if not autogenerate_parameters and specify_fov:
-            if fov == 0.0:
-                raise PipelineException("fov set to 0.0: invalid value.")
-
-        # else use full resolution (calculate the fov)
-        else:
-            self.logger.info("Using fov calculated on measurement data: " + str(fov_from_ms))
-            fov = fov_from_ms
-
-        # ********************************************************************
-        # 2. Calculate the ms based output variables
-        # 'optimal' npix based on measurement set calculations or user specified
-        npix = (arc_sec_in_degree * fov) / cell_size
-
-        # Get the closest power of two larger then the calculated pixel size
-        npix = self._nearest_ceiled_power2(npix)
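-        # Continuing the example above: a 5 degree fov at ~13.75 arcsec per
-        # pixel gives npix = (3600 * 5) / 13.75 ~= 1309, which is then
-        # ceiled to the next power of two: 2048.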
-
-        # Get the max w with baseline < 10000
-        w_max = pt.taql('CALC max([select UVW[2] from ' + \
-            '{0} where sumsqr(UVW[:2]) <{1} giving as memory])'.format(
-            measurement_set, baseline_limit * baseline_limit))[0]
-
-        # Calculate number of projection planes
-        w_proj_planes = min(257, math.floor((max_baseline * wave_length) /
-                                             (station_diameter ** 2)))
-        w_proj_planes = int(round(w_proj_planes))
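-        # Worked example with illustrative values: max_baseline = 10 km,
-        # wave_length = 2.0 m and station_diameter = 30.8 m give
-        # (10000 * 2.0) / 30.8**2 ~= 21.1, so w_proj_planes = 21.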
-
-        # Maximum number of proj planes set to 1024: contact George Heald or
-        # Ger van Diepen if this exception occurs
-        maxsupport = max(1024, npix)
-        if w_proj_planes > maxsupport:
-            raise Exception("The number of projection planes for the current"
-                            " measurement set is too large.")
-
-        # *********************************************************************
-        # 3. if the npix from the parset differs from the ms calculations,
-        # calculate a sizeconverter value (to be applied to the cellsize)
-        if npix < 256:
-            self.logger.warn("Using an image size smaller than 256x256:"
-                             " This leads to problematic imaging in some"
-                             " instances!!")
-
-        # If we are not autocalculating based on ms or fov, use the npix
-        # and cell_size specified in the parset
-        # keep the wmax and w_proj_planes
-        if (not autogenerate_parameters and not specify_fov):
-            npix = parset_object.getString('npix')
-            cell_size_formatted = parset_object.getString('cellsize')
-        else:
-            cell_size_formatted = str(
-                        int(round(cell_size))) + 'arcsec'
-
-        self.logger.info("Using the following awimager parameters:"
-            " cell_size: {0}, npix: {1},".format(
-                        cell_size_formatted, npix) +
-             " w_max: {0}, w_proj_planes: {1}".format(w_max, w_proj_planes))
-
-        return cell_size_formatted, str(npix), str(w_max), str(w_proj_planes)
-
-    def _get_fov_and_station_diameter(self, measurement_set):
-        """
-        _field_of_view calculates the fov, which is dependent on the
-        station type, location and mode.
-        For details see:
-        (1) http://www.astron.nl/radio-observatory/astronomers/lofar-imaging-capabilities-sensitivity/lofar-imaging-capabilities/lofar
-        
-        """
-        # Open the ms
-        table_ms = pt.table(measurement_set)
-
-        # Get antenna name and observation mode
-        antenna = pt.table(table_ms.getkeyword("ANTENNA"))
-        antenna_name = antenna.getcell('NAME', 0)
-        antenna.close()
-
-        observation = pt.table(table_ms.getkeyword("OBSERVATION"))
-        antenna_set = observation.getcell('LOFAR_ANTENNA_SET', 0)
-        observation.close()
-
-        # static parameters for the station diameters ref (1)
-        hba_core_diameter = 30.8
-        hba_remote_diameter = 41.1
-        lba_inner = 32.3
-        lba_outer = 81.3
-
-        # use measurement set information to ascertain the antenna diameter
-        station_diameter = None
-        if antenna_name.count('HBA'):
-            if antenna_name.count('CS'):
-                station_diameter = hba_core_diameter
-            elif antenna_name.count('RS'):
-                station_diameter = hba_remote_diameter
-        elif antenna_name.count('LBA'):
-            if antenna_set.count('INNER'):
-                station_diameter = lba_inner
-            elif antenna_set.count('OUTER'):
-                station_diameter = lba_outer
-
-        # raise exception if the antenna is not of a supported type
-        if station_diameter is None:
-            self.logger.error(
-                    'Unknown antenna type for antenna: {0} , {1}'.format(\
-                              antenna_name, antenna_set))
-            raise PipelineException(
-                    "Unknown antenna type encountered in Measurement set")
-
-        # Get the wavelength
-        spectral_window_table = pt.table(table_ms.getkeyword(
-                                                            "SPECTRAL_WINDOW"))
-        freq = float(spectral_window_table.getcell("REF_FREQUENCY", 0))
-        wave_length = pt.taql('CALC C()') / freq
-        spectral_window_table.close()
-
-        # Now calculate the FOV see ref (1)
-        # alpha_one is a magic parameter: The value 1.3 is representative for a
-        # WSRT dish, where it depends on the dish illumination
-        alpha_one = 1.3
-
-        # the fwhm is computed in radians, so transform to degrees for output
-        fwhm = alpha_one * (wave_length / station_diameter) * (180 / math.pi)
-        fov = fwhm / 2.0
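-        # Worked example with illustrative values: an HBA core station
-        # (diameter 30.8 m) observed at 150 MHz (wave_length ~= 2.0 m) gives
-        # fwhm = 1.3 * (2.0 / 30.8) * (180 / pi) ~= 4.84 degrees, so
-        # fov ~= 2.42 degrees.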
-        table_ms.close()
-
-        return fov, station_diameter
-
-    def _create_mask(self, npix, cell_size, output_image,
-                     concatenated_measurement_set, executable,
-                     working_directory, log4_cplus_name, sourcedb_path,
-                     mask_patch_size, image_path_directory):
-        """
-        (3) create a casa image containing a mask blocking out the
-        sources in the provided sourcedb.
-
-        It expects:
-
-        a. the ms for which the mask will be created; it is used to
-           determine some image details (e.g. pointing)
-        b. parameters for running within the catch_segfaults framework
-        c. the size of the mask patch.
-           To create a mask, first an empty casa image is created using
-           awimager, ready to be filled with mask data
-
-        This function is a wrapper around some functionality written by:
-        fdg@mpa-garching.mpg.de
-
-        steps:
-        1. Create a parset with image parameters, used by:
-        2. an awimager run, creating an empty casa image.
-        3. Fill the casa image with mask data
-        """
-        # ********************************************************************
-        # 1. Create the parset used to make a mask
-        mask_file_path = output_image + ".mask"
-
-        mask_patch_dictionary = {"npix": str(npix),
-                                 "cellsize": str(cell_size),
-                                 "image": str(mask_file_path),
-                                 "ms": str(concatenated_measurement_set),
-                                 "operation": "empty",
-                                 "stokes": "'I'"
-                                 }
-        mask_parset = Parset.fromDict(mask_patch_dictionary)
-        mask_parset_path = os.path.join(image_path_directory, "mask.par")
-        mask_parset.writeFile(mask_parset_path)
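-        # The resulting mask.par is a plain key=value parset; with the
-        # illustrative values from the worked examples above it would read
-        # roughly (paths are hypothetical):
-        #   npix=2048
-        #   cellsize=14arcsec
-        #   image=/path/to/output.mask
-        #   operation=empty
-        #   stokes='I'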
-        self.logger.debug(
-                "Wrote parset for awimager mask creation: {0}".format(
-                                                      mask_parset_path))
-
-        # *********************************************************************
-        # 2. Create an empty mask using awimager
-        cmd = [executable, mask_parset_path]
-        self.logger.info(" ".join(cmd))
-        try:
-            with CatchLog4CPlus(working_directory,
-                    self.logger.name + "." + os.path.basename(log4_cplus_name),
-                    os.path.basename(executable)
-            ) as logger:
-                catch_segfaults(cmd, working_directory, self.environment,
-                                        logger)
-        # Thrown by catch_segfault
-        except CalledProcessError as exception:
-            self.logger.error(str(exception))
-            return 1
-        except Exception as exception:
-            self.logger.error(str(exception))
-            return 1
-
-        # ********************************************************************
-        # 3. create the actual mask
-        self.logger.debug("Started mask creation using mask_patch_size:"
-                          " {0}".format(mask_patch_size))
-
-        self._msss_mask(mask_file_path, sourcedb_path, mask_patch_size)
-        self.logger.debug("Fished mask creation")
-        return mask_file_path
-
-    def _msss_mask(self, mask_file_path, sourcedb_path, mask_patch_size = 1.0):
-        """
-        Fill casa image with a mask based on skymodel(sourcedb)
-        Bugs: fdg@mpa-garching.mpg.de
-        
-        pipeline implementation klijn@astron.nl
-        version 0.32
-        
-        Edited by JDS, 2012-03-16:
-         - Properly convert maj/minor axes to half length
-         - Handle empty fields in sky model by setting them to 0
-         - Fix off-by-one error at mask boundary
-        
-        FIXED BUG
-         - if a source is outside the mask, the script ignores it
-         - if a source is on the border, the script draws only the inner part
-         - can handle skymodels with different headers
-        
-        KNOWN BUG
-         - does not work with single-line skymodels; workaround: add a fake
-           source outside the field
-         - mask patches display large amounts of aliasing. A possible
-           solution would be normalizing to the pixel centre:
-           ( int(normalize_x * npix) / npix + (0.5 / npix) );
-           ideally the patch would increment in pixel radii
-
-        Version 0.3  (Wouter Klijn, klijn@astron.nl)
-         - Usage of a sourcedb instead of a txt document as 'source' of
-           sources. This allows input from different source types
-        Version 0.31  (Wouter Klijn, klijn@astron.nl)
-         - Adaptable patch size (patch size needs specification)
-         - Patch size and geometry are broken: needs some astronomer magic to
-           fix it, probably a problem with the affine transformation
-        Version 0.32 (Wouter Klijn, klijn@astron.nl)
-         - Renaming of variable names to python convention
-        """
-        # increment in maj/minor axes [arcsec]
-        pad = 500.
-
-        # open mask
-        mask = pim.image(mask_file_path, overwrite = True)
-        mask_data = mask.getdata()
-        xlen, ylen = mask.shape()[2:]
-        freq, stokes, null, null = mask.toworld([0, 0, 0, 0])
-
-        # Open the sourcedb:
-        table = pt.table(sourcedb_path + "::SOURCES")
-        pdb = lofar.parmdb.parmdb(sourcedb_path)
-
-        # Get the data of interest
-        source_list = table.getcol("SOURCENAME")
-        source_type_list = table.getcol("SOURCETYPE")
-        # All data in the format valuetype:sourcename
-        all_values_dict = pdb.getDefValues()
-
-        # Loop the sources
-        for source, source_type in zip(source_list, source_type_list):
-            if source_type == 1:
-                type_string = "Gaussian"
-            else:
-                type_string = "Point"
-            self.logger.info("processing: {0} ({1})".format(source,
-                                                             type_string))
-
-            # Get the right_ascension and declination (already in radians)
-            right_ascension = all_values_dict["Ra:" + source][0, 0]
-            declination = all_values_dict["Dec:" + source][0, 0]
-            if source_type == 1:
-                # Get the raw values from the db
-                maj_raw = all_values_dict["MajorAxis:" + source][0, 0]
-                min_raw = all_values_dict["MinorAxis:" + source][0, 0]
-                pa_raw = all_values_dict["Orientation:" + source][0, 0]
-                # convert to radians (conversion is copy paste JDS)
-                # major radius (+pad) in rad
-                maj = (((maj_raw + pad)) / 3600.) * np.pi / 180.
-                # minor radius (+pad) in rad
-                minor = (((min_raw + pad)) / 3600.) * np.pi / 180.
-                pix_asc = pa_raw * np.pi / 180.
-                # wenss always writes 'GAUSSIAN' even for point sources
-                # -> set to wenss beam+pad
-                if maj == 0 or minor == 0:
-                    maj = ((54. + pad) / 3600.) * np.pi / 180.
-                    minor = ((54. + pad) / 3600.) * np.pi / 180.
-            # set to wenss beam+pad
-            elif source_type == 0:
-                maj = (((54. + pad) / 2.) / 3600.) * np.pi / 180.
-                minor = (((54. + pad) / 2.) / 3600.) * np.pi / 180.
-                pix_asc = 0.
-            else:
-                self.logger.info(
-                    "WARNING: unknown source_type ({0}),"
-                    " ignoring".format(source_type))
-                continue
-
-            # define a small square around the source to look for it
-            null, null, border_y1, border_x1 = mask.topixel(
-                    [freq, stokes, declination - maj,
-                      right_ascension - maj / np.cos(declination - maj)])
-            null, null, border_y2, border_x2 = mask.topixel(
-                    [freq, stokes, declination + maj,
-                     right_ascension + maj / np.cos(declination + maj)])
-            xmin = np.int(np.floor(np.min([border_x1, border_x2])))
-            xmax = np.int(np.ceil(np.max([border_x1, border_x2])))
-            ymin = np.int(np.floor(np.min([border_y1, border_y2])))
-            ymax = np.int(np.ceil(np.max([border_y1, border_y2])))
-
-            if xmin > xlen or ymin > ylen or xmax < 0 or ymax < 0:
-                self.logger.info(
-                    "WARNING: source {0} falls outside the mask,"
-                    " ignoring".format(source))
-                continue
-
-            if xmax > xlen or ymax > ylen or xmin < 0 or ymin < 0:
-                self.logger.info(
-                    "WARNING: source {0} falls across map edge".format(source))
-
-            for pixel_x in range(xmin, xmax):
-                for pixel_y in range(ymin, ymax):
-                    # skip pixels outside the mask field
-                    if pixel_x >= xlen or pixel_y >= ylen or\
-                       pixel_x < 0 or pixel_y < 0:
-                        continue
-                    # get pixel right_ascension and declination in rad
-                    null, null, pix_dec, pix_ra = mask.toworld(
-                                                    [0, 0, pixel_y, pixel_x])
-                    # Translate and rotate coords.
-                    translated_pixel_x = (pix_ra - right_ascension) * np.sin(
-                        pix_asc) + (pix_dec - declination) * np.cos(pix_asc)
-                    # to align with ellipse
-                    translate_pixel_y = -(pix_ra - right_ascension) * np.cos(
-                        pix_asc) + (pix_dec - declination) * np.sin(pix_asc)
-                    if (((translated_pixel_x ** 2) / (maj ** 2)) +
-                        ((translate_pixel_y ** 2) / (minor ** 2))) < \
-                                                         mask_patch_size:
-                        mask_data[0, 0, pixel_y, pixel_x] = 1
-        null = null
-        mask.putdata(mask_data)
-        table.close()
-
-    # some helper functions
-    def _nearest_ceiled_power2(self, value):
-        """
-        Return the int value of the nearest ceiled power of 2 for the
-        supplied argument
-
-        """
-        return int(pow(2, math.ceil(math.log(value, 2))))
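-    # e.g. _nearest_ceiled_power2(1309) == 2048, while exact powers of two
-    # map to themselves: _nearest_ceiled_power2(2048) == 2048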
-
-
-if __name__ == "__main__":
-    _JOBID, _JOBHOST, _JOBPORT = sys.argv[1:4]
-    sys.exit(imager_awimager(
-                    _JOBID, _JOBHOST, _JOBPORT).run_with_stored_arguments())
-
diff --git a/CEP/Pipeline/recipes/sip/nodes/imager_bbs.py b/CEP/Pipeline/recipes/sip/nodes/imager_bbs.py
deleted file mode 100644
index 06f3874a07f..00000000000
--- a/CEP/Pipeline/recipes/sip/nodes/imager_bbs.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# LOFAR AUTOMATIC IMAGING PIPELINE
-# imager_bbs
-# Wouter Klijn 2012
-# klijn@astron.nl
-# -----------------------------------------------------------------------------
-
-import sys
-
-from lofarpipe.support.lofarnode import LOFARnodeTCP
-from lofarpipe.support.group_data import load_data_map
-from lofarpipe.support.subprocessgroup import SubProcessGroup
-from lofarpipe.support.data_map import MultiDataMap
-
-class imager_bbs(LOFARnodeTCP):
-    """
-    The imager_bbs node performs a bbs run for each of the measurement sets
-    supplied in the mapfile at ms_list_path. Calibration is done on the
-    sources in the sourcedb in the mapfile at sky_list_path. Solutions are
-    stored in the parmdbs at parmdb_list_path.
-    
-    1. Load the mapfiles
-    2. For each measurement set to calibrate start a subprocess
-    3. Check if the processes finished correctly
-    """
-    def run(self, bbs_executable, parset, ms_list_path, parmdb_list_path,
-             sky_list_path):
-        """
-        imager_bbs functionality. Called by framework performing all the work
-        """
-        self.logger.debug("Starting imager_bbs Node")
-        # *********************************************************************
-        # 1. Load mapfiles
-        # read in the mapfiles to data maps: The master recipe added the single
-        # path to a mapfile, which allows usage of default data methods
-        # (load_data_map)
-        # TODO: Datamap
-        ms_map = MultiDataMap.load(ms_list_path)
-        parmdb_map = MultiDataMap.load(parmdb_list_path)
-        sky_list = MultiDataMap.load(sky_list_path)
-        source_db = sky_list[0].file[0] # the sourcedb is the first file entry
-
-        try:
-            bbs_process_group = SubProcessGroup(self.logger,
-                                  self.resourceMonitor)
-            # *****************************************************************
-            # 2. start the bbs executable with data
-            for (measurement_set, parmdb) in zip(ms_map[0].file,
-                                                 parmdb_map[0].file):
-                command = [
-                    bbs_executable,
-                    "--sourcedb={0}".format(source_db),
-                    "--parmdb={0}".format(parmdm) ,
-                    measurement_set,
-                    parset]
-                self.logger.info("Executing bbs command: {0}".format(" ".join(
-                            command)))
-
-                bbs_process_group.run(command)
-
-            # *****************************************************************
-            # 3. check status of the processes
-            if bbs_process_group.wait_for_finish() is not None:
-                self.logger.error(
-                            "Failed bbs run detected. Aborting")
-                return 1    # If bbs failed we need to abort: the concat
-                            # is now corrupt
-
-        except OSError as exception:
-            self.logger.error("Failed to execute bbs: {0}".format(str(
-                                                                    exception)))
-            return 1
-        return 0
-
-
-if __name__ == "__main__":
-    _JOBID, _JOBHOST, _JOBPORT = sys.argv[1:4]
-    sys.exit(imager_bbs(_JOBID, _JOBHOST, _JOBPORT).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/imager_create_dbs.py b/CEP/Pipeline/recipes/sip/nodes/imager_create_dbs.py
deleted file mode 100644
index 98b0192849d..00000000000
--- a/CEP/Pipeline/recipes/sip/nodes/imager_create_dbs.py
+++ /dev/null
@@ -1,536 +0,0 @@
-"""
-# LOFAR AUTOMATIC IMAGING PIPELINE
-# imager_create_dbs (node)
-# Wouter Klijn 2012
-# klijn@astron.nl
-# -----------------------------------------------------------------------------
-"""
-
-import sys
-import subprocess
-import math
-import shutil
-import os
-
-from lofarpipe.support.lofarnode import LOFARnodeTCP
-from lofarpipe.support.pipelinelogging import log_process_output
-from lofarpipe.support.pipelinelogging import CatchLog4CPlus
-from lofarpipe.support.utilities import catch_segfaults
-from lofarpipe.support.utilities import create_directory
-from lofar.common.subprocess_utils import communicate_returning_strings
-
-import monetdb.sql as db
-import lofar.gsm.gsmutils as gsm
-import pyrap.tables as pt
-
-_TEMPLATE_PARMDB = """
-create tablename="{0}"
-adddef Gain:0:0:Ampl  values=1.0
-adddef Gain:1:1:Ampl  values=1.0
-adddef Gain:0:0:Real  values=1.0
-adddef Gain:1:1:Real  values=1.0
-adddef DirectionalGain:0:0:Ampl  values=1.0
-adddef DirectionalGain:1:1:Ampl  values=1.0
-adddef DirectionalGain:0:0:Real  values=1.0
-adddef DirectionalGain:1:1:Real  values=1.0
-adddef AntennaOrientation values=5.497787144
-quit
-"""
-
-
-class imager_create_dbs(LOFARnodeTCP):
-    """
-    Creates two dbs: a sourcedb containing sources in the direction of the
-    current measurement, and a parmdb which will be used for an instrument
-    table:
-
-    1. Create a sourcelist.
-       In the first major imaging cycle it is filled by the gsm. In later
-       cycles this list is retrieved from the sourcefinder
-    2. The GSM does not create a sourceDB. It creates a text file which is
-       consumed by makesourcedb, resulting in a sourceDB (casa table).
-       Later cycles will be added to this existing sourcedb.
-       There is a single sourcedb per concatenated measurement set / image
-    3. Each individual timeslice needs a place to collect parameters: this is
-       done in the parmdb.
-    4. Add the created databases as meta information to the measurement set
-    5. Assign the outputs of the script
-    
-    """
-    def run(self, concatenated_measurement_set, sourcedb_target_path,
-            monet_db_hostname, monet_db_port, monet_db_name, monet_db_user,
-            monet_db_password, assoc_theta, parmdb_executable, slice_paths,
-            parmdb_suffix, environment, working_directory, makesourcedb_path,
-            source_list_path_extern, major_cycle):
-
-        self.logger.info("Starting imager_create_dbs Node")
-        self.environment.update(environment)
-
-        #******************************************************************
-        # 0. Create the directories used in this recipe
-        create_directory(working_directory)
-
-        #*******************************************************************
-        # 1. get a sourcelist: from gsm or from file
-        source_list, append = self._create_source_list(
-            source_list_path_extern, sourcedb_target_path,
-            concatenated_measurement_set, monet_db_hostname,
-            monet_db_port, monet_db_name, monet_db_user,
-            monet_db_password, assoc_theta)
-        if source_list is None:
-            self.logger.error("failed creating the sourcelist")
-            return 1
-
-        #*******************************************************************
-        # 2. Convert it to a sourcedb (casa table)
-        if self._create_source_db(source_list, sourcedb_target_path,
-                                  working_directory, makesourcedb_path,
-                                  append) != 0:
-            self.logger.error("failed creating sourcedb")
-            return 1
-
-        #*******************************************************************
-        # 3. Create an empty parmdb for each timeslice
-        parmdbs = self._create_parmdb_for_timeslices(parmdb_executable,
-                                    slice_paths, parmdb_suffix)
-        if parmdbs is None:
-            self.logger.error("failed creating parmdbs for slices")
-            return 1
-
-        # *******************************************************************
-        # 4. Add the created databases to the measurement set
-        self._add_dbs_to_ms(concatenated_measurement_set, sourcedb_target_path,
-                            parmdbs, major_cycle)
-
-
-        #*******************************************************************
-        # 5. Assign the outputs
-        self.outputs["sourcedb"] = sourcedb_target_path
-        self.outputs["parmdbs"] = parmdbs
-        return 0
-
-    def _create_source_list(self, source_list_path_extern, sourcedb_target_path,
-            concatenated_measurement_set, monet_db_hostname,
-            monet_db_port, monet_db_name, monet_db_user, monet_db_password,
-            assoc_theta):
-        """
-        Create a sourcelist file with sources in the current fov of the ms.
-        If no external path is provided a call is done to the gsm to retrieve
-        a list.
-        return both the created sourcelist and a boolean to signal if an
-        external sourcelist has been retrieved.
-        """
-        # If a (local) sourcelist is received use it else
-        # construct one
-        if source_list_path_extern == "":
-            #create a temporary file to contain the skymap
-            source_list = sourcedb_target_path + ".temp"
-            if self._get_soucelist_from_gsm(
-                    concatenated_measurement_set,
-                    source_list, monet_db_hostname, monet_db_port,
-                    monet_db_name, monet_db_user, monet_db_password,
-                    assoc_theta):
-                self.logger.error("failed creating skymodel")
-                return 1
-            append = False
-        else:
-            source_list = source_list_path_extern
-            append = False # Nicolas Should this be true or false? 
-            # later steps should not contain the original bootstrapping input
-
-        return source_list, append
-
-    def _create_source_db(self, source_list, sourcedb_target_path,
-                          working_directory, executable, append=False):
-        """
-        _create_source_db consumes a sourcelist text file and produces a
-        source db (pyrap table).
-        If the append parameter is set to true, it expects an already existing
-        sourcedb on the supplied path and will then append the sources in
-        the list. Typically used in multiple iterations of the imaging
-        pipeline, with self calibration
-        """
-        # remove existing sourcedb if not appending
-        if not append and os.path.isdir(sourcedb_target_path):
-            shutil.rmtree(sourcedb_target_path)
-            self.logger.debug("Removed existing sky model: {0}".format(
-                                            sourcedb_target_path))
-
-        # The command and parameters to be run
-        cmd = [executable, "in={0}".format(source_list),
-               "out={0}".format(sourcedb_target_path),
-               "format=<", # format according to Ger van Diepen
-               "append=true"] # Always set append flag: no effect on non exist
-                              # db
-
-        try:
-            with CatchLog4CPlus(working_directory,
-                 self.logger.name + "." + os.path.basename("makesourcedb"),
-                 os.path.basename(executable)
-            ) as logger:
-                catch_segfaults(cmd, working_directory, self.environment,
-                                            logger, cleanup=None)
-
-        except subprocess.CalledProcessError as called_proc_error:
-            self.logger.error("Execution of external failed:")
-            self.logger.error(" ".join(cmd))
-            self.logger.error("exception details:")
-            self.logger.error(str(called_proc_error))
-            return 1
-
-        return 0
-
-
-    def _field_of_view(self, measurement_set, alpha_one=None):
-        """
-        _field_of_view calculates the fov, which is dependent on the
-        station type, location and mode.
-        For details see:        
-        http://www.astron.nl/radio-observatory/astronomers/lofar-imaging-capabilities-sensitivity/lofar-imaging-capabilities/lofar
-        
-        """
-        # Open the ms
-        try:
-            table = pt.table(measurement_set)
-
-            # Get antenna name and observation mode
-            antenna = pt.table(table.getkeyword("ANTENNA"))
-            antenna_name = antenna.getcell('NAME', 0)
-
-            observation = pt.table(table.getkeyword("OBSERVATION"))
-            antenna_set = observation.getcell('LOFAR_ANTENNA_SET', 0)
-            observation.close()
-
-            #static parameters for the station diameters ref (1)     
-            hba_core_diameter = 30.8
-            hba_remote_diameter = 41.1
-            lba_inner = 32.3
-            lba_outer = 81.3
-
-            # use measurement set information to ascertain the antenna diameter
-            station_diameter = None
-            if antenna_name.count('HBA'):
-                if antenna_name.count('CS'):
-                    station_diameter = hba_core_diameter
-                elif antenna_name.count('RS'):
-                    station_diameter = hba_remote_diameter
-            elif antenna_name.count('LBA'):
-                if antenna_set.count('INNER'):
-                    station_diameter = lba_inner
-                elif antenna_set.count('OUTER'):
-                    station_diameter = lba_outer
-                elif antenna_set.count('SPARSE'):
-                    station_diameter = lba_outer
-
-            #raise exception if the antenna is not of a supported type
-            if station_diameter is None:
-                self.logger.error(
-                        'Unknown antenna type for antenna: {0} , {1}'.format(
-                                  antenna_name, antenna_set))
-                raise Exception(
-                        "Unknown antenna type encountered in Measurement set")
-
-            #Get the wavelength
-            spectral_window_table = pt.table(table.getkeyword(
-                                                            "SPECTRAL_WINDOW"))
-            freq = float(spectral_window_table.getcell("REF_FREQUENCY", 0))
-            wave_length = pt.taql('CALC C()')[0] / freq
-
-            # Now calculate the FOV see ref (1)
-            # alpha_one is a magic parameter: The value 1.3 is representative  
-            # for a WSRT dish, where it depends on the dish illumination
-            # For LOFAR it will depend on the final tapering of the station.
-            # For the LBA probably no tapering will be applied. In that case it
-            # is expected that the value of a1 will turn out to be between 1.2 
-            # and 1.4. For reference, the value for the LOFAR Initial Test 
-            # Station (ITS) was 1.42. 
-            if alpha_one is None:
-                alpha_one = 1.3
-
-            # fwhm is in radians, so convert to degrees for output
-            fwhm = alpha_one * (wave_length / station_diameter) * (180 /
-                                                                    math.pi)
-            fov = fwhm / 2.0
-        finally:
-            antenna.close()
-            table.close()
-
-        return fov
-
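-    # Worked example (illustrative numbers only): for an HBA core station
-    # (diameter 30.8 m) at a reference frequency of 150 MHz the wavelength
-    # is c / 150e6 ~= 2.0 m, so
-    #
-    #   fwhm = 1.3 * (2.0 / 30.8) * (180 / pi) ~= 4.8 degrees
-    #
-    # and the returned fov is fwhm / 2 ~= 2.4 degrees.
-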
-
-    def _create_parmdb(self, parmdb_executable, target_dir_path):
-        """
-        _create_parmdb, creates a parmdb_executable at the target_dir_path using 
-        the suplied executable. Does not test for existence of target parent dir       
-        returns 1 if parmdb_executable failed 0 otherwise
-        """
-        # Format the template string by inserting the target dir
-        formatted_template = _TEMPLATE_PARMDB.format(target_dir_path)
-        try:
-            # Spawn a subprocess and connect the pipes
-            parmdbm_process = subprocess.Popen(
-                parmdb_executable,
-                stdin=subprocess.PIPE,
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE
-            )
-            # Send formatted template on stdin
-            sout, serr = communicate_returning_strings(parmdbm_process,input=formatted_template)
-
-            # Log the output
-            log_process_output("parmdbm", sout, serr, self.logger)
-        except OSError as oserror:
-            self.logger.error("Failed to spawn parmdbm: {0}".format(
-                                                            str(oserror)))
-            return 1
-
-        return 0
-
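-    # For context: _TEMPLATE_PARMDB (defined elsewhere in this module) is a
-    # parmdbm console script into which the target path is substituted. A
-    # hypothetical sketch of such a script (not the actual template):
-    #
-    #   create tablename='{0}'
-    #   adddef Gain:0:0:Ampl values=1.0
-    #   quit
-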
-
-    def _create_parmdb_for_timeslices(self, parmdb_executable, slice_paths,
-                                       suffix):
-        """
-        _create_parmdb_for_timeslices creates a paramdb for each of the
-        supplied time slices. The paramdb path = input path + suffix.
-        returns 0 on succes 1 on failure:
-        """
-        parmdbms = []
-        for slice_path in slice_paths:
-            #Create the paths based on the 'source ms'
-            ms_parmdb_path = slice_path + suffix
-            parmdbms.append(ms_parmdb_path)
-            # call parmdbm; fail if a single create failed
-            if self._create_parmdb(parmdb_executable, ms_parmdb_path) != 0:
-                return None
-
-        return parmdbms
-
-
-
-    def _create_monet_db_connection(self, hostname, database, username,
-                                    password, port):
-        """
-        Create and return a monat db connection. Return None if the creation 
-        failed and log the error. Returns the connection if succeed.
-        """
-        try:
-            conn = db.connect(hostname=hostname, database=database,
-                                       username=username, password=password,
-                                       port=port)
-        except db.Error as dberror:
-            self.logger.error("Failed to create a monetDB connection: "
-                              "{0}".format(str(dberror)))
-            raise dberror
-
-        return conn
-
-
-    def _get_ra_and_decl_from_ms(self, measurement_set):
-        """
-        This function uses pyrap to read the ra and declanation from a 
-        measurement set (used by expected_fluxes_in_fov). This is a position 
-        in the sky. These values are stored in the field.phase_dir in the first
-        row. All exceptions thrown are caught and logged, return None if reading
-        failed
-        """
-
-        table = None
-        field = None
-        ra_and_decl = None
-
-        try:
-            # open the ms, get the phase direction
-            table = pt.table(measurement_set)
-            field = pt.table(table.getkeyword("FIELD"))
-            ra_and_decl = field.getcell("PHASE_DIR", 0)[0]
-
-        except Exception as exception:
-            #catch all exceptions and log
-            self.logger.error("Error loading FIELD/PHASE_DIR from "
-                              "measurementset {0} : {1}".format(measurement_set,
-                                                                str(exception)))
-            raise exception
-
-        finally:
-            if field is not None:
-                field.close()
-            if table is not None:
-                table.close()
-
-        # Return the ra and decl
-        if ra_and_decl is None:
-            return None
-        if len(ra_and_decl) != 2:
-            self.logger.error(
-                    "returned PHASE_DIR data did not contain two values")
-            return None
-
-        return (ra_and_decl[0], ra_and_decl[1])
-
-
-    def _get_soucelist_from_gsm(self, measurement_set,
-                    sourcelist, monet_db_host, monet_db_port, monet_db_name,
-                    monet_db_user, monet_db_password, assoc_theta=None):
-        """
-        Create a bbs sky model. Based on the measurement (set) suplied
-        The skymap is created at the sourcelist
-        """
-        # Create monetdb connection
-        conn = self._create_monet_db_connection(monet_db_host, monet_db_name,
-                 monet_db_user, monet_db_password, monet_db_port)
-        self.logger.debug("Connected to monet db at: {0}:{1}  {2}".format(
-                monet_db_host, monet_db_port, monet_db_name))
-
-        # get position of the target in the sky
-        (ra_c, decl_c) = self._get_ra_and_decl_from_ms(measurement_set)
-        self.logger.debug("ra and dec from measurement set: {0}, {1}".format(
-                    ra_c, decl_c))
-
-        # Get the fov: sources in this fov should be included in the skymodel
-        fov_radius = self._field_of_view(measurement_set)
-        self.logger.debug(
-            "Using the folowing calculated field of view: {0}".format(
-                fov_radius))
-
-        # !!magic constant!! This value is calculated based on
-        # communications with Bart Sheers
-        if assoc_theta is None:
-            assoc_theta = 90.0 / 3600
-        try:
-            # Transform the ra and decl from radians to degrees
-            ra_c = float(ra_c) * (180 / math.pi)
-            if ra_c < 0:  # gsm utils break on negative ra_c, so add 360
-                ra_c += 360.0
-            decl_c = float(decl_c) * (180 / math.pi)
-            self.logger.debug("external call to gsm module:")
-            self.logger.debug("gsm.expected_fluxes_in_fov(conn, {0} , {1}, {2}, {3}, {4}, {5})".format(
-                ra_c, decl_c, float(fov_radius), float(assoc_theta), sourcelist, "storespectraplots=False"))
-
-            gsm.expected_fluxes_in_fov(conn, ra_c ,
-                        decl_c, float(fov_radius),
-                        float(assoc_theta), sourcelist,
-                        storespectraplots=False)
-            self.logger.debug(gsm.__file__)
-
-        except Exception as exception:
-            self.logger.error("expected_fluxes_in_fov raise exception: " +
-                              str(exception))
-            return 1
-
-        # validate the retrieved sourcelist
-        with open(sourcelist) as fp:
-            sourcelist_corrected = \
-                self._validate_and_correct_sourcelist(fp.read())
-
-        if sourcelist_corrected is not None:
-            self.logger.debug("Found duplicates in the sourcelist!")
-            self.logger.debug("Creating a new sourcelist")
-            # a corrected sourcelist was created:
-            # move the original sourcelist aside
-            shutil.move(sourcelist, sourcelist + "_with_duplicates")
-            # write the corrected sourcelist at the original location
-            with open(sourcelist, "w") as fp:
-                fp.write(sourcelist_corrected)
-            self.logger.debug("Moved sourcelist and created a new sourcelist")
-        else:
-            self.logger.debug("Sourcelist did not contain duplicates")
-        return 0
-
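-    # Worked example of the conversion above (illustrative numbers only):
-    # PHASE_DIR is stored in radians, e.g. ra_c = -0.5236 rad. Multiplying
-    # by 180 / pi gives -30.0 degrees; since the gsm utilities break on a
-    # negative right ascension, 360 is added, yielding 330.0 degrees.
-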
-    def _validate_and_correct_sourcelist(self, sourcelist):
-        """
-        Create a sourcelist with non duplicate entries based on the
-        supplied sourcelist
-        Return None of no duplicate found        
-        """
-        all_lines = sourcelist.split("\n")
-        header = ""
-        all_entries_list = []
-        for line in all_lines:
-            # skip empty lines
-            if len(line) == 0:
-                continue
-            # get the header
-            if line[0] == "#":
-                header = line
-                continue
-            # unpack the values
-            all_entries_list.append(line.split(","))
-
-        # Get the names for the entries
-        entrie_names = []
-        for entrie in all_entries_list:
-            entrie_names.append(entrie[0])  # the name is the first field
-
-        # enumerate over all names except the last
-        duplicate_entry_idx = 0
-        for idx, name in enumerate(entrie_names[:-1]):
-            if name in entrie_names[idx + 1:]:
-                # if duplicated, rename the current entry to a unique name
-                entrie_names[idx] = name + "_duplicate_{0}".format(duplicate_entry_idx)
-                duplicate_entry_idx += 1
-
-        # now write back the possibly changed names
-        for entrie, entrie_name in zip(all_entries_list,
-                                entrie_names):
-            entrie[0] = entrie_name
-
-        # Write the new sourcelist if we found duplicate entries!
-        if duplicate_entry_idx > 0:
-            new_lines = []
-            # add header
-            new_lines.append(header)
-            # empty line
-            new_lines.append("")
-            # entries with non duplicate names
-            for entrie in all_entries_list:
-                new_lines.append(",".join(entrie))
-            # return the sourcelist
-            return "\n".join(new_lines)
-
-        return None
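-
-    # Illustrative example (hypothetical names) of the renaming performed in
-    # _validate_and_correct_sourcelist: given entries named
-    # ["src1", "src2", "src1"], the first occurrence is renamed, yielding
-    # ["src1_duplicate_0", "src2", "src1"]; the last occurrence keeps its
-    # original name.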
-
-    def _add_dbs_to_ms(self, concatenated_measurement_set, sourcedb_target_path,
-                            parmdbs_path, major_cycle):
-        """
-        Add the in this recipe created sourcedb and instrument table(parmdb)
-        to the local measurementset.
-        """
-        self.logger.info("Adding sourcemodel and instrument model to output ms.")
-        # Create the base meta information directory        
-        meta_directory = concatenated_measurement_set + "_selfcal_information"
-        if not os.path.exists(meta_directory):
-            os.makedirs(meta_directory)
-
-        # Cycle dir
-        cycle_directory = os.path.join(meta_directory,
-                                "cycle_" + str(major_cycle))
-        if not os.path.exists(cycle_directory):
-            os.makedirs(cycle_directory)
-
-        # Copy the actual data
-        sourcedb_directory = os.path.join(cycle_directory,
-               os.path.basename(sourcedb_target_path))
-        if os.path.exists(sourcedb_directory):
-            shutil.rmtree(sourcedb_directory)  # delete dir to assure copy succeeds
-        shutil.copytree(sourcedb_target_path, sourcedb_directory)
-
-        #parmdbs_path is a list!
-        for parmdb_entry in parmdbs_path:
-            try:
-                parmdb_directory = os.path.join(cycle_directory,
-                    os.path.basename(parmdb_entry))
-                # delete dir to assure copy succeeds
-                if os.path.exists(parmdb_directory):
-                    shutil.rmtree(parmdb_directory)
-                shutil.copytree(parmdb_entry, parmdb_directory)
-            except Exception:
-                self.logger.warn("Failed copying parmdb:")
-                self.logger.warn(parmdb_entry)
-                continue    # slices might be missing; not a fatal error
-
-
-if __name__ == "__main__":
-    # args contain information regarding to the logging server
-    _jobid, _jobhost, _jobport = sys.argv[1:4]
-    sys.exit(imager_create_dbs(
-        _jobid, _jobhost, _jobport).run_with_stored_arguments())
-
-
diff --git a/CEP/Pipeline/recipes/sip/nodes/imager_finalize.py b/CEP/Pipeline/recipes/sip/nodes/imager_finalize.py
deleted file mode 100644
index 6b5c0afce36..00000000000
--- a/CEP/Pipeline/recipes/sip/nodes/imager_finalize.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#                                                         LOFAR IMAGING PIPELINE
-#
-#                                                           imager_finalize
-#                                                            Wouter Klijn 2012
-#                                                           klijn@astron.nl
-# ------------------------------------------------------------------------------
-
-import sys
-import subprocess
-import os
-import tempfile
-import shutil
-
-from lofarpipe.support.lofarnode import LOFARnodeTCP
-from lofarpipe.support.utilities import log_time, create_directory
-import lofar.addImagingInfo as addimg
-import pyrap.images as pim
-from lofarpipe.support.utilities import catch_segfaults
-from lofarpipe.support.data_map import DataMap
-from lofarpipe.support.pipelinelogging import CatchLog4CPlus
-from lofar.common.subprocess_utils import communicate_returning_strings
-
-import urllib.request, urllib.error, urllib.parse
-import lofarpipe.recipes.helpers.MultipartPostHandler as mph
-
-class imager_finalize(LOFARnodeTCP):
-    """
-    This script performs the following functions:
-    
-    1. Add the image info to the casa image:
-       addimg.addImagingInfo (imageName, msNames, sourcedbName, minbl, maxbl)
-    2. Convert the image to hdf5 and fits image
-    3. Filling of the HDF5 root group
-    4. Export fits image to msss image server
-    5. Export sourcelist to msss server, copy the sourcelist to hdf5 location
-    6. Return the outputs
-    """
-    def run(self, awimager_output, ms_per_image, sourcelist, target,
-            output_image, minbaseline, maxbaseline, processed_ms_dir,
-            fillrootimagegroup_exec, environment, sourcedb):
-        """
-        :param awimager_output: Path to the casa image produced by awimager
-        :param ms_per_image: The X (90) measurement sets scheduled to
-            create the image
-        :param sourcelist: list of sources found in the image
-        :param target: <unused>
-        :param minbaseline: Minimum baseline used for the image
-        :param maxbaseline: largest/maximum baseline used for the image
-        :param processed_ms_dir: The X (90) measurement sets actually used to
-            create the image
-        :param fillrootimagegroup_exec: Executable used to add image data to
-            the hdf5 image
-
-        :rtype: self.outputs['hdf5'] set to "succes" to signal node success
-        :rtype: self.outputs['image'] path to the produced hdf5 image
-        """
-        self.environment.update(environment)
-        with log_time(self.logger):
-            ms_per_image_map = DataMap.load(ms_per_image)
-
-            # *****************************************************************
-            # 1. add image info                      
-            # Get all the files in the processed measurement dir
-            file_list = os.listdir(processed_ms_dir)
-            # TODO: BUG!! the meta data might contain files that were copied
-            # but failed in imager_bbs 
-            processed_ms_paths = []
-            for item in ms_per_image_map:
-                path = item.file
-                ms_file_name = os.path.split(path)[1]
-                #if the ms is in the processed dir (additional check)
-                if ms_file_name in file_list:
-                    # save the path
-                    processed_ms_paths.append(os.path.join(processed_ms_dir,
-                                                            ms_file_name))
-            # add the information to the image
-            try:
-                addimg.addImagingInfo(awimager_output, processed_ms_paths,
-                    sourcedb, minbaseline, maxbaseline)
-
-            except Exception as error:
-                self.logger.warn("addImagingInfo Threw Exception:")
-                self.logger.warn(error)
-                # Catch the "already done" error: allows for rerunning
-                # of the recipe
-                if "addImagingInfo already done" in str(error):
-                    pass
-                else:
-                    raise
-                # The majority of the tables are updated correctly
-
-            # ***************************************************************
-            # 2. convert to hdf5 image format
-            output_directory = None
-            pim_image = pim.image(awimager_output)
-            try:
-                self.logger.info("Saving image in HDF5 Format to: {0}" .format(
-                                output_image))
-                # Create the output directory
-                output_directory = os.path.dirname(output_image)
-                create_directory(output_directory)
-                # save the image
-                pim_image.saveas(output_image, hdf5=True)
-
-            except Exception as error:
-                self.logger.error(
-                    "Exception raised inside pyrap.images: {0}".format(
-                                                                str(error)))
-                raise error
-
-            # Convert to fits
-            # create target location
-            fits_output = output_image + ".fits"
-            # To allow reruns, a possibly existing earlier version must be
-            # removed: image2fits fails otherwise!
-            if os.path.exists(fits_output):
-                os.unlink(fits_output)
-
-            try:
-                temp_dir = tempfile.mkdtemp(suffix=".%s" % (os.path.basename(__file__),))
-                with CatchLog4CPlus(temp_dir,
-                    self.logger.name + '.' + os.path.basename(awimager_output),
-                            "image2fits") as logger:
-                    catch_segfaults(["image2fits", '-in', awimager_output,
-                                                 '-out', fits_output],
-                                    temp_dir, self.environment, logger)
-            except Exception as excp:
-                self.logger.error(str(excp))
-                return 1
-            finally:
-                shutil.rmtree(temp_dir)
-
-            # ****************************************************************
-            # 3. Filling of the HDF5 root group
-            command = [fillrootimagegroup_exec, output_image]
-            self.logger.info(" ".join(command))
-            #Spawn a subprocess and connect the pipes
-            proc = subprocess.Popen(
-                        command,
-                        stdin=subprocess.PIPE,
-                        stdout=subprocess.PIPE,
-                        stderr=subprocess.PIPE)
-
-            (stdoutdata, stderrdata) = communicate_returning_strings(proc)
-
-            exit_status = proc.returncode
-            self.logger.info(stdoutdata)
-            self.logger.info(stderrdata)
-
-            # if the executable failed, log the exit status and bail out
-            if exit_status != 0:
-                self.logger.error("Error using the fillRootImageGroup command,"
-                    " see above lines. Exit status: {0}".format(exit_status))
-
-                return 1
-
-            # *****************************************************************
-            # 4 Export the fits image to the msss server
-            url = "http://tanelorn.astron.nl:8000/upload"
-            try:
-                self.logger.info("Starting upload of fits image data to server!")
-                opener = urllib.request.build_opener(mph.MultipartPostHandler)
-                filedata = {"file": open(fits_output, "rb")}
-                opener.open(url, filedata, timeout=2)
-
-                # HTTPError needs to be caught first.
-            except urllib.error.HTTPError as httpe:
-                self.logger.warn("HTTP status is: {0}".format(httpe.code))
-                self.logger.warn("failed exporting fits image to server")
-
-            except urllib.error.URLError as urle:
-                self.logger.warn(str(urle.reason))
-                self.logger.warn("failed exporting fits image to server")
-
-            except Exception as exc:
-                self.logger.warn(str(exc))
-                self.logger.warn("failed exporting fits image to server")
-
-
-            # *****************************************************************
-            # 5. export the sourcelist to the msss server
-            url = "http://tanelorn.astron.nl:8000/upload_srcs"
-            try:
-                # Copy file to output location
-                new_sourcelist_path = output_image + ".sourcelist"
-                if os.path.exists(new_sourcelist_path):
-                    os.unlink(new_sourcelist_path)
-
-                shutil.copy(sourcelist, new_sourcelist_path)
-                self.logger.info(
-                            "Starting upload of sourcelist data to server!")
-                opener = urllib.request.build_opener(mph.MultipartPostHandler)
-                filedata = {"file": open(new_sourcelist_path, "rb")}
-                opener.open(url, filedata, timeout=2)
-
-                # HTTPError needs to be caught first.
-            except urllib.error.HTTPError as httpe:
-                self.logger.warn("HTTP status is: {0}".format(httpe.code))
-                self.logger.warn("failed exporting sourcelist to server")
-
-            except urllib.error.URLError as urle:
-                self.logger.warn(str(urle.reason))
-                self.logger.warn("failed exporting sourcelist image to server")
-
-            except Exception as exc:
-                self.logger.warn(str(exc))
-                self.logger.warn("failed exporting sourcelist image to serve")
-
-
-            self.outputs["hdf5"] = "succes"
-            self.outputs["image"] = output_image
-
-        return 0
-
-
-if __name__ == "__main__":
-
-    _JOBID, _JOBHOST, _JOBPORT = sys.argv[1:4]
-    sys.exit(imager_finalize(_JOBID, _JOBHOST,
-                             _JOBPORT).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/imager_prepare.py b/CEP/Pipeline/recipes/sip/nodes/imager_prepare.py
deleted file mode 100644
index 8a817de1ef5..00000000000
--- a/CEP/Pipeline/recipes/sip/nodes/imager_prepare.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# LOFAR IMAGING PIPELINE
-# Prepare phase node
-# Wouter Klijn
-# 2012
-# klijn@astron.nl
-# -----------------------------------------------------------------------------
-
-import sys
-import shutil
-import os
-import subprocess
-import copy
-import pyrap.tables as pt   # order of pyrap import influences the type
-                            # conversion binding
-from lofarpipe.support.pipelinelogging import CatchLog4CPlus
-from lofarpipe.support.pipelinelogging import log_time
-from lofarpipe.support.utilities import patch_parset
-from lofarpipe.support.utilities import catch_segfaults
-from lofarpipe.support.lofarnode import  LOFARnodeTCP
-from lofarpipe.support.utilities import create_directory
-from lofarpipe.support.data_map import DataMap
-from lofarpipe.support.subprocessgroup import SubProcessGroup
-from lofarpipe.recipes.helpers.data_quality import run_rficonsole, filter_bad_stations
-from lofar.common.subprocess_utils import communicate_returning_strings
-
-# Some constant settings for the recipe
-_time_slice_dir_name = "time_slices"
-
-
-class imager_prepare(LOFARnodeTCP):
-    """
-    Steps performed on the node:
-    
-    1. Create directories and assure that they are empty.
-    2. Collect the Measurement Sets (MSs): copy to the current node.
-    3. Start dppp: combines the data from subgroups into a single timeslice.
-    4. Flag rfi (toggled by parameter)
-    5. Add addImagingColumns to the casa ms.
-    6. Filter bad stations: find stations with repeated bad measurements and
-       remove these completely from the dataset.
-    7. Add measurement set (beam) tables
-    8. Perform the (virtual) concatenation of the timeslices
-    """
-    def run(self, environment, parset, working_dir, processed_ms_dir,
-            ndppp_executable, output_measurement_set,
-            time_slices_per_image, subbands_per_group, input_ms_mapfile,
-            asciistat_executable, statplot_executable, msselect_executable,
-            rficonsole_executable, do_rficonsole, add_beam_tables, globalfs):
-        """
-        Entry point for the node recipe
-        """
-        self.environment.update(environment)
-        self.globalfs = globalfs
-        with log_time(self.logger):
-            input_map = DataMap.load(input_ms_mapfile)
-
-            #******************************************************************
-            # 1. Create the directories used in this recipe
-            create_directory(processed_ms_dir)
-            create_directory(os.path.dirname(output_measurement_set))
-
-            # time slice dir: assure an empty directory, since stale data
-            # is problematic for dppp
-            time_slice_dir = os.path.join(working_dir, _time_slice_dir_name)
-            create_directory(time_slice_dir)
-            for root, dirs, files in os.walk(time_slice_dir):
-                for file_to_remove in files:
-                    os.unlink(os.path.join(root, file_to_remove))
-                for dir_to_remove in dirs:
-                    shutil.rmtree(os.path.join(root, dir_to_remove))
-            self.logger.debug("Created directory: {0}".format(time_slice_dir))
-            self.logger.debug("and assured it is empty")
-
-            #******************************************************************
-            # 2. Copy the input files
-            # processed_ms_map will be the map containing all the 'valid'
-            # input ms
-            processed_ms_map = self._copy_input_files(
-                            processed_ms_dir, input_map)
-
-            #******************************************************************
-            # 3. run dppp: collect frequencies into larger group
-            time_slices_path_list = \
-                self._run_dppp(working_dir, time_slice_dir,
-                    time_slices_per_image, processed_ms_map, subbands_per_group,
-                    processed_ms_dir, parset, ndppp_executable)
-
-            # If no timeslices were created, bail out with exit status 1
-            if len(time_slices_path_list) == 0:
-                self.logger.error("No timeslices were created.")
-                self.logger.error("Exiting with error state 1")
-                return 1
-
-            self.logger.debug(
-                    "Produced time slices: {0}".format(time_slices_path_list))
-
-            #***********************************************************
-            # 4. run rficonsole: flag datapoints which are corrupted
-            if do_rficonsole:
-                run_rficonsole(rficonsole_executable, time_slice_dir,
-                                 time_slices_path_list, self.logger,
-                                self.resourceMonitor)
-
-            #******************************************************************
-            # 5. Add imaging columns to each timeslice
-            # ndppp_executable fails if not present
-            for time_slice_path in time_slices_path_list:
-                pt.addImagingColumns(time_slice_path)
-                self.logger.debug(
-                "Added imaging columns to time_slice: {0}".format(
-                                                            time_slice_path))
-
-            #*****************************************************************
-            # 6. Filter bad stations
-            time_slice_filtered_path_list = filter_bad_stations(
-                time_slices_path_list, asciistat_executable,
-                statplot_executable, msselect_executable,
-                self.logger, self.resourceMonitor)
-
-            #*****************************************************************
-            # 7. Add beam tables
-            if add_beam_tables:
-                self._add_beam_tables(time_slice_filtered_path_list)
-
-            #******************************************************************
-            # 8. Perform the (virtual) concatenation of the timeslices
-            self._concat_timeslices(time_slice_filtered_path_list,
-                                    output_measurement_set)
-
-            # *****************************************************************
-            # Write the actually used ms for the created dataset to the input 
-            # mapfile
-            processed_ms_map.save(input_ms_mapfile)
-
-            # return
-            self.outputs["time_slices"] = \
-                time_slices_path_list
-
-        return 0
-
-    def _add_beam_tables(self, time_slices_path_list):
-        beamtable_proc_group = SubProcessGroup(self.logger)
-        for ms_path in time_slices_path_list:
-            self.logger.debug("makebeamtables start")
-            cmd_string = "makebeamtables ms={0} overwrite=true".format(ms_path)
-            self.logger.debug(cmd_string)
-            beamtable_proc_group.run(cmd_string)
-
-        if beamtable_proc_group.wait_for_finish() is not None:
-            # TODO: Exception on error: make time_slices_path_list a mapfile
-            raise Exception("a makebeamtables run failed!")
-
-        self.logger.debug("makebeamtables finished")
-
-    def _copy_input_files(self, processed_ms_dir, input_map):
-        """
-        Collect all the measurement sets in a single directory:
-        The measurement sets are located on different nodes on the cluster.
-        This function collects all the file in the input map in the
-        processed_ms_dir Return value is a set of missing files
-        """
-        processed_ms_map = copy.deepcopy(input_map)
-        # loop all measurement sets
-        for input_item, processed_item in zip(input_map, processed_ms_map):
-            # fill the copied item with the correct data
-            processed_item.host = self.host
-            processed_item.file = os.path.join(
-                    processed_ms_dir, os.path.basename(input_item.file))
-
-            stdoutdata = None
-            stderrdata = None
-            # If we have to skip this ms
-            if input_item.skip:
-                exit_status = 1
-                stderrdata = "SKIPPED_FILE"
-
-            else:
-                # use a symlink instead of a copy if the machine is the same
-                # (localhost) or a global fs is used; make sure the data ends
-                # up in the correct directory, for now:
-                # working_dir/[jobname]/subbands
-                # construct the copy command
-                command = ["rsync", "-r", "{0}:{1}".format(
-                                input_item.host, input_item.file),
-                                   "{0}".format(processed_ms_dir)]
-                if self.globalfs or input_item.host == "localhost":
-                    # symlinking is enough
-                    command = ["ln", "-sf", "{0}".format(input_item.file),
-                                      "-t", "{0}".format(processed_ms_dir)]
-
-                self.logger.debug("executing: " + " ".join(command))
-
-                # Spawn a subprocess and connect the pipes.
-                # Without waiting per copy, up to 720 copies would run at
-                # once, which might saturate the cluster.
-                copy_process = subprocess.Popen(
-                            command,
-                            stdin = subprocess.PIPE,
-                            stdout = subprocess.PIPE,
-                            stderr = subprocess.PIPE)
-
-                # Wait for the copy to finish inside the loop: enforce a
-                # single-threaded copy
-                (stdoutdata, stderrdata) = communicate_returning_strings(copy_process)
-
-                exit_status = copy_process.returncode
-
-            # if copy failed log the missing file and update the skip fields
-            if exit_status != 0:
-                input_item.skip = True
-                processed_item.skip = True
-                self.logger.warning(
-                            "Failed loading file: {0}".format(input_item.file))
-                self.logger.warning(stderrdata)
-
-            self.logger.debug(stdoutdata)
-
-        return processed_ms_map
-
-
-    def _dppp_call(self, working_dir, ndppp, cmd, environment):
-        """
-        Muckable function running the dppp executable.
-        Wraps dppp with catchLog4CPLus and catch_segfaults
-        """
-        with CatchLog4CPlus(working_dir, self.logger.name +
-             "." + os.path.basename("imager_prepare_ndppp"),
-                  os.path.basename(ndppp)) as logger:
-            catch_segfaults(cmd, working_dir, environment,
-                   logger, cleanup = None, usageStats=self.resourceMonitor)
-
-    def _get_nchan_from_ms(self, file):
-        """
-        Wrapper for a pt call to retrieve the number of channels in a ms.
-
-        Uses pyrap functionality, which may throw 'random' exceptions.
-        """
-
-        # open the dataset; assume the same nchan for all subbands
-        table = pt.table(file)
-
-        # get the data column, get the description, get the
-        # shape; the first index is the number of channels
-        nchan = str(pt.tablecolumn(table, 'DATA').getdesc()["shape"][0])
-
-        return nchan
-
-    def _run_dppp(self, working_dir, time_slice_dir_path, slices_per_image,
-                  processed_ms_map, subbands_per_image, collected_ms_dir_name, 
-                  parset, ndppp):
-        """
-        Run NDPPP:
-        Create dir for grouped measurements, assure clean workspace
-        Call with log for cplus and catch segfaults. Pparameters are
-        supplied in parset
-        """
-        time_slice_path_list = []
-        for idx_time_slice in range(slices_per_image):
-            start_slice_range = idx_time_slice * subbands_per_image
-            end_slice_range = (idx_time_slice + 1) * subbands_per_image
-            output_ms_name = "time_slice_{0}.dppp.ms".format(idx_time_slice)
-
-            # construct time slice name
-            time_slice_path = os.path.join(time_slice_dir_path,
-                                         output_ms_name)
-
-            # convert the datamap to a file list: add a non-valid entry for
-            # skipped files: ndppp needs placeholder entries there to allow
-            # filling with zeros
-            ndppp_input_ms = []
-            nchan_known = False
-
-            for item in processed_ms_map[start_slice_range:end_slice_range]:
-                if item.skip:
-                    ndppp_input_ms.append("SKIPPEDSUBBAND")
-                else:
-                    # From the first non-skipped file, get the nchan
-                    if not nchan_known:
-                        try:
-                            # We want to automatically average the number
-                            # of channels in the output to 1; get the current
-                            # nr of channels
-                            nchan_input = self._get_nchan_from_ms(item.file)
-                            nchan_known = True
-
-                        # corrupt input measurement set
-                        except Exception as e:
-                            self.logger.warn(str(e))
-                            item.skip = True
-                            ndppp_input_ms.append("SKIPPEDSUBBAND")
-                            continue
-
-                    ndppp_input_ms.append(item.file)
-
-            # if none of the input files was valid, skip the creation of the
-            # timeslice altogether; it will not show up in the timeslice
-            # mapfile
-            if not nchan_known:
-                continue
-
-            # TODO/FIXME: dependency on the step name!!!!
-            ndppp_nchan_key = "avg1.freqstep"  
-            
-            # Join into a single string list of paths.
-            msin = "['{0}']".format("', '".join(ndppp_input_ms))
-            
-            # Update the parset with computed parameters
-            patch_dictionary = {'uselogger': 'True',  # enables log4cplus
-                               'msin': msin,
-                               'msout': time_slice_path,
-                               ndppp_nchan_key:nchan_input}
-
-
-            # Write an ndppp parset with the runtime variables patched in
-            nddd_parset_path = time_slice_path + ".ndppp.par"
-            try:
-                temp_parset_filename = patch_parset(parset, patch_dictionary)
-                shutil.copyfile(temp_parset_filename, nddd_parset_path)
-                self.logger.debug(
-                            "Wrote a ndppp parset with runtime variables:"
-                                  " {0}".format(nddd_parset_path))
-            except Exception as exception:
-                self.logger.error("failed loading and updating the " +
-                                  "parset: {0}".format(parset))
-                raise exception
-            # remove the temp file
-            finally:
-                os.remove(temp_parset_filename)
-
-            # run ndppp
-            cmd = [ndppp, nddd_parset_path]
-
-            try:
-                # Actual dppp call to externals (allows mucking)
-                self._dppp_call(working_dir, ndppp, cmd, self.environment)
-                # append the created timeslice on successful run
-                time_slice_path_list.append(time_slice_path)
-
-            # On error the current timeslice should be skipped
-            # and the input ms should have the skip flag set
-            except Exception as exception:
-                for item in processed_ms_map[start_slice_range:end_slice_range]:
-                    item.skip = True
-                self.logger.warning(str(exception))
-                continue
-
-        return time_slice_path_list
-
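-    # For illustration only (hypothetical values): after patching, the NDPPP
-    # parset written for the first time slice contains entries such as
-    #
-    #   uselogger = True
-    #   msin = ['.../sb000.MS', 'SKIPPEDSUBBAND', '.../sb002.MS', ...]
-    #   msout = .../time_slices/time_slice_0.dppp.ms
-    #   avg1.freqstep = <nchan of the first valid input ms>
-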
-    def _concat_timeslices(self, group_measurements_collected,
-                                    output_file_path):
-        """
-        Msconcat to combine the time slices in a single ms:
-        It is a virtual ms, a ms with symbolic links to actual data is created!
-        """
-        pt.msconcat(group_measurements_collected,
-                               output_file_path, concatTime = True)
-        self.logger.debug("Concatenated the files: {0} into the single measure"
-            "mentset: {1}".format(
-                ", ".join(group_measurements_collected), output_file_path))
-
-
-
-if __name__ == "__main__":
-    _jobid, _jobhost, _jobport = sys.argv[1:4]
-    sys.exit(
-        imager_prepare(_jobid, _jobhost, _jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/imager_source_finding.py b/CEP/Pipeline/recipes/sip/nodes/imager_source_finding.py
deleted file mode 100644
index 239183f5ce2..00000000000
--- a/CEP/Pipeline/recipes/sip/nodes/imager_source_finding.py
+++ /dev/null
@@ -1,275 +0,0 @@
-
-import sys
-import os
-import shutil
-
-from lofar.parameterset import parameterset
-from lofarpipe.support.lofarnode import LOFARnodeTCP
-
-from lofarpipe.support.pipelinelogging import CatchLog4CPlus
-from lofarpipe.support.utilities import catch_segfaults
-from lofarpipe.support.utilities import create_directory
-
-
-class imager_source_finding(LOFARnodeTCP):
-    """
-    The imager_source_finding recipe. In this script a number of pyBDSM calls
-    are made. pyBDSM is a source finder which produces a list of sources and
-    images with those sources removed.
-    By using multiple iterations weak sources can be found and indexed. 
-    
-    For (max iter) or (no sources found):
-    
-    1. Select correct input image and parset based on the current iteration
-    2. Convert the string values retrieved from the parset to python types
-    3. Start pybdsm
-    4. Export a sourcelist if sources were found and save the image with
-       sources subtracted
-    
-    And then:
-    
-    5. Combine the source lists into a single large sourcelist
-    6. Create sourcedb based on the sourcelist and return this
-       
-    """
-    def run(self, input_image, bdsm_parameter_run1_path,
-            bdsm_parameter_run2x_path, catalog_output_path, image_output_path,
-            sourcedb_target_path, environment, working_directory,
-            create_sourcdb_exec):
-        """
-        :param input_image: image to look for sources in
-        :param bdsm_parameter_run1_path: parset with bdsm parameters for the
-               first run
-        :param bdsm_parameter_run2x_path: parset with bdsm parameters for the
-               second and later runs
-        :param catalog_output_path: Path to the full list of sources found
-        :param image_output_path: Path to the fits image with all sources
-               subtracted
-        :param sourcedb_target_path: Path to store the sourcedb created from
-            the catalog containing all the found sources
-        :param environment: environment for runwithlog4cplus
-        :param working_directory: Working dir
-        :param create_sourcdb_exec: Path to the create sourcedb executable
-        
-        :rtype: self.outputs['source_db'] sourcedb_target_path
-        
-        """
-
-        #******************************************************************
-        # 0. Create the directories used in this recipe
-        create_directory(working_directory)
-
-        import lofar.bdsm as bdsm  # @UnresolvedImport
-        self.logger.info("Starting imager_source_finding")
-        self.environment.update(environment)
-        # default frequency is None (read from image), save for later cycles.
-        # output of pybdsm forgets freq of source image
-        frequency = None
-        # Output of the for loop: n iterations and any source found
-        n_itter_sourcefind = None
-        sources_found = False
-        max_sourcefind_itter = 5  # TODO: the maximum iteration count is a magic value
-        for idx in range(max_sourcefind_itter):
-            # ******************************************************************
-            # 1. Select correct input image
-            # The first iteration uses the input image; second and later use
-            # the output of the previous iteration. The 1+ iterations have a
-            # separate parameter set.
-            if idx == 0:
-                input_image_local = input_image # input_image_cropped
-                image_output_path_local = image_output_path + "_0"
-                bdsm_parameter_local = parameterset(bdsm_parameter_run1_path)
-            else:
-                input_image_local = image_output_path + "_{0}".format(
-                                                                str(idx - 1))
-                image_output_path_local = image_output_path + "_{0}".format(
-                                                                    str(idx))
-                bdsm_parameter_local = parameterset(bdsm_parameter_run2x_path)
-
-            # *****************************************************************
-            # 2. parse the parameters and convert to python if possible 
-            # this is needed for pybdsm
-            bdsm_parameters = {}
-            for key in list(bdsm_parameter_local.keys()):
-                parameter_value = bdsm_parameter_local.getStringVector(key)[0]
-                try:
-                    parameter_value = eval(parameter_value)
-                except Exception:
-                    pass  # keep the value as a string
-                bdsm_parameters[key] = parameter_value
-
-            # pybdsm needs its filename here, to derive the log location
-            bdsm_parameters["filename"] = input_image_local
-
-
-            # *****************************************************************
-            # 3. Start pybdsm
-            self.logger.debug(
-                "Starting sourcefinder bdsm on {0} using parameters:".format(
-                                                        input_image_local))
-            self.logger.debug(repr(bdsm_parameters))
-            img = bdsm.process_image(bdsm_parameters, frequency = frequency)
-
-            # Always export the catalog 
-            img.write_catalog(
-                outfile = catalog_output_path + "_{0}".format(str(idx)),
-                catalog_type = 'gaul', clobber = True,
-                format = "bbs", force_output = True)
-
-            # If no more matching of sources with gaussians is possible
-            # (nsrc == 0), break the loop
-            if img.nsrc == 0:
-                n_itter_sourcefind = idx
-                break
-
-            # We have at least found a single source!
-            self.logger.debug("Number of source found: {0}".format(
-                                                                img.nsrc))
-            # *****************************************************************
-            # 4. export the image 
-
-            self.logger.debug("Wrote list of sources to file at: {0})".format(
-                                                            catalog_output_path))
-            img.export_image(outfile = image_output_path_local,
-                                 img_type = 'gaus_resid', clobber = True,
-                                 img_format = "fits")
-            self.logger.debug("Wrote fits image with substracted sources"
-                                  " at: {0})".format(image_output_path_local))
-
-            # Save the frequency from image header of the original input file,
-            # This information is not written by pybdsm to the exported image
-            frequency = img.frequency
-
-
-        # if not set, the maximum number of iterations was performed
-        if n_itter_sourcefind is None:
-            n_itter_sourcefind = max_sourcefind_itter
-
-        # ********************************************************************
-        # 5. The produced catalogs now need to be combined into a single list
-        # Call with the number of loops and the path to the files, only combine
-        # if we found sources
-        self.logger.debug(
-                "Writing source list to file: {0}".format(catalog_output_path))
-        self._combine_source_lists(n_itter_sourcefind, catalog_output_path)
-
-        # *********************************************************************
-        # 6. Convert sourcelist to sourcedb
-        self._create_source_db(catalog_output_path, sourcedb_target_path,
-            working_directory, create_sourcdb_exec, False)
-        # Assign the outputs
-        self.outputs["catalog_output_path"] = catalog_output_path
-        self.outputs["source_db"] = sourcedb_target_path
-        return 0
-
-    def _combine_source_lists(self, n_itter_sourcefind, catalog_output_path):
-        """
-        Parse  and concate the produces sourcelists, files are numbered using 
-        the sourcefind iteration. 
-        For all sourcefind itterations with sources produced:
-        
-        1. Open the file for this itteration
-        2. parse the files:
-        
-            a. get the format line
-            b. skip whiteline
-            c. collect all sources as strings
-        
-        3. Save the collected data:
-        
-            a. The format line (only a single formatter is need, same for each file)
-            b. add the sources
-            c. finish with an endl
-            
-        """
-        source_list_lines = []
-        format_line = None
-
-        # If no sources are found at all, n_itter_sourcefind == 0,
-        # but we do need to create a combined sourcelist and read this list
-        if n_itter_sourcefind == 0:
-            n_itter_sourcefind = 1  # at least use the first empty bdsm output
-
-        for idx_source_file in range(n_itter_sourcefind):
-            # *****************************************************************
-            # 1 . Open the file
-            filepointer = open(catalog_output_path + "_{0}".format(
-                                                            idx_source_file))
-            #**************************************************
-            # 2. Parse the files
-            #   a. Read the format line and save (same for all bdsm runs)
-            format_line = filepointer.readline()
-
-            # read the rest of the file
-            for line in filepointer.readlines():
-                #   b. skip empty lines (only an endl)
-                if len(line) == 1:
-                    continue
-                #   c. Collect the sources as strings
-                source_list_lines.append(line)
-            filepointer.close()
-
-        #**************************************************
-        #3. write the concatenated sourcelist to a file (the full catalog path)
-        filepointer = open(catalog_output_path, "w")
-        #   a. first the header
-        filepointer.write(format_line)
-        filepointer.write("\n")
-
-        #   b. then the sources
-        for line in source_list_lines:
-            filepointer.write(line)
-        #   c. trailing empty line
-        filepointer.write("\n")
-        filepointer.close()
-        self.logger.debug("Wrote concatenated sourcelist to: {0}".format(
-                                                catalog_output_path))
-
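-    # Schematic layout of the combined catalog written above (source names
-    # are hypothetical; the format line is whatever pybdsm produced, and is
-    # identical for all runs):
-    #
-    #   <format line taken from the per-iteration catalogs>
-    #   <blank line>
-    #   src0, GAUSSIAN, ...   (sources from iteration 0)
-    #   src1, GAUSSIAN, ...   (sources from iterations 1..n)
-    #   <blank line>
-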
-
-    def _create_source_db(self, source_list, sourcedb_target_path,
-                          working_directory, create_sourcdb_exec, append = False):
-        """
-        Convert a sourcelist to a sourcedb:
-        
-        1. Remove existing sourcedb if not appending (sourcedb fails else)
-        2. Call the sourcedb executable with the supplied parameters
-         
-        """
-        # *********************************************************************
-        # 1. remove existing sourcedb if not appending
-        if (not append) and os.path.isdir(sourcedb_target_path):
-            shutil.rmtree(sourcedb_target_path)
-            self.logger.debug("Removed existing sky model: {0}".format(
-                                            sourcedb_target_path))
-
-        # *********************************************************************
-        # 2. The command and parameters to be run
-        cmd = [create_sourcdb_exec, "in={0}".format(source_list),
-               "out={0}".format(sourcedb_target_path),
-               "format=<", # format according to Ger van Diepen
-               "append=true"] # Always set append: no effect on non exist db
-        self.logger.info(' '.join(cmd))
-
-        try:
-            with CatchLog4CPlus(working_directory,
-                 self.logger.name + "." + os.path.basename("makesourcedb"),
-                 os.path.basename(create_sourcdb_exec)
-            ) as logger:
-                catch_segfaults(cmd, working_directory, self.environment,
-                                            logger, cleanup = None)
-
-        except Exception as exception:
-            self.logger.error("Execution of external failed:")
-            self.logger.error(" ".join(cmd))
-            self.logger.error("exception details:")
-            self.logger.error(str(exception))
-            return 1
-
-        return 0
-
-if __name__ == "__main__":
-    _JOBID, _JOBHOST, _JOBPORT = sys.argv[1:4]
-    sys.exit(imager_source_finding(_JOBID, _JOBHOST,
-                                   _JOBPORT).run_with_stored_arguments())
-
diff --git a/CEP/Pipeline/recipes/sip/nodes/selfcal_awimager.py b/CEP/Pipeline/recipes/sip/nodes/selfcal_awimager.py
deleted file mode 100644
index 9b7136740cd..00000000000
--- a/CEP/Pipeline/recipes/sip/nodes/selfcal_awimager.py
+++ /dev/null
@@ -1,820 +0,0 @@
-# LOFAR AUTOMATIC IMAGING PIPELINE
-# awimager
-# The awimager recipe creates an image of the field of view, based on
-# nine concatenated measurement sets, each spanning 10 subbands.
-# The recipe contains two parts: the call to awimager itself,
-# and secondly some functionality that calculates settings (for awimager)
-# based on the information present in the measurement set.
-# The calculated parameters are:
-#        1: The cellsize
-#        2: The npixels in a each of the two dimension of the image
-#        3. Which columns to use to determine the maximum baseline
-#        4. The number of projection planes
-# Wouter Klijn 2012
-# klijn@astron.nl
-# Nicolas Vilchez, 2014
-# vilchez@astron.nl
-# -----------------------------------------------------------------------------
-
-import sys
-import shutil
-import os.path
-import math
-import pyfits
-
-from lofarpipe.support.lofarnode import LOFARnodeTCP
-from lofarpipe.support.pipelinelogging import CatchLog4CPlus
-from lofarpipe.support.pipelinelogging import log_time
-from lofarpipe.support.utilities import patch_parset
-from lofarpipe.support.utilities import get_parset
-from lofarpipe.support.utilities import catch_segfaults
-from lofarpipe.support.lofarexceptions import PipelineException
-import pyrap.tables as pt  # @UnresolvedImport
-from subprocess import CalledProcessError
-from lofarpipe.support.utilities import create_directory
-import pyrap.images as pim  # @UnresolvedImport
-from lofarpipe.support.parset import Parset
-import lofar.parmdb  # @UnresolvedImport
-import numpy as np
-
-
-class selfcal_awimager(LOFARnodeTCP):
-    def run(self, executable, environment, parset, working_directory,
-            output_image, concatenated_measurement_set, sourcedb_path,
-             mask_patch_size, autogenerate_parameters, specify_fov, fov, 
-             major_cycle, nr_cycles, perform_self_cal):
-        """
-        :param executable: Path to awimager executable
-        :param environment: environment for catch_segfaults (executable runner)
-        :param parset: parameters for the awimager,
-        :param working_directory: directory in which to place temporary files
-        :param output_image: location and filename to store the output images;
-          the multiple images are appended with type extensions
-        :param concatenated_measurement_set: Input measurement set
-        :param sourcedb_path: Path to the sourcedb used to create the image
-          mask
-        :param mask_patch_size: Scaling of the patch around the source in the 
-          mask
-        :param autogenerate_parameters: Turns on the autogeneration of:
-           cellsize, npix, wprojplanes, wmax, fov
-        :param fov: if autogenerate_parameters is false, calculate the
-           image parameters (cellsize, npix, wprojplanes, wmax) relative to
-           this fov
-        :param major_cycle: number of the self-calibration cycle, used to
-            determine the imaging parameters: cellsize, npix, wprojplanes,
-            wmax, fov
-        :param nr_cycles: The requested number of self-cal cycles
-        :param perform_self_cal: Bool used to select the selfcal functionality
-            or the old semi-automatic functionality
-        :rtype: self.outputs["image"] The path to the output image
-        """
-        self.logger.info("Start selfcal_awimager node run:")
-        log4_cplus_name = "selfcal_awimager"
-        self.environment.update(environment)
-
-        with log_time(self.logger):
-            # Read the parameters as specified in the parset
-            parset_object = get_parset(parset)
-
-            # *************************************************************
-            # 1. Calculate awimager parameters that depend on measurement set
-            # and the parset
-            if perform_self_cal:
-                # Calculate awimager parameters that depend on measurement set
-                # and the parset              
-                self.logger.info(
-                   "Calculating self-calibration parameters")
-                cell_size, npix, w_max, w_proj_planes, \
-                   UVmin, UVmax, robust, threshold =\
-                        self._get_selfcal_parameters(
-                            concatenated_measurement_set,
-                            parset, major_cycle, nr_cycles) 
-
-                self._save_selfcal_info(concatenated_measurement_set, 
-                                        major_cycle, npix, UVmin, UVmax)
-
-            else:
-                self.logger.info(
-                   "Calculating parameters (NOT self-calibration)")
-                cell_size, npix, w_max, w_proj_planes = \
-                    self._get_imaging_parameters(
-                            concatenated_measurement_set,
-                            parset,
-                            autogenerate_parameters,
-                            specify_fov,
-                            fov)
-
-            self.logger.info("Using autogenerated parameters; ")
-            self.logger.info(
-                 "Calculated parameters: cell_size: {0}, npix: {1}".format(
-                     cell_size, npix))
-
-            self.logger.info("w_max: {0}, w_proj_planes: {1} ".format(
-                        w_max, w_proj_planes))
-
-            # ****************************************************************
-            # 2. Get the target image location from the mapfile for the parset.
-            # Create the target directory if it does not exist
-            image_path_head = os.path.dirname(output_image)
-            create_directory(image_path_head)
-            self.logger.debug("Created directory to place awimager output"
-                              " files: {0}".format(image_path_head))
-
-            # ****************************************************************
-            # 3. Create the mask
-            #mask_file_path = self._create_mask(npix, cell_size, output_image,
-            #             concatenated_measurement_set, executable,
-            #             working_directory, log4_cplus_name, sourcedb_path,
-            #              mask_patch_size, image_path_head)
-            # *****************************************************************
-            # 4. Update the parset with calculated parameters, and output image
-            patch_dictionary = {'uselogger': 'True',  # enables log4cplus logging
-                               'ms': str(concatenated_measurement_set),
-                               'cellsize': str(cell_size),
-                               'npix': str(npix),
-                               'wmax': str(w_max),
-                               'wprojplanes': str(w_proj_planes),
-                               'image': str(output_image),
-                               'maxsupport': str(npix)
-                               # 'mask':str(mask_file_path),  #TODO REINTRODUCE
-                               # MASK, excluded to speed up in this debug stage                               
-                               }
-
-            # Add some additional keys for the self-calibration method
-            if perform_self_cal:
-                self_cal_patch_dict = {
-                               'weight': 'briggs', 
-                               'padding': str(1.18),
-                               'niter' : str(1000000), 
-                               'operation' : 'mfclark',
-                               'timewindow' : '300',
-                               'fits' : '',
-                               'threshold' : str(threshold),
-                               'robust' : str(robust),
-                               'UVmin' : str(UVmin), 
-                               'UVmax' : str(UVmax),
-                               'maxbaseline' : str(10000000),
-                               'select' : str("sumsqr(UVW[:2])<1e12"), 
-                               }
-                patch_dictionary.update(self_cal_patch_dict)
-
-            # save the parset at the target dir for the image
-            calculated_parset_path = os.path.join(image_path_head,
-                                                       "parset.par")
-
-            try:
-                temp_parset_filename = patch_parset(parset, patch_dictionary)
-                # Copy tmp file to the final location
-                shutil.copyfile(temp_parset_filename, calculated_parset_path)
-                self.logger.debug("Wrote parset for awimager run: {0}".format(
-                                                    calculated_parset_path))
-            finally:
-                # remove temp file
-                os.remove(temp_parset_filename)
-
-            # *****************************************************************
-            # 5. Run the awimager with the parameterset
-
-            cmd = [executable, calculated_parset_path]
-            self.logger.debug("Parset used for awimager run:")
-            self.logger.debug(cmd)
-            try:
-                with CatchLog4CPlus(working_directory,
-                        self.logger.name + "." +
-                        os.path.basename(log4_cplus_name),
-                        os.path.basename(executable)
-                ) as logger:
-                    catch_segfaults(cmd, working_directory, self.environment,
-                                            logger, usageStats=self.resourceMonitor)
-
-            # Thrown by catch_segfault
-            except CalledProcessError as exception:
-                self.logger.error(str(exception))
-                return 1
-
-            except Exception as exception:
-                self.logger.error(str(exception))
-                return 1
-
-        # *********************************************************************
-        # 6. Return output
-        # Append the static .restored suffix: this might change, but probably
-        # not. The actual output image always has this extension, the default
-        # of awimager
-        self.outputs["image"] = output_image + ".restored"
-        return 0
-
-    def _get_imaging_parameters(self, measurement_set, parset,
-                autogenerate_parameters, specify_fov, fov):
-        """
-        (1) calculate and format some parameters that are determined runtime.
-        Based  on values in the measurementset and input parameter (set):
-        
-        a. <string> The cellsize
-        b. <int> The npixels in a each of the two dimension of the image
-        c. <string> The largest baseline in the ms smaller then the maxbaseline
-        d. <string> The number of projection planes
-        
-        The calculation of these parameters is done in three steps:
-        
-        1. Calculate intermediate results based on the ms. 
-        2. The calculation of the actual target values using intermediate
-           result       
-        """
-        # *********************************************************************
-        # 1. Get partial solutions from the parameter set
-        # Get the parset and a number of raw parameters from this parset
-        parset_object = get_parset(parset)
-        baseline_limit = parset_object.getInt('maxbaseline')
-
-        # Get the longest baseline
-        max_baseline = pt.taql(
-                        'CALC sqrt(max([select sumsqr(UVW[:2]) from ' + \
-            '{0} where sumsqr(UVW[:2]) <{1} giving as memory]))'.format(\
-            measurement_set, baseline_limit *
-            baseline_limit))[0]  # ask Ger van Diepen for details if necessary
-        # Calculate the wave_length
-        table_ms = pt.table(measurement_set)
-        table_spectral_window = pt.table(
-                                        table_ms.getkeyword("SPECTRAL_WINDOW"))
-        freq = table_spectral_window.getcell("REF_FREQUENCY", 0)
-
-        table_spectral_window.close()
-        wave_length = pt.taql('CALC C()') / freq
-        wave_length = wave_length[0]
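-        # 'CALC C()' yields the speed of light in m/s, so wave_length is the
-        # observing wavelength in metres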
-
-        # Calculate the cell_size from the ms
-        arc_sec_in_degree = 3600
-        arc_sec_in_rad = (180.0 / math.pi) * arc_sec_in_degree
-        cell_size = (1.0 / 3) * (wave_length / float(max_baseline))\
-             * arc_sec_in_rad
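-        # For example, at 150 MHz (wave_length ~ 2 m) with a 10 km longest
-        # baseline, wave_length / max_baseline ~ 2e-4 rad ~ 41 arcsec,
-        # giving a cell size of roughly 14 arcsec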
-
-        # Calculate the number of pixels in x and y dim
-        #    fov and diameter depending on the antenna name
-        fov_from_ms, station_diameter = self._get_fov_and_station_diameter(
-                                                            measurement_set)
-
-        # use the fov to calculate a semi 'user' specified npix and cellsize
-        # The npix thus depends on the ms cellsize and the fov
-        # Do not use the supplied fov if autogenerating
-        if not autogenerate_parameters and specify_fov:
-            if fov == 0.0:
-                raise PipelineException("fov set to 0.0: invalid value.")
-
-        # else use full resolution (calculate the fov)
-        else:
-            self.logger.info("Using fov calculated on measurement data: " +
-                             str(fov_from_ms))
-            fov = fov_from_ms
-
-        # ********************************************************************
-        # 2. Calculate the ms based output variables
-        # 'optimal' npix based on measurement set calculations or user specified
-        npix = (arc_sec_in_degree * fov) / cell_size
-
-        # Get the closest power of two larger than the calculated number of pixels
-        npix = self._nearest_ceiled_power2(npix)
-
-        # Get the max w for baselines below the baseline limit
-        w_max = pt.taql('CALC max([select UVW[2] from ' + \
-            '{0} where sumsqr(UVW[:2]) <{1} giving as memory])'.format(
-            measurement_set, baseline_limit * baseline_limit))[0]
-
-        # Calculate number of projection planes
-        w_proj_planes = min(257, math.floor((max_baseline * wave_length) /
-                                             (station_diameter ** 2)))
-        w_proj_planes = int(round(w_proj_planes))
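-        # For example, a 10 km maximum baseline, a 2 m wavelength and a
-        # 30.8 m station diameter give floor(20000 / 948.64) = 21 planes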
-
-        # Maximum number of projection planes is capped at 1024: contact
-        # George Heald or Ger van Diepen if this exception occurs
-        maxsupport = max(1024, npix)
-        if w_proj_planes > maxsupport:
-            raise Exception("The number of projection planes for the current"
-                            " measurement set is too large.")
-
-        # *********************************************************************
-        # 3. if the npix from the parset is different to the ms calculations,
-        # calculate a sizeconverter value  (to be applied to the cellsize)
-        if npix < 256:
-            self.logger.warn("Using an image size smaller than 256x256:"
-            " this leads to problematic imaging in some instances!!")
-
-        # If we are not autocalculating based on ms or fov, use the npix
-        # and cell_size specified in the parset
-        # keep the wmax and w_proj_planes
-        if (not autogenerate_parameters and not specify_fov):
-            npix = parset_object.getString('npix')
-            cell_size_formatted = parset_object.getString('cellsize')
-        else:
-            cell_size_formatted = str(
-                        int(round(cell_size))) + 'arcsec'
-
-        self.logger.info("Using the following awimager parameters:"
-            " cell_size: {0}, npix: {1},".format(
-                        cell_size_formatted, npix) +
-             " w_max: {0}, w_proj_planes: {1}".format(w_max, w_proj_planes))
-
-        return cell_size_formatted, str(npix), str(w_max), str(w_proj_planes)
-
-
-    # Awimager parameters for the selfcal process (depend on the major cycle)
-    # nicolas: This function needs a lot more documentation:
-    # This is the function that does the magic.
-    # For each step it must be crystal clear what is happening.
-    # I will need to support this function.
-    # This function is full of magic numbers.
-    # Instead of:
-    # variable_a = 3.14 * 3600 * 5
-    # use:
-    # pi = math.pi
-    # seconds_in_hour = 3600
-    # factorx = 5    # Factor controlling x, value based on manual optimization
-    def _get_selfcal_parameters(self, measurement_set, parset, major_cycle,
-                                nr_cycles): 
-      """
-      0. modify the nof cycle to have a final step at the same resolution 
-      as the previous last cycle
-      1. Determine target coordinates especially declinaison, because 
-      for low dec (<35 deg) UVmin = 0.1 to excluse very short baseline
-      2. Determine the frequency and the wavelenght
-      3. Determine the longuest baseline and the best resolution avaible
-      4. Estimate all imaging parameters
-      5. Calculate number of projection planes
-      6. Pixelsize must be a string number : number +arcsec
-
-      # Nicolas Vilchez, 2014
-      # vilchez@astron.nl
-      """		
-      
-      
-      # ********************************************************************
-      # 0. modify the number of cycles to have a final step at the same
-      # resolution as the previous last cycle
-
-      if major_cycle < nr_cycles - 1:
-          nr_cycles = nr_cycles - 1
-
-      scaling_factor = float(major_cycle) / float(nr_cycles - 1)
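-      # scaling_factor runs from 0.0 in the first major cycle to 1.0 in the
-      # last one, moving the imaging parameters from the coarse start values
-      # to the best achievable resolution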
-    
-      # ********************************************************************
-      # 1. Determine the target coordinates for UVmin
-      tabtarget = pt.table(measurement_set)
-      tabfield = pt.table(tabtarget.getkeyword('FIELD'))
-      coords = tabfield.getcell('REFERENCE_DIR', 0)
-      target = coords[0] * 180.0 / math.pi  # convert from radians to degrees
-
-      UVmin = 0
-      if target[1] <= 35:  # low declination: exclude very short baselines
-          UVmin = 0.1
-
-      ra_target = target[0] + 360.0  # why the offset is needed is unclear
-      dec_target = target[1]
-
-      # ********************************************************************
-      # 2. Determine the frequency and the wavelength
-      tabfreq = pt.table(measurement_set)
-      table_spectral_window = pt.table(tabfreq.getkeyword("SPECTRAL_WINDOW"))
-      frequency = table_spectral_window.getcell('REF_FREQUENCY', 0)
-
-      wavelenght = 3.0E8 / frequency  # wavelength in metres (c / frequency)
-
-      # ********************************************************************
-      # 3. Determine the longest baseline and the best resolution available
-
-      tabbaseline = pt.table(measurement_set, readonly=False, ack=True)
-      posbaseline = tabbaseline.getcol('UVW')
-      maxBaseline = max(posbaseline[:, 0] ** 2 +
-                        posbaseline[:, 1] ** 2) ** 0.5
-
-      bestBeamresol = round((wavelenght / maxBaseline) *
-                            (180.0 / math.pi) * 3600.0, 0)
-
-      # Beam resolution is limited to 10 arcsec to avoid too large images
-      if bestBeamresol < 10.0:
-          bestBeamresol = 10.0
-
-      # ********************************************************************        
-      # 4. Estimate all imaging parameters
- 
-      # estimate the fov
-      # fov = 5 degrees, except for high HBA observations => 1.5 degrees
-      if frequency > 1.9E8:
-          fov = 1.5
-      else:
-          fov = 5.0
-
-      # we need 4 pixels/beam to have enough sampling
-      pixPerBeam = 4.0
-
-      # best resolution pixel size (i.e. the final pixel size for selfcal)
-      bestPixelResol = round(bestBeamresol / pixPerBeam, 2)
-
-      # factor to estimate the starting resolution (9 times coarser here)
-      badResolFactor = 9
-
-      pixsize = round((badResolFactor * bestPixelResol) -
-                      (badResolFactor * bestPixelResol - bestPixelResol) *
-                      scaling_factor, 3)
-
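-      # pixsize interpolates linearly with scaling_factor: it starts at
-      # badResolFactor * bestPixelResol in the first cycle and ends at
-      # bestPixelResol in the final one. E.g. with bestPixelResol = 2.5
-      # the pixel size shrinks from 22.5 arcsec to 2.5 arcsec
-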
-      # the number of pixels must be a multiple of 2 !!
-      nbpixel = int(fov * 3600.0 / pixsize)
-      if nbpixel % 2 == 1:
-          nbpixel = nbpixel + 1
-      
-      robust = 0  # round(1.0 - (3.0 * scaling_factor), 2)
-
-      UVmax = round((wavelenght) /
-                    (pixPerBeam * pixsize / 3600.0 * math.pi / 180.0) /
-                    (1E3 * wavelenght), 3)
-
-      wmax = round(UVmax * (wavelenght) * 1E3, 3)
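-      # UVmax is expressed in kilolambda: the wavelength terms cancel out,
-      # leaving 1 / (pixPerBeam * pixsize in radians) divided by 1E3;
-      # wmax converts it back to a physical baseline length in metres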
-
-      # ********************************************************************
-      # 5. Calculate the number of projection planes
-      # Need to compute the station diameter (the fov is fixed to 5 degrees)
-      # using Wouter's function, to compute the w_proj_planes;
-      # the fov and diameter depend on the antenna name
-      fov_from_ms, station_diameter = self._get_fov_and_station_diameter(
-                                                              measurement_set)
-
-      w_proj_planes = min(257, math.floor((maxBaseline * wavelenght) / 
-                                          (station_diameter ** 2)))
-      w_proj_planes = int(round(w_proj_planes))
-
-      # Maximum number of projection planes is capped at 1024: contact
-      # George Heald or Ger van Diepen if this exception occurs
-      maxsupport = max(1024, nbpixel)
-      if w_proj_planes > maxsupport:
-          raise Exception("The number of projection planes for the current " +
-                          "measurement set is too large.")
-
-      # Warnings on the image size
-      if nbpixel < 256:
-          self.logger.warn("Using an image size smaller than 256x256: This " +
-                           "leads to problematic imaging in some instances!!")
- 
- 
-      # ********************************************************************
-      # 6. The pixel size must be a string: number + 'arcsec';
-      #    the conversion happens at this step
-      pixsize = str(pixsize) + 'arcsec'
-
-      # ********************************************************************        
-      # 7. Threshold determination from the previous cycle 
-      if major_cycle == 0:
-          threshold = '0.075Jy'	
-      else:
-        fits_image_path_list = measurement_set.split('concat.ms')
-        fits_image_path = fits_image_path_list[0] + \
-                'awimage_cycle_%s/image.fits' % (major_cycle - 1)
-
-
-        # open the FITS file
-        fitsImage = pyfits.open(fits_image_path)
-        scidata = fitsImage[0].data
-
-        dataRange = list(range(fitsImage[0].shape[2]))
-        sortedData = list(range(fitsImage[0].shape[2] ** 2))
-
-        # FIXME We have the sneaking suspicion that this takes very long
-        # due to bad coding style... (double for loop with compute in inner loop)
-        for i in dataRange:
-            for j in dataRange:
-                sortedData[i * fitsImage[0].shape[2] + j]	=  scidata[0,0,i,j]
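-        # a vectorized alternative might be (untested sketch, assuming
-        # scidata is the usual 4-D pyfits data array):
-        #   sortedData = np.sort(scidata[0, 0, :, :].ravel()).tolist()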
-
-        sortedData = sorted(sortedData)
-
-        # Percent of the faintest data used to determine the 5 sigma value: 5%
-        dataPercent = int(fitsImage[0].shape[2] * 0.05)
-
-        fiveSigmaData = sum(sortedData[0:dataPercent]) / dataPercent
-        threshold = (abs(fiveSigmaData) / 5.0) * (2.335 / 2.0) * 15
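-        # the mean of the faintest pixels serves as a proxy for the 5 sigma
-        # noise level; dividing by 5 gives roughly 1 sigma, and the
-        # remaining factors appear to be empirical tuning (magic numbers)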
-
-      return pixsize, str(nbpixel), str(wmax), str(w_proj_planes), \
-             str(UVmin), str(UVmax), str(robust), str(threshold)
-
-
-    def _get_fov_and_station_diameter(self, measurement_set):
-        """
-        _field_of_view calculates the fov, which is dependend on the
-        station type, location and mode:
-        For details see:
-        (1) http://www.astron.nl/radio-observatory/astronomers/lofar-imaging-capabilities-sensitivity/lofar-imaging-capabilities/lofar
-        
-        """
-        # Open the ms
-        table_ms = pt.table(measurement_set)
-
-        # Get antenna name and observation mode
-        antenna = pt.table(table_ms.getkeyword("ANTENNA"))
-        antenna_name = antenna.getcell('NAME', 0)
-        antenna.close()
-
-        observation = pt.table(table_ms.getkeyword("OBSERVATION"))
-        antenna_set = observation.getcell('LOFAR_ANTENNA_SET', 0)
-        observation.close()
-
-        # static parameters for the station diameters ref (1)
-        hba_core_diameter = 30.8
-        hba_remote_diameter = 41.1
-        lba_inner = 32.3
-        lba_outer = 81.3
-
-        # use measurement set information to ascertain the antenna diameter
-        station_diameter = None
-        if antenna_name.count('HBA'):
-            if antenna_name.count('CS'):
-                station_diameter = hba_core_diameter
-            elif antenna_name.count('RS'):
-                station_diameter = hba_remote_diameter
-        elif antenna_name.count('LBA'):
-            if antenna_set.count('INNER'):
-                station_diameter = lba_inner
-            elif antenna_set.count('OUTER'):
-                station_diameter = lba_outer
-
-        # raise exception if the antenna is not of a supported type
-        if station_diameter is None:
-            self.logger.error(
-                    'Unknown antenna type for antenna: {0} , {1}'.format(\
-                              antenna_name, antenna_set))
-            raise PipelineException(
-                    "Unknown antenna type encountered in Measurement set")
-
-        # Get the wavelength
-        spectral_window_table = pt.table(table_ms.getkeyword(
-                                                            "SPECTRAL_WINDOW"))
-        freq = float(spectral_window_table.getcell("REF_FREQUENCY", 0))
-        wave_length = pt.taql('CALC C()') / freq
-        spectral_window_table.close()
-
-        # Now calculate the FOV, see ref (1)
-        # alpha_one is a magic parameter: the value 1.3 is representative for
-        # a WSRT dish, where it depends on the dish illumination
-        alpha_one = 1.3
-
-        # the fwhm is in radians, so transform to degrees for output
-        fwhm = alpha_one * (wave_length / station_diameter) * (180 / math.pi)
-        fov = fwhm / 2.0
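-        # For example, an LBA_OUTER station (81.3 m) observing at 60 MHz
-        # (wave_length = 5 m) gives fwhm = 1.3 * (5 / 81.3) * 57.3 ~ 4.6
-        # degrees, i.e. a fov of about 2.3 degrees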
-        table_ms.close()
-
-        return fov, station_diameter
-
-    def _create_mask(self, npix, cell_size, output_image,
-                     concatenated_measurement_set, executable,
-                     working_directory, log4_cplus_name, sourcedb_path,
-                     mask_patch_size, image_path_directory):
-        """
-        (3) create a casa image containing an mask blocking out the
-        sources in the provided sourcedb.
-        
-        It expects:
-        
-        a. the ms for which the mask will be created, it is used to de
-           termine some image details: (eg. pointing)
-        b. parameters for running within the catchsegfault framework
-        c. and the size of the mask_pach.
-           To create a mask, first a empty measurement set is created using
-           awimager: ready to be filled with mask data 
-           
-        This function is a wrapper around some functionality written by:
-        fdg@mpa-garching.mpg.de
-        
-        steps: 
-        1. Create a parset with image paramters used by:
-        2. awimager run. Creating an empty casa image.
-        3. Fill the casa image with mask data
-           
-        """
-        # ********************************************************************
-        # 1. Create the parset used to make a mask
-        mask_file_path = output_image + ".mask"
-
-        mask_patch_dictionary = {"npix": str(npix),
-                                 "cellsize": str(cell_size),
-                                 "image": str(mask_file_path),
-                                 "ms": str(concatenated_measurement_set),
-                                 "operation": "empty",
-                                 "stokes": "'I'"
-                                 }
-        mask_parset = Parset.fromDict(mask_patch_dictionary)
-        mask_parset_path = os.path.join(image_path_directory, "mask.par")
-        mask_parset.writeFile(mask_parset_path)
-        self.logger.debug(
-                "Wrote parset for awimager mask creation: {0}".format(
-                                                      mask_parset_path))
-
-        # *********************************************************************
-        # 2. Create an empty mask using awimager
-        cmd = [executable, mask_parset_path]
-        self.logger.info(" ".join(cmd))
-        try:
-            with CatchLog4CPlus(working_directory,
-                    self.logger.name + "." + os.path.basename(log4_cplus_name),
-                    os.path.basename(executable)
-            ) as logger:
-                catch_segfaults(cmd, working_directory, self.environment,
-                                        logger)
-        # Thrown by catch_segfault
-        except CalledProcessError as exception:
-            self.logger.error(str(exception))
-            return 1
-        except Exception as exception:
-            self.logger.error(str(exception))
-            return 1
-
-        # ********************************************************************
-        # 3. create the actual mask
-        self.logger.debug("Started mask creation using mask_patch_size:"
-                          " {0}".format(mask_patch_size))
-
-        self._msss_mask(mask_file_path, sourcedb_path, mask_patch_size)
-        self.logger.debug("Fished mask creation")
-        return mask_file_path
-
-    def _msss_mask(self, mask_file_path, sourcedb_path, mask_patch_size = 1.0):
-        """
-        Fill casa image with a mask based on skymodel(sourcedb)
-        Bugs: fdg@mpa-garching.mpg.de
-        
-        pipeline implementation klijn@astron.nl
-        version 0.32
-        
-        Edited by JDS, 2012-03-16:
-         - Properly convert maj/minor axes to half length
-         - Handle empty fields in sky model by setting them to 0
-         - Fix off-by-one error at mask boundary
-        
-        FIXED BUG
-         - if a source is outside the mask, the script ignores it
-         - if a source is on the border, the script draws only the inner part
-         - can handle skymodels with different headers
-        
-        KNOWN BUG
-         - not works with single line skymodels, workaround: add a fake
-           source outside the field
-         - mask patched display large amounts of aliasing. A possible 
-           sollution would
-           be normalizing to pixel centre. ( int(normalize_x * npix) /
-           npix + (0.5 /npix)) 
-           ideally the patch would increment in pixel radiuses
-             
-        Version 0.3  (Wouter Klijn, klijn@astron.nl)
-         - Usage of sourcedb instead of txt document as 'source' of sources
-           This allows input from different source sources
-        Version 0.31  (Wouter Klijn, klijn@astron.nl)
-         - Adaptable patch size (patch size needs specification)
-         - Patch size and geometry is broken: needs some astronomer magic to
-           fix it, problem with afine transformation prol.
-        Version 0.32 (Wouter Klijn, klijn@astron.nl)
-         - Renaming of variable names to python convention
-        """
-        # increment in maj/minor axes [arcsec]
-        pad = 500.
-
-        # open mask
-        mask = pim.image(mask_file_path, overwrite = True)
-        mask_data = mask.getdata()
-        xlen, ylen = mask.shape()[2:]
-        freq, stokes, null, null = mask.toworld([0, 0, 0, 0])
-
-        # Open the sourcedb:
-        table = pt.table(sourcedb_path + "::SOURCES")
-        pdb = lofar.parmdb.parmdb(sourcedb_path)
-
-        # Get the data of interest
-        source_list = table.getcol("SOURCENAME")
-        source_type_list = table.getcol("SOURCETYPE")
-        # All data is in the format valuetype:sourcename
-        all_values_dict = pdb.getDefValues()
-
-        # Loop the sources
-        for source, source_type in zip(source_list, source_type_list):
-            if source_type == 1:
-                type_string = "Gaussian"
-            else:
-                type_string = "Point"
-            self.logger.info("processing: {0} ({1})".format(source,
-                                                             type_string))
-
-            # Get the right_ascension and declination (already in radians)
-            right_ascension = all_values_dict["Ra:" + source][0, 0]
-            declination = all_values_dict["Dec:" + source][0, 0]
-            if source_type == 1:
-                # Get the raw values from the db
-                maj_raw = all_values_dict["MajorAxis:" + source][0, 0]
-                min_raw = all_values_dict["MinorAxis:" + source][0, 0]
-                pa_raw = all_values_dict["Orientation:" + source][0, 0]
-                # convert to radians (conversion copied from JDS)
-                # major radius (+pad) in rad
-                maj = (((maj_raw + pad)) / 3600.) * np.pi / 180.
-                # minor radius (+pad) in rad
-                minor = (((min_raw + pad)) / 3600.) * np.pi / 180.
-                pix_asc = pa_raw * np.pi / 180.
-                # WENSS always writes 'GAUSSIAN', even for point sources
-                # -> set to the WENSS beam + pad
-                if maj == 0 or minor == 0:
-                    maj = ((54. + pad) / 3600.) * np.pi / 180.
-                    minor = ((54. + pad) / 3600.) * np.pi / 180.
-            # set to wenss beam+pad
-            elif source_type == 0:
-                maj = (((54. + pad) / 2.) / 3600.) * np.pi / 180.
-                minor = (((54. + pad) / 2.) / 3600.) * np.pi / 180.
-                pix_asc = 0.
-            else:
-                self.logger.info(
-                    "WARNING: unknown source_type ({0}),"
-                    " ignoring".format(source_type))
-                continue
-
-            # define a small square around the source to look for it
-            null, null, border_y1, border_x1 = mask.topixel(
-                    [freq, stokes, declination - maj,
-                      right_ascension - maj / np.cos(declination - maj)])
-            null, null, border_y2, border_x2 = mask.topixel(
-                    [freq, stokes, declination + maj,
-                     right_ascension + maj / np.cos(declination + maj)])
-            xmin = np.int(np.floor(np.min([border_x1, border_x2])))
-            xmax = np.int(np.ceil(np.max([border_x1, border_x2])))
-            ymin = np.int(np.floor(np.min([border_y1, border_y2])))
-            ymax = np.int(np.ceil(np.max([border_y1, border_y2])))
-
-            if xmin > xlen or ymin > ylen or xmax < 0 or ymax < 0:
-                self.logger.info(
-                    "WARNING: source {0} falls outside the mask,"
-                    " ignoring".format(source))
-                continue
-
-            if xmax > xlen or ymax > ylen or xmin < 0 or ymin < 0:
-                self.logger.info(
-                    "WARNING: source {0} falls across map edge".format(source))
-
-            for pixel_x in range(xmin, xmax):
-                for pixel_y in range(ymin, ymax):
-                    # skip pixels outside the mask field
-                    if pixel_x >= xlen or pixel_y >= ylen or\
-                       pixel_x < 0 or pixel_y < 0:
-                        continue
-                    # get pixel right_ascension and declination in rad
-                    null, null, pix_dec, pix_ra = mask.toworld(
-                                                    [0, 0, pixel_y, pixel_x])
-                    # Translate and rotate coords.
-                    translated_pixel_x = (pix_ra - right_ascension) * np.sin(
-                        pix_asc) + (pix_dec - declination) * np.cos(pix_asc)
-                    # to align with ellipse
-                    translate_pixel_y = -(pix_ra - right_ascension) * np.cos(
-                        pix_asc) + (pix_dec - declination) * np.sin(pix_asc)
-                    if (((translated_pixel_x ** 2) / (maj ** 2)) +
-                        ((translate_pixel_y ** 2) / (minor ** 2))) < \
-                                                         mask_patch_size:
-                        mask_data[0, 0, pixel_y, pixel_x] = 1
-        null = null
-        mask.putdata(mask_data)
-        table.close()
-    
-    # some helper functions
-    def _nearest_ceiled_power2(self, value):
-        """
-        Return int value of  the nearest Ceiled power of 2 for the
-        suplied argument
-        
-        """
-        return int(pow(2, math.ceil(math.log(value, 2))))
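-        # e.g. _nearest_ceiled_power2(1500) == 2048 and
-        # _nearest_ceiled_power2(1024) == 1024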
-
-
-    def _save_selfcal_info(self, concatenated_measurement_set,
-                           major_cycle, npix, UVmin, UVmax):
-        """ 
-        The selfcal team requested meta information to be added to 
-        measurement set that allows the reproduction of intermediate
-        steps.
-        """ 
-        self.logger.info("Save-ing selfcal parameters to file:")
-        meta_file = os.path.join(
-              concatenated_measurement_set + "_selfcal_information",
-                                  "uvcut_and_npix.txt")
-        self.logger.info(meta_file)
-
-        # if the output file does not exist yet, write the header first
-        if not os.path.exists(meta_file):
-            meta_file_pt = open(meta_file, 'w')
-            meta_file_pt.write("#cycle_nr npix uvmin(klambda) uvmax(klambda)\n")
-            meta_file_pt.close()
-
-        meta_file_pt = open(meta_file, 'a')
-
-        # Create the actual string with the info
-        meta_info_str = " ".join([str(major_cycle),
-                                  str(npix),
-                                  str(UVmin),
-                                  str(UVmax)])
-        meta_file_pt.write(meta_info_str + "\n")
-        meta_file_pt.close()
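-        # the resulting file holds one line per major cycle, for example:
-        #   #cycle_nr npix uvmin(klambda) uvmax(klambda)
-        #   0 4096 0.1 2.5
-        #   1 4096 0.1 3.2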
-
-
-if __name__ == "__main__":
-    _JOBID, _JOBHOST, _JOBPORT = sys.argv[1:4]
-    sys.exit(selfcal_awimager(
-                    _JOBID, _JOBHOST, _JOBPORT).run_with_stored_arguments())
-
diff --git a/CEP/Pipeline/recipes/sip/nodes/selfcal_bbs.py b/CEP/Pipeline/recipes/sip/nodes/selfcal_bbs.py
deleted file mode 100644
index 9dd954ebb5e..00000000000
--- a/CEP/Pipeline/recipes/sip/nodes/selfcal_bbs.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# LOFAR AUTOMATIC IMAGING PIPELINE
-# selfcal_bbs
-# Wouter Klijn 2012
-# klijn@astron.nl
-# Nicolas Vilchez, 2014
-# vilchez@astron.nl
-# -----------------------------------------------------------------------------
-
-import sys
-import os
-
-import pyrap.tables as pt
-
-from lofarpipe.support.lofarnode import LOFARnodeTCP
-from lofarpipe.support.group_data import load_data_map
-from lofarpipe.support.subprocessgroup import SubProcessGroup
-from lofarpipe.support.data_map import MultiDataMap
-
-class selfcal_bbs(LOFARnodeTCP):
-    """
-    imager_bbs node performs a bbs run for each of measuremt sets supplied in 
-    the  mapfile at ms_list_path. Calibration is done on the sources in 
-    the sourcedb in the mapfile sky_list_path. Solutions are stored in the 
-    parmdb_list_path
-    
-    1. Load the mapfiles
-    2. For each measurement set to calibrate start a subprocess
-    3. Check if the processes finished correctly
-    4. (added by Nicolas vilchez) concat in time the final MS
-    5. (added by N.Vilchez) copy time slices directory to a new one       
-    """
-    
-    def run(self, bbs_executable, parset, ms_list_path, parmdb_list_path,
-             sky_list_path, concat_ms_path, major_cycle):
-        """
-        selfcal_bbs functionality. Called by framework performing all the work
-        """
-        self.logger.debug("Starting selfcal_bbs Node")
-        # *********************************************************************
-        # 1. Load mapfiles
-        # read in the mapfiles to data maps: The master recipe added the single
-        # path to a mapfile, which allows usage of the default data methods
-        # (load_data_map)
-        # TODO: Datamap
-        ms_map = MultiDataMap.load(ms_list_path)
-        parmdb_map = MultiDataMap.load(parmdb_list_path)
-        sky_list = MultiDataMap.load(sky_list_path)
-        source_db = sky_list[0].file[0] # the sourcedb is the first file entry
-      
-        try:
-            bbs_process_group = SubProcessGroup(self.logger,
-                                  self.resourceMonitor)
-            # *****************************************************************
-            # 2. start the bbs executable with data
-            # The data is located in multimaps. We need the first entry
-            # TODO: This is not 'nice' usage of the multimap
-            for (measurement_set, parmdb) in zip(ms_map[0].file,
-                                                 parmdb_map[0].file):
-                command = [
-                    bbs_executable,
-                    "--sourcedb={0}".format(source_db),
-                    "--parmdb={0}".format(parmdm) ,
-                    measurement_set,
-                    parset]
-                self.logger.info("Executing bbs command: {0}".format(" ".join(
-                            command)))
-                bbs_process_group.run(command)
-
-            # *****************************************************************
-            # 3. check status of the processes
-            if bbs_process_group.wait_for_finish() is not None:
-                self.logger.error(
-                            "Failed bbs run detected. Aborting.")
-                return 1
-
-        except OSError as exception:
-            self.logger.error("Failed to execute bbs: {0}".format(str(
-                                                                    exception)))
-            return 1
-            
-        # *********************************************************************
-        # 4. Concatenate your MSs in time after bbs calibration, using
-        #    msconcat (pyrap.tables module) (added by N. Vilchez)
-        # this step has to be performed at this location, because the bbs run
-        # might add additional columns not present in the original ms
-        # and therefore not produced in the concat done in the prepare phase
-        # redmine issue #6021
-        pt.msconcat(ms_map[0].file, concat_ms_path, concatTime=True)
- 
-        # *********************************************************************
-        # 5. Copy the time slices directory to a new one
-        # This is done for debugging purposes: the copy is not used for
-        # anything. The actual selfcal steps are done in place.
-        # (added by N. Vilchez)
-        # The save location is created relative to the concat.ms;
-        # we could also use self.scratch_directory from the toplevel recipe,
-        # but this would need an additional ingredient.
-        # This is a 'debugging' step and should never ever cause a failure of
-        # the pipeline
-        try:
-            working_dir = os.path.dirname(concat_ms_path)
-            time_slice_dir = os.path.join(working_dir, 'time_slices')
-            time_slice_copy_dir = os.path.join(working_dir,
-                'time_slices_cycle_{0}'.format(major_cycle))
-
-            cmd = "cp -r {0} {1}".format(time_slice_dir, time_slice_copy_dir)
-            os.system(cmd)
-        except:
-            self.logger.warn(
-                "Debug copy of temporary files failed: continue operations")
-
-        return 0
-
-
-if __name__ == "__main__":
-    _JOBID, _JOBHOST, _JOBPORT = sys.argv[1:4]
-    sys.exit(selfcal_bbs(_JOBID, _JOBHOST, _JOBPORT).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/selfcal_finalize.py b/CEP/Pipeline/recipes/sip/nodes/selfcal_finalize.py
deleted file mode 100644
index 243922a114b..00000000000
--- a/CEP/Pipeline/recipes/sip/nodes/selfcal_finalize.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#                                                         LOFAR IMAGING PIPELINE
-#
-#                                                           selfcal_finalize
-#                                                            Wouter Klijn 2012
-#                                                           klijn@astron.nl
-# ------------------------------------------------------------------------------
-
-import sys
-import subprocess
-import os
-import tempfile
-import shutil
-
-from lofarpipe.support.lofarnode import LOFARnodeTCP
-from lofarpipe.support.utilities import log_time, create_directory
-import lofar.addImagingInfo as addimg
-import pyrap.images as pim
-from lofarpipe.support.utilities import catch_segfaults
-from lofarpipe.support.data_map import DataMap
-from lofarpipe.support.pipelinelogging import CatchLog4CPlus
-from lofarpipe.support.subprocessgroup import SubProcessGroup
-from lofar.common.subprocess_utils import communicate_returning_strings
-
-import urllib.request, urllib.error, urllib.parse
-import lofarpipe.recipes.helpers.MultipartPostHandler as mph
-
-class selfcal_finalize(LOFARnodeTCP):
-    """
-    This script performs the following functions:
-    
-    1. Add the image info to the casa image:
-       addimg.addImagingInfo (imageName, msNames, sourcedbName, minbl, maxbl)
-    2. Convert the image to hdf5 and fits image
-    3. Filling of the HDF5 root group
-    4. Move meta data of selfcal to correct dir in ms
-    5. Deepcopy ms to output location
-    """
-    def run(self, awimager_output, ms_per_image, sourcelist, target,
-            output_image, minbaseline, maxbaseline, processed_ms_dir,
-            fillrootimagegroup_exec, environment, sourcedb, concat_ms, 
-            correlated_output_location, msselect_executable):
-        """
-        :param awimager_output: Path to the casa image produced by awimager
-        :param ms_per_image: The (typically 90) measurement sets scheduled to
-            create the image
-        :param sourcelist: list of sources found in the image
-        :param target: <unused>
-        :param minbaseline: Minimum baseline used for the image
-        :param maxbaseline: largest/maximum baseline used for the image
-        :param processed_ms_dir: Directory containing the measurement sets
-            actually used to create the image
-        :param fillrootimagegroup_exec: Executable used to add image data to
-            the hdf5 image
-
-        :rtype: self.outputs['hdf5'] set to "succes" to signal node success
-        :rtype: self.outputs['image'] path to the produced hdf5 image
-        """
-        self.environment.update(environment)
-        with log_time(self.logger):
-            ms_per_image_map = DataMap.load(ms_per_image)
-
-            # *****************************************************************
-            # 1. add image info                      
-            # Get all the files in the processed measurement dir
-            file_list = os.listdir(processed_ms_dir)
-
-            processed_ms_paths = []
-            ms_per_image_map.iterator = DataMap.SkipIterator
-            for item in ms_per_image_map:
-                ms_path = item.file
-                processed_ms_paths.append(ms_path)
-
-            # add the information to the image
-            try:
-                self.logger.debug("Start addImage Info")
-                addimg.addImagingInfo(awimager_output, processed_ms_paths,
-                    sourcedb, minbaseline, maxbaseline)
-
-            except Exception as error:
-                self.logger.warn("addImagingInfo Threw Exception:")
-                self.logger.warn(error)
-                # Catch raising of already done error: allows for rerunning
-                # of the recipe
-                if "addImagingInfo already done" in str(error):
-                    self.logger.warn("addImagingInfo already done, continue")
-                    pass
-                else:
-                    raise Exception(error) 
-                # The majority of the tables are updated correctly
-
-            # ***************************************************************
-            # 2. convert to hdf5 image format
-            output_directory = None
-            pim_image = pim.image(awimager_output)
-            try:
-                self.logger.info("Saving image in HDF5 Format to: {0}" .format(
-                                output_image))
-                # Create the output directory
-                output_directory = os.path.dirname(output_image)
-                create_directory(output_directory)
-                # save the image
-                pim_image.saveas(output_image, hdf5=True)
-
-            except Exception as error:
-                self.logger.error(
-                    "Exception raised inside pyrap.images: {0}".format(
-                                                                str(error)))
-                raise error
-
-            # Convert to fits
-            # create target location
-            fits_output = output_image + ".fits"
-            # To allow reruns a possible earlier version needs to be removed!
-            # image2fits fails if not done!!
-            if os.path.exists(fits_output):
-                os.unlink(fits_output)
-
-            try:
-                self.logger.debug("Start convert to fits")
-                temp_dir = tempfile.mkdtemp()
-                with CatchLog4CPlus(temp_dir,
-                    self.logger.name + '.' + os.path.basename(awimager_output),
-                            "image2fits") as logger:
-                    catch_segfaults(["image2fits", '-in', awimager_output,
-                                                 '-out', fits_output],
-                                    temp_dir, self.environment, logger)
-            except Exception as excp:
-                self.logger.error(str(excp))
-                return 1
-            finally:
-                shutil.rmtree(temp_dir)
-
-            # ****************************************************************
-            # 3. Filling of the HDF5 root group
-            command = [fillrootimagegroup_exec, output_image]
-            self.logger.info(" ".join(command))
-            #Spawn a subprocess and connect the pipes
-            proc = subprocess.Popen(
-                        command,
-                        stdin=subprocess.PIPE,
-                        stdout=subprocess.PIPE,
-                        stderr=subprocess.PIPE)
-
-            (stdoutdata, stderrdata) = communicate_returning_strings(proc)
-
-            exit_status = proc.returncode
-
-
-            # if the command failed, log the error output
-            if exit_status != 0:
-                self.logger.error("Error using the fillRootImageGroup command"
-                    ". Exit status: {0}".format(exit_status))
-                self.logger.error(stdoutdata)
-                self.logger.error(stderrdata)
-
-                return 1
-
-            # *****************************************************************
-            # 4. Move the meta information to the correct directory next to
-            #    the concat.ms
-            self.logger.info("Saving selfcal parameters to file:")
-            meta_dir = concat_ms + "_selfcal_information"
-            meta_dir_target = os.path.join(concat_ms, "selfcal_information")
-            if os.path.exists(meta_dir) and os.path.exists(concat_ms):
-                self.logger.info("Copy meta information to output measurementset")
-
-                # Clear possible old data, allows for rerun of the pipeline
-                # if needed.
-                if os.path.exists(meta_dir_target):
-                      shutil.rmtree(meta_dir_target)
-                shutil.copytree(meta_dir, meta_dir_target)
-                
-            # *****************************************************************
-            # 5. Copy the measurement set to the output directory
-            # use msselect to copy all the data in the measurement set
-
-            cmd_string = "{0} in={1} out={2} baseline=* deep=True".format(
-                   msselect_executable, concat_ms, correlated_output_location)
-            msselect_proc_group = SubProcessGroup(self.logger)
-            msselect_proc_group.run(cmd_string)
-            if msselect_proc_group.wait_for_finish() is not None:
-                self.logger.error("failed copy of measurement set to output dir")
-                raise Exception("an MSselect run failed!")
-
-            self.outputs["hdf5"] = "succes"
-            self.outputs["image"] = output_image
-            self.outputs["correlated"] = correlated_output_location
-
-        
-        return 0
-
-
-if __name__ == "__main__":
-
-    _JOBID, _JOBHOST, _JOBPORT = sys.argv[1:4]
-    sys.exit(selfcal_finalize(_JOBID, _JOBHOST,
-                             _JOBPORT).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/tasks.cfg.CEP4.in b/CEP/Pipeline/recipes/sip/tasks.cfg.CEP4.in
index 3efaf40faa1..acd40a713c4 100644
--- a/CEP/Pipeline/recipes/sip/tasks.cfg.CEP4.in
+++ b/CEP/Pipeline/recipes/sip/tasks.cfg.CEP4.in
@@ -20,23 +20,12 @@ nthreads = 2
 max_per_node = 0 
 nthreads = 2
 
-[awimager]
-max_per_node = 0 
-nthreads = 2
-
 [rficonsole]
 executable = /opt/aoflagger/bin/aoflagger
 max_per_node = 0 
 nthreads = 2
 nproc = 0
 
-[imager_prepare]
-rficonsole_executable = /opt/aoflagger/bin/aoflagger
-nthreads = 2
-
-[imager_bbs]
-nthreads = 2
-
 [bbs_reducer]
 nthreads = 2
 
diff --git a/CEP/Pipeline/recipes/sip/tasks.cfg.in b/CEP/Pipeline/recipes/sip/tasks.cfg.in
index 0eb10b996e5..0c5c2b95a50 100644
--- a/CEP/Pipeline/recipes/sip/tasks.cfg.in
+++ b/CEP/Pipeline/recipes/sip/tasks.cfg.in
@@ -51,15 +51,6 @@ mapfile = %(runtime_directory)s/%(job_name)s/mapfiles/instrument.mapfile
 [get_metadata]
 recipe = get_metadata
 
-[imager_prepare]
-recipe = imager_prepare
-ndppp_exec = %(lofarroot)s/bin/NDPPP
-asciistat_executable = %(lofarroot)s/bin/asciistats.py
-statplot_executable = %(lofarroot)s/bin/statsplot.py
-msselect_executable = %(casaroot)s/bin/msselect
-rficonsole_executable = %(aoflaggerroot)s/bin/aoflagger
-nthreads = 8
-
 [long_baseline]
 recipe = long_baseline
 ndppp_exec = %(lofarroot)s/bin/NDPPP
@@ -70,29 +61,6 @@ rficonsole_executable = %(aoflaggerroot)s/bin/aoflagger
 nproc = 1
 nthreads = 8
 
-[imager_awimager]
-recipe = imager_awimager
-executable = %(lofarroot)s/bin/awimager
-nthreads = 8
-
-[imager_create_dbs]
-recipe = imager_create_dbs
-parmdb_executable = %(lofarroot)s/bin/parmdbm
-makesourcedb_path = %(lofarroot)s/bin/makesourcedb
-
-[imager_bbs]
-recipe = imager_bbs
-bbs_executable = %(lofarroot)s/bin/bbs-reducer
-nthreads = 8
-  
-[imager_source_finding]
-recipe = imager_source_finding
-makesourcedb_path = %(lofarroot)s/bin/makesourcedb
-
-[imager_finalize]
-recipe = imager_finalize
-fillrootimagegroup_exec = %(lofarroot)s/bin/fillRootImageGroup
-
 [copier]
 recipe = copier
 mapfiles_dir = %(runtime_directory)s/%(job_name)s/mapfiles
@@ -106,19 +74,6 @@ sky_mapfile = %(runtime_directory)s/%(job_name)s/mapfiles/sky.mapfile
 data_mapfile = %(runtime_directory)s/%(job_name)s/mapfiles/bbs.mapfile
 nthreads = 8
 
-[selfcal_awimager]
-recipe = selfcal_awimager
-executable = %(lofarroot)s/bin/awimager
-
-[selfcal_bbs]
-recipe = selfcal_bbs
-bbs_executable = %(lofarroot)s/bin/bbs-reducer
-
-[selfcal_finalize]
-recipe = selfcal_finalize
-fillrootimagegroup_exec = %(lofarroot)s/bin/fillRootImageGroup
-msselect_executable = %(casaroot)s/bin/msselect
-
 # below are tasks for the generic pipeline
 [executable_args]
 recipe = executable_args
@@ -171,16 +126,6 @@ args_format=lofar
 outputkey=msout
 nthreads = 8
 
-[awimager]
-recipe = executable_args
-parsetasfile = True 
-executable = %(lofarroot)s/bin/awimager
-outputsuffixes = [.model, .model.corr, .residual, .residual.corr, .restored, .restored.corr, .psf]
-max_per_node = 1 
-args_format=lofar
-outputkey=image
-nthreads = 8
-
 [rficonsole]
 recipe = executable_args
 executable = %(lofarroot)s/bin/rficonsole
-- 
GitLab