diff --git a/.gitattributes b/.gitattributes
index d4f4e455aaf9f7a7b1dd1dce13106b54fadf42b9..8634b2ef641eb0032130300a990cc7782827707b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -572,6 +572,54 @@ CEP/ParmDB/test/tsetupsourcedb.in_gds -text
 CEP/ParmDB/test/tsetupsourcedb.in_ms0.vds -text
 CEP/ParmDB/test/tsetupsourcedb.in_ms1.vds -text
 CEP/ParmDB/test/tsetupsourcedb.in_ms2.vds -text
+CEP/Pipeline/docs/examples/model_parsets/bbs.skymodel -text
+CEP/Pipeline/docs/notes/2010-11-15-grid.rst -text
+CEP/Pipeline/docs/notes/2010-12-08-handover_discussion.rst -text
+CEP/Pipeline/docs/pulsar_demo/intro.rst -text
+CEP/Pipeline/docs/sphinx/source/.static/lofar.ico -text
+CEP/Pipeline/docs/sphinx/source/author/index.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/index.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/cuisine.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/distribution.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/ingredients.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/logging.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/recipes.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/utility.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests/ingredients.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/todo.rst -text
+CEP/Pipeline/docs/sphinx/source/index.rst -text
+CEP/Pipeline/docs/sphinx/source/logo.jpg -text svneol=unset#image/jpeg
+CEP/Pipeline/docs/sphinx/source/overview/dependencies/index.rst -text
+CEP/Pipeline/docs/sphinx/source/overview/overview/index.rst -text
+CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.odg -text
+CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.png -text svneol=unset#image/png
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/index.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/quickstart/index.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/bbs.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/cimager.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/datamapper.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/dppp.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/index.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/parmdb.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/rficonsole.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sip.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sourcedb.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/storagemapper.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsmaker.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsreader.rst -text
+CEP/Pipeline/docs/sphinx/source/todo.rst -text
+CEP/Pipeline/docs/sphinx/source/user/installation/index.rst -text
+CEP/Pipeline/docs/sphinx/source/user/installation/installation.rst -text
+CEP/Pipeline/docs/sphinx/source/user/installation/quickstart.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/configuration.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/index.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/jobs.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/layout.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/running.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/tasks.rst -text
 CMake/FindAskapSoft.cmake -text
 CMake/FindJNI.cmake -text
 CMake/FindValgrind.cmake -text
diff --git a/CEP/Pipeline/deploy/fabfile.py b/CEP/Pipeline/deploy/fabfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9292ca727d362f8f277de0757dcb90e38eeba3c
--- /dev/null
+++ b/CEP/Pipeline/deploy/fabfile.py
@@ -0,0 +1,62 @@
+from fabric.api import env, hosts, run, put, get, require
+from lofarpipe.support.clusterdesc import ClusterDesc
+from lofarpipe.support.clusterdesc import get_compute_nodes, get_head_node
+import os.path
+
+from ConfigParser import SafeConfigParser as ConfigParser
+
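+# Typical Fabric 1.x invocation (illustrative), pairing a host-selection
+# task with an action:
+#
+#   fab head_node start_controller
+#   fab compute_nodes start_engine
+#   fab compute_nodes stop_engine
+#   fab head_node stop_controller
+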
+# Support function
+def _get_config(filename):
+    if not os.path.isfile(filename):
+        raise IOError("Configuration file not found: %s" % filename)
+    config = ConfigParser()
+    config.read(filename)
+    return config
+
+# These functions actually do the work
+def head_node(configfile='~/.pipeline.cfg'):
+    clusterdesc = ClusterDesc(
+        _get_config(
+            os.path.expanduser(configfile)
+        ).get('cluster', 'clusterdesc')
+    )
+    env.hosts = get_head_node(clusterdesc)
+
+def compute_nodes(configfile='~/.pipeline.cfg'):
+    clusterdesc = ClusterDesc(
+        _get_config(
+            os.path.expanduser(configfile)
+        ).get('cluster', 'clusterdesc')
+    )
+    env.hosts = get_compute_nodes(clusterdesc)
+
+def start_controller(configfile='~/.pipeline.cfg'):
+    configfile = os.path.expanduser(configfile)
+    require('hosts', provided_by=[head_node])
+    controlpath = _get_config(configfile).get('DEFAULT', 'runtime_directory')
+    controller_ppath = _get_config(configfile).get('deploy', 'controller_ppath')
+    script_path = _get_config(configfile).get('deploy', 'script_path')
+    run("bash %s/ipcontroller.sh %s start %s" % (script_path, controlpath, controller_ppath), shell=False)
+
+def stop_controller(configfile='~/.pipeline.cfg'):
+    configfile = os.path.expanduser(configfile)
+    require('hosts', provided_by=[head_node])
+    controlpath = _get_config(configfile).get('DEFAULT', 'runtime_directory')
+    script_path = _get_config(configfile).get('deploy', 'script_path')
+    run("bash %s/ipcontroller.sh %s stop" % (script_path, controlpath), shell=False)
+
+def start_engine(configfile='~/.pipeline.cfg'):
+    configfile = os.path.expanduser(configfile)
+    require('hosts', provided_by=[compute_nodes])
+    controlpath = _get_config(configfile).get('DEFAULT', 'runtime_directory')
+    engine_ppath = _get_config(configfile).get('deploy', 'engine_ppath')
+    engine_lpath = _get_config(configfile).get('deploy', 'engine_lpath')
+    script_path = _get_config(configfile).get('deploy', 'script_path')
+    run("bash %s/ipengine.sh %s start %s %s" % (script_path, controlpath, engine_ppath, engine_lpath), shell=False)
+
+def stop_engine(configfile='~/.pipeline.cfg'):
+    configfile = os.path.expanduser(configfile)
+    require('hosts', provided_by=[compute_nodes])
+    controlpath = _get_config(configfile).get('DEFAULT', 'runtime_directory')
+    script_path = _get_config(configfile).get('deploy', 'script_path')
+    run("bash %s/ipengine.sh %s stop" % (script_path, controlpath), shell=False)
diff --git a/CEP/Pipeline/deploy/ipcontroller.sh b/CEP/Pipeline/deploy/ipcontroller.sh
new file mode 100755
index 0000000000000000000000000000000000000000..2ef38f548e1f0d0dbf0e5cc73fe5420827d4fc4c
--- /dev/null
+++ b/CEP/Pipeline/deploy/ipcontroller.sh
@@ -0,0 +1,31 @@
+CONTROLPATH=$1
+export PYTHONPATH=$3
+
+mkdir -p $CONTROLPATH
+
+PIDFILE=$CONTROLPATH/ipc.pid
+
+case "$2" in
+  start) 
+         if [ ! -f $PIDFILE ]; then
+             /sbin/start-stop-daemon --start -b -m --pidfile $PIDFILE \
+               --exec /opt/pipeline/dependencies/bin/ipcontroller -- -xy \
+               --engine-furl-file=$CONTROLPATH/engine.furl \
+               --task-furl-file=$CONTROLPATH/task.furl \
+               --multiengine-furl-file=$CONTROLPATH/multiengine.furl \
+               --logfile=$CONTROLPATH/ipc.log
+         else
+            echo "ipcontroller already running on `hostname`"
+         fi
+         ;;
+  stop)
+         /sbin/start-stop-daemon --stop --pidfile $PIDFILE
+         rm $PIDFILE
+         ;;
+  *)
+         echo "Usage: $0 <controlpath> {start|stop}" >&2
+         exit 1
+         ;;
+esac
+
+exit 0
diff --git a/CEP/Pipeline/deploy/ipengine.sh b/CEP/Pipeline/deploy/ipengine.sh
new file mode 100755
index 0000000000000000000000000000000000000000..21889095d49b6ea800f774e27ed31bdbf659810e
--- /dev/null
+++ b/CEP/Pipeline/deploy/ipengine.sh
@@ -0,0 +1,69 @@
+CONTROLPATH=$1
+export PYTHONPATH=$3
+export LD_LIBRARY_PATH=$4
+FURL=$5
+NPROC=$6
+PIDPATH=$CONTROLPATH/engines/`hostname`
+
+mkdir -p $PIDPATH
+
+case "$2" in
+  start) 
+         if [ $FURL ]; then
+             FURLFILE=`mktemp`
+             TMPFURL=1
+             echo $FURL > $FURLFILE
+         else
+             FURLFILE=$CONTROLPATH/engine.furl
+         fi
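+         # Use the supplied engine count if it is an integer (the -eq
+         # self-comparison fails for non-numeric values); otherwise start
+         # one engine per CPU core.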
+         if [ $NPROC ] && [ $NPROC -eq $NPROC ]; then
+            NPROC=$NPROC
+         else
+            NPROC=`cat /proc/cpuinfo | grep ^processor | wc -l`
+         fi
+         for PROC in `seq 1 $NPROC`
+            do
+                 PIDFILE=$PIDPATH/ipengine$PROC.pid
+                 if [ ! -f $PIDFILE ]; then
+                     /sbin/start-stop-daemon --start -b -m               \
+                       --pidfile $PIDFILE                                \
+                       --exec /opt/pipeline/dependencies/bin/ipengine -- \
+                       --furl-file=$FURLFILE --logfile=$PIDPATH/log
+                     start_time=`date +%s`
+                     while :
+                        do
+                            sleep 0.1
+                            PID=`cat $PIDFILE 2> /dev/null`
+                            grep "engine registration succeeded" $PIDPATH/log$PID.log > /dev/null 2>&1
+                            if [ $? -eq 0 ]; then
+                                break
+                            fi
+                            # Time out after 30 seconds
+                            end_time=`date +%s`
+                            if [ $(($end_time-$start_time)) -ge 30 ]; then
+                                break
+                            fi
+                        done
+#                     ps -p `cat $PIDFILE` > /dev/null || echo "ipengine failed to start on `hostname`"
+                else
+                    echo "ipengine already running on `hostname`"
+                fi
+            done
+         if [ $TMPFURL ]; then
+             rm $FURLFILE
+         fi
+         ;;
+  stop)
+         for PIDFILE in $PIDPATH/ipengine*.pid
+             do
+                 /sbin/start-stop-daemon --stop --pidfile $PIDFILE --oknodo
+                 rm $PIDFILE
+             done
+         ;;
+  *)
+         echo "Usage: $0 <controlpath> {start|stop}" >&2
+         exit 1
+         ;;
+esac
+
+exit 0
diff --git a/CEP/Pipeline/deploy/start_cluster.py b/CEP/Pipeline/deploy/start_cluster.py
new file mode 100755
index 0000000000000000000000000000000000000000..159abaf783365a617e9f6c83f110faa1bb54a302
--- /dev/null
+++ b/CEP/Pipeline/deploy/start_cluster.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+
+"""
+Start IPython cluster.
+"""
+
+import sys, logging, os
+from optparse import OptionParser
+from ConfigParser import SafeConfigParser as ConfigParser
+from lofarpipe.support.clusterhandler import ClusterHandler
+
+parser = OptionParser()
+parser.add_option(
+    "--config", help="Pipeline configuration file", default="~/.pipeline.cfg"
+)
+parser.add_option(
+    "--num-engines", help="Number of engines per node", default=8
+)
+options, args = parser.parse_args()
+
+my_logger = logging.getLogger()
+stream_handler = logging.StreamHandler(sys.stdout)
+formatter = logging.Formatter(
+    "%(asctime)s %(levelname)-7s: %(message)s",
+    "%Y-%m-%d %H:%M:%S"
+)
+stream_handler.setFormatter(formatter)
+my_logger.addHandler(stream_handler)
+my_logger.setLevel(logging.DEBUG)
+
+config = ConfigParser()
+config.read(os.path.expanduser(options.config))
+
+clusterhandler = ClusterHandler(config)
+clusterhandler.start_cluster(options.num_engines)
+
diff --git a/CEP/Pipeline/deploy/stop_cluster.py b/CEP/Pipeline/deploy/stop_cluster.py
new file mode 100755
index 0000000000000000000000000000000000000000..35156167e67454f70739ffd7ec04a9e98f43c11d
--- /dev/null
+++ b/CEP/Pipeline/deploy/stop_cluster.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+
+"""
+Stop IPython cluster.
+"""
+
+import sys, logging, os
+from optparse import OptionParser
+from ConfigParser import SafeConfigParser as ConfigParser
+from lofarpipe.support.clusterhandler import ClusterHandler
+
+parser = OptionParser()
+parser.add_option(
+    "--config", help="Pipeline configuration file", default="~/.pipeline.cfg"
+)
+options, args = parser.parse_args()
+
+my_logger = logging.getLogger()
+stream_handler = logging.StreamHandler(sys.stdout)
+formatter = logging.Formatter(
+    "%(asctime)s %(levelname)-7s: %(message)s",
+    "%Y-%m-%d %H:%M:%S"
+)
+stream_handler.setFormatter(formatter)
+my_logger.addHandler(stream_handler)
+my_logger.setLevel(logging.DEBUG)
+
+config = ConfigParser()
+config.read(os.path.expanduser(options.config))
+
+clusterhandler = ClusterHandler(config)
+clusterhandler.stop_cluster()
+
diff --git a/CEP/Pipeline/docs/examples/definition/dummy/pipeline.cfg b/CEP/Pipeline/docs/examples/definition/dummy/pipeline.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..754dc685c7c3d44a3426afd9460168cd37a86516
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy/pipeline.cfg
@@ -0,0 +1,13 @@
+[DEFAULT]
+runtime_directory = /home/swinbank/Work/pipeline_runtime
+recipe_directories = [/opt/pipeline/recipes/]
+lofarroot =  /opt/LofIm/daily/lofar
+default_working_directory = /data/scratch/swinbank
+task_files = [%(cwd)s/tasks.cfg]
+
+[layout]
+job_directory = %(runtime_directory)s/jobs/%(job_name)s
+log_directory = %(job_directory)s/logs/%(start_time)s
+vds_directory = %(job_directory)s/vds
+parset_directory = %(job_directory)s/parsets
+results_directory = %(job_directory)s/results/%(start_time)s
diff --git a/CEP/Pipeline/docs/examples/definition/dummy/pipeline.py b/CEP/Pipeline/docs/examples/definition/dummy/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b667bd2c608d3886ea49b9d3d36969cbac3f1e4
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy/pipeline.py
@@ -0,0 +1,12 @@
+"""
+This is a very simple pipeline definition for demonstration purposes only.
+"""
+import sys
+from lofarpipe.support.control import control
+
+class demo(control):
+    def pipeline_logic(self):
+        self.run_task("dummy_echo", self.inputs['args'])
+
+if __name__ == '__main__':
+    sys.exit(demo().main())
diff --git a/CEP/Pipeline/docs/examples/definition/dummy/tasks.cfg b/CEP/Pipeline/docs/examples/definition/dummy/tasks.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..32dbceeed8b08358c21a38d24d8c2b801de7adba
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy/tasks.cfg
@@ -0,0 +1,3 @@
+[dummy_echo]
+recipe = dummy_echo
+
diff --git a/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.cfg b/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..a788b8957376a1eb536bc773a9ed6ae551451c94
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.cfg
@@ -0,0 +1,25 @@
+[DEFAULT]
+runtime_directory = /home/swinbank/Work/pipeline_runtime
+recipe_directories = [/opt/pipeline/recipes/]
+lofarroot =  /opt/LofIm/daily/lofar
+default_working_directory = /data/scratch/swinbank
+task_files = [%(cwd)s/tasks.cfg]
+
+[layout]
+job_directory = %(runtime_directory)s/jobs/%(job_name)s
+log_directory = %(job_directory)s/logs/%(start_time)s
+vds_directory = %(job_directory)s/vds
+parset_directory = %(job_directory)s/parsets
+results_directory = %(job_directory)s/results/%(start_time)s
+
+[cluster]
+clustername = imaging
+clusterdesc = %(runtime_directory)s/sub3.clusterdesc
+task_furl = %(runtime_directory)s/task.furl
+multiengine_furl = %(runtime_directory)s/multiengine.furl
+
+[deploy]
+script_path = /opt/pipeline/framework/bin
+controller_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages:/opt/pipeline/framework/lib/python2.5/site-packages
+engine_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages/:/opt/pipeline/framework/lib/python2.5/site-packages:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/lofar/lib/python2.5/site-packages:/opt/pythonlibs/lib/python/site-packages
+engine_lpath = /opt/pipeline/dependencies/lib:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/casacore/lib:/opt/LofIm/daily/lofar/lib:/opt/wcslib/lib/:/opt/hdf5/lib
diff --git a/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.py b/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b667bd2c608d3886ea49b9d3d36969cbac3f1e4
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.py
@@ -0,0 +1,12 @@
+"""
+This is a very simple pipeline definition for demonstration purposes only.
+"""
+import sys
+from lofarpipe.support.control import control
+
+class demo(control):
+    def pipeline_logic(self):
+        self.run_task("dummy_echo", self.inputs['args'])
+
+if __name__ == '__main__':
+    sys.exit(demo().main())
diff --git a/CEP/Pipeline/docs/examples/definition/dummy_parallel/tasks.cfg b/CEP/Pipeline/docs/examples/definition/dummy_parallel/tasks.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..1364e50e1526b38afc42649478b82e6053aeeb5f
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy_parallel/tasks.cfg
@@ -0,0 +1,3 @@
+[dummy_echo]
+recipe = dummy_echo_parallel
+
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/parsets/mwimager.parset b/CEP/Pipeline/docs/examples/definition/sip2/parsets/mwimager.parset
new file mode 100644
index 0000000000000000000000000000000000000000..c1ced0e837483fa3ae96d7bc57306c86b9c883dd
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/parsets/mwimager.parset
@@ -0,0 +1,24 @@
+Images.stokes = [I]
+Images.shape = [1024, 1024]
+Images.cellSize = [60, 60]
+Images.directionType = J2000
+Solver.type = Dirty
+Solver.algorithm = MultiScale
+Solver.niter = 1
+Solver.gain = 1.0
+Solver.verbose = True
+Solver.scales = [0, 3]
+Gridder.type = WProject
+Gridder.wmax = 1000
+Gridder.nwplanes = 40
+Gridder.oversample = 1
+Gridder.maxsupport = 4096
+Gridder.limitsupport = 0
+Gridder.cutoff = 0.001
+Gridder.nfacets = 1
+datacolumn = CORRECTED_DATA
+minUV = 1.0
+ncycles = 0
+restore = True
+restore_beam = [1, 1, 0]
+
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.initial.parset b/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.initial.parset
new file mode 100644
index 0000000000000000000000000000000000000000..3de6ef6b6acf69ca428f37520ab4d5a7aff4b94c
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.initial.parset
@@ -0,0 +1,31 @@
+uselogger = True
+msin.startchan = 8
+msin.nchan = 240
+msin.datacolumn = DATA     # is the default
+msout.datacolumn = DATA    # is the default
+
+steps = [preflag,flag1,avg1,flag2,avg2,count]   # if defined as [] the MS will be copied and NaN/infinite will be  flagged
+
+
+preflag.type=preflagger    # This step will flag the autocorrelations. Note that they are not flagged by default by NDPPP
+preflag.corrtype=auto
+
+flag1.type=madflagger
+flag1.threshold=4
+flag1.freqwindow=31
+flag1.timewindow=5
+flag1.correlations=[0,3]   # only flag on XX and YY
+
+avg1.type = squash
+avg1.freqstep = 256        # it compresses the data by a factor of 256 in frequency
+avg1.timestep = 1          # is the default
+
+flag2.type=madflagger
+flag2.threshold=3
+flag2.timewindow=51
+
+avg2.type = squash
+avg2.timestep = 5          #it compresses the data by a factor of 5 in time
+
+count.type = counter
+count.showfullyflagged = True
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.postbbs.parset b/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.postbbs.parset
new file mode 100644
index 0000000000000000000000000000000000000000..c0176c4d6c0ae2acd2b47149618ec110c094e202
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.postbbs.parset
@@ -0,0 +1,26 @@
+msin.startchan = 0
+msin.nchan = 1
+msin.datacolumn = CORRECTED_DATA
+msout.datacolumn = CORRECTED_DATA
+
+steps = [clip1,flag3,flag4,flag5,flag6]   # if defined as [] the MS will be copied and NaN/infinite will be  flagged
+
+clip1.type=preflagger
+clip1.amplmax=**inserted by pipeline**
+
+flag3.type=madflagger
+flag3.threshold=4
+flag3.timewindow=5
+flag3.correlations=[0,3]   # only flag on XX and YY
+
+flag4.type=madflagger
+flag4.threshold=3
+flag4.timewindow=25
+
+flag5.type=madflagger
+flag5.threshold=3
+flag5.timewindow=51
+
+flag6.type=madflagger
+flag6.threshold=3
+flag6.timewindow=71
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/parsets/uv-plane-cal-beam.parset b/CEP/Pipeline/docs/examples/definition/sip2/parsets/uv-plane-cal-beam.parset
new file mode 100644
index 0000000000000000000000000000000000000000..bcdc9bf4b43c668e437b065508a4568466489cae
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/parsets/uv-plane-cal-beam.parset
@@ -0,0 +1,37 @@
+Strategy.ChunkSize = 0
+Strategy.Steps = [solve, correct, subtract]
+
+Step.solve.Operation = SOLVE
+Step.solve.Model.Sources = []
+Step.solve.Model.Gain.Enable = T
+Step.solve.Model.Cache.Enable = T
+Step.solve.Solve.Parms = ["Gain:0:0:*","Gain:1:1:*"]
+Step.solve.Solve.CellSize.Freq = 0
+Step.solve.Solve.CellSize.Time = 1
+Step.solve.Solve.CellChunkSize = 10
+Step.solve.Solve.Options.MaxIter = 20
+Step.solve.Solve.Options.EpsValue = 1e-9
+Step.solve.Solve.Options.EpsDerivative = 1e-9
+Step.solve.Solve.Options.ColFactor = 1e-9
+Step.solve.Solve.Options.LMFactor = 1.0
+Step.solve.Solve.Options.BalancedEqs = F
+Step.solve.Solve.Options.UseSVD = T
+Step.solve.Model.Beam.Enable = T
+Step.solve.Model.Beam.StationConfig.Name = LBA_OUTER
+Step.solve.Model.Beam.StationConfig.Path = /home/zwieten/StationConfig/
+Step.solve.Model.Beam.Element.Type = HAMAKER_LBA
+
+Step.correct.Operation = CORRECT
+Step.correct.Model.Sources = [**central_source**]
+Step.correct.Model.Gain.Enable = T
+Step.correct.Output.Column = CORRECTED_DATA
+Step.correct.Model.Beam.Enable = T
+Step.correct.Model.Beam.StationConfig.Name = LBA_OUTER
+Step.correct.Model.Beam.StationConfig.Path = /home/zwieten/StationConfig/
+Step.correct.Model.Beam.Element.Type = HAMAKER_LBA
+
+Step.subtract.Operation = SUBTRACT
+Step.subtract.Output.Column = SUBTRACTED_DATA
+Step.subtract.Model.Sources = [**central_source**]
+Step.subtract.Model.Gain.Enable = F
+Step.subtract.Model.Beam.Enable = F
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/pipeline.cfg b/CEP/Pipeline/docs/examples/definition/sip2/pipeline.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..2abb670c25b4cb47c412fa92d85631d3d6d61bbc
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/pipeline.cfg
@@ -0,0 +1,28 @@
+[DEFAULT]
+runtime_directory = /home/swinbank/Work/pipeline_runtime_full
+recipe_directories = [/opt/pipeline/recipes]
+task_files = [%(cwd)s/tasks.cfg]
+
+[layout]
+job_directory = %(runtime_directory)s/jobs/%(job_name)s
+
+[cluster]
+clusterdesc = /home/swinbank/cdesc/full.clusterdesc
+task_furl = %(runtime_directory)s/task.furl
+multiengine_furl = %(runtime_directory)s/multiengine.furl
+
+[deploy]
+script_path = /opt/pipeline/framework/bin
+controller_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages:/opt/pipeline/framework/lib/python2.5/site-packages
+engine_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages/:/opt/pipeline/framework/lib/python2.5/site-packages:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/lofar/lib/python2.5/site-packages:/opt/pythonlibs/lib/python/site-packages
+engine_lpath = /opt/pipeline/dependencies/lib:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/casacore/lib:/opt/LofIm/daily/lofar/lib:/opt/wcslib/lib/:/opt/hdf5/lib
+
+[logging]
+log_file = %(runtime_directory)s/jobs/%(job_name)s/logs/%(start_time)s/pipeline.log
+format = %(asctime)s %(levelname)-7s %(name)s: %(message)s
+datefmt = %Y-%m-%d %H:%M:%S
+
+# Only expert users need to venture here...
+#[remote]
+#method = paramiko
+#key_filename = /home/swinbank/.ssh/id_rsa.pipeline.pub
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/sip.py b/CEP/Pipeline/docs/examples/definition/sip2/sip.py
new file mode 100644
index 0000000000000000000000000000000000000000..d76f36b0340ca2714df6a2c50518e64ed9f2433f
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/sip.py
@@ -0,0 +1,177 @@
+"""
+This is a model imaging pipeline definition.
+
+Although it should be runnable as it stands, the user is encouraged to copy it
+to a job directory and customise it as appropriate for the particular task at
+hand.
+"""
+from __future__ import with_statement
+from contextlib import closing
+from itertools import repeat
+import sys
+import os
+
+from pyrap.quanta import quantity
+
+from lofarpipe.support.control import control
+from lofarpipe.support.utilities import log_time
+from lofarpipe.support.parset import patched_parset
+from lofarpipe.support.lofaringredient import ListField
+
+class sip(control):
+    outputs = {
+        'images': ListField()
+    }
+
+    def pipeline_logic(self):
+        from to_process import datafiles # datafiles is a list of MS paths.
+        with log_time(self.logger):
+            # Build a map of compute node <-> data location on storage nodes.
+            storage_mapfile = self.run_task(
+                "datamapper", datafiles
+            )['mapfile']
+
+            # Produce a GVDS file describing the data on the storage nodes.
+            self.run_task('vdsmaker', storage_mapfile)
+
+            # Read metadata (start, end times, pointing direction) from GVDS.
+            vdsinfo = self.run_task("vdsreader")
+
+            # NDPPP reads the data from the storage nodes, according to the
+            # map. It returns a new map, describing the location of data on
+            # the compute nodes.
+            ndppp_results = self.run_task(
+                "ndppp",
+                storage_mapfile,
+                parset=os.path.join(
+                    self.config.get("layout", "parset_directory"),
+                    "ndppp.1.initial.parset"
+                ),
+                data_start_time=vdsinfo['start_time'],
+                data_end_time=vdsinfo['end_time']
+            )
+
+            # Remove baselines which have been fully-flagged in any individual
+            # subband.
+            compute_mapfile = self.run_task(
+                "flag_baseline",
+                ndppp_results['mapfile'],
+                baselines=ndppp_results['fullyflagged']
+            )['mapfile']
+
+            # Build a sky model ready for BBS & return the name & flux of the
+            # central source.
+            ra = quantity(vdsinfo['pointing']['ra']).get_value('deg')
+            dec = quantity(vdsinfo['pointing']['dec']).get_value('deg')
+            central = self.run_task(
+                "skymodel", ra=ra, dec=dec, search_size=2.5
+            )
+
+            # Patch the name of the central source into the BBS parset for
+            # subtraction.
+            with patched_parset(
+                self.task_definitions.get("bbs", "parset"),
+                {
+                    'Step.correct.Model.Sources': "[ \"%s\" ]" % (central["source_name"]),
+                    'Step.subtract.Model.Sources': "[ \"%s\" ]" % (central["source_name"])
+                }
+            ) as bbs_parset:
+                # BBS modifies data in place, so the map produced by NDPPP
+                # remains valid.
+                self.run_task("bbs", compute_mapfile, parset=bbs_parset)
+
+            # Now run DPPP on the output of BBS: three passes over each of
+            # two columns, first CORRECTED_DATA and then SUBTRACTED_DATA.
+            # Clip anything at more than 5 times the flux of the central
+            # source.
+            with patched_parset(
+                os.path.join(
+                    self.config.get("layout", "parset_directory"),
+                    "ndppp.1.postbbs.parset"
+                ),
+                {
+                    "clip1.amplmax": str(5 * central["source_flux"])
+                },
+                output_dir=self.config.get("layout", "parset_directory")
+            ) as corrected_ndppp_parset:
+                for i in repeat(None, 3):
+                    self.run_task(
+                        "ndppp",
+                        compute_mapfile,
+                        parset=corrected_ndppp_parset,
+                        suffix=""
+                    )
+
+            with patched_parset(
+                os.path.join(
+                    self.config.get("layout", "parset_directory"),
+                    "ndppp.1.postbbs.parset"
+                ),
+                {
+                    "msin.datacolumn": "SUBTRACTED_DATA",
+                    "msout.datacolumn": "SUBTRACTED_DATA",
+                    "clip1.amplmax": str(5 * central["source_flux"])
+                },
+                output_dir=self.config.get("layout", "parset_directory")
+            ) as subtracted_ndppp_parset:
+                for i in repeat(None, 3):
+                    self.run_task(
+                        "ndppp",
+                        compute_mapfile,
+                        parset=subtracted_ndppp_parset,
+                        suffix=""
+                    )
+
+            # Image CORRECTED_DATA.
+            self.logger.info("Imaging CORRECTED_DATA")
+
+            # Patch the pointing direction recorded in the VDS file into
+            # the parset for the cimager.
+            with patched_parset(
+                self.task_definitions.get("cimager", "parset"),
+                {
+                    'Images.ra': quantity(vdsinfo['pointing']['ra']).formatted("time"),
+                    'Images.dec': quantity(vdsinfo['pointing']['dec']).formatted("angle")
+                },
+                output_dir=self.config.get("layout", "parset_directory")
+
+            ) as imager_parset:
+                # And run cimager.
+                self.outputs['images'] = self.run_task(
+                    "cimager", compute_mapfile,
+                    parset=imager_parset,
+                    results_dir=os.path.join(
+                        self.config.get("layout", "results_directory"),
+                        "corrected"
+                    )
+                )['images']
+
+            # Image SUBTRACTED_DATA.
+            self.logger.info("Imaging SUBTRACTED_DATA")
+
+            # Patch the pointing direction recorded in the VDS file into
+            # the parset for the cimager, and change the column to be
+            # imaged.
+            with patched_parset(
+                self.task_definitions.get("cimager", "parset"),
+                {
+                    'Images.ra': quantity(vdsinfo['pointing']['ra']).formatted("time"),
+                    'Images.dec': quantity(vdsinfo['pointing']['dec']).formatted("angle"),
+                    'datacolumn': "SUBTRACTED_DATA"
+
+                },
+                output_dir=self.config.get("layout", "parset_directory")
+
+            ) as subtracted_imager_parset:
+                # And run cimager.
+                self.outputs['images'] = self.run_task(
+                    "cimager", compute_mapfile,
+                    parset=subtracted_imager_parset,
+                    results_dir=os.path.join(
+                        self.config.get("layout", "results_directory"),
+                        "subtracted"
+                    )
+                )['images']
+
+if __name__ == '__main__':
+    sys.exit(sip().main())
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/tasks.cfg b/CEP/Pipeline/docs/examples/definition/sip2/tasks.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..0e652aa54aea42d031646eb8a9b4661e12f9b1d3
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/tasks.cfg
@@ -0,0 +1,57 @@
+[datamapper]
+recipe = datamapper
+mapfile = %(runtime_directory)s/jobs/%(job_name)s/parsets/storage_mapfile
+
+[vdsmaker]
+recipe = new_vdsmaker
+directory = %(runtime_directory)s/jobs/%(job_name)s/vds
+gvds = %(runtime_directory)s/jobs/%(job_name)s/vds/inputs.gvds
+makevds = %(lofarroot)s/bin/makevds
+combinevds = %(lofarroot)s/bin/combinevds
+
+[vdsreader]
+recipe = vdsreader
+gvds = %(runtime_directory)s/jobs/%(job_name)s/vds/inputs.gvds
+
+[skymodel]
+recipe = skymodel
+min_flux = 0.5
+skymodel_file = %(runtime_directory)s/jobs/%(job_name)s/parsets/bbs.skymodel
+
+[ndppp]
+recipe = new_dppp
+executable = %(lofarroot)s/bin/NDPPP
+initscript = %(lofarroot)s/lofarinit.sh
+working_directory = /data/scratch/swinbank
+dry_run = False
+mapfile = %(runtime_directory)s/jobs/%(job_name)s/parsets/compute_mapfile
+
+[flag_baseline]
+recipe = flag_baseline
+
+[bbs]
+recipe = bbs
+initscript = %(lofarroot)s/lofarinit.sh
+control_exec = %(lofarroot)s/bin/GlobalControl
+kernel_exec = %(lofarroot)s/bin/KernelControl
+parset = %(runtime_directory)s/jobs/%(job_name)s/parsets/uv-plane-cal-beam.parset
+key = bbs_%(job_name)s
+db_host = ldb001
+db_name = swinbank
+db_user = postgres
+
+[sourcedb]
+recipe = sourcedb
+executable = %(lofarroot)s/bin/makesourcedb
+skymodel = %(runtime_directory)s/jobs/%(job_name)s/parsets/bbs.skymodel
+
+[parmdb]
+recipe = parmdb
+executable = %(lofarroot)s/bin/parmdbm
+
+[cimager]
+recipe = cimager
+imager_exec = /opt/LofIm/daily/askapsoft/bin/cimager.sh
+convert_exec = /opt/LofIm/daily/lofar/bin/convertimagerparset
+parset = %(runtime_directory)s/jobs/%(job_name)s/parsets/mwimager.parset
+results_dir = %(runtime_directory)s/jobs/%(job_name)s/results/%(start_time)s
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/to_process.py b/CEP/Pipeline/docs/examples/definition/sip2/to_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b9d758c6ebbe9f24ec94988139bb78701b5a7ff
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/to_process.py
@@ -0,0 +1,247 @@
+# Quick way of priming the pipeline with a list of datafiles to be processed.
+# Generate this file by, e.g.:
+#
+# $ obsid=L2010_08322; for i in `seq 0 7`; do ssh lce`printf %03d $((i*9+1))` ls /net/sub$((i+1))/lse*/data*/$obsid/* | grep SB >> to_process.py; done
+#
+# then tweak with your favourite text editor.
+
+datafiles = [
+'/net/sub1/lse001/data3/L2010_08567/SB0.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB10.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB11.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB12.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB13.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB14.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB15.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB16.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB1.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB2.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB3.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB4.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB5.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB6.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB7.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB8.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB9.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB17.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB18.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB19.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB20.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB21.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB22.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB23.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB24.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB25.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB26.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB27.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB28.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB29.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB30.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB31.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB32.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB33.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB34.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB35.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB36.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB37.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB38.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB39.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB40.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB41.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB42.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB43.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB44.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB45.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB46.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB47.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB48.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB49.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB50.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB51.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB52.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB53.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB54.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB55.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB56.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB57.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB58.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB59.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB60.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB61.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB62.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB63.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB64.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB65.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB66.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB67.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB68.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB69.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB70.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB71.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB72.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB73.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB74.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB75.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB76.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB77.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB78.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB79.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB80.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB81.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB82.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB83.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB84.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB100.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB101.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB85.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB86.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB87.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB88.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB89.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB90.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB91.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB92.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB93.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB94.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB95.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB96.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB97.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB98.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB99.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB102.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB103.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB104.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB105.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB106.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB107.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB108.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB109.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB110.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB111.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB112.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB113.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB114.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB115.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB116.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB117.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB118.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB119.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB120.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB121.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB122.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB123.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB124.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB125.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB126.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB127.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB128.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB129.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB130.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB131.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB132.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB133.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB134.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB135.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB136.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB137.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB138.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB139.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB140.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB141.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB142.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB143.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB144.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB145.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB146.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB147.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB148.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB149.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB150.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB151.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB152.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB153.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB154.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB155.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB156.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB157.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB158.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB159.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB160.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB161.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB162.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB163.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB164.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB165.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB166.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB167.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB168.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB169.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB170.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB171.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB172.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB173.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB174.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB175.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB176.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB177.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB178.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB179.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB180.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB181.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB182.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB183.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB184.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB185.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB186.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB187.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB188.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB189.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB190.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB191.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB192.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB193.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB194.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB195.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB196.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB197.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB198.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB199.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB200.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB201.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB202.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB203.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB204.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB205.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB206.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB207.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB208.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB209.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB210.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB211.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB212.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB213.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB214.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB215.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB216.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB217.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB218.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB219.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB220.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB221.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB222.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB223.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB224.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB225.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB226.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB227.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB228.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB229.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB230.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB231.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB232.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB233.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB234.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB235.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB236.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB237.MS',
+]
diff --git a/CEP/Pipeline/docs/examples/model_parsets/README b/CEP/Pipeline/docs/examples/model_parsets/README
new file mode 100644
index 0000000000000000000000000000000000000000..31f91bfeda8ebd7ea0bdd74510bf19290bb6883b
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/README
@@ -0,0 +1,4 @@
+Model parsets for use in the standard imaging pipeline.
+
+These are collected here for reference only; this is not their permanent home.
+They will probably need to be customised to the particular job in question.
diff --git a/CEP/Pipeline/docs/examples/model_parsets/bbs.parset b/CEP/Pipeline/docs/examples/model_parsets/bbs.parset
new file mode 100644
index 0000000000000000000000000000000000000000..617e99538672eef2d79c681baf6d4368cdbe2803
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/bbs.parset
@@ -0,0 +1,49 @@
+Strategy.Stations = []
+Strategy.InputColumn = DATA
+Strategy.TimeWindow = []
+Strategy.ChunkSize = 2500
+Strategy.UseSolver = F
+Strategy.Correlation.Selection = CROSS
+Strategy.Correlation.Type = []
+Strategy.Steps = [solve, subtract, correct]
+
+Step.solve.Baselines.Station1 = []
+Step.solve.Baselines.Station2 = []
+Step.solve.Model.Sources      = [3C196]
+Step.solve.Model.Gain.Enable  = T
+Step.solve.Correlation.Selection = CROSS
+Step.solve.Correlation.Type = []
+Step.solve.Operation        = SOLVE
+Step.solve.Output.Column    =
+Step.solve.Solve.Parms           = ["Gain:0:0:*", "Gain:1:1:*"]
+Step.solve.Solve.ExclParms       = []
+Step.solve.Solve.CalibrationGroups = []
+Step.solve.Solve.CellSize.Freq          = 0
+Step.solve.Solve.CellSize.Time          = 1
+Step.solve.Solve.CellChunkSize          = 25
+Step.solve.Solve.PropagateSolutions     = F
+Step.solve.Solve.Options.MaxIter        = 20
+Step.solve.Solve.Options.EpsValue       = 1e-9
+Step.solve.Solve.Options.EpsDerivative  = 1e-9
+Step.solve.Solve.Options.ColFactor      = 1e-9
+Step.solve.Solve.Options.LMFactor       = 1.0
+Step.solve.Solve.Options.BalancedEqs    = F
+Step.solve.Solve.Options.UseSVD         = T
+
+Step.subtract.Baselines.Station1 = []
+Step.subtract.Baselines.Station2 = []
+Step.subtract.Model.Sources      = [3C196]
+Step.subtract.Model.Gain.Enable  = T
+Step.subtract.Correlation.Selection = CROSS
+Step.subtract.Correlation.Type = []
+Step.subtract.Operation      = SUBTRACT
+Step.subtract.Output.Column  =
+
+Step.correct.Baselines.Station1 = []
+Step.correct.Baselines.Station2 = []
+Step.correct.Model.Sources      = []
+Step.correct.Model.Gain.Enable  = T
+Step.correct.Correlation.Selection = CROSS
+Step.correct.Correlation.Type = []
+Step.correct.Operation      = CORRECT
+Step.correct.Output.Column  = CORRECTED_DATA
diff --git a/CEP/Pipeline/docs/examples/model_parsets/bbs.skymodel b/CEP/Pipeline/docs/examples/model_parsets/bbs.skymodel
new file mode 100644
index 0000000000000000000000000000000000000000..a8fb59355736c5ebec0298c7b1c6f57996f18567
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/bbs.skymodel
@@ -0,0 +1,2 @@
+# (Name, Type, Ra, Dec, I, Q, U, V, ReferenceFrequency='55.468e6', SpectralIndexDegree='0', SpectralIndex:0='0.0', SpectralIndex:1='0.0', Major, Minor, Phi) = format
+3C196, POINT, 08:13:36.062300,  +48.13.02.24900, 1.0
diff --git a/CEP/Pipeline/docs/examples/model_parsets/casapy.parset b/CEP/Pipeline/docs/examples/model_parsets/casapy.parset
new file mode 100644
index 0000000000000000000000000000000000000000..53794179c272067f7428fd8ac491fdd8a6ef599f
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/casapy.parset
@@ -0,0 +1,29 @@
+Images.stokes = [I] 
+Images.shape = [1024, 1024]
+Images.cellSize = [10, 10]
+Images.ra = 08h13m36.06 
+Images.dec = 48.13.02.25 
+Images.directionType = J2000 
+Solver.type = Dirty
+Solver.algorithm = Hogbom
+Solver.niter = 1
+Solver.gain = 1.0 
+Solver.verbose = True 
+Solver.scales = [0, 3] 
+Gridder.type = WProject
+Gridder.wmax = 50000
+Gridder.nwplanes = 5
+Gridder.oversample = 1
+Gridder.padding = 1
+Gridder.maxsupport = 1024
+Gridder.limitsupport = 0
+Gridder.cutoff = 0.001 
+Images.nfacets = 1
+Selection.select = True
+Selection.antenna = '*&*'
+weighting = natural
+datacolumn = CORRECTED_DATA
+minUV = 1.0 
+ncycles = 0 
+restore = True
+restore_beam = [10arcsec, 10arcsec, 0] 
diff --git a/CEP/Pipeline/docs/examples/model_parsets/dppp1.parset b/CEP/Pipeline/docs/examples/model_parsets/dppp1.parset
new file mode 100644
index 0000000000000000000000000000000000000000..96e4a2897b470ea2d8d31245bca3c9b34e350c08
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/dppp1.parset
@@ -0,0 +1,18 @@
+allcolumns = False
+bandpass = 0
+fixed = 0
+flagger = 4
+algorithm = 0
+existing = True
+freqwindow = 31
+min = 0
+max = 0
+nchan = 240
+skipflags = True
+squasher = 1
+start = 8
+step = 240
+threshold = 3
+timewindow = 5
+timestep = 1
+clusterdesc =
diff --git a/CEP/Pipeline/docs/examples/model_parsets/dppp2.parset b/CEP/Pipeline/docs/examples/model_parsets/dppp2.parset
new file mode 100644
index 0000000000000000000000000000000000000000..db13757749437f95b60eca8f26e70618ac88b467
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/dppp2.parset
@@ -0,0 +1,18 @@
+allcolumns = False
+bandpass = 0
+fixed = 0
+flagger = 4
+algorithm = 0
+existing = True
+freqwindow = 1
+min = 0
+max = 0
+nchan = 240
+skipflags = True
+squasher = 0
+start = 8
+step = 240
+threshold = 3
+timewindow = 901
+timestep = 1
+clusterdesc =
diff --git a/CEP/Pipeline/docs/examples/model_parsets/dppp3.parset b/CEP/Pipeline/docs/examples/model_parsets/dppp3.parset
new file mode 100644
index 0000000000000000000000000000000000000000..678899a071f02afb3dad4f93c2e4a213d48e220c
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/dppp3.parset
@@ -0,0 +1,18 @@
+allcolumns = False
+bandpass = 0
+fixed = 0
+flagger = 4
+algorithm = 0
+existing = True
+freqwindow = 1
+min = 0
+max = 0
+nchan = 240
+skipflags = True
+squasher = 0
+start = 8
+step = 240
+threshold = 4
+timewindow = 901
+timestep = 1
+clusterdesc =
diff --git a/CEP/Pipeline/docs/examples/model_parsets/dppp4.parset b/CEP/Pipeline/docs/examples/model_parsets/dppp4.parset
new file mode 100644
index 0000000000000000000000000000000000000000..678899a071f02afb3dad4f93c2e4a213d48e220c
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/dppp4.parset
@@ -0,0 +1,18 @@
+allcolumns = False
+bandpass = 0
+fixed = 0
+flagger = 4
+algorithm = 0
+existing = True
+freqwindow = 1
+min = 0
+max = 0
+nchan = 240
+skipflags = True
+squasher = 0
+start = 8
+step = 240
+threshold = 4
+timewindow = 901
+timestep = 1
+clusterdesc =
diff --git a/CEP/Pipeline/docs/examples/model_parsets/mwimager.parset b/CEP/Pipeline/docs/examples/model_parsets/mwimager.parset
new file mode 100644
index 0000000000000000000000000000000000000000..760cf772696f9daa8518cb9b56f929d5db904441
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/mwimager.parset
@@ -0,0 +1,25 @@
+Images.stokes = [I] 
+Images.shape = [2048, 2048] 
+Images.cellSize = [10, 10] 
+Images.ra = 08h13m36.06 
+Images.dec = 48.13.02.25 
+Images.directionType = J2000 
+Solver.type = Dirty
+Solver.algorithm = MultiScale 
+Solver.niter = 1
+Solver.gain = 1.0 
+Solver.verbose = True 
+Solver.scales = [0, 3] 
+Gridder.type = WProject 
+Gridder.wmax = 66521
+Gridder.nwplanes = 257 
+Gridder.oversample = 1 
+Gridder.maxsupport = 4096
+Gridder.limitsupport = 4096
+Gridder.cutoff = 0.001 
+Gridder.nfacets = 1 
+datacolumn = CORRECTED_DATA
+minUV = 1.0 
+ncycles = 0 
+restore = False 
+restore_beam = [1, 1, 0] 
diff --git a/CEP/Pipeline/docs/examples/model_parsets/mwimager.pulsar.parset b/CEP/Pipeline/docs/examples/model_parsets/mwimager.pulsar.parset
new file mode 100644
index 0000000000000000000000000000000000000000..45a51e8290d69fac14c4b9e5bb4dad2da568d03d
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/mwimager.pulsar.parset
@@ -0,0 +1,25 @@
+Images.stokes = [I] 
+Images.shape = [2048, 2048] 
+Images.cellSize = [10, 10] 
+Images.ra = 03h32m59.37 
+Images.dec = 54.34.43.57 
+Images.directionType = J2000 
+Solver.type = Dirty
+Solver.algorithm = MultiScale 
+Solver.niter = 1
+Solver.gain = 1.0 
+Solver.verbose = True 
+Solver.scales = [0, 3] 
+Gridder.type = WProject 
+Gridder.wmax = 10000
+Gridder.nwplanes = 257 
+Gridder.oversample = 1 
+Gridder.maxsupport = 400
+Gridder.limitsupport = 400
+Gridder.cutoff = 0.001 
+Gridder.nfacets = 1 
+datacolumn = DATA
+minUV = 1.0 
+ncycles = 0 
+restore = True
+restore_beam = [0.003, 0.003, 0] 
diff --git a/CEP/Pipeline/docs/examples/model_parsets/ndppp.parset b/CEP/Pipeline/docs/examples/model_parsets/ndppp.parset
new file mode 100644
index 0000000000000000000000000000000000000000..ce2559f3dd8e2b89095da675b6268a1aca090c03
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/ndppp.parset
@@ -0,0 +1,27 @@
+msin.startchan = 8
+msin.nchan = 240
+msin.datacolumn = DATA
+msout.datacolumn = DATA
+steps = [flag1,flag2,avg1,flag3]
+# Squashing pass to average all channels into one
+avg1.type = squash
+avg1.freqstep = 240
+avg1.timestep = 1
+# Flagging pass 1 (on unsquashed data with medium time window, XX & YY only)
+flag1.type=madflagger
+flag1.threshold=2
+flag1.freqwindow=101
+flag1.timewindow=1
+flag1.correlations=[0,3]
+# Flagging pass 2 (on unsquashed data with medium freq window, XX & YY only)
+flag2.type=madflagger
+flag2.threshold=2
+flag2.freqwindow=1
+flag2.timewindow=101
+flag2.correlations=[0,3]
+# Flagging pass 3 (on squashed data with wide time window, all corr)
+flag3.type=madflagger
+flag3.threshold=3
+flag3.freqwindow=1
+flag3.timewindow=901
+flag3.correlations=[0,1,2,3]
diff --git a/CEP/Pipeline/docs/notes/2010-11-15-grid.rst b/CEP/Pipeline/docs/notes/2010-11-15-grid.rst
new file mode 100644
index 0000000000000000000000000000000000000000..31e9a4b4f3dc98bd7f387df393724754fdfe45a4
--- /dev/null
+++ b/CEP/Pipeline/docs/notes/2010-11-15-grid.rst
@@ -0,0 +1,140 @@
+==================
+LOFAR Grid Meeting
+==================
+----------------
+15 November 2010
+----------------
+
+Introduction to LOFAR Pipeline Software
+---------------------------------------
+
+- What is a pipeline?
+
+  - Shepherds a given dataset through an analysis process.
+  - Pipelines are not necessarily well defined. For instance, we're still
+    working out exactly what the "standard imaging pipeline" should do --
+    when should it flag? how much should the data be compressed? and so on.
+    A pipeline is therefore defined by stringing together a series of
+    standard processing blocks (or "recipes"), together with some
+    configuration information.
+
+- The LOFAR framework
+
+  - Written in pure Python.
+  - Structured as a series of "recipes", each one of which performs a given
+    processing step. For example, one recipe might run NDPPP (flagging &
+    compression of the data), another might run BBS (calibration).
+  - Recipes can call other recipes as part of their processing run.
+  - The pipeline itself is just defined as another recipe, and calls other
+    recipes as required (see the example below).
+
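+  As an illustration, a complete (if trivial) pipeline definition looks like
+  the demonstration example in ``docs/examples/definition/dummy``; the
+  ``dummy_echo`` task name comes from the accompanying example ``tasks.cfg``::
+
+      import sys
+      from lofarpipe.support.control import control
+
+      class demo(control):
+          def pipeline_logic(self):
+              # Each run_task() call hands off to another recipe by name;
+              # the pipeline is itself just a recipe stringing them together.
+              self.run_task("dummy_echo", self.inputs['args'])
+
+      if __name__ == '__main__':
+          sys.exit(demo().main())
+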
+- Recipes
+
+  - Must be able to handle running arbitrary tasks.
+  - For example, some recipes wrap external code (like NDPPP), which has no
+    programmatic interface: the recipe simply sets up and spawns the process,
+    then waits to collect its return code (see the sketch after this list).
+  - Other recipes can perform processing in pure Python: for example, they
+    might manipulate a dataset using pyrap.
+  - This means there is no "standard" way a recipe must be structured; this is
+    up to the programmer defining it.
+  - The framework provides many helpers and shortcuts for common functionality
+    required in defining a recipe, though.
+
+- Parallelisation
+
+  - In general, the pipeline tasks are embarrassingly parallel: we are
+    processing hundreds of independent subbands at the same time. Therefore,
+    the pipeline simply distributes independent jobs to the compute nodes and
+    collects the results.
+  - Previous versions of the framework used IPython to do this. This is still
+    available, but its use is no longer encouraged.
+  - The pipeline provides its own simple job dispatch system (sketched in
+    code after this list):
+
+    - A recipe on the head node is associated with a "node script", again in
+      pure Python, which runs on the compute nodes. The node script may take a
+      number of arguments (usually, eg, the name of the data file to
+      process).
+    - The recipe can use SSH or mpirun to start the node scripts in parallel
+      across all compute nodes, handing each one the appropriate parameters.
+    - Adding a different dispatch mechanism is easy, providing it supports
+      similar semantics (ie, start a command and wait for it to finish).
+    - There are mechanisms to schedule somewhat intelligently, eg limiting the
+      number of simultaneous jobs which are started per node.
+    - After scheduling a bunch of jobs on the compute nodes, the pipeline will
+      then wait for them to complete. Success is signalled via exit status of
+      the job.
+
+  - Cluster layout
+
+    - The layout of the LOFAR cluster is described by means of a "clusterdesc"
+      file.
+    - The pipeline can read this clusterdesc file to obtain the name of the
+      cluster head node and the compute nodes which are available for it to send
+      jobs to.
+
+  - "Mapping" of data
+
+    - Not all data on the LOFAR storage nodes can be accessed by each compute
+      node. Instead, the compute nodes in a given subcluster can only access
+      the storage nodes in that subcluster.
+    - In general, the imaging pipeline is started by simply passing it a list
+      of paths to the data to be processed.
+    - It is possible to work out from the path to a storage node which
+      subcluster it is a part of, and hence which compute nodes can access the
+      data.
+    - Processing is scheduled, in a round-robin fashion, on compute nodes
+      which can access the data.
+    - This understanding of paths is (obviously) fragile, but the LOFAR system
+      provides little other useful metadata to work from. 
+    - The "mapper" task which decides which compute node should process each
+      input dataset is easily extendable/replaceable to meet other
+      requirements.
+
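+  A minimal sketch of that dispatch-and-wait pattern (an illustration only,
+  with hypothetical host and script names -- not the framework's actual
+  dispatch code) could look like::
+
+    import subprocess
+
+    def dispatch(hosts, node_script, arguments):
+        # Start one node script per host over ssh.
+        processes = [
+            (host, subprocess.Popen(["ssh", host, "python", node_script] + arguments))
+            for host in hosts
+        ]
+        # Wait for each job; an exit status of 0 signals success.
+        return dict((host, proc.wait()) for host, proc in processes)
+
+    results = dispatch(["lce019", "lce020"], "/path/to/node_script.py", ["sb0.MS"])
+    failed = [host for host, status in results.items() if status != 0]
+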
+- Logging
+
+  - Logging is performed using the standard Python logging module.
+  - Output is configurable as part of the pipeline configuration file. In
+    general, logging is sent to a disk file and to stdout on the head node.
+  - As might be expected, you can specify the verbosity etc.
+  - The compute nodes log to the head node using TCP connections (ie, using
+    Python's logging.SocketHandler; a short example follows this list). The
+    head node then adds any data they send to its own logging stream.
+  - There is a built-in mechanism to scan the logging stream for arbitrary
+    regular expressions, and thereby flag potential problems etc.
+  - There is (currently) no other channel by which the compute nodes send
+    feedback (beyond their exit status) to the head node (but I hope to
+    implement a simple mechanism based on passing pickled Python objects over
+    a TCP stream shortly).
+
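+  The standard-library mechanism this is built on is small enough to show
+  directly (the hostname below is hypothetical; the framework wires this up
+  automatically)::
+
+    import logging
+    import logging.handlers
+
+    logger = logging.getLogger("compute_node")
+    # Forward every record to a TCP listener on the head node.
+    logger.addHandler(logging.handlers.SocketHandler(
+        "head.example.com", logging.handlers.DEFAULT_TCP_LOGGING_PORT
+    ))
+    logger.setLevel(logging.INFO)
+    logger.info("This record ends up in the head node's logging stream")
+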
+- Results
+
+  - Broadly speaking, the imaging pipeline picks data up from the storage
+    nodes and processes it, writing results onto scratch disks on the compute
+    nodes.
+  - Intermediate data products are kept on the compute nodes -- for example,
+    output from NDPPP will be processed by BBS and the imager on the same
+    node, thus minimising data transport.
+  - The resulting images will usually be copied to a shared storage location
+    for collection/archiving.
+  - If required, a copying step for intermediate data products could also be
+    added.
+
+- Dependencies
+
+  - The system makes extensive use of the Python standard library.
+  - The only non-core Python component required for basic operation is the
+    lofar.parameterset Python module, available from LOFARSOFT.
+  - Obviously, individual recipes may have more demanding requirements, either
+    because they need to call external programs or they require libraries like
+    pyrap for their own processing.
+
+- Documentation and further information
+
+  - Available from USG Subversion:
+    http://usg.lofar.org/svn/code/trunk/src/pipeline/.
+  - Uses a standard, distutils-based installation.
+  - Documentation (in Sphinx format, and still a work in progress) is also
+    available from that location.
+  - An online snapshot of the documentation is at
+    http://urchin.earth.li/~jds/pipeline/.
diff --git a/CEP/Pipeline/docs/notes/2010-12-08-handover_discussion.rst b/CEP/Pipeline/docs/notes/2010-12-08-handover_discussion.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9366750f2bbd7739c2337b42a2399b13d07a1553
--- /dev/null
+++ b/CEP/Pipeline/docs/notes/2010-12-08-handover_discussion.rst
@@ -0,0 +1,100 @@
+==================
+Pipeline Framework
+==================
+Points for Discussion
+=====================
+8 December 2010
+===============
+
+Job management
+--------------
+
+- Each pipeline "job" (ie, a combination of pipeline definition & a
+  particular dataset) is given a job identifier (a free form string, but
+  generally based on the obsid).
+- Each job may be run multiple times; logs, results, etc are filed by a
+  combination of job identifier and pipeline start time.
+
+Configuration
+-------------
+
+- pipeline.cfg file (in Python ConfigParser format) is used to configure the
+  pipeline system (a minimal illustration follows this list).
+- Includes things like where to search for recipes, what log format to use,
+  etc.
+
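+The illustration below shows only the ConfigParser layout; the [cluster]
+section and its clusterdesc option are the ones the example recipes read (via
+self.config.get('cluster', 'clusterdesc')), while the remaining names are
+placeholders rather than a definitive list of options::
+
+  [cluster]
+  clusterdesc = /path/to/cluster.clusterdesc
+
+  [logging]
+  log_file = /path/to/logs/pipeline.log
+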
+Recipes & Tasks
+---------------
+
+- Continue to use "recipes" to describe individual components, as in Cuisine
+  (indeed, the base recipe ultimately derives from Cuisine's WSRTrecipe).
+- We add the concept of a "task", which is a short-cut to running a
+  recipe with a particular set of parameters; accessed via the
+  self.run_task() method in a recipe.
+- Tasks are defined through another configuration file (tasks.cfg); a sketch
+  follows this list.
+- Entirely optional: everything can be done the "old-fashioned" way of
+  specifying inputs directly in the recipe. But tasks can make the recipe
+  code cleaner, and help separate configuration details from code.
+
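+As a sketch only (the option names here are illustrative rather than a
+definitive format), a tasks.cfg entry pairs a recipe with a preset set of
+parameters::
+
+  [ndppp]
+  recipe = dppp
+  parset = /path/to/ndppp.parset
+
+A recipe might then call self.run_task("ndppp") instead of spelling out those
+inputs itself.
+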
+Inputs, outputs, type checking
+------------------------------
+
+- Input and output dicts for recipes will be checked for completeness and
+  data types.
+- This enables the pipeline to check inputs for correctness before running a
+  recipe, rather than failing partway through.
+- A recipe must declare any inputs it expects as part of its definition.
+- Acceptable types are based on simple classes. Basics (strings, ints,
+  floats...) are already defined as part of the framework; the recipe author
+  is encouraged to be more specific where possible (eg "this must be a float
+  with a value between 0.3 and 1.5").
+- Default and/or optional inputs can also be declared.
+- The same parsing and type checking are applied whether inputs are read from
+  command-line options or from task definitions.
+
+Distribution
+------------
+
+- The pipeline includes its own distribution system, whereby recipes (running
+  on the head node) can call node scripts (running on other machines).
+- Node scripts are dispatched using either SSH or mpirun (depending on
+  pipeline.cfg; SSH by default).
+- Pickleable Python objects can be sent to and retrieved from the nodes, so
+  complex configuration data or results can be exchanged.
+- The pipeline can detect a failure on one node, and shut down the rest of the
+  system if required.
+- The recipes for rficonsole, NDPPP, BBS and cimager all use this system.
+  None of the current recipes use other systems (eg "startdistproc").
+- Helper functions make this system very easy for independent tasks (eg
+  running multiple instances of rficonsole, NDPPP). For more involved
+  workflows (eg BBS, where KernelControl and GlobalControl must be run
+  simultaneously), a more elaborate recipe is required.
+
+Parsets and tool configuration
+------------------------------
+
+- Most imaging pipeline tasks are configured by means of a parset.
+- Typically, the pipeline will take a template parset that omits, eg, the
+  names of input and output files, and fill in the blanks for each file to be
+  processed.
+- There are helper routines to do this in the pipeline framework
+  (utilities.patch_parset() et al); a hand-rolled illustration of the idea
+  follows this list.
+
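+A hand-rolled illustration of the "fill in the blanks" idea (this is not the
+utilities.patch_parset() interface itself, and the file names are
+hypothetical)::
+
+  def patch_parset_sketch(template_path, output_path, extra_values):
+      # Copy the template parset, then append the per-file values it omits.
+      template = open(template_path).read()
+      output = open(output_path, "w")
+      output.write(template)
+      if template and not template.endswith("\n"):
+          output.write("\n")
+      for key, value in sorted(extra_values.items()):
+          output.write("%s = %s\n" % (key, value))
+      output.close()
+
+  patch_parset_sketch(
+      "ndppp.parset", "ndppp_SB001.parset",
+      {"msin": "/data/SB001.MS", "msout": "/data/SB001_dppp.MS"}
+  )
+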
+Logging
+-------
+
+- Pervasive use of Python logging module throughout.
+- Every recipe has a logger (self.logger), which can be used directly and
+  passed to (most) pipeline functions.
+- Logger is also made available on compute nodes when running in a
+  distributed way.
+
+Users
+-----
+
+- There is currently not one "standard" imaging pipeline definition. The
+  system is designed to be modular enough that processing chains can be
+  quickly reworked in response to requests from commissioners.
+- As well as the imaging pipeline, the Pulsar and Transients groups have also
+  defined and are actively using pipelines in this framework.
+- The Pulsar pipeline is still dependent on the IPython system.
diff --git a/CEP/Pipeline/docs/pulsar_demo/1-basic_fn.py b/CEP/Pipeline/docs/pulsar_demo/1-basic_fn.py
new file mode 100644
index 0000000000000000000000000000000000000000..4208af8abafaa486b252aef917bebea7030993f1
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/1-basic_fn.py
@@ -0,0 +1,26 @@
+import glob
+import subprocess
+import os
+
+def run(file_pattern, input_dir, output_file, clobber):
+    # Returns 0 for success, 1 for failure
+
+    # Sanity checking
+    if not os.path.exists(input_dir):
+        return 1
+    if os.path.exists(output_file):
+        if clobber:
+            os.unlink(output_file)
+        else:
+            return 1
+
+    # Build list of input files
+    input_files = glob.glob(os.path.join(input_dir, file_pattern))
+
+    try:
+        # Run "montage" command
+        subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+    except Exception, e:
+        return 1
+
+    return 0
diff --git a/CEP/Pipeline/docs/pulsar_demo/2-lofarnode.py b/CEP/Pipeline/docs/pulsar_demo/2-lofarnode.py
new file mode 100644
index 0000000000000000000000000000000000000000..42a8f248f3fe120c7b585f66dd404a16f60f6f48
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/2-lofarnode.py
@@ -0,0 +1,34 @@
+import sys
+import subprocess
+import glob
+import os
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+
+class thumbnail_combine(LOFARnodeTCP):
+    def run(self, file_pattern, input_dir, output_file, clobber):
+        # Returns 0 for success, 1 for failure
+
+        # Sanity checking
+        if not os.path.exists(input_dir):
+            return 1
+        if os.path.exists(output_file):
+            if clobber:
+                os.unlink(output_file)
+            else:
+                return 1
+
+        # Build list of input files
+        input_files = glob.glob(os.path.join(input_dir, file_pattern))
+
+        try:
+            # Run "montage" command
+            subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+        except Exception, e:
+            return 1
+
+        return 0
+
+if __name__ == "__main__":
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/docs/pulsar_demo/3-logging.py b/CEP/Pipeline/docs/pulsar_demo/3-logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f30d7717f4fc6626f6805f53230cc690a60fb61
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/3-logging.py
@@ -0,0 +1,47 @@
+import sys
+import subprocess
+import glob
+import os
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+
+class thumbnail_combine(LOFARnodeTCP):
+    def run(self, file_pattern, input_dir, output_file, clobber):
+        if not os.path.exists(input_dir):
+            self.logger.error("Input directory (%s) not found" % input_dir)
+            return 1
+
+        self.logger.info("Processing %s" % input_dir)
+
+        if os.path.exists(output_file):
+            if clobber:
+                self.logger.warn(
+                    "Deleting previous version of results: %s" % output_file
+                )
+                os.unlink(output_file)
+            else:
+                self.logger.error(
+                    "Refusing to overwrite existing file %s" % output_file
+                )
+                return 1
+
+        input_files = glob.glob(os.path.join(input_dir, file_pattern))
+
+        try:
+            # Run "montage" command
+            subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+        except Exception, e:
+            self.logger.error(str(e))
+            return 1
+
+        if not os.path.exists(output_file):
+            self.logger.error(
+                "Output file %s not created by montage exectuable" % output_file
+            )
+            return 1
+
+        return 0
+
+if __name__ == "__main__":
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/docs/pulsar_demo/4-helpers.py b/CEP/Pipeline/docs/pulsar_demo/4-helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..73a9684e12932c9ef8151224109baafb25ebafc8
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/4-helpers.py
@@ -0,0 +1,54 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                              "Thumbnail combine" pipeline demo
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+import glob
+import os
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import catch_segfaults
+
+class thumbnail_combine(LOFARnodeTCP):
+    def run(self, executable, file_pattern, input_dir, output_file, clobber):
+        if not os.path.exists(input_dir):
+            self.logger.error("Input directory (%s) not found" % input_dir)
+            return 1
+
+        self.logger.info("Processing %s" % input_dir)
+
+        if os.path.exists(output_file):
+            if clobber:
+                self.logger.warn(
+                    "Deleting previous version of results: %s" % output_file
+                )
+                os.unlink(output_file)
+            else:
+                self.logger.error(
+                    "Refusing to overwrite existing file %s" % output_file
+                )
+                return 1
+
+        input_files = glob.glob(os.path.join(input_dir, file_pattern))
+
+        command_line = [executable] + input_files + [output_file]
+        try:
+            catch_segfaults(command_line, None, None, self.logger)
+        except Exception, e:
+            self.logger.error(str(e))
+            return 1
+
+        if not os.path.exists(output_file):
+            self.logger.error(
+                "Output file %s not created by montage exectuable" % output_file
+            )
+            return 1
+
+        return 0
+
+if __name__ == "__main__":
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/docs/pulsar_demo/5-recipe.py b/CEP/Pipeline/docs/pulsar_demo/5-recipe.py
new file mode 100644
index 0000000000000000000000000000000000000000..17099c22a239396fa3d34ccf17c32cea452bf4db
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/5-recipe.py
@@ -0,0 +1,12 @@
+import sys
+from lofarpipe.support.baserecipe import BaseRecipe
+
+class thumbnail_combine(BaseRecipe):
+    def go(self):
+        self.logger.info("Starting thumbnail_combine run")
+        super(thumbnail_combine, self).go()
+        self.logger.info("This recipe does nothing")
+
+
+if __name__ == '__main__':
+    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/6-remotecommand.py b/CEP/Pipeline/docs/pulsar_demo/6-remotecommand.py
new file mode 100644
index 0000000000000000000000000000000000000000..2eec1929ebcea3686894ea6b7a5ad3476d12fadd
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/6-remotecommand.py
@@ -0,0 +1,52 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                              "Thumbnail combine" pipeline demo
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+
+class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
+    def go(self):
+        self.logger.info("Starting thumbnail_combine run")
+        super(thumbnail_combine, self).go()
+
+        # Hosts on which to execute
+        hosts = ['lce019']
+
+        # Path to node script
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+
+        # Build a list of jobs
+        jobs = []
+        for host in hosts:
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        "/usr/bin/montage",     # executable
+                        "*.th.png",             # file_pattern
+                        "/path/to/png/files",   # input_dir
+                        "/path/to/output.png",  # output_dir
+                        True                    # clobber
+                    ]
+                )
+            )
+
+        # And run them
+        self._schedule_jobs(jobs)
+
+        # The error flag is set if a job failed
+        if self.error.isSet():
+            self.logger.warn("Failed compute job process detected")
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/7-ingredients.py b/CEP/Pipeline/docs/pulsar_demo/7-ingredients.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0e834b285aed5f864f445c0eae7e81bad21c22a
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/7-ingredients.py
@@ -0,0 +1,68 @@
+import sys
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+
+class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            default="/usr/bin/montage",
+            help="montage executable"
+        ),
+        'file_pattern': ingredient.StringField(
+            '--file-pattern',
+            default="*.th.png",
+            help="File search pattern (glob)",
+        ),
+        'input_dir': ingredient.StringField(
+            '--input-dir',
+            help="Directory containing input files"
+        ),
+        'output_file': ingredient.StringField(
+            '--output-file',
+            help="Output filename"
+        ),
+        'clobber': ingredient.BoolField(
+            '--clobber',
+            default=False,
+            help="Clobber pre-existing output files"
+        ),
+        'target_hosts': ingredient.ListField(
+            '--target-hosts',
+            help="Remote hosts on which to execute"
+        )
+    }
+
+    def go(self):
+        self.logger.info("Starting thumbnail_combine run")
+        super(thumbnail_combine, self).go()
+
+        hosts = self.inputs['target_hosts']
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host in hosts:
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        self.inputs['executable'],
+                        self.inputs['file_pattern'],
+                        self.inputs['input_dir'],
+                        self.inputs['output_file'],
+                        self.inputs['clobber']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs)
+
+        if self.error.isSet():
+            self.logger.warn("Failed compute job process detected")
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/8-ingredients2.py b/CEP/Pipeline/docs/pulsar_demo/8-ingredients2.py
new file mode 100644
index 0000000000000000000000000000000000000000..96ca8f497e8997aa4e4d4c90ee9afd9be71d34f4
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/8-ingredients2.py
@@ -0,0 +1,78 @@
+import sys
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+
+class HostNameList(ingredient.ListField):
+    @classmethod
+    def is_valid(cls, value):
+        import socket
+        for hostname in value:
+            try:
+                socket.gethostbyname(hostname)
+            except:
+                return False
+        return True
+        
+class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            default="/usr/bin/montage",
+            help="montage executable"
+        ),
+        'file_pattern': ingredient.StringField(
+            '--file-pattern',
+            default="*.th.png",
+            help="File search pattern (glob)",
+        ),
+        'input_dir': ingredient.StringField(
+            '--input-dir',
+            help="Directory containing input files"
+        ),
+        'output_file': ingredient.StringField(
+            '--output-file',
+            help="Output filename"
+        ),
+        'clobber': ingredient.BoolField(
+            '--clobber',
+            default=False,
+            help="Clobber pre-existing output files"
+        ),
+        'target_hosts': HostNameList(
+            '--target-hosts',
+            help="Remote hosts on which to execute"
+        )
+    }
+
+    def go(self):
+        self.logger.info("Starting thumbnail_combine run")
+        super(thumbnail_combine, self).go()
+
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host in self.inputs['target_hosts']:
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        self.inputs['executable'],
+                        self.inputs['file_pattern'],
+                        self.inputs['input_dir'],
+                        self.inputs['output_file'],
+                        self.inputs['clobber']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs)
+
+        if self.error.isSet():
+            self.logger.warn("Failed compute job process detected")
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/9-config.py b/CEP/Pipeline/docs/pulsar_demo/9-config.py
new file mode 100644
index 0000000000000000000000000000000000000000..df215fd19caa0b90467d78ab8d685dd024975426
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/9-config.py
@@ -0,0 +1,68 @@
+import sys
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.clusterdesc import ClusterDesc, get_compute_nodes
+
+class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            default="/usr/bin/montage",
+            help="montage executable"
+        ),
+        'file_pattern': ingredient.StringField(
+            '--file-pattern',
+            default="*.th.png",
+            help="File search pattern (glob)",
+        ),
+        'input_dir': ingredient.StringField(
+            '--input-dir',
+            help="Directory containing input files"
+        ),
+        'output_file': ingredient.StringField(
+            '--output-file',
+            help="Output filename"
+        ),
+        'clobber': ingredient.BoolField(
+            '--clobber',
+            default=False,
+            help="Clobber pre-existing output files"
+        )
+    }
+
+    def go(self):
+        self.logger.info("Starting thumbnail_combine run")
+        super(thumbnail_combine, self).go()
+
+        hosts = get_compute_nodes(
+            ClusterDesc(self.config.get('cluster', "clusterdesc"))
+        )
+
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host in hosts:
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        self.inputs['executable'],
+                        self.inputs['file_pattern'],
+                        self.inputs['input_dir'],
+                        self.inputs['output_file'],
+                        self.inputs['clobber']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs)
+
+        if self.error.isSet():
+            self.logger.warn("Failed compute job process detected")
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/intro.rst b/CEP/Pipeline/docs/pulsar_demo/intro.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cb55aae30c0c20f3a7caf99b1b2b316894addecc
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/intro.rst
@@ -0,0 +1,113 @@
+=======================
+Pipeline Framework Demo
+=======================
+17 December 2010
+================
+
+What is a pipeline?
+-------------------
+
+- A standardised way of interacting with the rest of LOFAR; logging, MAC/SAS
+  integration, etc.
+- Should make the developer's job *easier* (after an initial learning curve),
+  by providing a bunch of helper routines.
+- Provides sanity checking of inputs, outputs, etc.
+- Does not dictate exactly how you write your code: designed to allow the
+  developer as much freedom as necessary to get the job done. We have to
+  interface with a wide variety of external executables, scripts, Python
+  modules, ...
+
+Basic concepts
+--------------
+
+- Framework is pure Python, with minimal dependencies on external libraries.
+- Originated in the WSRT "Cuisine" framework, although almost no Cuisine code
+  is now actually used.
+- IPython is likewise hardly used any more (although it's very hard to
+  convince people it's not the "IPython framework"!).
+- An individual "pipeline component" is wrapped in a *recipe*: a Python
+  script which describes how to carry out that task.
+- Recipes take *inputs* and produce *outputs*. We do type & sanity checking
+  on the inputs and outputs.
+- Inputs will include some indication of the data to process (eg, a series
+  of filenames) and any configuration parameters needed (eg, tool
+  configuration parsets).
+- A *task* is a recipe + a set of configuration parameters. Tasks separate
+  configuration from code and provide a shortcut for common processing jobs.
+- Recipes can be nested: for example, in the imaging pipeline, the *bbs*
+  recipe calls the *vdsmaker*, *parmdb* and *sourcedb* recipes as part of
+  its run.
+- A *pipeline* is just a recipe which calls a series of subsidiary recipes
+  in order. (Often with a little extra boilerplate to receive messages from
+  MAC/SAS etc.)
+
+Job management
+--------------
+
+- Each pipeline "job" (ie, a combination of pipeline definition & a
+  particular dataset) is given a job identifier (a free form string, but
+  generally based on the obsid).
+- Each job may be run multiple times; logs, results, etc are filed by a
+  combination of job identifier and pipeline start time.
+
+Configuration
+-------------
+
+- pipeline.cfg file (in Python ConfigParser format) is used to configure the
+  pipeline system.
+- Includes things like where to search for recipes, what log format to use,
+  etc.
+
+Inputs, outputs, type checking
+------------------------------
+
+- Input and output dicts for recipes will be checked for completeness and
+  data types.
+- This enables the pipeline to check inputs for correctness before running a
+  recipe, rather than failing partway through.
+- A recipe must declare any inputs it expects as part of its definition.
+- Acceptable types are based on simple classes. Basics (strings, ints,
+  floats...) are already defined as part of the framework; the recipe author
+  is encouraged to be more specific where possible (eg "this must be a float
+  with a value between 0.3 and 1.5").
+- Default and/or optional inputs can also be declared.
+- The same parsing and type checking are applied whether inputs are read from
+  command-line options or from task definitions.
+
+Distribution
+------------
+
+- The pipeline includes its own distribution system, whereby recipes (running
+  on the head node) can call node scripts (running on other machines).
+- Node scripts are dispatched using either SSH or mpirun (depending on
+  pipeline.cfg; SSH by default).
+- Pickleable Python objects can be sent to and retrieved from the nodes, so
+  complex configuration data or results can be exchanged.
+- The pipeline can detect a failure on one node, and shut down the rest of the
+  system if required.
+- The recipes for rficonsole, NDPPP, BBS and cimager all use this system.
+  None of the current recipes use other systems (eg "startdistproc").
+- Helper functions make this system very easy for independent tasks (eg
+  running multiple instances of rficonsole, NDPPP). For more involved
+  workflows (eg BBS, where KernelControl and GlobalControl must be run
+  simultaneously), a more elaborate recipe is required.
+
+Parsets and tool configuration
+------------------------------
+
+- Most imaging pipeline tasks are configured by means of a parset.
+- Typically, the pipeline will take a template parset that omits, eg, the
+  names of input and output files, and fill in the blanks for each file to be
+  processed.
+- There are helper routines to do this in the pipeline framework
+  (utilities.patch_parset() et al).
+
+Logging
+-------
+
+- Pervasive use of Python logging module throughout.
+- Every recipe has a logger (self.logger), which can be used directly and
+  passed to (most) pipeline functions.
+- Logger is also made available on compute nodes when running in a
+  distributed way.
+- Basic integration with log4cplus/log4cxx as used by the LofarLogger.
diff --git a/CEP/Pipeline/docs/sphinx/Makefile b/CEP/Pipeline/docs/sphinx/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..39fe377f91cb704bde281923c86e2defdb63f095
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/Makefile
@@ -0,0 +1,75 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html web pickle json htmlhelp latex changes linkcheck
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview over all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+
+clean:
+	-rm -rf build/*
+
+html:
+	mkdir -p build/html build/doctrees
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
+	@echo
+	@echo "Build finished. The HTML pages are in build/html."
+
+pickle:
+	mkdir -p build/pickle build/doctrees
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+web: pickle
+
+json:
+	mkdir -p build/json build/doctrees
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	mkdir -p build/htmlhelp build/doctrees
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in build/htmlhelp."
+
+latex:
+	mkdir -p build/latex build/doctrees
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in build/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	mkdir -p build/changes build/doctrees
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes
+	@echo
+	@echo "The overview file is in build/changes."
+
+linkcheck:
+	mkdir -p build/linkcheck build/doctrees
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in build/linkcheck/output.txt."
diff --git a/CEP/Pipeline/docs/sphinx/source/.static/lofar.ico b/CEP/Pipeline/docs/sphinx/source/.static/lofar.ico
new file mode 100644
index 0000000000000000000000000000000000000000..7e31090573496e9def6a629f019d4c88f3dfcca2
Binary files /dev/null and b/CEP/Pipeline/docs/sphinx/source/.static/lofar.ico differ
diff --git a/CEP/Pipeline/docs/sphinx/source/author/index.rst b/CEP/Pipeline/docs/sphinx/source/author/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..24efb5d2337ecb2cff78cab53b3ee7ae177bd897
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/author/index.rst
@@ -0,0 +1,731 @@
+**********************
+Recipe design tutorial
+**********************
+
+The pipeline framework provides a simple system for dispatching
+compute-intensive jobs to remote hosts. This includes sending and
+receiving complex data as part of the job. This section provides a tutorial
+introduction to writing a recipe which takes advantage of this capability.
+
+Problem specification
+=====================
+
+This tutorial addresses a simple real-world use case. This example was
+suggested by Anastasia Alexov, and addresses a requirement of the LOFAR Pulsar
+Pipeline.
+
+The pulsar pipeline runs across multiple compute nodes, generating a series of
+thumbnail plots on the local storage of each one -- that is, the plots are
+only accessible by a process running on the compute node, and are not exported
+via NFS or similar. The aim is to combine all the thumbnails on a given host
+into a single image, using the ``montage`` command provided by `ImageMagick
+<http://www.imagemagick.org/>`_. It is assumed that the thumbnails reside in
+the same path on each node.
+
+An initial implementation of the code which runs on each node was provided as
+a ``ksh`` script.
+
+.. code-block:: ksh
+   :linenos:
+
+   #!/bin/ksh
+
+   #find all the th.png files and convert them into a list to paste together using "montage".
+   
+   if [ -f combined.th.png ]
+   then
+      echo "WARNING: deleting previous version of results: combined.th.png"
+      rm combined.th.png
+   fi
+   
+   find ./ -name "*.th.png" -print  > /tmp/$$_combine_col1.txt
+   find ./ -name "*.th.png" -print  | sed -e 's/\// /g' -e 's/^.* //g' -e 's/.*_RSP/RSP/g' -e 's/\..*//g'  -e 's/_PSR//g' > /tmp/$$_combine_col2.txt
+   paste /tmp/$$_combine_col1.txt /tmp/$$_combine_col2.txt | awk '{print "-label "$2" "$1" "}' | tr -d '\n' | awk '{print "montage -background none "$0" combined.th.png"}' > combine_png.sh
+   rm /tmp/$$_combine_col1.txt /tmp/$$_combine_col2.txt
+   wc_convert=`wc -l combine_png.sh | awk '{print $1}'`
+   
+   if [[ $wc_convert > 0 ]]
+   then
+      chmod 777 combine_png.sh
+      echo "Executing the following comamnd: "
+      cat combine_png.sh
+      ./combine_png.sh
+      echo ""
+      echo "Results:  combined.th.png"
+      echo ""
+   else
+      echo ""
+      echo "No thumbnail (\*.th.png) files were found to combine."
+      echo ""
+   fi
+   
+   exit 0
+
+Per-node script
+===============
+
+First, we will consider the processing that must be done on each of the remote
+hosts. We start by converting the ``ksh`` script to a native Python version,
+then refining it to best take advantage of the framework capabilities.
+
+It may be worth emphasising that the conversion to Python is optional: an
+alternative approach would be to run code on each node which simply spawned a
+copy of ``ksh`` and executed the script directly. In general, though,
+minimising forking is a wise approach -- and the Python code provides a better
+opportunity to demonstrate the framework capabilities.
+
+First Python implementation
+---------------------------
+
+A simple Python implementation of functionality similar to that provided by
+the ``ksh`` script is shown below.
+
+.. code-block:: python
+   :linenos:
+
+   import glob
+   import subprocess
+   import os
+   
+   def run(file_pattern, input_dir, output_file, clobber):
+       # Returns 0 for success, 1 for failure
+   
+       # Sanity checking
+       if not os.path.exists(input_dir):
+           return 1
+       if os.path.exists(output_file):
+           if clobber:
+               os.unlink(output_file)
+           else:
+               return 1
+   
+       # Build list of input files
+       input_files = glob.glob(os.path.join(input_dir, file_pattern))
+   
+       try:
+           # Run "montage" command
+           subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+       except Exception, e:
+           return 1
+   
+       return 0
+   
+Note the following:
+
+- The Python version has been implemented as a function (``run``).
+
+- Success or failure is indicated by the return value of the function: in true
+  Unix fashion, ``0`` represents success.
+
+- We allow the user to specify whether the output should be overwritten using
+  the ``clobber`` argument.
+
+- The user can also specify the pattern of filenames to be searched for (so
+  this code can be more generic than the simple ``*.th.png`` in the ``ksh``
+  version).
+
+- Arguments also enable the user to specify both the directory to search for
+  thumbnail files, and the directory into which the output file should be
+  written.
+
+- For simplicity, we have not implemented the logic used to add titles to the
+  images (but extending the code to do so would be trivial).
+
+- Standard Python code is used to implement all the required functionality,
+  with no added complexity. In particular, Python's `subprocess
+  <http://docs.python.org/library/subprocess.html>`_ module is used to spawn the
+  ``montage`` command.
+
+Using the :class:`~lofarpipe.support.lofarnode.LOFARnodeTCP` class
+------------------------------------------------------------------
+
+To integrate the Python code developed above into the framework, some minimal
+changes are required. First, we take our ``run()`` function, and make it a
+method of a class derived from
+:class:`lofarpipe.support.lofarnode.LOFARnodeTCP`. Secondly, we add some
+boilerplate such that when the script is run from the command line, it takes
+three arguments, then instantiates the class we have defined and executes its
+:meth:`~lofarpipe.support.lofarnode.LOFARnodeTCP.run_with_stored_arguments`
+method. Note that the script then exits with the value returned by that
+method. The result is shown below.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   import subprocess
+   import glob
+   import os
+   
+   from lofarpipe.support.lofarnode import LOFARnodeTCP
+   
+   class thumbnail_combine(LOFARnodeTCP):
+       def run(self, file_pattern, input_dir, output_file, clobber):
+           # Returns 0 for success, 1 for failure
+   
+           # Sanity checking
+           if not os.path.exists(input_dir):
+               return 1
+           if os.path.exists(output_file):
+               if clobber:
+                   os.unlink(output_file)
+               else:
+                   return 1
+   
+           # Build list of input files
+           input_files = glob.glob(os.path.join(input_dir, file_pattern))
+   
+           try:
+               # Run "montage" command
+               subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+           except Exception, e:
+               return 1
+   
+           return 0
+   
+   if __name__ == "__main__":
+       jobid, jobhost, jobport = sys.argv[1:4]
+       sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
+
+Logging
+-------
+
+Within the :class:`lofarpipe.support.lofarnode.LOFARnode` environment, we
+now have access to some other framework-provided services. Chief among these
+is logging. The script is therefore updated to be more robust against failures
+and to report progress to the logger.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   import subprocess
+   import glob
+   import os
+   
+   from lofarpipe.support.lofarnode import LOFARnodeTCP
+   
+   class thumbnail_combine(LOFARnodeTCP):
+       def run(self, file_pattern, input_dir, output_file, clobber):
+           if not os.path.exists(input_dir):
+               self.logger.error("Input directory (%s) not found" % input_dir)
+               return 1
+   
+           self.logger.info("Processing %s" % input_dir)
+   
+           if os.path.exists(output_file):
+               if clobber:
+                   self.logger.warn(
+                       "Deleting previous version of results: %s" % output_file
+                   )
+                   os.unlink(output_file)
+               else:
+                   self.logger.error(
+                       "Refusing to overwrite existing file %s" % output_file
+                   )
+                   return 1
+   
+           input_files = glob.glob(os.path.join(input_dir, file_pattern))
+   
+           try:
+               # Run "montage" command
+               subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+           except Exception, e:
+               self.logger.error(str(e))
+               return 1
+   
+           if not os.path.exists(output_file):
+               self.logger.error(
+                   "Output file %s not created by montage exectuable" % output_file
+               )
+               return 1
+   
+           return 0
+   
+   if __name__ == "__main__":
+       jobid, jobhost, jobport = sys.argv[1:4]
+       sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
+
+
+Note that ``self.logger`` in the above is an instance of
+:class:`logging.Logger` from the `Python standard library
+<http://docs.python.org/library/logging.html>`_, with all the features that
+implies. Any messages sent to the logger will be automatically integrated with
+the overall pipeline logging system.
+
+Helper functions
+----------------
+
+The pipeline framework provides some (entirely optional!) convenience
+functions which can help the recipe author address common use cases.
+
+The :func:`~lofarpipe.support.utilities.catch_segfaults` function, for example,
+can automatically recover and re-run an external command in the event that it
+results in a segmentation fault. This can be integrated into our existing
+script as follows.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   import glob
+   import os
+   
+   from lofarpipe.support.lofarnode import LOFARnodeTCP
+   from lofarpipe.support.utilities import catch_segfaults
+   
+   class thumbnail_combine(LOFARnodeTCP):
+       def run(self, executable, file_pattern, input_dir, output_file, clobber):
+           if not os.path.exists(input_dir):
+               self.logger.error("Input directory (%s) not found" % input_dir)
+               return 1
+   
+           self.logger.info("Processing %s" % input_dir)
+   
+           if os.path.exists(output_file):
+               if clobber:
+                   self.logger.warn(
+                       "Deleting previous version of results: %s" % output_file
+                   )
+                   os.unlink(output_file)
+               else:
+                   self.logger.error(
+                       "Refusing to overwrite existing file %s" % output_file
+                   )
+                   return 1
+   
+           input_files = glob.glob(os.path.join(input_dir, file_pattern))
+   
+           command_line = [executable] + input_files + [output_file]
+           try:
+               catch_segfaults(command_line, None, None, self.logger)
+           except Exception, e:
+               self.logger.error(str(e))
+               return 1
+   
+           if not os.path.exists(output_file):
+               self.logger.error(
+                   "Output file %s not created by montage exectuable" % output_file
+               )
+               return 1
+   
+           return 0
+   
+   if __name__ == "__main__":
+       jobid, jobhost, jobport = sys.argv[1:4]
+       sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
+
+Note that we have also added the ``executable`` argument to define which
+external command should actually be run. There is no reason to avoid making
+the code as generic and reusable as possible!
+
+At this point, our node script is complete (at least in this simple form). To
+be useful, though, it needs to be executed across many different nodes as part
+of a pipeline. This is where the *recipe* needs to be defined.
+
+Defining the recipe
+===================
+
+As described in the :ref:`overview <framework-overview>`, a recipe is the
+basic building block of pipelines: they describe how to perform an individual
+unit of pipeline processing. In this case, our recipe will specify the inputs
+for the node script we have written above, dispatch the jobs to a number
+of compute nodes, and finally collect the results.
+
+A basic recipe
+--------------
+
+All pipeline recipes ultimately derive from
+:class:`lofarpipe.support.baserecipe.BaseRecipe`. A trivial example is shown
+below.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   from lofarpipe.support.baserecipe import BaseRecipe
+   
+   class thumbnail_combine(BaseRecipe):
+       def go(self):
+           self.logger.info("Starting thumbnail_combine run")
+           super(thumbnail_combine, self).go()
+           self.logger.info("This recipe does nothing")
+   
+   
+   if __name__ == '__main__':
+       sys.exit(thumbnail_combine().main())
+
+This recipe does nothing except print a couple of lines to the log. However,
+note the following key features:
+
+- The control code for the recipe is all implemented within the ``go()``
+  method of a class derived from
+  :class:`lofarpipe.support.baserecipe.BaseRecipe`.
+
+- Within that environment, we have access to a logger, which works in exactly
+  the same way as it does on the node. (Enthusiasts may wish to note that this
+  is actually an instance of
+  :class:`lofarpipe.support.pipelinelogging.SearchingLogger`, but the practical
+  difference is minimal).
+
+- It is important to call the ``go()`` method of the superclass (as shown at
+  line 7) to ensure all the necessary initialisation is performed.
+
+- If called from the command line, we instantiate the object, call its
+  ``main()`` method, and exit with its return value.
+
+Dispatching remote jobs
+-----------------------
+
+One of the most fundamental aspects of the framework is its ability to
+dispatch jobs to remote hosts, and this is absolutely necessary for the
+problem under discussion. We can add this to the recipe as follows.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   
+   from lofarpipe.support.baserecipe import BaseRecipe
+   from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+   from lofarpipe.support.remotecommand import ComputeJob
+   
+   class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
+       def go(self):
+           self.logger.info("Starting thumbnail_combine run")
+           super(thumbnail_combine, self).go()
+   
+           # Hosts on which to execute
+           hosts = ['lce019']
+   
+           # Path to node script
+           command = "python %s" % (self.__file__.replace('master', 'nodes'))
+   
+           # Build a list of jobs
+           jobs = []
+           for host in hosts:
+               jobs.append(
+                   ComputeJob(
+                       host, command,
+                       arguments=[
+                           "/usr/bin/montage",     # executable
+                           "\*.th.png",            # file_pattern
+                           "/path/to/png/files",   # input_dir
+                           "/path/to/output.png",  # output_file
+                           True                    # clobber
+                       ]
+                   )
+               )
+   
+           # And run them
+           self._schedule_jobs(jobs)
+   
+           # The error flag is set if a job failed
+           if self.error.isSet():
+               self.logger.warn("Failed compute job process detected")
+               return 1
+           else:
+               return 0
+   
+   if __name__ == '__main__':
+       sys.exit(thumbnail_combine().main())
+
+This raises a number of relevant points to note.
+
+- The distribution system is activated for a given recipe by "mixing in" the
+  :class:`~lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn` class to
+  its definition.
+
+- In this case, we execute on only one remote host (``lce019``, as defined at
+  line 13). However, as many as necessary could be defined.
+
+- Each remote processing job is defined as an instance of
+  :class:`~lofarpipe.support.remotecommand.ComputeJob`. It takes three
+  arguments: the name of the host on which to execute, the name of the command
+  to be run, and any arguments which should be passed to that command. These
+  are provided in lines 23 to 30.
+
+- The command to run can be any Python script. By convention, node scripts are
+  named such that the name can be derived from the recipe name as shown at line
+  16, but this is entirely up to the author.
+
+- The arguments provided to
+  :class:`~lofarpipe.support.remotecommand.ComputeJob` correspond exactly to
+  those defined in the node script, above.
+
+- After all the jobs have been defined, they are passed (as a list) to
+  :meth:`~lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn._schedule_jobs`.
+  This blocks until all jobs have finished.
+
+- If a job fails, the ``error`` attribute (an instance of
+  :class:`threading.Event` from `Python's standard library
+  <http://docs.python.org/library/threading.html>`_) is set. The recipe should
+  check for this and act appropriately.
+
+Ingredients
+-----------
+
+The recipe shown in the previous section contains many hard-coded elements:
+all the arguments to the compute job, the host on which to run, and so on.
+This is obviously inflexible and undesirable. We can overcome this using the
+*ingredients* system provided by the framework. An example is shown below.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   
+   import lofarpipe.support.lofaringredient as ingredient
+   from lofarpipe.support.baserecipe import BaseRecipe
+   from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+   from lofarpipe.support.remotecommand import ComputeJob
+   
+   class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
+       inputs = {
+           'executable': ingredient.ExecField(
+               '--executable',
+               default="/usr/bin/montage",
+               help="montage executable"
+           ),
+           'file_pattern': ingredient.StringField(
+               '--file-pattern',
+               default="\*.th.png",
+               help="File search pattern (glob)",
+           ),
+           'input_dir': ingredient.StringField(
+               '--input-dir',
+               help="Directory containing input files"
+           ),
+           'output_file': ingredient.StringField(
+               '--output-file',
+               help="Output filename"
+           ),
+           'clobber': ingredient.BoolField(
+               '--clobber',
+               default=False,
+               help="Clobber pre-existing output files"
+           ),
+           'target_hosts': ingredient.ListField(
+               '--target-hosts',
+               help="Remote hosts on which to execute"
+           )
+       }
+   
+       def go(self):
+           self.logger.info("Starting thumbnail_combine run")
+           super(thumbnail_combine, self).go()
+   
+           hosts = self.inputs['target_hosts']
+           command = "python %s" % (self.__file__.replace('master', 'nodes'))
+           jobs = []
+           for host in hosts:
+               jobs.append(
+                   ComputeJob(
+                       host, command,
+                       arguments=[
+                           self.inputs['executable'],
+                           self.inputs['file_pattern'],
+                           self.inputs['input_dir'],
+                           self.inputs['output_file'],
+                           self.inputs['clobber']
+                       ]
+                   )
+               )
+           self._schedule_jobs(jobs)
+   
+           if self.error.isSet():
+               self.logger.warn("Failed compute job process detected")
+               return 1
+           else:
+               return 0
+   
+   if __name__ == '__main__':
+       sys.exit(thumbnail_combine().main())
+
+Using this system, the recipe author defines a list of inputs to the recipe.
+Each input is an instance of a class descended from
+:class:`lofarpipe.support.lofaringredients.Field`: the various sub-types of
+field enable the user to perform sanity-checking of inputs. For example, in
+the above, we can check that the executable provided really is an executable
+by making the relevant field an instance of
+:class:`~lofarpipe.support.lofaringredients.ExecField`, and that the
+``clobber`` value is really a bool by making its field
+:class:`~lofarpipe.support.lofaringredients.BoolField`. The
+:ref:`developer's guide <lofarpipe-ingredients>` provides a lot more
+information about the types of field available.
+
+Each of the ingredients is associated with a name in the ``inputs`` dict.
+Within the recipe, the values of the inputs are available as
+``self.inputs[FIELDNAME]``, as seen (for example) at line 43.
+
+The various inputs can take their values from a number of sources. For
+example, as we will see, inputs can be read from the command line, provided in
+a configuration file, or take the default value specified in their definition.
+Whatever the source, though, they are always made available to the recipe in a
+consistent way: a :class:`~lofarpipe.support.lofaringredients.BoolField`
+*always* contains a bool, and so on.
+
+User-defined ingredients
+------------------------
+
+The ingredients system is designed to take care of as much error & sanity
+checking for the developer as is possible. It is therefore extensible: as well
+as checking for basic types as shown above, we can construct specialist fields
+to (for example) check that a given input falls within a particular range.
+
+In this case, we know that ``target_hosts`` should be a list of hostnames of
+machines to which jobs may be dispatched. Above, we used
+:class:`~lofarpipe.support.lofaringredient.ListField` to simply check that it
+is a list. However, with a little imagination, we can define a list that is
+guaranteed to contain only resolvable hostnames. For example:
+
+.. code-block:: python
+   :linenos:
+
+   import lofarpipe.support.lofaringredient as ingredient
+
+   class HostNameList(ingredient.ListField):
+       @classmethod
+       def is_valid(cls, value):
+           import socket
+           # Valid only if every entry in the list resolves to an address.
+           for hostname in value:
+               try:
+                   socket.gethostbyname(hostname)
+               except socket.error:
+                   return False
+           return True
+  
+This checks that every element within the list is resolvable (using Python's
+standard :func:`socket.gethostbyname` function). We could incorporate it into
+the above recipe by simply changing line 33 to:
+
+.. code-block:: python
+
+   'target_hosts': HostNameList(
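+
+The complete field definition, reusing the option string and help text from
+the original recipe, would then read:
+
+.. code-block:: python
+
+   'target_hosts': HostNameList(
+       '--target-hosts',
+       help="Remote hosts on which to execute"
+   )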
+
+Configuration file access
+-------------------------
+
+In the above, we have expected the user to supply a list of hosts to run jobs
+on directly. However, in general the cluster layout is already known: this
+can, therefore, be determined automatically.
+
+As part of the :ref:`pipeline configuration <config-file>`, the user is able
+to specify a ``clusterdesc`` parameter. This contains the full path to a file
+which describes the cluster layout (see :ref:`the note on distproc
+<distproc-blurb>` for details). The recipe can access the pipeline
+configuration and extract the information from this file directly. We can
+simply drop the ``target_hosts`` input from our recipe, and replace line 43
+with:
+
+.. code-block:: python
+
+   from lofarpipe.support.clusterdesc import ClusterDesc, get_compute_nodes
+   hosts = get_compute_nodes(
+       ClusterDesc(
+           self.config.get('cluster', "clusterdesc")
+       )
+   )
+
+There are a number of points to note here.
+
+The pipeline configuration file is available as the ``self.config``
+attribute in the recipe. This is an instance of
+:class:`ConfigParser.SafeConfigParser` from the `standard library
+<http://docs.python.org/library/configparser.html>`_, and can be accessed
+exactly as described in the Python documentation. Here, we simply extract the
+value of ``clusterdesc`` from the ``cluster`` section.
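+
+For example, a slightly more defensive version of that lookup (a sketch using
+only the standard :class:`ConfigParser.SafeConfigParser` interface) might be:
+
+.. code-block:: python
+
+   # Check that the option exists before reading it, rather than relying on
+   # the configuration parser raising an exception.
+   if self.config.has_option('cluster', 'clusterdesc'):
+       clusterdesc_file = self.config.get('cluster', 'clusterdesc')
+   else:
+       self.logger.error("No clusterdesc specified in the configuration file")
+       return 1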
+
+The framework provides some convenience routines for working with clusterdesc
+files. Here, we use :class:`lofarpipe.support.clusterdesc.ClusterDesc` and
+:func:`~lofarpipe.support.clusterdesc.get_compute_nodes` to extract a list of
+all the compute nodes defined in the cluster, and then proceed to use the list
+of hosts in the recipe exactly as before.
+
+Additional notes
+================
+
+Some important aspects of recipe design were not covered in the above
+discussion.
+
+Assigning jobs to specific hosts
+--------------------------------
+
+The example we have considered above is, in one important respect, simpler
+than many pipeline recipes: it runs exactly the same code on each of the
+remote hosts. A more general situation is processing a large number of
+similar, but not identical, datasets (such as independent subbands of an
+observation). Due to limited storage capacities on the remote hosts, it is
+usually the case that each host only stores a subset of the total number of
+datasets locally. Therefore, when dispatching jobs to the host, the recipe
+author must be careful only to send jobs which refer to data it can reasonably
+process.
+
+From the recipe point of view, this procedure is straightforward. The recipe
+we developed earlier contains code like:
+
+.. code-block:: python
+
+   jobs = []
+   for host in hosts:
+       jobs.append(
+           ComputeJob(
+               host, command,
+               arguments=[
+                   ...
+               ]
+           )
+       )
+
+When specifying a job which must run on a specific host, the pipeline author
+can use a mapping of the form:
+
+.. code-block:: python
+
+   job_list = [
+       ("hostname1", [arguments for job 1]),
+       ("hostname2", [arguments for job 2]),
+       ...
+   ]
+
+And our earlier code can then simply be modified to:
+
+.. code-block:: python
+
+   jobs = []
+   for host, arguments in job_list:
+       jobs.append(
+           ComputeJob(
+               host, command, arguments=arguments
+           )
+       )
+
+In general, the recipe author must define the mapping between hostnames and
+job arguments themselves: this will depend on the details of the problem the
+recipe is addressing. Often, it is convenient to use one recipe to generate
+the mapping, then save it to disk for use by several recipes in the pipeline.
+This is the approach taken in LOFAR's standard imaging pipeline. Here, the
+:ref:`recipe-datamapper` recipe determines which filenames are accessible from
+which hosts, and stores them to disk in a :ref:`parset file <parset-handling>`
+formatted as follows:
+
+.. code-block:: none
+
+   hostname1 = [ /path/to/filename1, /path/to/filename2 ]
+   hostname2 = [ /path/to/filename3, /path/to/filename4 ]
+   ...
+
+The :func:`lofarpipe.support.group_data.load_data_map` function makes it easy
+to read back this parset from disk and iterate over the values to dispatch
+compute jobs: see the imaging pipeline's :ref:`dppp-recipe` recipe for an
+example.
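+
+A minimal sketch of that pattern (assuming the loaded map yields ``(host,
+path)`` pairs, that the location of the map file is supplied via a
+hypothetical ``mapfile`` input, and reusing the ``command`` defined earlier):
+
+.. code-block:: python
+
+   from lofarpipe.support.group_data import load_data_map
+
+   # One compute job per (host, dataset) pair: each host is only asked to
+   # process data it stores locally.
+   jobs = []
+   for host, path in load_data_map(self.inputs['mapfile']):
+       jobs.append(ComputeJob(host, command, arguments=[path]))
+   self._schedule_jobs(jobs)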
+
+.. todo::
+
+   Recipe outputs
+
+.. todo::
+
+   Combining recipes into a pipeline
+
+.. todo::
+
+   Testing this recipe by running it
diff --git a/CEP/Pipeline/docs/sphinx/source/conf.py b/CEP/Pipeline/docs/sphinx/source/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5738750ca6df3db8dd981b5cb5f9f7747151079
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/conf.py
@@ -0,0 +1,243 @@
+# -*- coding: utf-8 -*-
+#
+# LOFAR Standard Imaging Pipeline documentation build configuration file, created by
+# sphinx-quickstart on Wed Jun 10 17:09:31 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# The contents of this file are pickled, so don't put values in the namespace
+# that aren't pickleable (module imports are okay, they're removed automatically).
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If your extensions are in another directory, add it here. If the directory
+# is relative to the documentation root, use os.path.abspath to make it
+# absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# General configuration
+# ---------------------
+
+def add_recipe_inputs(app, what_, name, obj, options, lines):
+    """
+    If obj is a recipe with ingredients, add information on its inputs &
+    outputs to its docstring.
+
+    Designed to be called on the ``autodoc-process-docstring`` event.
+    """
+    from lofarpipe.support.lofaringredient import RecipeIngredients
+    def format_ingredient_dict(ingredients):
+        for name, field in sorted(ingredients.iteritems()):
+            if hasattr(field, "default"):
+                extra = "; default: ``%s``" % field.default
+            elif hasattr(field, "optional"):
+                extra = "; optional"
+            else:
+                extra = ""
+            lines.append("``%s`` (:class:`%s`%s)" % (name, type(field).__name__, extra))
+            if field.help:
+                lines.append("    %s" % field.help)
+            lines.append("")
+    if what_ == "class" and issubclass(obj, RecipeIngredients):
+        lines.append("**Recipe inputs**")
+        lines.append("")
+        if obj.inputs:
+            format_ingredient_dict(obj.inputs)
+        else:
+            lines.append("None defined -- defaults apply (see :class:`~lofarpipe.support.lofaringredient.RecipeIngredients`).")
+            lines.append("")
+        lines.append("**Recipe outputs**")
+        lines.append("")
+        if obj.outputs:
+            format_ingredient_dict(obj.outputs)
+        else:
+            lines.append("None.")
+            lines.append("")
+
+todo_include_todos = True
+
+def setup(app):
+    app.connect('autodoc-process-docstring', add_recipe_inputs)
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.graphviz',
+    'sphinx.ext.inheritance_diagram',
+    'sphinx.ext.todo',
+    'sphinx.ext.ifconfig'
+]
+
+inheritance_graph_attrs = dict(
+    size='"0.0"', # Don't scale drawing down, as it screws up fonts
+    ratio="compress",
+    fontsize=14,
+    nodesep='"0.1"',
+    ranksep='"0.1"',
+    rankdir='"TB"'
+)
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['.templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'LOFAR Pipeline System'
+copyright = u'2009—11, John Swinbank'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0'
+# The full version, including alpha/beta/rc tags.
+release = '0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+
+# Options for HTML output
+# -----------------------
+
+# The style sheet to use for HTML and HTML Help pages. A file of that name
+# must exist either in Sphinx' static/ path, or in one of the custom paths
+# given in html_static_path.
+html_style = 'default.css'
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+html_logo = "logo.jpg"
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = "lofar.ico"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['.static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, the reST sources are included in the HTML build as _sources/<name>.
+#html_copy_source = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'LOFARStandardImagingPipelinedoc'
+
+
+# Options for LaTeX output
+# ------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, document class [howto/manual]).
+latex_documents = [
+  ('index', 'LOFARStandardImagingPipeline.tex', ur'LOFAR Standard Imaging Pipeline Documentation',
+   ur'John Swinbank', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/index.rst b/CEP/Pipeline/docs/sphinx/source/developer/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e47cfdaa4f93176e88ae599ccb5e99ee41b79706
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/index.rst
@@ -0,0 +1,88 @@
+.. _developer-guide:
+
+*********************
+Developer information
+*********************
+
+This section describes the internal structure of the pipeline framework. It is
+intended for developers who plan to work on the framework code itself.
+
+.. _code-structure:
+
+Structure of the code
+=====================
+
+The pipeline code can be obtained from `USG Subversion
+<http://usg.lofar.org/svn/code/trunk/src/pipeline/>`_. There are five
+top-level directories:
+
+``deploy``
+    IPython system deployment scripts. See the section on
+    :ref:`parallelisation with IPython <parallelisation-ip>` for more details,
+    but note that the use of IPython within the pipeline is *deprecated*.
+
+``docs``
+    Documentation and examples. See the section on :ref:`available
+    documentation <documentation>` for details.
+
+``mac``
+    MAC/SAS EventPort interface code. See the :ref:`mac-interface` section for
+    details.
+
+``framework``
+    The framework code itself. This is implemented as the Python module
+    :mod:`lofarpipe`; see its documentation for details. A ``distutils`` based
+    setup script, ``setup.py``, is included for easy installation: see the
+    section on :ref:`installing the framework <framework-installation>`.
+
+``recipes``
+    A collection of :ref:`recipes <recipe-docs>`, intended to both demonstrate
+    the operation of the framework and serve as useful pipeline components.
+    Recipes intended for different pipelines may be stored in separate
+    directories: for example, the ``sip`` directory contains recipes for
+    useful to the :ref:`standard imaging pipeline <sip>`.
+
+External components
+===================
+
+.. _ipython-deprecated:
+
+IPython
+-------
+
+The IPython system was extensively used by earlier versions of this framework,
+but is now *deprecated*. Both recipe and framework developers are urged to
+avoid using it wherever possible. However, until all existing recipes
+(including those not distributed with the framework) have been converted to
+use another system, the IPython support in the framework should be maintained.
+That includes:
+
+* :class:`lofarpipe.support.clusterhandler.ClusterHandler`
+* :func:`lofarpipe.support.clusterhandler.ipython_cluster`
+* :class:`lofarpipe.support.ipython.LOFARTask`
+* :class:`lofarpipe.support.ipython.IPythonRecipeMixIn`
+* :func:`lofarpipe.support.utilities.build_available_list`
+* :func:`lofarpipe.support.utilities.clear_available_list`
+* :func:`lofarpipe.support.utilities.check_for_path`
+
+Conversely, once all IPython-based recipes in active use have been replaced,
+the IPython support code should be removed from the framework.
+
+.. _documentation:
+
+Available documentation
+=======================
+
+.. todo::
+
+   Describe the available documentation in the docs directory: what the
+   examples are, how to build the Sphinx documentation.
+
+.. _mac-interface:
+
+MAC/SAS interface
+=================
+
+.. todo::
+
+   Describe current status of MAC/SAS interface.
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe.rst
new file mode 100644
index 0000000000000000000000000000000000000000..792d65774977ef00656a37d6af7ed523b019a150
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe.rst
@@ -0,0 +1,19 @@
+.. module:: lofarpipe
+   :synopsis: The framework package.
+
+****************************
+The :mod:`lofarpipe` package
+****************************
+
+The :mod:`lofarpipe` package contains all the Python modules included with the
+core framework code. This includes the :mod:`lofarpipe.cuisine` package
+(inherited from the :ref:`cuisine <notes-on-cuisine>` framework for WSRT), the
+:mod:`lofarpipe.support` package (containing all the pipeline framework code),
+and the :mod:`lofarpipe.tests` package (which contains a limited selection of
+tests).
+
+.. toctree::
+
+   lofarpipe/cuisine.rst
+   lofarpipe/support.rst
+   lofarpipe/tests.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/cuisine.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/cuisine.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7b68d04690163fa7944dd568a965b51758430b32
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/cuisine.rst
@@ -0,0 +1,62 @@
+.. module:: lofarpipe.cuisine
+   :synopsis: The Cuisine system.
+
+************************************
+The :mod:`lofarpipe.cuisine` package
+************************************
+
+The LOFAR pipeline system developed partly from the `WSRT Cuisine
+<http://www.astron.nl/~renting/pipeline_frame.html>`_, developed by Adriaan
+Renting for use at the Westerbork Synthesis Radio Telescope. Many of the basic
+concepts (the recipe, with associated inputs and outputs, for example)
+originated in the Cuisine system, and the user is encouraged to refer to its
+documentation where necessary.
+
+A slightly modified version of the "original" Cuisine is distributed as part
+of the :mod:`lofarpipe` package. The modifications include:
+
+* Use of `new style
+  <http://www.python.org/download/releases/2.2.3/descrintro/>`_ Python classes
+  throughout.
+
+* Reworked option handling code to use the `optparse
+  <http://docs.python.org/library/optparse.html>`_ module from the Python
+  standard library.
+
+* Reworked logging system using the `logging
+  <http://docs.python.org/library/logging.html>`_ module from the Python
+  standard library. This provides a flexible way of configuring logging formats
+  and destinations, including logging to files or TCP sockets. See the section on
+  the pipeline :ref:`logging system <lofarpipe-logging>`.
+
+* Assorted bug-fixes and tweaks.
+
+It is hoped that these changes will eventually be merged upstream.
+
+Very little of the original Cuisine code is currently used in the LOFAR
+framework, although the :class:`lofarpipe.cuisine.WSRTrecipe.WSRTrecipe` is
+still used as the basis for all LOFAR recipes. The recipe author, however, is
+*never* expected to directly interact with Cuisine code: all LOFAR pipeline
+recipes inherit from :class:`lofarpipe.support.baserecipe.BaseRecipe`, which
+entirely wraps all relevant Cuisine functionality. The
+:mod:`lofarpipe.support` inheritance diagrams show exactly how these packages
+are related. The following API documentation covers only those routines
+directly used by the rest of the pipeline system, not Cuisine as a whole.
+
+.. module:: lofarpipe.cuisine.WSRTrecipe
+   :synopsis: Base module for all Cuisine recipe functionality.
+
+:mod:`lofarpipe.cuisine.WSRTrecipe`
+-----------------------------------
+
+.. autoclass:: lofarpipe.cuisine.WSRTrecipe.WSRTrecipe
+   :members: help, main_init, main, run, go, main_result, cook_recipe
+
+.. module:: lofarpipe.cuisine.cook
+   :synopsis: Cuisine cooks.
+
+:mod:`lofarpipe.cuisine.cook`
+-----------------------------
+
+.. autoclass:: lofarpipe.cuisine.cook.PipelineCook
+   :members: copy_inputs, copy_outputs, spawn, try_running
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6c0082592638d524db0cd2f82fbac05a3e55e6fb
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support.rst
@@ -0,0 +1,19 @@
+.. module:: lofarpipe.support
+   :synopsis: Core framework code.
+
+************************************
+The :mod:`lofarpipe.support` package
+************************************
+
+This package contains effectively all the core framework code that comprises
+the LOFAR pipeline system. Broadly speaking, it addresses five distinct problem
+areas:
+
+.. toctree::
+   :maxdepth: 1
+
+   The construction of recipes <support/recipes.rst>
+   Checking of recipe inputs and outputs ("ingredients") <support/ingredients.rst>
+   Logging <support/logging.rst>
+   Distribution of pipeline jobs <support/distribution.rst>
+   Utility and convenience functions <support/utility.rst>
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/distribution.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/distribution.rst
new file mode 100644
index 0000000000000000000000000000000000000000..751021e00aa855b7d7590e8d89453d30eb3b9150
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/distribution.rst
@@ -0,0 +1,70 @@
+.. _lofarpipe-remotecommand:
+
+************
+Distribution
+************
+
+Before proceeding with this section, the reader should ensure they are
+familiar with the :ref:`recipe author's perspective` on how tasks can be
+distributed within the pipeline framework. This section will describe in
+detail what goes on behind the scenes and how it can be extended.
+
+.. todo::
+
+   Details!
+
+Node scripts
+------------
+
+.. autoclass:: lofarpipe.support.lofarnode.LOFARnode
+   :members:
+
+.. autoclass:: lofarpipe.support.lofarnode.LOFARnodeTCP
+   :members:
+
+Compute jobs
+------------
+
+.. autoclass:: lofarpipe.support.remotecommand.ComputeJob
+   :members:
+
+Scheduling and rate limiting
+----------------------------
+
+See :class:`~lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn`.
+
+.. autoclass:: lofarpipe.support.remotecommand.ProcessLimiter
+
+Dispatch mechanisms
+-------------------
+
+.. todo::
+
+   Extending this system.
+
+The dispatch mechanism can be specified in the ``remote`` section of the
+:ref:`pipeline configuration <config-file>`.
+
+.. autofunction:: lofarpipe.support.remotecommand.run_remote_command
+
+.. autofunction:: lofarpipe.support.remotecommand.run_via_mpirun
+
+.. autofunction:: lofarpipe.support.remotecommand.run_via_paramiko
+
+.. autoclass:: lofarpipe.support.remotecommand.ParamikoWrapper
+
+Exchanging data between master and nodes
+----------------------------------------
+
+.. autofunction:: lofarpipe.support.jobserver.job_server
+
+.. autoclass:: lofarpipe.support.jobserver.JobSocketReceiver
+   :show-inheritance:
+
+.. autoclass:: lofarpipe.support.jobserver.JobStreamHandler
+   :show-inheritance:
+
+Clean shut-down and recovery
+----------------------------
+
+.. autofunction:: lofarpipe.support.remotecommand.threadwatcher
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/ingredients.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/ingredients.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3b70596eeb8eb5b76400e2698857e254e04a6c37
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/ingredients.rst
@@ -0,0 +1,53 @@
+.. _lofarpipe-ingredients:
+
+***********
+Ingredients
+***********
+
+One of the most fragile parts of pipeline definition is ensuring that the
+inputs to a recipe are correct. Broadly, there are two failure modes here
+which should be avoided. The first is perhaps the more obvious: it is always
+desirable to check that the inputs actually make sense before attempting to
+use them to operate on some piece of data -- particularly when that data may
+be scientifically valuable. By checking inputs when the recipe is started,
+awkward errors during the recipe run may be avoided.
+
+The second failure mode concerns the source of the inputs. As we have seen,
+recipe inputs may be provided on the command line, read from a configuration
+file, calculated by another recipe as part of the pipeline run, etc. It is
+important that these inputs are presented to the recipe code in a consistent
+way: if, for example, a ``float`` is required, a ``float`` should be provided,
+not a string read from the command line.
+
+All LOFAR recipes define a series of ``inputs``, as described in the
+:ref:`recipe design section <recipe-docs>`. These inputs are ultimately
+derived from the :class:`~lofarpipe.support.lofaringredient.Field` class,
+which provides validation and type-conversion (where appropriate) of the
+inputs. A number of pre-defined fields are available, covering many basic use
+cases (see :ref:`below <pre-defined-fields>`); the recipe author is also
+encouraged to define their own as necessary.
+
+All recipes ultimately derive from
+:class:`~lofarpipe.support.lofaringredient.RecipeIngredients` (see :ref:`the
+last section <lofarpipe-recipes>`). This provides a number of standard fields,
+which are present in all recipes, as well as ensuring that additional,
+per-recipe fields are handled appropriately, including type-checking on recipe
+instantiation. Within the recipe environment, the contents of fields are
+available as ``self.inputs``, an instance of
+:class:`~lofarpipe.support.lofaringredient.LOFARingredient`.
+
+.. _pre-defined-fields:
+
+Pre-defined fields
+==================
+.. automodule:: lofarpipe.support.lofaringredient
+   :members: Field, StringField, IntField, FloatField, FileField, ExecField, DirectoryField, BoolField, ListField, DictField, FileList
+
+Infrastructure
+==============
+
+.. autoclass:: lofarpipe.support.lofaringredient.RecipeIngredients
+
+.. autoclass:: lofarpipe.support.lofaringredient.RecipeIngredientsMeta
+
+.. autoclass:: lofarpipe.support.lofaringredient.LOFARingredient
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/logging.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/logging.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bbda265f54bdf04427a25b6482241f435c05590f
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/logging.rst
@@ -0,0 +1,112 @@
+.. _lofarpipe-logging:
+
+*******
+Logging
+*******
+
+One of the major changes made by the LOFAR pipeline system to the
+:mod:`~lofarpipe.cuisine` system was the introduction of logging using the
+standard `Python logging module
+<http://docs.python.org/library/logging.html>`_. All instances of recipes
+derived from :class:`~lofarpipe.cuisine.WSRTrecipe.WSRTrecipe` (in other
+words, every recipe developed using the framework) have an associated logger
+(in fact, an
+instance of :class:`~lofarpipe.support.pipelinelogging.SearchingLogger`
+available as the attribute ``self.logger``), which supports the standard
+logging methods: see the Python documentation for details. The logging system
+is also available in much the same way on remote hosts using the pipeline's
+:ref:`distribution system <lofarpipe-remotecommand>`.
+
+Note that by default only messages of level :const:`logging.WARNING` and
+higher are logged. Use of the ``-v`` or ``--verbose`` flag on the command line
+will log messages at level :const:`logging.INFO`; ``-d`` or ``--debug`` at
+level :const:`logging.DEBUG`.
+
+Logs are output to standard output and also to a file. By default, the file is
+located in the ``job_directory``, but this, together with the format used for
+logging, may be configured through the :ref:`configuration file <config-file>`
+if required.
+
+The :mod:`lofarpipe.support` module provides a number of helper functions for
+working with logs, which are documented here.
+
+Searching logs
+==============
+
+Sometimes, it is convenient to be able to keep track of messages sent to a
+logger. For example, pipeline tools may send metadata to the log rather than
+output it in any other, more useful, fashion.
+
+As mentioned above, all recipes have an associated instance of
+:class:`~lofarpipe.support.pipelinelogging.SearchingLogger`. This can have any
+number of regular expression patterns defined, and it will then store for
+later reference any log entries which match those patterns. For example, a
+recipe could include the code:
+
+.. code-block:: python
+
+   self.logger.searchpatterns["new_pattern"] = "A log entry"
+
+This would record all log entries matching "A log entry". Later, a list of all
+those entries can be retrieved:
+
+.. code-block:: python
+
+   matches = self.logger.searchpatterns["new_pattern"].results
+   self.logger.searchpatterns.clear()
+
+Note that messages generated by all subsidiary loggers -- including those on
+remote hosts -- are included.  The call to
+:meth:`~lofarpipe.support.pipelinelogging.SearchPatterns.clear` simply
+instructs the logger to stop searching for that pattern, to avoid incurring
+overhead in future.
+
+.. autoclass:: lofarpipe.support.pipelinelogging.SearchingLogger
+   :show-inheritance:
+
+.. autoclass:: lofarpipe.support.pipelinelogging.SearchPattern
+
+.. autoclass:: lofarpipe.support.pipelinelogging.SearchPatterns
+   :show-inheritance:
+
+.. autofunction:: lofarpipe.support.pipelinelogging.getSearchingLogger
+
+Logging process output
+======================
+
+Many pipeline recipes run external executables. These tools may produce useful
+logging output, either by writing to ``stdout`` (or ``stderr``), or by using a
+library such as `log4cplus <http://log4cplus.sourceforge.net/>`_ or `log4cxx
+<http://logging.apache.org/log4cxx/>`_. The framework makes it possible to
+ingest that output and re-route it through the standard pipeline logging
+system.
+
+Standard output/error
+---------------------
+
+.. autofunction:: lofarpipe.support.pipelinelogging.log_process_output
+
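+For illustration, a sketch of how a recipe might use this (the argument order
+shown here is an assumption; check the generated documentation above):
+
+.. code-block:: python
+
+   from subprocess import Popen, PIPE
+   from lofarpipe.support.pipelinelogging import log_process_output
+
+   # Run an external tool, then feed whatever it printed into the
+   # pipeline logger.
+   process = Popen(["/usr/bin/montage", "-version"], stdout=PIPE, stderr=PIPE)
+   sout, serr = process.communicate()
+   log_process_output("montage", sout, serr, self.logger)
+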
+Logging libraries
+-----------------
+
+The output from ``log4cplus`` or ``log4cxx`` is currently intercepted by
+simply redirecting it to a file and then logging the contents of that file as
+it is updated via the
+:func:`~lofarpipe.support.pipelinelogging.log_file` function.
+
+.. autoclass:: lofarpipe.support.pipelinelogging.LogCatcher
+
+.. autoclass:: lofarpipe.support.pipelinelogging.CatchLog4CPlus
+   :show-inheritance:
+
+.. autoclass:: lofarpipe.support.pipelinelogging.CatchLog4CXX
+   :show-inheritance:
+
+.. autofunction:: lofarpipe.support.pipelinelogging.log_file
+
+Logging resource usage
+======================
+
+This is a decorator which makes it easy to log the amount of time (wall and
+CPU) used by parts of a pipeline.
+
+.. autofunction:: lofarpipe.support.pipelinelogging.log_time
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/recipes.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/recipes.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2db9a29a17c32bb5e804e1eed82c43600d2f85a9
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/recipes.rst
@@ -0,0 +1,49 @@
+.. _lofarpipe-recipes:
+
+*******
+Recipes
+*******
+
+All LOFAR pipeline recipes are ultimately derived from
+:class:`lofarpipe.cuisine.WSRTrecipe.WSRTrecipe`. However, all the
+functionality it provides is encapsulated and enhanced by the
+:class:`~lofarpipe.support.baserecipe.BaseRecipe` class, and it is from this
+that all pipeline recipes should be derived. This class also includes the
+:ref:`"ingredients" system <lofarpipe-ingredients>`, which controls recipe
+inputs and outputs.
+
+A number of "mix-in" classes may be added to
+:class:`~lofarpipe.support.baserecipe.BaseRecipe` to provide additional
+functionality, such as the ability to dispatch jobs to remote hosts
+(:class:`~lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn`). Recipe
+authors may mix-in whatever functionality is required to achieve their aims.
+
+The :class:`~lofarpipe.support.control.control` class provides a recipe with a
+little extra functionality to help it act as an overall pipeline. This can
+include interfacing with an external control system, for example, or keeping
+track of the pipeline progress
+(:class:`~lofarpipe.support.stateful.StatefulRecipe`).
+
+The relationship between all these classes is illustrated below.
+
+.. inheritance-diagram:: lofarpipe.support.control.control lofarpipe.support.lofarrecipe.LOFARrecipe
+   :parts: 3
+
+.. autoclass:: lofarpipe.support.baserecipe.BaseRecipe
+   :members:
+
+.. autoclass:: lofarpipe.support.stateful.StatefulRecipe
+
+.. autoclass:: lofarpipe.support.control.control
+   :members: pipeline_logic
+
+.. autoclass:: lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn
+   :members: _schedule_jobs
+
+   See the :ref:`distribution <lofarpipe-remotecommand>` section for details.
+
+.. autoclass:: lofarpipe.support.ipython.IPythonRecipeMixIn
+
+   The use of IPython within the pipeline framework is :ref:`deprecated
+   <ipython-deprecated>`.
+
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/utility.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/utility.rst
new file mode 100644
index 0000000000000000000000000000000000000000..145582f759f4d458744a23ac7c4956acf85787cc
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/utility.rst
@@ -0,0 +1,98 @@
+.. _lofarpipe-utility:
+
+*********
+Utilities
+*********
+
+The framework provides a number of convenience and utility functions. These
+are not fundamental to the operation of the framework itself, but rather
+provide functionality which is commonly needed in pipeline recipes.
+
+.. _parset-handling:
+
+Parset handling
+---------------
+
+Parsets ("parameter sets") are files containing key-value pairs commonly used
+for the configuration of LOFAR tools. Many pipeline recipes will, at heart,
+run a standard tool over a number of datasets in parallel by substituting
+per-dataset values into a template parset. These routines are designed to
+simplify that process.
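+
+A hedged sketch of that pattern, assuming
+:func:`~lofarpipe.support.parset.patched_parset` acts as a context manager
+yielding the path of the patched copy (the parameter values and the
+``run_tool`` helper are purely illustrative):
+
+.. code-block:: python
+
+   from lofarpipe.support.parset import patched_parset
+
+   # Substitute per-dataset values into a template parset, run the tool on
+   # the temporary patched copy, and let the framework clean it up.
+   with patched_parset(
+       "ndppp.parset", {"msin": "input.MS", "msout": "output.MS"}
+   ) as parset_file:
+       run_tool(parset_file)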
+
+.. autoclass:: lofarpipe.support.parset.Parset
+   :show-inheritance:
+
+.. autofunction:: lofarpipe.support.parset.get_parset
+
+.. autofunction:: lofarpipe.support.parset.patch_parset
+
+.. autofunction:: lofarpipe.support.parset.patched_parset
+
+Cluster descriptions (clusterdesc) handling
+-------------------------------------------
+
+Clusterdesc files (see :ref:`distproc-blurb`) describe the layout of the
+compute cluster. They can be used within the pipeline to help choose nodes to
+which jobs may be dispatched.
+
+.. autoclass:: lofarpipe.support.clusterdesc.ClusterDesc
+
+.. autofunction:: lofarpipe.support.clusterdesc.get_compute_nodes
+
+.. autofunction:: lofarpipe.support.clusterdesc.get_head_node
+
+Grouping input data
+-------------------
+
+Often, a long list of input datasets is grouped according to some criteria
+for processing. These routines provide some useful ways of batching-up data.
+
+.. autofunction:: lofarpipe.support.group_data.group_files
+
+.. autofunction:: lofarpipe.support.group_data.gvds_iterator
+
+.. autofunction:: lofarpipe.support.group_data.load_data_map
+
+Process control
+---------------
+
+Many pipeline recipes spawn an external executable and wait for it to
+complete. These routines can assist the recipe author by simplifying this
+process and automatically recovering from transient errors.
+
+.. autofunction:: lofarpipe.support.utilities.spawn_process
+
+.. autofunction:: lofarpipe.support.utilities.catch_segfaults
+
+File and directory manipulation
+-------------------------------
+
+.. autofunction:: lofarpipe.support.utilities.get_mountpoint
+
+.. autofunction:: lofarpipe.support.utilities.create_directory
+
+Iterators and generators
+------------------------
+
+.. autofunction:: lofarpipe.support.utilities.is_iterable
+
+.. autofunction:: lofarpipe.support.utilities.izip_longest
+
+.. autofunction:: lofarpipe.support.utilities.group_iterable
+
+Miscellaneous
+-------------
+
+.. autofunction:: lofarpipe.support.utilities.read_initscript
+
+.. autofunction:: lofarpipe.support.utilities.string_to_list
+
+Exceptions
+----------
+
+The following exceptions may be raised by pipeline components.
+
+.. automodule:: lofarpipe.support.lofarexceptions
+   :members:
+   :undoc-members:
+
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests.rst
new file mode 100644
index 0000000000000000000000000000000000000000..34f49b4a99a8ad5397d8573d78f63946250eb710
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests.rst
@@ -0,0 +1,17 @@
+.. module:: lofarpipe.tests
+   :synopsis: Pipeline framework tests.
+
+**********************************
+The :mod:`lofarpipe.tests` package
+**********************************
+
+Pipeline tests are implemented using Python's `unittest
+<http://docs.python.org/library/unittest.html>`_ module. Test coverage
+throughout the framework is currently sadly lacking. This must be addressed in
+future development. Such tests as are available are installed as modules in
+the :mod:`lofarpipe.tests` package.
+
+.. toctree::
+   :maxdepth: 1
+
+   tests/ingredients.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests/ingredients.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests/ingredients.rst
new file mode 100644
index 0000000000000000000000000000000000000000..cf713cf0ee635a479f12e566ff55876c0075e149
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests/ingredients.rst
@@ -0,0 +1,6 @@
+*************************************************
+The :mod:`lofarpipe.tests.lofaringredient` module
+*************************************************
+
+.. automodule:: lofarpipe.tests.lofaringredient
+   :members:
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/todo.rst b/CEP/Pipeline/docs/sphinx/source/developer/todo.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ac0663ad56877572ac46b4ba36f1858a25c28201
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/todo.rst
@@ -0,0 +1,61 @@
+********************************
+Potential framework enhancements
+********************************
+
+There are a number of areas in which the current version of the pipeline
+framework could be enhanced. In no particular order, these include:
+
+- An ingredients system for :class:`~lofarpipe.support.lofarnode.LOFARnode`,
+  so that inputs are checked as they are supplied to nodes.
+
+  - A consistent interface with
+    :class:`~lofarpipe.support.baserecipe.BaseRecipe`, ie relying on an
+    ``inputs`` dictionary, should also be supplied.
+
+  - Unfortunately, this can't be used to validate inputs before starting the
+    recipe, for obvious reasons.
+
+- Ingredients probably shouldn't be provided with default values. At least,
+  not default values for things like paths to executables which may sometimes
+  disappear.
+
+- Error handling throughout should be cleaned up; more exceptions should be
+  caught and better feedback provided to the user.
+
+- Configuration of the :ref:`logging system <lofarpipe-logging>` should be
+  made more flexible.
+
+- Rather than checking exit status, the framework should use a more-Pythonic
+  error handling system based on exceptions.
+
+- For consistency with the rest of the LOFAR system, :ref:`parsets
+  <parset-handling>` should be used for all configuration information.
+
+  - That, in turn, means that the parset format should be standardised.
+
+- A global job queue per node should be implemented, so that multiple
+  simultaneous recipes can submit jobs without overloading nodes.
+
+- :meth:`lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn._schedule_jobs`
+  should be non-blocking.
+
+- :meth:`lofarpipe.support.baserecipe.BaseRecipe.go` and
+  :meth:`lofarpipe.support.lofarnode.LOFARnodeTCP.run` should be made
+  consistent, in terms of both name and semantics.
+
+- The logging system should interface more directly with ``log4cplus``, rather
+  than just reading log information from a file.
+
+- More detailed feedback and quality-check information should be sent to the
+  rest of the LOFAR system.
+
+  - This is an issue which requires changes throughout the LOFAR system; it
+    can't be addressed by the framework alone.
+
+- The ``recipe_directories`` option should be removed from the
+  :ref:`configuration file <config-file>`, and the regular Python import mechanism, with
+  appropriate namespacing, should be used to locate recipes.
+
+- The dependency on :mod:`lofar.parameterset` should be removed, so that the
+  framework is a stand-alone codebase and can more easily be ported to other
+  systems.
diff --git a/CEP/Pipeline/docs/sphinx/source/index.rst b/CEP/Pipeline/docs/sphinx/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..77d0addb50a0de4bb3fcf489ba5fdba3a3e119e5
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/index.rst
@@ -0,0 +1,92 @@
+###################################
+LOFAR Pipeline System Documentation
+###################################
+
+.. toctree::
+   :hidden:
+
+   todo
+
+This document provides an overview of the LOFAR pipeline system. This system
+has largely been developed to support the LOFAR imaging pipeline, but is now
+being deployed for a variety of science pipelines on the LOFAR cluster. This
+document is split into a number of sections: :ref:`the first
+<section-overview>` describes the aims of the framework, the structure of a
+pipeline, and gives an overview of how the system fits together. :ref:`The
+second <section-user-guide>` provides details on how to run pre-defined
+pipelines. :ref:`The third <section-author-guide>` provides a tutorial
+introduction to writing pipelines and pipeline components. :ref:`The fourth
+<section-developer-reference>` describes the framework codebase in more
+detail, and is intended for pipeline authors who wish to dig a little deeper,
+as well as those interested in developing the framework itself. :ref:`The
+final section <section-pipeline-specific>` provides a guide to the imaging
+pipeline itself.
+
+.. ifconfig:: todo_include_todos
+
+   This documentation is still a work in progress. See the :ref:`to-do list
+   <todo>` for upcoming improvements.
+
+.. _section-overview:
+
+The pipeline system was developed by John Swinbank (University of Amsterdam)
+in 2009 & 2010. Since 2011, the primary maintainer is Marcel Loose (ASTRON).
+
+Overview & Getting Started
+==========================
+
+.. toctree::
+   :maxdepth: 2
+
+   overview/overview/index.rst
+   overview/dependencies/index.rst
+
+.. _section-user-guide:
+
+User's Guide
+============
+
+.. toctree::
+   :maxdepth: 2
+
+   user/installation/index.rst
+   user/usage/index.rst
+
+.. _section-author-guide:
+
+Recipe & Pipeline Author's Guide
+================================
+
+.. toctree::
+   :maxdepth: 2
+
+   author/index.rst
+
+.. _section-developer-reference:
+
+Developer's Reference
+=====================
+
+.. toctree::
+   :maxdepth: 2
+
+   developer/index.rst
+   developer/lofarpipe.rst
+   developer/todo.rst
+
+.. _section-pipeline-specific:
+
+Pipeline Specific Documentation
+===============================
+
+.. toctree::
+   :maxdepth: 2
+
+   pipelines/sip/index.rst
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/CEP/Pipeline/docs/sphinx/source/logo.jpg b/CEP/Pipeline/docs/sphinx/source/logo.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..27a5db5145597965afbbd1ed759d9a9cdcb48d28
Binary files /dev/null and b/CEP/Pipeline/docs/sphinx/source/logo.jpg differ
diff --git a/CEP/Pipeline/docs/sphinx/source/overview/dependencies/index.rst b/CEP/Pipeline/docs/sphinx/source/overview/dependencies/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..14336fc7bb77e733f1daced9c491209809943f38
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/overview/dependencies/index.rst
@@ -0,0 +1,72 @@
+.. _framework-dependencies:
+
+*******************
+External components
+*******************
+
+The LOFAR pipeline system is built using the `Python
+<http://www.python.org/>`_ programming language. Certain features build upon
+the following libraries and tools. The short descriptions given here should
+serve as background material for those who simply wish to use the framework:
+directly interacting with these components should rarely be necessary.
+Developers, of course, will wish to learn in detail about all of these
+libraries.
+
+.. _ipython-blurb:
+
+IPython
+=======
+`IPython <http://ipython.scipy.org>`_, billed as "an enhanced interactive
+Python", also provides a comprehensive and easy-to-use suite of tools for
+parallel processing across a cluster of compute nodes using Python. This
+capability may be used for writing recipes in the pipeline system.
+
+The parallel computing capabilities are only available in recent (post-0.9)
+releases of IPython. The reader may wish to refer to the `IPython
+documentation <http://ipython.scipy.org/doc/>`_ for more information, or, for
+a summary of the capabilities of the system, to the `Notes on IPython
+<http://www.lofar.org/operations/lib/exe/fetch.php?media=software:tkp_notes_on_ipython.pdf>`_
+document on the `LOFAR wiki <http://www.lofar.org/operations/>`_.
+
+A slight enhancement to the standard 0.9 IPython release is included with the
+pipeline system. We subclass :class:`IPython.kernel.task.StringTask` to create
+:class:`lofarpipe.support.ipython.LOFARTask`. This adds the ``dependargs`` named
+argument to the standard :class:`~IPython.kernel.task.StringTask`, which, in
+turn, is fed to the task's :meth:`depend` method. This makes the dependency
+system significantly more useful. See, for example, the :ref:`dppp-recipe`
+recipe for an example of its use.
+
+
+.. _distproc-blurb:
+
+distproc
+========
+An alternative method of starting a distributed process across the cluster is
+to use the ``distproc`` system by Ger van Diepen. This system is used
+internally by various pipeline components, such as the MWImager; the interested
+reader is referred to the `MWImager Manual
+<http://www.lofar.org/operations/lib/exe/fetch.php?media=engineering:software:tools:mwimager_manual_v1.pdf>`_
+for an overview of the operation of this system.
+
+Infrastructure for supporting the ``distproc`` system is well embedded within
+various pipeline components, so the new framework has been designed to make
+use of that where possible. In particular, the reader's attention is drawn to
+two file types:
+
+``clusterdesc``
+    A clusterdesc file describes the cluster configuration. It defines a
+    control node, various processing nodes, and describes what disks and other
+    resources they have access to.
+
+``VDS``
+    A VDS file describes the contents of a particular dataset and where it may
+    be found on the cluster. For the standard imaging pipeline, data is
+    distributed across different nodes by subband; each subband is described
+    by a single VDS file (generated by the ``makevds`` command). The VDS files
+    describing the subbands of a given observation may be combined (using
+    ``combinevds``) to generate a description of all the available data
+    (often known as a GDS file).
+
+The information contained in these files is used by both task distribution
+systems to schedule jobs on the appropriate compute nodes.
+
diff --git a/CEP/Pipeline/docs/sphinx/source/overview/overview/index.rst b/CEP/Pipeline/docs/sphinx/source/overview/overview/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..07397f8f5275412403950b5ce654058d84583990
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/overview/overview/index.rst
@@ -0,0 +1,91 @@
+.. _framework-overview:
+
+**********************
+Overview of a pipeline
+**********************
+
+Before plunging into the nitty-gritty of all the various components that make
+up a pipeline, let's first take a bird's-eye overview of the concepts
+involved.
+
+The figure shows a schematic pipeline layout, illustrating most of the relevant
+concepts. We will consider each of these in turn below.
+
+.. image:: ../pipeline-flowchart.png
+
+Recipes
+=======
+
+A *recipe* is a unit of the pipeline. It consists of a task with a given set
+of inputs and outputs. A given recipe may contain calls to subsidiary recipes,
+for which it will provide the inputs and use the outputs in its own
+processing. Note that the whole pipeline control structure is itself a recipe:
+it takes a given set of inputs (visibility data) and produces some outputs
+(images and associated metadata) by running through a series of defined steps.
+In fact, each of those steps is itself a recipe -- one for flagging, one for
+calibration, and so on.
+
+Although some recipes are provided with the pipeline framework, it is
+anticipated that users will wish to define their own. A search path for
+recipes can be specified, enabling each user to maintain their own private (or
+shared) repositories of recipes.
+
+Tasks and Configuration
+=======================
+
+A recipe describes the steps that need to be taken to perform some particular
+action on the data. For instance, a recipe might describe how to set up and
+run an imager process. Often, the recipe will take a series of parameters
+describing how it should be run -- what time steps to image, whether to use
+the W-projection algorithm, the shape of the restoring beam, and so on. These
+are provided as a series of input arguments to the recipe. Some sets of
+arguments will be used repeatedly: a set of default configurations for
+different modes, say. These can be bundled together as a *task*: a recipe
+together with a set of defined parameters, and saved in a configuration file
+for easy access.
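+
+For example, a task definition for a hypothetical DPPP run might look
+something like this (a sketch; the Standard Imaging Pipeline quickstart shows
+a fuller version):
+
+.. code-block:: none
+
+   [ndppp]
+   recipe = dppp
+   parset = %(runtime_directory)s/jobs/%(job_name)s/parsets/ndppp.parset
+   dry_run = False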
+
+As with recipes, it is anticipated that users will build up their own
+libraries of pre-defined tasks for whatever applications they find necessary.
+
+Control recipes and pipeline definition
+=======================================
+
+The *control* recipe is a special type of a normal recipe. The fundamentals
+are the same; however, it contains some additional "housekeeping" logic which
+may be useful for starting a pipeline. For instance, the control recipe can
+configure a logging system for the pipeline, and may be used to interface with
+LOFAR's MAC/SAS control system.
+
+Often, a control recipe is referred to as a "pipeline definition".
+
+.. _cluster-layout:
+
+Cluster layout
+==============
+
+The control recipe runs on the so-called "head node". The head node acts as
+the coordination point for all pipeline activity. As and when required, the
+head can dispatch compute-intensive jobs to other nodes on a cluster. Various
+mechanisms are provided for queueing and dispatching jobs and collecting their
+results.
+
+Complex cluster layouts may be described by a "clusterdesc" file, as used by
+:ref:`distproc <distproc-blurb>`. The framework understands these files natively, whether or
+not the distproc system itself is in use.
+
+.. _pipeline-jobs:
+
+Pipeline jobs
+=============
+
+Once a pipeline has been described as above, it will often be used multiple
+times -- for example, to process multiple independent datasets. Each pipeline
+run is associated with a "job identifier" which can be used to keep track of
+these independent pipeline runs. Their results, logs, configuration and so on
+can therefore be conveniently kept separate. The job identifier is a free-form
+string: the user can choose whatever descriptive name is convenient.
+
+The same job can be run multiple times: this might be convenient when a
+previous attempt failed part way through due to a hardware fault, for example.
+In this case, data is filed by job identifier combined with the pipeline start
+time.
diff --git a/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.odg b/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.odg
new file mode 100644
index 0000000000000000000000000000000000000000..5b969be7b05a71680e03a4177d7954b31738b35d
Binary files /dev/null and b/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.odg differ
diff --git a/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.png b/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.png
new file mode 100644
index 0000000000000000000000000000000000000000..289d75eb0f0471cd321eec281f5011c0c1f3639e
Binary files /dev/null and b/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.png differ
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/index.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d7457ebced15b5ddd078a6a7231ead7474cec3aa
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/index.rst
@@ -0,0 +1,24 @@
+.. _sip:
+
+*****************************
+The Standard Imaging Pipeline
+*****************************
+
+The Standard Imaging Pipeline (or SIP) will accept raw data from the LOFAR
+correlator, pre-process it, calibrate it, image it, and update the sky model
+with the sources detected in the data. This section describes the components
+of the SIP and how they fit together.
+
+It should be emphasised that, at least during development stages, there is not
+a single, ideal imaging pipeline. Instead, the user is encouraged to compose a
+pipeline which meets their needs by selecting their preferred combination of
+individual recipes.
+
+Note that none of the SIP recipes make use of IPython, so configuration and
+usage should be relatively simple.
+
+.. toctree::
+   :maxdepth: 2
+
+   quickstart/index.rst
+   recipes/index.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/quickstart/index.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/quickstart/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..443b99d4165551c714a4219e46b66ddce8b091e5
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/quickstart/index.rst
@@ -0,0 +1,210 @@
+.. _sip-quickstart:
+
+==============
+CEP Quickstart
+==============
+
+This brief guide should take you from scratch to an operational imaging
+pipeline running on LOFAR CEP. It is still under development, and corrections
+(preferably in the form of patches) are very welcome.
+
+This guide assumes you already have a working installation of the pipeline
+framework; please see the :ref:`framework documentation
+<framework-quickstart>` for help with that.
+
+If you will be using the LOFAR offline cluster, it's worth taking a moment to
+ensure you have an SSH keychain that will enable you to log into all the
+cluster nodes you need (lfe001, lfe002, lce0XX, lse0XX) without needing to
+type a password: see the `LOFAR wiki
+<http://www.lofar.org/operations/doku.php?id=public:ssh-usage>`_ for help.
+
+Note that examples of many of the steps described here are available as part
+of the example data provided with the pipeline framework (`available from
+Subversion <http://usg.lofar.org/svn/code/trunk/src/pipeline/docs/examples/>`_ if
+required).
+
+For the sake of example, we will consider the ``L2009_16007`` dataset,
+available on LOFAR subcluster 3. 
+
+Set up your environment
+-----------------------
+Before starting, you should ensure that all the ``LofIm`` packages are
+available in your environment.  The typical way to add this package to one's
+startup environment is to type the following on the command line:
+
+.. code-block:: bash
+
+    $ use LofIm
+     
+or add this to your ``.bashrc`` or ``.cshrc`` file so that it is automatically
+added at login.  Note that there are some issues with paths when the daily
+build fails.  To ensure that there are no problems accessing the LOFAR imaging
+software, you may want to skip the ``use LofIm`` step and instead add the
+``/opt/LofIm/daily/lofar`` paths explicitly to your environment. You will also
+need to add the appropriate environment to the cluster setup in your
+configuration file: see the :ref:`framework quickstart <pipeline-config>` for
+details.
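+
+For example, the manual workaround might look something like the following
+(the ``bin`` directory is used elsewhere in this guide; the ``lib`` path is an
+assumption -- check what is actually present under ``/opt/LofIm/daily/lofar``
+on your node):
+
+.. code-block:: bash
+
+    $ export PATH=/opt/LofIm/daily/lofar/bin:$PATH
+    $ export LD_LIBRARY_PATH=/opt/LofIm/daily/lofar/lib:$LD_LIBRARY_PATH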
+
+Make a job directory
+--------------------
+
+This is dedicated to the output of a specific pipeline "job" -- that is, a run
+with a given set of input data, pipeline configuration and so on. Note that
+each job is given a specific, user-defined name, and the job directory should
+be named after that. The name is entirely up to the user, but I generally use
+the name of the input dataset with an appended number to differentiate
+configurations. Hence, something like:
+
+.. code-block:: bash
+
+    $ mkdir -p ~/pipeline_runtime/jobs/L2009_16007_1
+
+Prepare parsets describing how to run the pipeline components
+-------------------------------------------------------------
+
+Any individual pipeline component you plan to run that needs a parset -- such
+as DPPP, BBS or the MWImager -- will need one provided by the pipeline
+framework. Drop them into ``~/pipeline_runtime/jobs/L2009_16007_1/parsets``.
+
+Note that some parameters will be added to the parset by the framework as part
+of the run. At present, these are:
+
+* NDPPP: ``msin``, ``msout``
+* MWImager: ``dataset``
+
+Prepare your task definitions
+-----------------------------
+
+Refer to the :ref:`framework overview <framework-overview>` and :ref:`recipe
+documentation <task-definition>` for more on tasks; recall that they are
+basically the combination of a recipe and a set of parameters. We will use the
+:ref:`vdsreader-recipe` and :ref:`dppp-recipe` recipes, and define the
+following tasks:
+
+.. code-block:: none
+
+   [vdsreader]
+   recipe = vdsreader
+   gvds = %(runtime_directory)s/jobs/%(job_name)s/vds/%(job_name)s.gvds
+
+   [ndppp]
+   recipe = dppp
+   executable = %(lofarroot)s/bin/NDPPP
+   initscript = %(lofarroot)s/lofarinit.sh
+   working_directory = %(default_working_directory)s
+   parset = %(runtime_directory)s/jobs/%(job_name)s/parsets/ndppp.parset
+   dry_run = False
+
+
+Prepare a pipeline definition
+-----------------------------
+
+The pipeline definition specifies how data should flow through the pipeline.
+It is a Python script, so you can use whatever logic you like to determine the
+flow. For now, I suggest you keep it simple!
+
+The :meth:`pipeline.master.control.run_task()` method is a shortcut to run the
+specific recipe configurations specified in the configuration file; it takes a
+configuration stanza and a list of datafiles as its input, and returns a list
+of processed datafiles. More complex configurations are also possible, but
+you'll have to define these by hand (ie, specifying the inputs and outputs of
+the underlying recipe manually).
+
+A very simple definition might be:
+
+.. code-block:: python
+
+    class sip(control):
+       def pipeline_logic(self):
+           with log_time(self.logger):
+               datafiles = self.run_task("vdsreader")['data']
+               datafiles = self.run_task("ndppp", datafiles)['data']
+
+Here, the ``vdsreader`` task reads a list of filenames to be processed from a
+VDS file, and then hands them to ``ndppp``. Note that the ``log_time``
+context simply writes an entry to the log recording how long it all took.
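+
+To be runnable as a stand-alone script, the definition also needs the relevant
+imports and an entry point. A minimal, illustrative sketch is shown below; it
+assumes ``log_time`` is provided by ``lofarpipe.support.pipelinelogging``, so
+adjust that import to match your installation.
+
+.. code-block:: python
+
+    import sys
+
+    from lofarpipe.support.control import control
+    from lofarpipe.support.pipelinelogging import log_time
+
+    class sip(control):
+        def pipeline_logic(self):
+            with log_time(self.logger):
+                # vdsreader returns the filenames listed in the GVDS file
+                datafiles = self.run_task("vdsreader")['data']
+                # hand those files on to DPPP for flagging and compression
+                datafiles = self.run_task("ndppp", datafiles)['data']
+
+    if __name__ == '__main__':
+        sys.exit(sip().main())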
+
+Prepare a VDS file describing your data
+---------------------------------------
+
+A VDS file describes the location of all the datasets/measurement sets.
+Preparing a VDS file is not strictly necessary: you can use the vdsreader task
+to obtain a list of filenames to process (as above, via run_task("vdsreader")
+in sip.py), or you can specify them by hand -- simply writing a list in a text
+file, parsing it, and feeding it to the DPPP task is fine. You need to specify
+the full path to each measurement set, but you don't need to worry about the
+specific hosts it is accessible on. Note that, with the current cross-mount
+arrangement of the cluster compute and storage nodes, you need to be on the
+**lce0XX** nodes in order to see the paths to the MS files.  A list that looks
+like
+
+.. code-block:: python
+
+    ['/net/sub3/lse007/data2/L2009_16007/SB1.MS', '/net/sub3/lse007/data2/L2009_16007/SB2.MS', ...]
+
+is fine.  This method allows you to test the pipeline with a smaller set of
+files than the full dataset.  In order to **run on a list of files instead of
+running vdsreader**, the list would go into the sip.py file as follows
+(otherwise, obtain datafiles from run_task("vdsreader") as above):
+
+.. code-block:: python
+
+    class sip(control):
+       def pipeline_logic(self):
+           with log_time(self.logger):
+               datafiles = ['/net/sub3/lse007/data2/L2009_16007/SB1.MS', '/net/sub3/lse007/data2/L2009_16007/SB2.MS']
+               datafiles = self.run_task("ndppp", datafiles)
+
+
+Anyway, assuming you want to go the VDS route, something like the following
+will do the trick.
+
+For bash (on any imaging lce0XX node machine):
+
+.. code-block:: bash
+
+    $ ssh lce019
+    $ mkdir /tmp/16007
+    $ mkdir ~/pipeline_runtime/jobs/L2009_16007_1/vds/
+    $ for storage in `seq 7 9`; do for file in /net/sub3/lse00$storage/data2/L2009_16007/*MS; do /opt/LofIm/daily/lofar/bin/makevds ~/Work/pipeline_runtime/sub3.clusterdesc $file /tmp/16007/`basename $file`.vds; done; done
+    $ /opt/LofIm/daily/lofar/bin/combinevds ~/pipeline_runtime/jobs/L2009_16007_1/vds/L2009_16007_1.gvds /tmp/16007/*vds
+
+For tcsh (on any imaging lce0XX node machine):
+
+.. code-block:: tcsh
+
+    $ ssh lce019
+    $ mkdir /tmp/16007
+    $ echo "for storage in "\`"seq 7 9"\`"; do for file in /net/sub3/lse00"\$"storage/data2/L2009_16007/\*MS; do /opt/LofIm/daily/lofar/bin/makevds ~/Work/pipeline_runtime/sub3.clusterdesc "\$"file /tmp/16007/"\`"basename "\$"file"\`".vds; done; done" > run.sh
+    $ chmod 755 run.sh
+    $ ./run.sh
+    $ mkdir ~/pipeline_runtime/jobs/L2009_16007_1/vds/
+    $ /opt/LofIm/daily/lofar/bin/combinevds ~/pipeline_runtime/jobs/L2009_16007_1/vds/L2009_16007_1.gvds /tmp/16007/*vds
+
+
+Check to be sure that your global VDS file was created
+(``~/pipeline_runtime/jobs/L2009_16007_1/vds/L2009_16007_1.gvds``), then clean
+up the temporary location.
+
+Run the pipeline
+----------------
+
+The pipeline can take a long time to process all subbands, especially if you
+are running multiple passes of DPPP.  Since your login session with the head
+node is likely to be cut off by an auto-logout, it is recommended that you use a
+`screen <http://www.gnu.org/software/screen/manual/screen.html>`_ session when
+running the pipeline, so that you can re-attach to the session if you log
+out before the pipeline is finished.
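+
+For example (a minimal sketch; the session name is arbitrary):
+
+.. code-block:: bash
+
+    $ screen -S pipeline    # start a named screen session
+    $ # ... run the pipeline as shown below ...
+    $ # detach with Ctrl-a d; later, re-attach with:
+    $ screen -r pipeline
+
+Once inside the screen session, start the pipeline: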
+ 
+.. code-block:: bash
+
+    $ cd ~/pipeline_runtime/jobs/L2009_16007_1/
+    $ python sip.py -j L2009_16007_1 -d
+
+The ``-d`` flag specifies debugging mode (ie, more logging). The ``-j``
+argument just specifies the job we're running.  Intermediate pipeline files
+are placed in your ``default_working_directory`` (in ``pipeline.cfg``);  results
+are placed in the ``~/pipeline_runtime/jobs/L2009_16007_1/results`` directory;
+logs are placed in the ``~/pipeline_runtime/jobs/L2009_16007_1/logs``
+directory. ``DPPP`` leaves all the results in the ``default_working_directory``;
+if you do not run any additional pipeline tasks after ``DPPP``, there will be
+no results directory created.  The pipeline log will indicate whether the
+pipeline completed successfully.
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/bbs.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/bbs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aeaa1e985252ea3b5312638cf189b73ddcbdc55d
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/bbs.rst
@@ -0,0 +1,8 @@
+.. _recipe-bbs:
+
+===
+BBS
+===
+
+.. autoclass:: bbs.bbs
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/cimager.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/cimager.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6cb2cc99b5c419d967142d68ec9783b987185515
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/cimager.rst
@@ -0,0 +1,11 @@
+.. _recipe-cimager:
+
+=======
+cimager
+=======
+
+.. autoclass:: cimager.cimager
+   :show-inheritance:
+
+.. autoclass:: cimager.ParsetTypeField
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/datamapper.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/datamapper.rst
new file mode 100644
index 0000000000000000000000000000000000000000..986665153806b995533ee6f1fbd45c2aebd92ec6
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/datamapper.rst
@@ -0,0 +1,8 @@
+.. _recipe-datamapper:
+
+==========
+datamapper
+==========
+
+.. autoclass:: datamapper.datamapper
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/dppp.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/dppp.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2fc26c6a45d0af68d17f50e6f179c876de82b4c5
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/dppp.rst
@@ -0,0 +1,8 @@
+.. _dppp-recipe:
+
+====
+DPPP
+====
+
+.. autoclass:: new_dppp.new_dppp
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/index.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..55a89e09812837b4aa9af1cb7e1d1c3aba914b3e
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/index.rst
@@ -0,0 +1,23 @@
+=================================
+Standard Imaging Pipeline recipes
+=================================
+
+Here we outline the various components which make up LOFAR's Standard Imaging
+Pipeline and how they can be combined to form a coherent whole. These
+components are made available as pipeline recipes; the reader is encouraged to
+familiarise themselves with the :ref:`recipe-docs` section.
+
+.. toctree::
+    :maxdepth: 1
+
+    sip
+    datamapper
+    storagemapper
+    dppp
+    rficonsole
+    bbs
+    sourcedb
+    parmdb
+    cimager
+    vdsmaker
+    vdsreader
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/parmdb.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/parmdb.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4b7ecd066ac09f475603fc3f82c646060f20fabe
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/parmdb.rst
@@ -0,0 +1,8 @@
+.. _recipe-parmdb:
+
+======
+parmdb
+======
+
+.. autoclass:: parmdb.parmdb
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/rficonsole.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/rficonsole.rst
new file mode 100644
index 0000000000000000000000000000000000000000..6197b4a97467f6a347fe9ca5251baca02231b7b0
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/rficonsole.rst
@@ -0,0 +1,8 @@
+.. _recipe-rficonsole:
+
+==========
+rficonsole
+==========
+
+.. autoclass:: rficonsole.rficonsole
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sip.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sip.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d0c4f1ac5365d1b3c12d48ba3de30806cec7acf9
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sip.rst
@@ -0,0 +1,42 @@
+==============
+The SIP recipe
+==============
+
+.. todo::
+
+   Check if this section is up to date.
+
+There is no single SIP recipe: an imaging pipeline should be composed of
+components as required. However, various examples are available to help.
+
+``sip.py``
+----------
+
+.. todo::
+
+   Provide simpler example SIP.
+
+This recipe demonstrates the basic functionality of an imaging pipeline. In
+turn, it runs ``DPPP`` (data compression and flagging), ``BBS`` (calibration),
+``MWimager`` (imaging) and a custom-developed source finding step. The logs of
+all these steps are collected and stored centrally, and images (in ``CASA``
+format) are made available.
+
+This should form a model for how a pipeline can be constructed. However, it
+does not contain logic for routines such as the "major cycle" (whereby
+``BBS``, ``MWimager`` and the source finder will iterate to find an optimum
+calibration). Such logic should be straightforward to add based on this
+framework.
+
+.. literalinclude:: ../../../../../examples/definition/sip2/sip.py
+
+``tasks.cfg``
+-------------
+
+.. todo::
+
+   Check the task file for completeness/correctness.
+
+This task file defines the tasks referred to in the above example.
+
+.. literalinclude:: ../../../../../examples/definition/sip2/tasks.cfg
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sourcedb.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sourcedb.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3561c7290a12cb6d685031b21464ee5c5284124b
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sourcedb.rst
@@ -0,0 +1,8 @@
+.. _recipe-sourcedb:
+
+========
+sourcedb
+========
+
+.. autoclass:: sourcedb.sourcedb
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/storagemapper.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/storagemapper.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c70bbed346175bbe063b248c1d0b91195427ec08
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/storagemapper.rst
@@ -0,0 +1,8 @@
+.. _recipe-storagemapper:
+
+=============
+storagemapper
+=============
+
+.. autoclass:: storagemapper.storagemapper
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsmaker.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsmaker.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ce13707016bd27ba504c78929adb087e0882c8c2
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsmaker.rst
@@ -0,0 +1,6 @@
+========
+vdsmaker
+========
+
+.. autoclass:: new_vdsmaker.new_vdsmaker
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsreader.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsreader.rst
new file mode 100644
index 0000000000000000000000000000000000000000..eb4e200b982d38543021dd0c3b7a970af3d279e9
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsreader.rst
@@ -0,0 +1,8 @@
+.. _vdsreader-recipe:
+
+=========
+vdsreader
+=========
+
+.. autoclass:: vdsreader.vdsreader
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/todo.rst b/CEP/Pipeline/docs/sphinx/source/todo.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a84e685358820c856d134ca78c95a67ab440cdbc
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/todo.rst
@@ -0,0 +1,7 @@
+.. _todo:
+
+************************
+Documentation to-do list
+************************
+
+.. todolist::
diff --git a/CEP/Pipeline/docs/sphinx/source/user/installation/index.rst b/CEP/Pipeline/docs/sphinx/source/user/installation/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b8e04bd2adb89e646c0e1fc423cc3f08b7c110f9
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/installation/index.rst
@@ -0,0 +1,15 @@
+.. _infrastructure-setup:
+
+**********************
+Installation and setup
+**********************
+
+This section provides information on how to set up the pipeline system. A
+quick-start guide is provided for users of LOFAR CEP, where the pipeline
+system is already installed. Others will need to install by hand.
+
+.. toctree::
+   :maxdepth: 1
+
+   quickstart.rst
+   installation.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/user/installation/installation.rst b/CEP/Pipeline/docs/sphinx/source/user/installation/installation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dead4204104129970f4d96360c43dee8e8c369bd
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/installation/installation.rst
@@ -0,0 +1,69 @@
+.. _framework-installation:
+
+**********************
+Framework installation
+**********************
+
+**Note**: These instructions were developed and tested using Ubuntu 10.04
+Server. They assume the user is using the ``bash`` shell.  Adjustment for
+other systems should be straightforward.
+
+Before starting, you will need to install the ``lofar.parameterset`` Python
+module. This must be available on your ``$PYTHONPATH``. Check by ensuring that
+you can replicate the following command:
+
+.. code-block:: bash
+
+  $ python -c 'import lofar.parameterset ; print "ok"'
+  ok
+
+The latest version of the framework is available by Subversion from the `USG
+repository <http://usg.lofar.org/>`_. Obtain a snapshot as follows:
+
+.. code-block:: bash
+
+  $ svn export http://usg.lofar.org/svn/code/trunk/src/pipeline/ ~/pipeline_framework
+
+This will create a ``pipeline_framework`` directory within your current working
+directory. That directory contains a number of subdirectories. Note first
+``docs``: this contains the source for the pipeline documentation. If you have
+`Sphinx <http://sphinx.pocoo.org/>`_ installed, you can run ``make`` in that
+directory to generate the documentation tree.
+
+The framework itself is a Python package named ``lofarpipe``. It is found in
+the ``framework`` directory, and may be installed using the ``setup.py`` script
+included. The output directory can be specified using the ``--prefix``
+option; in the example below, we install to ``/opt/pipeline/framework``.
+
+.. code-block:: bash
+
+  $ cd ~/pipeline_framework/framework
+  $ sudo python setup.py install --prefix=/opt/pipeline/framework
+  running install
+  running build
+  running build_py
+  [... many lines elided ...] 
+  running install_egg_info
+  Writing /opt/pipeline/framework/lib/python2.6/site-packages/Pipeline-0.1.dev-py2.6.egg-info
+
+After installation, ensure that the relevant ``site-packages`` directory
+appears on your ``$PYTHONPATH`` environment variable:
+
+.. code-block:: bash
+
+  $ export PYTHONPATH=$PYTHONPATH:/opt/pipeline/framework/lib/python2.6/site-packages/
+
+You may wish to add this to your shell startup sequence.
+
+The pipeline also comes with a collection of recipes in the
+``pipeline_framework/recipes`` directory. These are not required by the
+framework itself, but will be useful for building pipelines. Ensure the
+contents of this directory are conveniently accessible:
+
+.. code-block:: bash
+
+  $ sudo cp -r ~/pipeline_framework/recipes /opt/pipeline
+
+At this point, the basic framework code should be installed. The next step is
+to start running simple recipes: see the :ref:`running-basic` section for
+details.
diff --git a/CEP/Pipeline/docs/sphinx/source/user/installation/quickstart.rst b/CEP/Pipeline/docs/sphinx/source/user/installation/quickstart.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b4320c3c8913f732ec8b1d5c98341d01f21d108c
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/installation/quickstart.rst
@@ -0,0 +1,155 @@
+.. _framework-quickstart:
+
+CEP quickstart
+==============
+
+.. todo::
+
+   Bring this quickstart guide in-line with the current situation.
+
+This section provides some quick notes on getting started with the pipeline
+system. More details are available in subsequent sections of this chapter.
+
+This section describes the basic requirements for setting up the pipeline
+framework. You may also need further configuration to run specific tools in
+your pipeline: see, for example, the Standard Imaging Pipeline
+:ref:`sip-quickstart` section.
+
+Locate the pipeline dependencies
+--------------------------------
+
+There are a number of Python packages which are required for the framework to
+operate: see :ref:`framework-dependencies`. On the LOFAR cluster, these are
+available under ``/opt/pipeline/dependencies``. Ensure the appropriate
+directories are available in the environment variables ``$PATH`` (should
+contain ``/opt/pipeline/dependencies/bin``)
+and ``$PYTHONPATH``
+(``/opt/pipeline/dependencies/lib/python2.5/site-packages``). To avoid any
+possible conflicts with system installations, it is best to list these paths
+early in the relevant variables.
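+
+For example, using ``bash``, the following (with the paths given above) will
+place the dependencies at the front of the relevant variables:
+
+.. code-block:: bash
+
+    $ export PATH=/opt/pipeline/dependencies/bin:$PATH
+    $ export PYTHONPATH=/opt/pipeline/dependencies/lib/python2.5/site-packages:$PYTHONPATH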
+
+Ensure the framework modules are available
+------------------------------------------
+
+There are two Python packages which comprise the pipeline framework: :mod:`ep`
+and :mod:`lofarpipe`. These must both be available on your ``$PYTHONPATH``.
+The easiest way to achieve this is to use the system installations in
+``/opt/pipeline/framework``: add
+``/opt/pipeline/framework/lib/python2.5/site-packages`` to your
+``$PYTHONPATH``. Alternatively, you may wish to build and install your own
+copies for development purposes: see :ref:`building-modules` for details.
+
+Decide on a basic layout
+------------------------
+
+The pipeline will store all its logs, results, configuration data, etc in a
+centralised location or "runtime directory". This should be accessible from
+all nodes you will be using -- both the head node, and any compute nodes --
+and should be writable (at least) by the userid under which the pipeline will
+run. You should create this directory now.
+
+If you will be using the compute nodes to store data on their local disks, you
+will also need to create a "working directory" in a standard location on each
+of them. On the LOFAR cluster, ``/data/scratch/[username]`` is a good choice.
+This can be easily achieved using ``cexec``; for instance:
+
+.. code-block:: bash
+
+   $ cexec sub3:0-8 mkdir -p /data/scratch/swinbank
+
+Produce a ``clusterdesc`` file
+------------------------------
+
+The ``clusterdesc`` file describes the layout of the cluster -- the names of
+the various nodes, what disks they have access to, and so on. Some are already
+available in LOFAR Subversion. A minimal file for subcluster three could be:
+
+.. code-block:: none
+
+   Head.Nodes = [ lfe001..2 ]
+   Compute.Nodes = [ lce019..027 ]
+
+It doesn't matter where you save this, but you might find it convenient to
+leave it in the runtime directory.
+
+.. _pipeline-config:
+
+Produce a pipeline configuration file
+-------------------------------------
+
+This file will contain all the standard information the pipeline framework
+needs to get going. For a basic pipeline, running only on the head node, you
+should have something like:
+
+.. literalinclude:: ../../../../../docs/examples/definition/dummy/pipeline.cfg
+
+Ensure that the ``runtime_directory`` and ``default_working_directory``
+directives match the directories you created above. The others can mostly be
+ignored for now, unless you know you need to change them.
+
+If you also want to use the cluster, you need to add another two stanzas:
+
+.. code-block:: none
+
+  [cluster]
+  clusterdesc = %(runtime_directory)s/sub3.clusterdesc
+  task_furl = %(runtime_directory)s/task.furl
+  multiengine_furl = %(runtime_directory)s/multiengine.furl
+
+  [deploy]
+  script_path = /opt/pipeline/framework/bin
+  controller_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages:/opt/pipeline/framework/lib/python2.5/site-packages
+  engine_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages/:/opt/pipeline/framework/lib/python2.5/site-packages
+  engine_lpath = /opt/pipeline/dependencies/lib
+
+You should ensure the ``clusterdesc`` directive points at the clusterdesc
+file you are using. Note that ``%(runtime_directory)s`` will be expanded to
+the path you've specified for the runtime directory.
+
+``engine_lpath`` and ``engine_ppath`` specify (respectively) the
+``$LD_LIBRARY_PATH`` and ``$PYTHONPATH`` that will be set for jobs on the
+compute nodes. These should (at least) point to the dependencies and the
+framework, as above, but should also include any necessary paths for code
+which you will be running on the engines (imaging tools, data processing,
+etc).
+
+The other variables can be left at the default settings for now, unless you
+know they need to be changed.
+
+When looking for a configuration file, the framework will look first in its
+current working directory for ``pipeline.cfg`` and, if nothing is there, fall
+back to ``~/.pipeline.cfg``. Save yours somewhere appropriate.
+
+At this point, the framework should be ready to run on the head node. If
+required, continue to :ref:`launch-cluster`.
+
+.. _launch-cluster:
+
+Setting up the IPython cluster
+------------------------------
+
+The IPython system consists of a controller, which runs on the head node, and
+engines, which run on the compute nodes. See the sections on :ref:`IPython
+<ipython-blurb>` and the :ref:`cluster layout <cluster-layout>` for details.
+Simple Python scripts make it easy to start and stop the cluster. This can be
+done independently of an individual pipeline run: one can start the engines
+once, run multiple pipelines using the same engines, and then shut the cluster down.
+
+The relevant scripts are available in ``/opt/pipeline/framework/bin``, named
+``start_cluster.py`` and ``stop_cluster.py``. Each accepts the name of a
+pipeline configuration file as an optional argument: if one is not provided,
+it defaults to ``~/.pipeline.cfg``.
+
+Usage is very straightforward:
+
+.. code-block:: bash
+
+  $ /opt/pipeline/framework/bin/start_cluster.py --config /path/to/pipeline.cfg
+
+After the script has finished executing, you can continue to set up and run
+your pipeline. When finished, shut down the cluster:
+
+.. code-block:: bash
+
+  $ /opt/pipeline/framework/bin/stop_cluster.py --config /path/to/pipeline.cfg
+
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/configuration.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/configuration.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2e6a0048e9c4889defc531c506c71909e3489483
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/configuration.rst
@@ -0,0 +1,115 @@
+.. _config-file:
+
+*************
+Configuration
+*************
+
+Various default parameters for pipeline operation are stored in a
+configuration file. The contents of this file are available to recipes as
+``self.config``, which is an instance of ``SafeConfigParser`` from the `Python
+Standard Library <http://docs.python.org/library/configparser.html>`_.
+
+Accepted parameters
+===================
+
+Section ``DEFAULT``
+-------------------
+
+This section contains global pipeline configuration parameters. They can be
+referred to in other sections of the configuration file using ``%()s`` syntax:
+see the Python documentation (linked above) for details.
+
+``runtime_directory``
+    Overall pipeline framework runtime directory. Pipeline framework metadata
+    which is shared between multiple jobs will be written here.
+
+    This parameter is **required**.
+
+``recipe_directories``
+    List of directories in which to search for recipes. Multiple directories
+    can be specified using a Python list-like syntax: ``[/path/to/dir/1,
+    /path/to/dir/2]``.
+
+``task_files``
+    List of task definition files. Multiple entries may be specified in the
+    same way as above.
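+
+Taken together, a minimal ``DEFAULT`` section using values quoted elsewhere in
+this documentation might read:
+
+.. code-block:: none
+
+  [DEFAULT]
+  runtime_directory = /home/username/pipeline_runtime
+  recipe_directories = [/opt/pipeline/recipes]
+  task_files = [/path/to/tasks.cfg]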
+
+Section ``layout``
+------------------
+
+This section contains paths which may be referenced by individual pipeline
+recipes, for example to save state, locate parset files or write logs.
+
+``job_directory``
+    This will contain configuration information (parsets, etc) for a given
+    pipeline job. Metadata referring to an ongoing run may be written into
+    this directory (and will normally be removed when the run finishes),
+    and logs and output files are collected here.
+
+    This directory should be available (eg, NFS mounted) to **every** node
+    that is involved in the pipeline run.
+
+    This parameter is **required**.
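+
+    A typical value, matching the examples used elsewhere in this
+    documentation, is:
+
+    .. code-block:: none
+
+      job_directory = %(runtime_directory)s/jobs/%(job_name)s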
+
+Section ``cluster``
+-------------------
+
+This section describes the layout of a cluster which can be used for
+distributed processing.
+
+``clusterdesc``
+    The full path to a ``clusterdesc`` file (see :ref:`distproc-blurb`)
+    which describes the cluster configuration to be used by the pipeline.
+
+    This parameter is **required** if remote jobs are being used.
+
+``task_furl`` and ``multiengine_furl``
+    Filenames which will be used for the FURL files needed for connection to an
+    :ref:`ipython-blurb` cluster.
+
+    These parameters are only required if IPython is being used within the
+    pipeline.
+
+Section ``deploy``
+------------------
+
+This section describes the environment used for starting up jobs on remote
+hosts.
+
+``engine_lpath`` and ``engine_ppath``
+    The values of ``$LD_LIBRARY_PATH`` and ``$PYTHONPATH`` which will be used
+    for all remote commands. Note that these are **not** inherited from the
+    environment on the pipeline head node.
+
+    These parameters are **required** if remote jobs are being used.
+
+``controller_ppath``
+    The value of ``$PYTHONPATH`` which will be used for an IPython controller
+    started on the head node. Note that this is not used (or required) if
+    IPython is not being used.
+
+``script_path``
+    The location of scripts needed for starting IPython engines on remote
+    hosts. This is not used (or required) if IPython is not being used, or if
+    a non-pipeline method is used for starting the engines.
+
+Section ``logging``
+-------------------
+
+This section enables the user to customise the pipeline logging output. Note
+that it is entirely optional: a log file with default settings will be written
+to the ``job_directory`` if this section is omitted.
+
+``log_file``
+    Output filename for logging.
+
+``format`` and ``datefmt``
+    Format for log entries and dates, respectively. These are used exactly as
+    per the Python logging system; see `its documentation
+    <http://docs.python.org/library/logging.html#formatters>`_ for details.
+
+Section ``remote``
+------------------
+
+This section contains parameters for configuring the remote command execution
+strategy. It is intended for expert use only.
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/index.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bdbb8e3e8e3bdfbdee609b0a42ed98e0712d3c28
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/index.rst
@@ -0,0 +1,17 @@
+.. _usage:
+
+*************************
+Using the pipeline system
+*************************
+
+This section provides a guide to the important concepts a pipeline end-user
+should understand.
+
+.. toctree::
+   :maxdepth: 1
+  
+   running.rst
+   layout.rst
+   jobs.rst
+   configuration.rst
+   tasks.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/jobs.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/jobs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e0b68c1bde6299c79ede8cf55f2e48f66d7b9ac9
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/jobs.rst
@@ -0,0 +1,7 @@
+****
+Jobs
+****
+
+.. todo::
+
+   Details on jobs, job IDs, etc.
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/layout.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/layout.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fbb1912b42d5a8ee1c248fea85130a325ffedd3e
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/layout.rst
@@ -0,0 +1,51 @@
+.. _pipeline-layout:
+
+Pipeline layout
+===============
+
+The pipeline system is designed to be organised in a standard directory
+structure. Insofar as is possible, this contains all information needed to
+manage a cluster and associated set of pipeline tasks. It is not designed to
+contain the actual data that is being processed. It is assumed that
+this directory will be available to all the various cluster nodes, presumably
+using NFS.
+
+The root directory of this structure is the ``runtime`` directory. This
+contains all the information about a given "cluster" -- that is, all the
+disks, compute nodes, management node, etc described by a given
+``clusterdesc`` file. This top level directory contains that ``clusterdesc``,
+and, if appropriate, information about an associated IPython cluster: 
+
+* A PID file (``ipc.pid``) and log files from the IPython controller (named
+  according to the pattern ``ipc.log${pid}.log``)
+
+* An ``engines`` directory, which contains PID (``ipengine${N}.pid``, where
+  ``N`` simply increments according to the number of engines on the host)
+  files and log files (``log${PID}.log``) from the various engines in the
+  cluster, organised into subdirectories by hostname.
+
+* The files ``engine.furl``, ``multiengine.furl`` and ``task.furl``, which
+  provide access to the IPython engines, multiengine client and task client
+  respectively. See the IPython documentation for details.
+
+Of course, a single cluster (and hence runtime directory) may process many
+different jobs. These are stored in the ``jobs`` subdirectory, and are
+organised by job name -- an arbitrary user-supplied string.
+
+Within each job directory, three further subdirectories are found:
+
+``logs``
+    Processing logs; where appropriate, these are filed by sub-band name.
+
+``parsets``
+    Parameter sets providing configuration information for the various
+    pipeline components. These should provide the static parameters used by
+    tasks such as ``DPPP`` and the imager; dynamic parameters, such as the
+    name of the files to be processed, can be added by the pipeline at
+    runtime.
+
+``vds``
+    Contains VDS and GDS files pointing to the location of the data to be
+    processed on the cluster.
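+
+Putting this together, a runtime directory might be laid out roughly as
+follows (an illustrative sketch only; exact file and job names depend on your
+configuration and on which IPython components are in use):
+
+.. code-block:: none
+
+    runtime_directory/
+        sub3.clusterdesc
+        ipc.pid
+        engine.furl
+        multiengine.furl
+        task.furl
+        engines/
+        jobs/
+            L2009_16007_1/
+                logs/
+                parsets/
+                vds/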
+
+
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/running.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/running.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1db151c90d01d264a664f896126ef3ed8a1d33fc
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/running.rst
@@ -0,0 +1,108 @@
+.. _running-basic:
+
+***********************
+Running basic pipelines
+***********************
+
+This section will describe the minimal configuration needed to run a simple
+pipeline. Every pipeline recipe is independently runnable as a stand-alone
+module, so we start with that. We will then build a larger pipeline connecting
+multiple modules.
+
+A single recipe
+---------------
+
+We will start by running the :ref:`simple recipe <basic-recipe>` described in
+the :ref:`recipe-docs` section. This is included in the standard framework
+distribution: it will be available as
+``/opt/pipeline/recipes/master/example.py`` if you have followed the
+installation guide. When called in its simplest mode, this recipe will simply
+return the results of running ``/bin/ls`` in the current working directory.
+
+Before running, it is necessary to create a pipeline configuration file. The
+simplest possible configuration file defines two options:
+``runtime_directory``, which the pipeline framework uses for storing various
+information about the framework state, and ``job_directory``, which contains
+information relevant to a specific pipeline job. The former must exist; the
+latter will be created if required, and may contain a reference to the runtime
+directory. For example:
+
+.. code-block:: none
+
+  [DEFAULT]
+  runtime_directory = /home/username/pipeline_runtime
+  
+  [layout]
+  job_directory = %(runtime_directory)s/jobs/%(job_name)s
+
+On startup, the framework will first search for a file named ``pipeline.cfg``
+in your current working directory, before falling back to ``~/.pipeline.cfg``.
+The user can also specify a configuration file on the command line. See the
+:ref:`config-file` section for full details on the configuration system.
+
+With the configuration file in place, it is now possible to run the example
+recipe. **Note** that all pipeline runs must be supplied with a job identifier
+on startup: see the section on :ref:`pipeline-jobs` for more. The recipe can
+then be executed as follows:
+
+.. code-block:: bash
+
+  $ python /opt/pipeline/recipes/master/example.py --job-name EXAMPLE_JOB -d
+  $ python example.py -j EXAMPLE_JOB -d
+  2010-10-26 18:38:31 INFO    example: This is a log message
+  2010-10-26 18:38:31 DEBUG   example: /bin/ls stdout: bbs.py
+  [ls output elided]
+  2010-10-26 18:38:31 INFO    example: recipe example completed
+  Results:
+  stdout = [ls output elided]
+
+Congratulations: you have run your first LOFAR pipeline!
+
+A pipeline
+----------
+
+To turn this simple recipe into a fully-functional pipeline is simply a matter
+of wrapping it in a pipeline definition derived from the :class:`control`
+class. The :meth:`cook_recipe` method can then be used to run the recipe. Note
+that it needs to be provided with appropriate input and output objects. An
+appropriate pipeline definition might be:
+
+.. code-block:: python
+
+  import sys
+
+  from lofarpipe.support.control import control
+  from lofarpipe.support.lofaringredient import LOFARinput, LOFARoutput
+
+  class pipeline(control):
+      def pipeline_logic(self):
+          recipe_inputs = LOFARinput(self.inputs)
+          recipe_outputs = LOFARoutput()
+          recipe_inputs['executable'] = '/bin/true'
+          self.cook_recipe('example', recipe_inputs, recipe_outputs)
+
+  if __name__ == "__main__":
+      sys.exit(pipeline().main())
+
+In order to make it clear where to find the ``example`` recipe, we also need
+to edit ``pipeline.cfg``, adding a ``recipe_directories`` directive to the
+``DEFAULT`` section:
+
+.. code-block:: none
+
+    recipe_directories = [/opt/pipeline/recipes]
+
+Saving the above definition in ``pipeline.py``, we now have:
+
+.. code-block:: bash
+
+  $ python pipeline.py -j test_job -d
+  2010-10-27 18:17:31 INFO    pipeline: LOFAR Pipeline (pipeline) starting.
+  2010-10-27 18:17:31 INFO    pipeline.example: recipe example started
+  2010-10-27 18:17:31 INFO    pipeline.example: Starting example recipe run
+  2010-10-27 18:17:31 DEBUG   pipeline.example: Pipeline start time: 2010-10-27T16:17:31
+  2010-10-27 18:17:31 INFO    pipeline.example: This is a log message
+  2010-10-27 18:17:31 INFO    pipeline.example: recipe example completed
+  2010-10-27 18:17:31 INFO    pipeline: recipe pipeline completed
+  Results:
+
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/tasks.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/tasks.rst
new file mode 100644
index 0000000000000000000000000000000000000000..bb005c74288bac16d620775702bc741fda2c1f4b
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/tasks.rst
@@ -0,0 +1,43 @@
+*****
+Tasks
+*****
+
+Declaring the full inputs and outputs for a recipe every time it is run can be
+a chore, especially when the same set of parameters is used frequently. This
+not only complicates the pipeline definition, but also inelegantly mixes
+configuration parameters into the code defining the pipeline.  Therefore, we
+introduce the concept of a "task": the combination of a recipe and a set of
+standard parameters.
+
+First, we define a task file in the :ref:`configuration file <config-file>`:
+
+.. code-block:: none
+
+  task_files = [/path/to/tasks.cfg]
+
+Within that file, tasks are delimited by blocks headed by the task name in
+square brackets. There then follows the recipe name and the parameters to be
+provided. For example:
+
+.. code-block:: none
+
+  [example_task]
+  recipe = example
+  parameter1 = value1
+  parameter2 = value2
+
+Within the pipeline definition, this task can then be used by invoking the
+:meth:`~lofarpipe.support.baserecipe.BaseRecipe.run_task` method:
+
+.. code-block:: python
+
+   self.run_task("example_task")
+
+If required, parameters can be overridden in the arguments to
+:meth:`~lofarpipe.support.baserecipe.BaseRecipe.run_task`. For example, we
+might over-ride the value of ``parameter2`` above, while leaving
+``parameter1`` intact:
+
+.. code-block:: python
+
+   self.run_task("example_task", parameter2="value3")
diff --git a/CEP/Pipeline/framework/lofarpipe/__init__.py b/CEP/Pipeline/framework/lofarpipe/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/COPYING b/CEP/Pipeline/framework/lofarpipe/cuisine/COPYING
new file mode 100644
index 0000000000000000000000000000000000000000..d60c31a97a544b53039088d14fe9114583c0efc3
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/COPYING
@@ -0,0 +1,340 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year  name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/README b/CEP/Pipeline/framework/lofarpipe/cuisine/README
new file mode 100644
index 0000000000000000000000000000000000000000..176e12fe453cea36bb6b01c3700986b16d771f25
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/README
@@ -0,0 +1,8 @@
+This is free software under the GNU General Public License. See the file COPYING.
+Copyright (c) ASTRON
+
+If you are not using the Eric3 IDE you can ignore the .e3p files,
+as these are the Eric3 project files (currently for version 3.6).
+
+These scripts are the base classes for the scripts in the
+WSRTpipeline_library. See the documentation in WSRTpipeline_library/doc.
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/WSRTrecipe.py b/CEP/Pipeline/framework/lofarpipe/cuisine/WSRTrecipe.py
new file mode 100644
index 0000000000000000000000000000000000000000..e659b9b15110a891639166e6f5ff00edfa6dbd99
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/WSRTrecipe.py
@@ -0,0 +1,366 @@
+#!/usr/bin/env python
+import ingredient, cook, parset
+import sys
+
+from optparse import OptionParser
+
+####
+# Use the standard Python logging module for flexibility.
+# Standard error of external jobs goes to logging.WARN, standard output goes
+# to logging.INFO.
+import logging
+from lofarpipe.support.pipelinelogging import getSearchingLogger
+
+from traceback import format_exc
+
+class RecipeError(cook.CookError):
+    pass
+
+class NullLogHandler(logging.Handler):
+    """
+    A handler for the logging module, which does nothing.
+    Provides a sink, so that loggers with no other handlers defined do
+    nothing rather than spewing unformatted garbage.
+    """
+    def emit(self, record):
+        pass
+
+class WSRTrecipe(object):
+    """Base class for recipes, pipelines are created by calling the cook_*
+    methods.  Most subclasses should only need to reimplement go() and add
+    inputs and outputs.  Some might need to addlogger() to messages or
+    override main_results."""
+    def __init__(self):
+        ## List of inputs, self.inputs[key] != True is considered valid input
+        self.inputs   = ingredient.WSRTingredient()
+        ## List of outputs, should only be filled on successful execution
+        self.outputs  = ingredient.WSRTingredient()
+        ## All of these should do something sensible in their __str__ for
+        ## simple print output
+
+        ## Try using the standard Python system for handling options
+        self.optionparser = OptionParser(
+            usage="usage: %prog [options]"
+        )
+        self.optionparser.remove_option('-h')
+        self.optionparser.add_option(
+            '-h', '--help', action="store_true"
+        )
+        self.optionparser.add_option(
+            '-v', '--verbose',
+            action="callback", callback=self.__setloglevel,
+            help="verbose [Default: %default]"
+        )
+        self.optionparser.add_option(
+            '-d', '--debug',
+            action="callback", callback=self.__setloglevel,
+            help="debug [Default: %default]"
+        )
+
+        self.helptext = """LOFAR/WSRT pipeline framework"""
+        self.recipe_path = ['.']
+
+    def _log_error(self, e):
+        # Sometimes, an exception will be thrown before we have any loggers
+        # defined that can handle it. Check if that's the case, and, if so,
+        # dump it to stderr.
+        handled = len(self.logger.handlers) > 0
+        my_logger = self.logger
+        while my_logger.parent:
+            my_logger = my_logger.parent
+            if len(my_logger.handlers) > 0 and my_logger is not my_logger.root:
+                handled = True
+        if handled:
+            self.logger.exception('Exception caught: ' + str(e))
+        else:
+            print >> sys.stderr, "***** Exception occurred with no log handlers"
+            print >> sys.stderr, "*****", str(e)
+
+    def help(self):
+        """Shows helptext and inputs and outputs of the recipe"""
+        print self.helptext
+        self.optionparser.print_help()
+        print '\nOutputs:'
+        for k in self._outfields.keys():
+            print '  ' + k
+
+    def main_init(self):
+        """Main initialization for stand alone execution, reading input from
+        the command line"""
+        # The root logger has a null handler; we'll override in recipes.
+        logging.getLogger().addHandler(NullLogHandler())
+        self.logger = getSearchingLogger(self.name)
+        opts = sys.argv[1:]
+        try:
+            myParset = parset.Parset(self.name + ".parset")
+            for p in myParset.keys():
+                opts[0:0] = "--" + p, myParset.getString(p)
+        except IOError:
+            logging.debug("Unable to open parset")
+        (options, args) = self.optionparser.parse_args(opts)
+        if options.help:
+            return 1
+        else:
+            for key, value in vars(options).iteritems():
+                if value is not None:
+                    self.inputs[key] = value
+            self.inputs['args'] = args
+            return 0
+
+    def main(self):
+        """This function is to be run in standalone mode."""
+        import os.path
+        self.name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
+        status = self.main_init()
+        if not status:
+            status = self.run(self.name)
+            self.main_result()
+        else:
+            self.help()
+        logging.shutdown()
+        return status
+
+    def run(self, name):
+        """This code will run if all inputs are valid, and wraps the actual
+        functionality in self.go() with some exception handling, might need
+        another name, like try_execute, because it's to similar to go()."""
+        self.name = name
+        self.logger.info('recipe ' + name + ' started')
+        try:
+            status = self.go()
+            if not self.outputs.complete():
+                self.logger.warn("Note: recipe outputs are not complete")
+        except Exception, e:
+            self._log_error(e)
+            self.outputs = None ## We're not generating any results we have
+                                ## confidence in
+            return 1
+        else:
+            self.logger.info('recipe ' + name + ' completed')
+            return status
+
+    def get_run_info(self, filepath):
+        import pickle
+        try:
+            fd = open(filepath + '/pipeline.pickle')
+            results = pickle.load(fd)
+        except:
+            return None
+        fd.close()
+        if self.name in results.keys():
+            return results[self.name]
+        else:
+            return None
+
+    def set_run_info(self, filepath):
+        import pickle
+        try:
+            ## read any existing results first; opening the file for writing
+            ## straight away would truncate it before we could load it
+            try:
+                fd = open(filepath + '/' + 'pipeline.pickle')
+                results = pickle.load(fd)
+                fd.close()
+            except:
+                results = {}
+            results[self.name] = {'inputs':self.inputs, 'outputs':self.outputs}
+            fd = open(filepath + '/' + 'pipeline.pickle', 'w')
+            pickle.dump(results, fd)
+            fd.close()
+        except:
+            return None
+
+    def rerun(self, name, directory):
+        """Function that you can use to rerun a recipe from the point where it
+        ended.  Not working completely yet. [untested]"""
+        self.name = name
+        self.logger.info('recipe ' + name + ' started')
+        try:
+            results = self.get_run_info(directory)
+            if not results: return
+            ## get_run_info already returns the entry for this recipe
+            self.inputs  = results['inputs']
+            self.outputs = results['outputs']
+            self.run(name)
+        except Exception, e:
+            self._log_error(e)
+            self.outputs = None ## We're not generating any results we have
+                                ## confidence in
+            return 0
+        else:
+            self.logger.info('recipe ' + name + ' completed')
+            return 1
+
+    def go(self):
+        """Main functionality, this empty placeholder only shows help"""
+        self.help()
+
+    def main_result(self):
+        """Main results display for stand alone execution, displaying results
+        on stdout"""
+        if self.outputs == None:
+            print 'No results'
+        else:
+            print 'Results:'
+            for o in self.outputs.keys():
+                print str(o) + ' = ' + str(self.outputs[o])
+
+    ## Maybe these cooks should go in some subclass?
+    ## Problem is you might need all of them in a recipe describing a pipeline
+    def cook_recipe(self, recipe, inputs, outputs):
+        """Execute another recipe/pipeline as part of this one"""
+        c = cook.PipelineCook(recipe, inputs, outputs, self.logger, self.recipe_path)
+        c.spawn()
+
+    def cook_system(self, command, options):
+        """Execute an arbitrairy system call, returns it's exitstatus"""
+        l = [command]
+        if type(options) == list:
+            l.extend(options)
+        else: ## we assume it's a string
+            l.extend(options.split())
+        self.print_debug('running ' + command + ' ' + str(options))
+        c = cook.SystemCook(command, l, {}, self.logger)
+        c.spawn()
+        while c.handle_messages():
+            pass ## we could have a timer here
+        c.wait()
+        return c.exitstatus ## just returning the exitstatus, the programmer
+                            ## must decide what to do
+
+    def cook_interactive(self, command, options, expect):
+        """Execute an arbitrairy system call, returns it's exitstatus, expect
+        can define some functions to call if certain strings are written to
+        the terminal stdout of the called program.
+        Whatever such functions return is written to the stdin of the called
+        program."""
+        commandline = [command]
+        if type(options) == list:
+            commandline.extend(options)
+        else: ## we assume it's a string
+            commandline.extend(options.split())
+        self.print_debug('running ' + command + ' ' + str(options))
+        c = cook.SystemCook(command, commandline, {}, self.logger)
+        c.set_expect(expect)
+        c.spawn()
+        while c.handle_messages():
+            pass ## we could have a timer here
+        c.wait()
+        return c.exitstatus ## just returning the exitstatus, the programmer
+                            ## must decide what to do
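+
+    ## Illustrative sketch (not part of the framework) of the 'expect'
+    ## argument used above: a list of (trigger, callback) pairs.  When
+    ## 'trigger' appears in the child's output the callback is called with
+    ## that output, and whatever it returns is written to the child's stdin.
+    ## The tool name and prompt below are hypothetical.
+    ##
+    ##    def answer_ok(text):
+    ##        return 'Ok\n'
+    ##    status = self.cook_interactive('some_tool', '-i',
+    ##                                   [('(O)k/(C)ancel', answer_ok)])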
+
+    def cook_miriad(self, command, options):
+        """Execute a Miriad task, uses MRIBIN, returns it's exitstatus"""
+        l = [command]
+        if type(options) == list:
+            l.extend(options)
+        else: ## we assume it's a string
+            l.extend(options.split())
+        self.print_debug('running ' + command + str(options))
+        c = cook.MiriadCook(command, l, {}, self.logger)
+        c.spawn()
+        while c.handle_messages():
+            pass ## we could have a timer here
+        c.wait()
+        # should probably parse the messages on '### Fatal Error'
+        if c.exitstatus:
+            raise RecipeError('%s failed with error: %s' %
+                              (command, c.exitstatus))
+        return c.exitstatus ## just returning the exitstatus, the programmer
+                            ## must decide what to do
+
+##    def cook_aips(self, command, options):
+##        """Execute an AIPS task, returns it's exitstatus"""
+##        l = [command]
+##        if type(options) == list:
+##            l.extend(options)
+##        else: ## we assume it's a string
+##            l.extend(options.split())
+##        self.print_debug('running ' + command + str(options))
+##        c = cook.AIPSCook(command, l, {}, self.messages)
+##        c.spawn()
+##        while c.handle_messages():
+##            pass ## we could have a timer here
+##        c.wait()
+##        return c.exitstatus ## just returning the exitstatus, the programmer must decide what to do
+##
+##    def cook_aips2(self, command, options):
+##        """Execute an AIPS++ tool, returns it's exitstatus""" #?
+##        l = [command]
+##        if type(options) == list:
+##            l.extend(options)
+##        else: ## we assume it's a string
+##            l.extend(options.split())
+##        self.print_debug('running ' + command + str(options))
+##        c = cook.AIPS2Cook(command, l, {}, self.messages)
+##        c.spawn()
+##        while c.handle_messages():
+##            pass ## we could have a timer here
+##        c.wait()
+##        return c.exitstatus ## just returning the exitstatus, the programmer must decide what to do
+
+    def cook_glish(self, command, options):
+        """Execute a Glish script, uses AIPSPATH, returns it's exitstatus"""
+        l = ['glish', '-l', command + '.g']
+        if type(options) == list:
+            l.extend(options)
+        else: ## we assume it's a string
+            l.extend(options.split())
+        self.print_debug('running ' + command + str(options))
+        c = cook.GlishCook('glish', l, {}, self.logger)
+        c.spawn()
+        while c.handle_messages():
+            pass ## we could have a timer here
+        c.wait()
+        return c.exitstatus ## just returning the exitstatus, the programmer
+                            ## must decide what to do
+
+    def print_debug(self, text):
+        """Add a message at the debug level"""
+        self.logger.debug(text)
+
+    def print_message(self, text):
+        """Add a message at the verbose level"""
+        self.logger.info(text)
+    print_notification = print_message # backwards compatibility
+
+    def print_warning(self, text):
+        """Add a message at the warning level."""
+        self.logger.warn(text)
+
+    def print_error(self, text):
+        """Add a message at the error level"""
+        self.logger.error(text)
+
+    def print_critical(self, text):
+        """Add a message at the critical level"""
+        self.logger.critical(text)
+
+
+# The feeling is this needs to be part of the ingredient, or separate module,
+# not the recipe
+    def zap(self, filepath):
+        import os #, exception
+    #    if filepath == '/' or filepath == '~/':
+    #      raise Exception
+    #    else:
+    #      for root, dirs, files in os.walk(filepath, topdown=False):
+    #        for name in files:
+    #            os.remove(join(root, name))
+    #        for name in dirs:
+    #            os.rmdir(join(root, name))
+        if os.path.isdir(filepath):
+            self.cook_system('rm', ' -rf ' + filepath)
+        elif os.path.isfile(filepath):
+            self.cook_system('rm', ' -f ' + filepath)
+        else:
+            self.print_debug(filepath + ' doesn\'t seem to exist')
+
+    def __setloglevel(self, option, opt_str, value, parser):
+        """Callback for setting log level based on command line arguments"""
+        if str(option) == '-v/--verbose':
+            self.logger.setLevel(logging.INFO)
+        elif str(option) == '-d/--debug':
+            self.logger.setLevel(logging.DEBUG)
+
+
+# Stand alone execution code ------------------------------------------
+if __name__ == '__main__':
+    standalone = WSRTrecipe()
+    sys.exit(standalone.main())
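+
+## ---------------------------------------------------------------------
+## Illustrative sketch (not part of the framework): the class docstring above
+## says most subclasses only reimplement go() and declare inputs/outputs.
+## The recipe name and its 'message' input below are hypothetical.
+##
+##    class EchoRecipe(WSRTrecipe):
+##        def __init__(self):
+##            WSRTrecipe.__init__(self)
+##            self.inputs['message'] = ''
+##            self.helptext = """Echo the 'message' input."""
+##
+##        def go(self):
+##            self.print_message(self.inputs['message'])
+##            self.outputs['echoed'] = self.inputs['message']
+##            return 0    ## run() treats a false/zero return as success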
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/__init__.py b/CEP/Pipeline/framework/lofarpipe/cuisine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/cook.py b/CEP/Pipeline/framework/lofarpipe/cuisine/cook.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb49d282abd57e986a60f8138353462bd23f08d4
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/cook.py
@@ -0,0 +1,264 @@
+#from message import ErrorLevel, NotifyLevel, VerboseLevel, DebugLevel
+import time, os, select, pty, fcntl, sys, logging, imp
+from lofarpipe.support.pipelinelogging import getSearchingLogger
+
+class CookError(Exception):
+    """Base class for all exceptions raised by this module."""
+    def __init__(self, value):
+        self.value = value
+    def __str__(self):
+        return `self.value`
+
+class WSRTCook(object):
+    def __init__(self, task, inputs, outputs, logger):
+        self.inputs   = inputs
+        self.outputs  = outputs
+        self.task     = task.strip()
+        self.logger   = logger
+
+class PipelineCook(WSRTCook):
+    """
+    A system for spawning a recipe, providing it with correct inputs, and
+    collecting its outputs.
+    """
+    def __init__(self, task, inputs, outputs, logger, recipe_path):
+        super(PipelineCook, self).__init__(task, inputs, outputs, logger)
+        # Ensures the recipe to be run can be imported from the recipe path
+        try:
+            try:
+                module_details = imp.find_module(task, recipe_path)
+            except ImportError:
+                # ...also support lower-cased file names.
+                module_details = imp.find_module(task.lower(), recipe_path)
+            module = imp.load_module(task, *module_details)
+            self.recipe = getattr(module, task)()
+            self.recipe.logger = getSearchingLogger("%s.%s" % (self.logger.name, task))
+            self.recipe.logger.setLevel(self.logger.level)
+        except Exception, e:
+            self.logger.exception("Exception caught: " + str(e))
+            self.recipe = None
+            raise CookError (task + ' can not be loaded')
+
+    def try_running(self):
+        """Run the recipe, inputs should already have been checked."""
+        self.recipe.name     = self.task
+        if not self.recipe.run(self.task):
+            self.copy_outputs()
+        else:
+            raise CookError (self.task + ' failed')
+
+    def copy_inputs(self):
+        """Ensure inputs are available to the recipe to be run"""
+        for k in self.inputs.keys():
+            self.recipe.inputs[k] = self.inputs[k]
+
+    def copy_outputs(self):
+        """Pass outputs from the recipe back to the rest of the pipeline"""
+        if self.recipe.outputs == None:
+            raise CookError (self.task + ' has no outputs') ## should it have??
+        else:
+            for k in self.recipe.outputs.keys():
+                self.outputs[k] = self.recipe.outputs[k]
+
+    def spawn(self):
+        """Copy inputs to the target recipe then run it"""
+        self.copy_inputs()
+        self.try_running()
+
+class SystemCook(WSRTCook):
+    """Based on Parseltongue cody by Mark Kettenis (JIVE)
+    and subProcess from: Padraig Brady at www.pixelbeat.org
+    and Pexpect from: Noah Spurrier on sourceforge"""
+    def __init__(self, task, inputs, outputs, logger):
+        super(SystemCook, self).__init__(task, inputs, outputs, logger)
+        self._pid      = None ## spawned process ID
+        self._child_fd = None ## child output file descriptor
+        self._expect   = []
+        self._fd_eof   = self._pipe_eof = 0
+        ## We can only have a pipe for stderr, as otherwise stdio changes its
+        ## buffering strategy
+        (self._errorpipe_end, self._errorpipe_front) = os.pipe()
+##        self.poll = select.poll()
+
+    def StripNoPrint(self, S):
+        from string import printable
+        return "".join([ ch for ch in S if ch in printable ])
+
+    def set_expect(self, expectlist):
+        self._expect = expectlist
+
+    def spawn(self, env=None):
+        """Try to start the task."""
+        try:
+            (self._pid, self._child_fd) = pty.fork()
+        except OSError, e:
+            self.logger.error('Unable to fork:' + str(e))
+            raise CookError ('fork failed')
+        if self._pid == 0: ## the new client
+            try:
+                #fcntl.fcntl(self.errw, fcntl.F_SETFL, os.O_NONBLOCK)
+                #os.dup2(self.outputpipe_front, 1) ## This makes stdio screw
+                #up buffering because a pipe is a block device
+
+                # we hard-code the assumption that stderr of the pty has fd 2
+                os.dup2(self._errorpipe_front, 2)
+
+                os.close(self._errorpipe_end)
+                os.close(self._errorpipe_front) ## close what we don't need
+                self.logger.info("starting " + " ".join(self.inputs))
+                if env:
+                    os.execvpe(self.task, self.inputs, env)
+                else:
+                    os.execvp(self.task, self.inputs)
+            except:
+                sys.stderr.write('Process could not be started: ' + self.task)
+                os._exit(1)
+        else: ## the parent
+##            self.poll.register(self._child_fd)
+##            self.poll.register(self._errorpipe_end)
+            os.close(self._errorpipe_front) ## close what we don't need
+            fcntl.fcntl(self._child_fd, fcntl.F_SETFL, os.O_NONBLOCK)
+
+    def finished(self):
+        """Check whether the task has finished."""
+        return self._pid == 0
+
+    def handle_messages(self):
+        """Read messages."""
+        tocheck=[]
+        if not self._fd_eof:
+            tocheck.append(self._child_fd)
+        if not self._pipe_eof:
+            tocheck.append(self._errorpipe_end)
+        ready = select.select(tocheck, [], [], 0.25)
+        for file in ready[0]:
+            try:
+                time.sleep(0.05)
+                text = os.read(file, 32768)
+            except: ## probably Input/Output error because the child died
+                text = ''
+            if text:
+                for x in self._expect:
+                    if x[0] in text: ## we need to do something if we see this text
+                        returntext = x[1](text)
+                        if returntext:
+                            os.write(file, returntext)
+                text = text.replace('\r','\n') ## a pty returns '\r\n' even on Unix
+                text = text.replace('\n\n','\n') ## str.replace returns a new string
+                for line in text.split('\n'): ## still have odd behaviour for gear output
+                    if file == self._child_fd:
+                        self.logger.info(self.StripNoPrint(line))
+                    elif file == self._errorpipe_end:
+                        self.logger.warn(self.StripNoPrint(line))
+            else:
+                if file == self._child_fd:
+                    self._fd_eof   = 1
+                elif file == self._errorpipe_end:
+                    self._pipe_eof = 1
+            return 1
+##        if self._child_fd in ready[0]:
+##            try:
+##                text = os.read(self._child_fd, 1024)
+##            except: ## probalby Input/Output error because the child died
+##                text = ''
+##            if text == '':
+##                self._fd_eof   = 1
+##            else: # should probably do some kind of line buffering
+##                if text.find('(O)k/(C)ancel (Ok)') >= 0:
+##                    os.write(self._child_fd, 'C\n')
+##                else:
+##                    self.messages.append(VerboseLevel, self.StripNoPrint(text))
+##            return 1
+##        if self._errorpipe_end in ready[0]:
+##            try:
+##                time.sleep(0.002) ## stderr isn't buffered
+##                text = os.read(self._errorpipe_end, 1024)
+##            except: ## probalby Input/Output error because the child died
+##                text = ''
+##            if text == '':
+##                self._pipe_eof = 1
+##            else: # should probably do some kind of line buffering
+##                self.messages.append(NotifyLevel, self.StripNoPrint(text))
+##            return 1
+        if self._fd_eof and self._pipe_eof: # should be an and not an or, but python 2.3.5 doesn't like it
+            return 0
+        if len(ready[0]) == 0: ## no data in 0.25 second timeout
+            return 1
+        return 0
+##        if self._fd_eof and self._pipe_eof:
+##            return 0
+##        ready = self.poll.poll(1000)
+##        for x in ready:
+##            text = ''
+##            if (x[1] & select.POLLOUT) or (x[1] & select.POLLPRI):
+##                try:
+##                    text = os.read(x[0], 1024)
+##                except:
+##                    if x[0] == self._child_fd:
+##                        self._fd_eof   = 1
+##                    elif x[0] == self._errorpipe_end:
+##                        self._pipe_eof = 1
+##            if (x[1] & select.POLLNVAL) or (x[1] & select.POLLHUP) or (x[1] & select.POLLERR) or (text == ''):
+##                if x[0] == self._child_fd:
+##                    self._fd_eof   = 1
+##                elif x[0] == self._errorpipe_end:
+##                    self._pipe_eof = 1
+##            elif text:
+##                if x[0] == self._child_fd:
+##                    self.messages.append(VerboseLevel, text)
+##                elif x[0] == self._errorpipe_end:
+##                    self.messages.append(NotifyLevel, text)
+##        if self._fd_eof and self._pipe_eof:
+##             return 0
+##        return 1 ##else
+
+    def wait(self):
+        """Check if the task is finished and clean up."""
+        ##self.__excpt = sys.exc_info() might want to check this in some way?
+        try:
+            (pid, status) = os.waitpid(self._pid, os.WNOHANG) ## clean up the zombie
+            assert(pid == self._pid)
+            if os.WIFEXITED(status) or os.WIFSIGNALED(status):
+                self._pid       = 0
+                self.exitstatus = status
+            assert(self.finished())
+            del self._pid
+##            self.poll.unregister(self._child_fd)
+##            self.poll.unregister(self._errorpipe_end)
+            os.close(self._child_fd)
+            os.close(self._errorpipe_end)
+            ## Interpret the return value
+            if (self.exitstatus == 0 or self.exitstatus > 255):
+                if (self.exitstatus > 255):
+                    self.exitstatus = self.exitstatus >> 8
+                else:
+                    self.exitstatus = 0
+                self.logger.info(self.task + ' has ended with exitstatus: ' + str(self.exitstatus))
+            else:
+                self.logger.warn(self.task + ' was aborted with exitstatus: ' + str(self.exitstatus))
+        except Exception, e:
+            self.logger.exception('Exception caught: ' + str(type(e)) + ' ' + str(e))
+            raise CookError (self.task + ' critical error ' + str(type(e)) + ' ' + str(e))
+
+class MiriadCook(SystemCook):
+    def __init__(self, task, inputs, outputs, logger):
+        mirbin = os.environ['MIRBIN'] + '/' ## can raise an exception if it doesn't exist
+        super(MiriadCook, self).__init__(mirbin + task, inputs, outputs, logger)
+        self.logger.debug('Using ' + task + ' found in ' + mirbin)
+
+class AIPSCook(SystemCook):
+    def __init__(self, task, inputs, outputs, logger):
+        # something with the Parseltongue AIPSTask
+        super(AIPSCook, self).__init__(task, inputs, outputs, logger)
+
+class AIPS2Cook(SystemCook):
+    def __init__(self, task, inputs, outputs, logger):
+        # Don't know if we can do this right now, we might need a Python interface to AIPS++
+        super(AIPS2Cook, self).__init__(task, inputs, outputs, logger)
+
+class GlishCook(SystemCook):
+    def __init__(self, task, inputs, outputs, logger):
+        # Don't know if we can do this right now, we might need a Python interface to AIPS++
+        aipspath = os.environ['AIPSPATH'] ## can raise an exception if it doesn't exist
+        super(GlishCook, self).__init__(task, inputs, outputs, logger)
+        self.logger.debug('Using ' + task + ' with AIPSPATH ' + aipspath)
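+
+## ---------------------------------------------------------------------
+## Illustrative sketch (not part of this module): driving a SystemCook by
+## hand, mirroring what WSRTrecipe.cook_system does.  The logger name is
+## arbitrary.
+##
+##    import logging
+##    logger = logging.getLogger('cuisine.example')
+##    c = SystemCook('ls', ['ls', '-l'], {}, logger)
+##    c.spawn()
+##    while c.handle_messages():
+##        pass            ## poll until both the pty and the stderr pipe hit EOF
+##    c.wait()            ## reaps the child and sets c.exitstatus
+##    print c.exitstatus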
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/files.py b/CEP/Pipeline/framework/lofarpipe/cuisine/files.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab51149e16fef40d47958276fe7c9b0ce92048c5
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/files.py
@@ -0,0 +1,19 @@
+# The feeling is this needs to be part of the ingredient, not the recipe
+# not used currently, maybe integrate it in some way with the data returned by MSinfo?
+def zap(filepath):
+    import os, shutil
+##    if filepath == '/' or filepath == '~/':
+##      raise Exception
+##    else:
+##      for root, dirs, files in os.walk(filepath, topdown=False):
+##        for name in files:
+##            os.remove(join(root, name))
+##        for name in dirs:
+##            os.rmdir(join(root, name))
+    ## this is a module-level function, so there is no self to call
+    ## execute() or messages on; use the standard library directly
+    if os.path.isdir(filepath):
+        shutil.rmtree(filepath)
+    elif os.path.isfile(filepath):
+        os.remove(filepath)
+    else:
+        print filepath + " doesn't seem to exist"
+
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/ingredient.py b/CEP/Pipeline/framework/lofarpipe/cuisine/ingredient.py
new file mode 100644
index 0000000000000000000000000000000000000000..96e12bce0ddf38cf49f48899e9c31c3498b93f98
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/ingredient.py
@@ -0,0 +1,7 @@
+class WSRTingredient(dict):
+    pass
+##    def __init__ (self, name):
+##        self.name = name
+##
+##    def __repr__ (self):
+##        return self.name
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/job_parser.py b/CEP/Pipeline/framework/lofarpipe/cuisine/job_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..334d7e5e16e58c018256ec7831c3f24f04977d5e
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/job_parser.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+from WSRTrecipe import *
+
+JobError     = -1
+JobHold      =  0
+JobScheduled =  1
+JobProducing =  2
+JobProduced  =  3
+
+class job_parser(WSRTrecipe):
+    def __init__(self):
+        WSRTrecipe.__init__(self)
+        ##inputs
+        self.inputs['Job'] = ''
+        self.name = 'job parser'
+        # We have no outputs
+        ## Help text
+        self.helptext = """
+        Script to parse an XML job file for use by the pipeline_manager.
+        See the exportjob.dtd for a definition of the format of a valid file."""
+    
+    ## Code to generate results ---------------------------------------------
+    def go(self):
+        try:
+            from xml.dom import minidom, Node
+            doc = minidom.parse(self.inputs['Job'])
+            if doc.documentElement.nodeName == 'exportjob':
+                self.outputs['ExportID'] = str(doc.documentElement.attributes.get('exportID').nodeValue)
+                for node in doc.documentElement.childNodes:
+                    if node.nodeName == 'scriptname':
+                        value = node.childNodes[0].nodeValue
+                        self.outputs['scriptname'] = value
+                    elif node.nodeName == 'repository':
+                        for itemnode in node.childNodes:
+                            if itemnode.nodeName == 'server':
+                                name = itemnode.childNodes[0].nodeValue
+                            elif itemnode.nodeName == 'resultdir':
+                                res  = itemnode.childNodes[0].nodeValue
+                        if res and name: 
+                            self.outputs['repository'] = (name, res)
+                    elif node.nodeName == 'inputlist':
+                        name = node.attributes.get('name').nodeValue
+                        self.outputs[name] = []
+                        for itemnode in node.childNodes:
+                            if itemnode.nodeName == 'listitem':
+                                ## list items are Python literals (e.g. quoted strings)
+                                value = itemnode.childNodes[0].nodeValue
+                                self.outputs[name].append(eval(value))
+                    elif node.nodeName == 'input':
+                        name  = node.attributes.get('name').nodeValue
+                        value = node.childNodes[0].nodeValue
+                        # we just interpret the value and let the cook/script worry
+                        # about whether it's an int or a string.
+                        if value == 'True' or value == 'False':
+                            self.outputs[name] = (value == 'True')
+                        else:
+                            ## keep it as a plain string; a value could be "8 O'clock" for example
+                            self.outputs[name] = value
+            if self.outputs['ExportID']: ## we need an export ID to identify the job
+                self.outputs['Status'] = JobScheduled
+                return
+        except Exception, inst:
+            self.print_notification('Failed importing job: ' + self.inputs['Job'] + '; Error: ' + str(inst))
+        self.outputs['Status'] = JobError
+
+## Stand alone execution code ------------------------------------------
+if __name__ == '__main__':
+    standalone = job_parser()
+    standalone.main()
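+
+## ---------------------------------------------------------------------
+## Illustrative sketch of the kind of XML this parser handles; the
+## authoritative format is defined by exportjob.dtd, and all values below
+## are hypothetical.
+##
+##    <exportjob exportID="12345">
+##      <scriptname>some_pipeline</scriptname>
+##      <repository>
+##        <server>some.host.example</server>
+##        <resultdir>/data/results</resultdir>
+##      </repository>
+##      <inputlist name="ms_files">
+##        <listitem>'L12345_SB000.MS'</listitem>
+##      </inputlist>
+##      <input name="clean_up">True</input>
+##    </exportjob>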
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/message.py b/CEP/Pipeline/framework/lofarpipe/cuisine/message.py
new file mode 100644
index 0000000000000000000000000000000000000000..a85642e6d4f3cb0ee29a51dc50737138aa4c0e78
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/message.py
@@ -0,0 +1,78 @@
+import sys, time
+
+##global Message Levels
+ErrorLevel   = 70 ## Error
+WarningLevel = 60 ## Warning
+NotifyLevel  = 50 ## Always visible, stderr of external processes
+VerboseLevel = 30 ## Visible on verbose, stdout of external processes
+DebugLevel   = 10 ## Visible on debug
+
+class WSRTmessages(list):
+    """Class for handling message logging redirection and reporting tresholds."""
+    def __init__(self):
+        list.__init__(self)
+        self.log   = {sys.stdout:NotifyLevel} ## NotifyLevel and above are always sent to stdout
+        self._store = False
+
+    def store(self):
+        self._store = True
+            
+    def pause(self):
+        self._store = False
+                
+    def clear(self):
+        list.__init__(self)
+        self._store = False
+
+    def append(self, level, item):
+        """level determines if the message gets reported, item should basically be a string"""
+        t = time.gmtime()
+        if self._store and level > DebugLevel:
+            list.append(self, (t, level, item)) ## storing the item for parsing by the caller.
+        for output in self.log.keys():
+            if self.log[output] <= level:
+                if level >= ErrorLevel:
+                    e = ' Error   : '
+                elif level >= WarningLevel:
+                    e = ' Warning : '
+                elif level >= NotifyLevel:
+                    e = ' Notification: '
+                elif level >= VerboseLevel:
+                    e = '     Message : '
+                elif level >= DebugLevel:
+                    e = '        Debug: '
+                output.write('%04d-%02d-%02d %02d:%02d:%02d' % (t[0], t[1], t[2], t[3], t[4], t[5])
+                             + e + item.strip() + '\n')
+                output.flush()
+
+    def __repr__(self):
+        text = ''
+        for i in self:
+            t     = i[0]
+            level = i[1]
+            item  = i[2]
+            if level >= ErrorLevel:
+                e = ' Error   : '
+            elif level >= WarningLevel:
+                e = ' Warning : '
+            elif level >= NotifyLevel:
+                e = ' Notification: '
+            elif level >= VerboseLevel:
+                e = '     Message : '
+            elif level >= DebugLevel:
+                e = '        Debug: '
+            text += ('%04d-%02d-%02d %02d:%02d:%02d' % (t[0], t[1], t[2], t[3], t[4], t[5])
+                         + e + item.strip() + '\n')
+        return text
+
+    def addlogger(self, level, logger):
+        """The level should be one of the above 
+        global levels and the logger should support
+        a write(string) method."""
+        self.log[logger] = level
+
+    def setloglevel(self, level, logger):
+        """Changes the level at which logging info is written to the logger."""
+        for output in self.log.keys():
+            if logger == output:
+                self.log[logger] = level
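+
+## ---------------------------------------------------------------------
+## Illustrative sketch (not part of this module): typical use of WSRTmessages.
+## The log file name is arbitrary.
+##
+##    messages = WSRTmessages()
+##    logfile  = open('example.log', 'a', 1)
+##    messages.addlogger(DebugLevel, logfile)        ## everything goes to the file
+##    messages.setloglevel(VerboseLevel, sys.stdout) ## more chatter on stdout
+##    messages.append(NotifyLevel, 'pipeline started')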
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/parset.py b/CEP/Pipeline/framework/lofarpipe/cuisine/parset.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec4aab70205a74a3b44e9122c97190521f32606f
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/parset.py
@@ -0,0 +1,87 @@
+class Parset(dict):
+
+    def __init__(self, fileName=None):
+        if fileName: self.readFromFile(fileName)
+
+    def readFromFile(self, fileName):
+        lastline = ''
+        for line in open(fileName, 'r').readlines():
+            lastline = lastline + line.split('#')[0]
+            lastline = lastline.rstrip()
+            if len(lastline) > 0 and lastline[-1] == '\\':
+                lastline = lastline[:-1]
+            elif '=' in lastline:
+                key, value = lastline.split('=')
+                self[key.strip()] = value.strip()
+                lastline = ''
+
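+    ## Illustrative sketch of the file syntax readFromFile accepts: simple
+    ## 'key = value' lines, '#' starts a comment, and a trailing '\' continues
+    ## a line.  The keys and values below are hypothetical.
+    ##
+    ##    anInt     = 42          # a comment
+    ##    aFloatVec = [2.5, 4.25, \
+    ##                 8.125]
+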
+    def writeToFile(self, fileName):
+        outf = open(fileName, 'w')
+        for key in sorted(self.keys()):
+            outf.write(key + ' = ' + str(self[key]) + '\n')
+        outf.close()
+
+    def getString(self, key):
+        return self[key]
+
+    def getInt(self, key):
+        return int(self[key])
+
+    def getFloat(self, key):
+        return float(self[key])
+
+    def getStringVector(self, key):
+        if type(self[key]) is str:
+            vec = self[key].strip('[]').split(',')
+        else:
+            vec = self[key]
+        return [lp.strip() for lp in vec]
+
+    def getIntVector(self, key):
+        if type(self[key]) is str:
+            vec = self[key].strip('[]').split(',')
+        else:
+            vec = self[key]
+        return [int(lp) for lp in vec]
+
+    def getFloatVector(self, key):
+        if type(self[key]) is str:
+            vec = self[key].strip('[]').split(',')
+        else:
+            vec = self[key]
+        return [float(lp) for lp in vec]
+
+
+### Self tests ###
+    
+if __name__ == '__main__':
+    import sys
+    import os
+    # First create a parset in memory.
+    p = Parset()
+    p['anInt'] = str(42)
+    p['aFloat'] = str(3.141592653589793)
+    p['aString'] = str('hello world')
+    p['anIntVec'] = str([1, 2, 3, 4, 5])
+    p['aFloatVec'] = str([2.5, 4.25,
+                          8.125, 16.0625])
+    p['aStringVec'] = str(['aap', 'noot', 'mies', 'wim', 'zus', 'jet',
+                           'teun', 'vuur', 'gijs', 'lam', 'kees', 'bok',
+                           'weide', 'does', 'hok', 'duif', 'schapen'])
+    # Write the parset out to file
+    p.writeToFile('p.parset');
+
+    # Create another parset by reading the written parset file
+    q = Parset()
+    q.readFromFile('p.parset')
+
+    # Clean-up temporary parset file
+    os.remove('p.parset')
+
+    # Parsets p and q must be equal
+    sys.stdout.write('Comparing parameter sets ...   ')
+    if p == q:
+        print 'ok'
+    else:
+        print 'FAIL: Expected equal parameter sets!'
+
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline.py b/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..65d3095af2cac616cfebbd7dad0cef748dbb2c19
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python
+from WSRTrecipe import *
+from job_parser import *
+import os, os.path, time, threading, types, thread, sys
+
+NewJob     = 1
+UpdateJob  = 2
+RestartCom = 3
+
+##--------------------- Server Code, should maybe be in separate module? --------------------------
+class server_wrapper(threading.Thread):
+    """Base class to create a server to listen for new jobs in a separate thread.
+    Mainly implements how to talk with the pipeline manager."""
+    def __init__(self, server): ## might need to be overridden
+        threading.Thread.__init__(self)
+        self.server     = server
+        self.stop       = False
+        self.stop_lock  = threading.Lock()
+        self.events     = [] ## list of received events. Main thread will remove them
+        self.event_lock = threading.Lock()  ## lock to make sure only one is using this list
+        self.event      = threading.Event() ## event to signal the main thread
+        self.setDaemon(True) ## this kills the server when the main thread finishes
+    
+    def run(self):
+        """Empty function override in a subclass"""
+        pass ## override in sub-class
+        
+##    def new_job(self, fileName):
+##        """Send an event to the pipeline manager that a new job is available."""
+##        self.event_lock.acquire()
+##        self.events.append((NewJob, fileName))
+##        self.event.set() ## signal the main thread we've done something
+##        self.event.clear()
+##        self.event_lock.release()
+
+    def new_job(self, fileName, fileContent):
+        """Send an event to the pipeline manager that a new job is available."""
+        self.event_lock.acquire()
+        self.events.append((NewJob, fileName, fileContent))
+        self.event.set() ## signal the main thread we've done something
+        self.event.clear()
+        self.event_lock.release()
+
+    def update_job(self, exportID, status):
+        """Send an event to the pipeline manager that a job needs to be updated."""
+        self.event_lock.acquire()
+        self.events.append((UpdateJob, exportID, status))
+        self.event.set() ## signal the main thread we've done something
+        self.event.clear()
+        self.event_lock.release()
+
+    def restart(self):
+        """function that gets called when a restart of the communication
+        of jobs statusses is needed."""
+        self.event_lock.acquire()
+        self.events.append((RestartCom,)) ## note: must be a tuple so job[0] works in check_server
+        self.event.set() ## signal the main thread we've done something
+        self.event.clear()
+        self.event_lock.release()
+
+    def serve_forever(self):
+        """Accept requests until the pipeline manager signals to stop."""
+        self.server.socket.settimeout(60)
+        while 1:
+            #? self.stop_lock.acquire() doesn't seem necessary
+            if self.stop:
+                #? self.stop_lock.release() doesn't seem necessary
+                break
+            #? self.stop_lock.release() doesn't seem necessary
+            self.server.handle_request()
+##--------------------- End Server Code --------------------------
+
+class pipeline_manager(WSRTrecipe):
+    def __init__(self):
+        WSRTrecipe.__init__(self)
+        ## inputs
+        self.inputs['ConfigurationFile'] = 'pipeline_manager_config' ## without the .py
+        self.inputs['NewJobsDirectory']  = ''
+        self.inputs['JobsDirectory']     = ''
+        self.inputs['LogDirectory']      = ''
+
+        ##outputs
+        self.outputs['failed_communication'] = []
+
+        ## Help text
+        self.helptext = """
+        Script to run as a server for executing individual jobs.
+        ConfigurationFile should be given without the '.py' extension.
+        NewJobsDirectory is where new jobs should be written; the
+        JobsDirectory contains unfinished jobs."""
+        
+        self.jobs                 = []
+        self.parser               = job_parser()
+        self.parser.messages      = self.messages
+        self.running_job          = None
+    
+    ## Code to generate results ---------------------------------------------
+    def startup(self):
+        """Tell the user we stared, read the configuration and try to read unfinished jobs from JobDirectory"""
+        print 'WSRT pipeline manager version 0.5'
+        print 'Press Ctrl-C to abort'
+        exec("from %s import *" % self.inputs['ConfigurationFile'])
+        self.log     = file(self.inputs['LogDirectory'] + '/pipeline_manager.log', 'a', 1)
+        self.messages.addlogger(message.DebugLevel, self.log)
+        self.print_message('----- Logging started -----')
+        ExistingJobs = os.listdir(self.inputs['JobsDirectory'])
+        self.server  = server
+        self.client  = client
+        self.restart_communication()
+        for e in ExistingJobs:
+            self.new_job(e, "")
+
+    def communicate_job(self, job): 
+        """function to write to log and communicate with GUI"""
+        if   job['Status'] == JobError:     self.print_notification('Job:' + str(job['ExportID']) + ' Failed')
+        elif job['Status'] == JobHold:      self.print_notification('Job:' + str(job['ExportID']) + ' is on Hold')
+        elif job['Status'] == JobScheduled: self.print_notification('Job:' + str(job['ExportID']) + ' Scheduled')
+        elif job['Status'] == JobProducing: self.print_notification('Job:' + str(job['ExportID']) + ' Started')
+        elif job['Status'] == JobProduced:  self.print_notification('Job:' + str(job['ExportID']) + ' Produced')
+        try:
+            if not isinstance(self.client, types.NoneType):
+                (status, message) = self.client.setStatus(str(job['ExportID']), str(job['Status']))
+                if status: ## we retry, because the client does not do an internal retry, but only reports the problem
+                    count = 1
+                    while (status and (count < 10)):
+                        self.print_notification("Got some error, retrying " + str(job['ExportID']) + ": " + message)
+                        time.sleep(60)
+                        (status, message) = self.client.setStatus(str(job['ExportID']), str(job['Status']))
+                        count += 1
+                if status:
+                    self.print_error(message)
+                else:
+                    self.print_message(message)
+        except:
+            self.outputs['failed_communication'].append((job['ExportID'], job['Status']))
+            self.set_run_info(self.inputs['LogDirectory'])
+            self.print_error('Could not update job %s status to %s.' % (str(job['ExportID']), str(job['Status'])))
+
+    def restart_communication(self):
+        """Try to tell the client what we failed to tell earlier."""
+        results = self.get_run_info(self.inputs['LogDirectory'])
+        if not results: return
+        if not results[self.name]: return
+        for i in results[self.name]['outputs']['failed_communication']:
+            try:
+                if not isinstance(self.client, types.NoneType):
+                    self.print_message(self.client.setStatus(i[0], i[1]))
+            except:
+                self.print_error('Could not update job %s status to %s.' % (str(i[0]), str(i[1])))
+                return
+        self.outputs['failed_communication'] = []
+        self.set_run_info(self.inputs['LogDirectory'])
+
+    def new_job(self, filename, fileContent):
+        """Read filename and add to the list of jobs if it is a valid file."""
+        import shutil
+        try:
+            if fileContent:
+                f = open(self.inputs['NewJobsDirectory'] + '/' + filename, 'w')
+                f.write(fileContent)
+                f.close()
+            shutil.move(self.inputs['NewJobsDirectory'] + '/' + filename, self.inputs['JobsDirectory'] + '/' + filename)
+        except:
+            self.print_debug('file not found (existing job?): ' + self.inputs['NewJobsDirectory'] + '/' + filename)
+        self.parser.inputs['Job'] = self.inputs['JobsDirectory'] + '/' + filename
+        self.parser.outputs       = ingredient.WSRTingredient() ## empty ingredient to be able to run more than once
+        self.parser.go()
+        job             = self.parser.outputs.copy()
+        job['filename'] = filename
+        if job['Status'] == JobScheduled:
+            self.jobs.append(job)
+            self.communicate_job(job)
+        else:
+            self.print_notification('Parsing ' + self.inputs['JobsDirectory'] + '/' + filename + ' failed') ## Not implemented yet
+
+    def find_job(self, exportID):
+        for j in self.jobs:
+            if j['ExportID'] == exportID:
+                return j
+        return None
+
+    def update_job(self, exportID, status): 
+        """for communicating job status with GUI, mainly to put on Hold."""
+        j = self.find_job(exportID)
+        if j:
+            j['Status'] = status
+            self.communicate_job(j)
+        else:
+            self.print_debug('Job ' + str(exportID) + ' not found, ignoring message.')
+
+    def check_server(self):
+        """Check if there are any new jobs communicated to the server."""
+        self.server.event.wait(10)
+        self.server.event_lock.acquire()
+        while len(self.server.events) > 0:
+            job = self.server.events.pop(0)
+            if job[0] == NewJob:
+                self.new_job(job[1], job[2])
+            elif job[0] == UpdateJob:
+                self.update_job(job[1], job[2])
+            elif job[0] == RestartCom:
+                self.restart_communication()
+        self.server.event_lock.release()
+
+    def next_job(self):
+        """See if there is another job scheduled, then start it."""
+        import shutil
+        for j in self.jobs:
+            if j['Status'] >= JobScheduled:
+                if j['Status'] > JobScheduled:
+                    self.print_notification('Restarting job: ' + j['ExportID'])
+                j['Status'] = JobProducing
+                self.running_job = j
+                self.communicate_job(j)
+                self.cook_job(j)
+                self.communicate_job(j)
+                shutil.move(self.inputs['JobsDirectory'] + '/' + j['filename'], 
+                            self.inputs['LogDirectory'] + '/' + str(j['ExportID']) + '/' + j['filename'])
+                self.jobs.remove(j) ## we can not use .pop() because the first job might be on hold
+                self.running_job = None ## tell ourselves that we're doing nothing
+                return ## we did a jobs.remove() so we don't want to stay in the for loop!
+    
+    def prepare_recipe_parameters(self, job):
+        """Prepare ingedients and message handler for the cook to cook the recipe."""
+        import sys
+        logfile = file(self.inputs['LogDirectory'] + '/' + str(job['ExportID']) + '/pipeline_manager.log', 'a', 1)
+        messages = message.WSRTmessages()
+        results  = ingredient.WSRTingredient()
+        if self.messages.log[sys.stdout] == message.DebugLevel:
+            messages.addlogger(message.DebugLevel, logfile)
+            messages.setloglevel(message.DebugLevel, sys.stdout)
+        else:
+            messages.addlogger(message.VerboseLevel, logfile)
+            messages.setloglevel(message.NotifyLevel, sys.stdout)
+        inputs = job.copy()
+        del inputs['scriptname']
+        del inputs['Status']
+        del inputs['filename']
+        return (logfile, inputs, results, messages)
+
+    def cook_job(self, job):
+        """This starts a recipe with the inputs as defined in the job file."""
+        if not os.path.isdir(self.inputs['LogDirectory'] + '/' + str(job['ExportID'])):
+            os.makedirs(self.inputs['LogDirectory'] + '/' + str(job['ExportID']))
+        logfile, inputs, results, messages = self.prepare_recipe_parameters(job)
+        try:
+            self.cook_recipe(job['scriptname'], inputs, results, messages)
+        except Exception, e:
+            messages.append(message.ErrorLevel, str(e))
+            job['Status'] = JobError
+            results       = None
+        if results:
+            job['Status'] = JobProduced # something more elaborate?
+            messages.append(message.VerboseLevel, 'Results:')
+            for o in results.keys():
+                messages.append(message.VerboseLevel, str(o) + ' = ' + str(results[o]))
+        else: # should a recipe always have results?
+            messages.append(message.VerboseLevel, 'No Results!')
+            job['Status'] = JobError
+        logfile.close()
+        ## dump the logfile to the webdav as a dataproduct.
+        if 'repository' in job.keys():
+            try:
+                temp = ingredient.WSRTingredient()
+                temp['server']        = job['repository'][0]
+                temp['resultdir']     = job['repository'][1]
+                temp['filepath']      = self.inputs['LogDirectory'] + '/' + str(job['ExportID'])
+                temp['filename']      = 'pipeline_manager.log'
+                temp['resultfilename']= str(job['ExportID']) + '_pipeline.log'
+                self.cook_recipe('put_pipeline_log', temp, temp)
+            except:
+                self.print_notification('failed writing pipeline log.')
+
+    def print_time(self):
+        t = time.gmtime()
+        timestring = '\r%04d-%02d-%02d %02d:%02d:%02d' % (t[0], t[1], t[2], t[3], t[4], t[5])
+        if self.running_job:
+            timestring += ' Busy running job: ' + self.running_job['ExportID']
+        else:
+            if self.jobs:
+                timestring += ' checking for new jobs.'
+            else:
+                ##timestring += ' No jobs available, waiting for next job.'
+                timestring = '.'
+        sys.stdout.write(timestring)
+        sys.stdout.flush()
+
+    def go(self):
+        self.startup()
+        try:
+            while True: ##run forever
+                try:
+                    if not self.running_job:
+                        thread.start_new_thread((self.next_job), ())
+                    self.print_time()
+                    if not isinstance(self.server, types.NoneType):
+                        self.check_server()
+                        time.sleep(1) # temp fix, as apparently check_server can return fast enough to re-enter
+                                      # next_job before the previous one gets to self.running_job = j;
+                                      # should really use a lock, e.g. self.stop = False / self.stop_lock = threading.Lock()
+                    else:
+                        if self.jobs:
+                            time.sleep(10)
+                        else:
+                            raise Exception ("No more jobs and no Server, ending manager.")
+                except KeyboardInterrupt:
+                    self.print_notification('Pipeline Manager: Keyboard interrupt detected, asking user...')
+                    reply = raw_input('Do you want to end the pipeline manager (y/n)?')
+                    if 'y' in reply:
+                        raise KeyboardInterrupt ('Pipeline Manager: User wants to end program')
+        except KeyboardInterrupt, k:
+            self.print_notification(str(k))
+        except Exception, inst:
+            self.print_error('Pipeline Manager: Exception caught: ' + str(type(Exception)) + ' ' + str(inst))
+            raise inst
+        if self.server is not None: ## check if the server is alive
+            self.server.stop_lock.acquire()
+            self.server.stop = True ## tell the server to stop
+            self.server.stop_lock.release()
+
+## Stand alone execution code ------------------------------------------
+if __name__ == '__main__':
+    standalone = pipeline_manager()
+    standalone.main()
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline_manager_config.py b/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline_manager_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..d80dabe98f5eeed2027348e292a792dec39e2165
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline_manager_config.py
@@ -0,0 +1,2 @@
+client = None
+server = None
diff --git a/CEP/Pipeline/framework/lofarpipe/support/__init__.py b/CEP/Pipeline/framework/lofarpipe/support/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/CEP/Pipeline/framework/lofarpipe/support/baserecipe.py b/CEP/Pipeline/framework/lofarpipe/support/baserecipe.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b26c7322b5521e6a9ca301c7245ea2484285430
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/baserecipe.py
@@ -0,0 +1,231 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                              Base LOFAR Recipe
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from ConfigParser import NoOptionError, NoSectionError
+from ConfigParser import SafeConfigParser as ConfigParser
+from threading import Event
+from functools import partial
+
+import os
+import sys
+import inspect
+import logging
+import errno
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.lofarexceptions import PipelineException, PipelineRecipeFailed
+from lofarpipe.cuisine.WSRTrecipe import WSRTrecipe
+from lofarpipe.support.lofaringredient import RecipeIngredients, LOFARinput, LOFARoutput
+from lofarpipe.support.remotecommand import run_remote_command
+
+class BaseRecipe(RecipeIngredients, WSRTrecipe):
+    """
+    Provides standard boiler-plate used in the various LOFAR pipeline recipes.
+    """
+    # Class ordering is important here.
+    # WSRTrecipe.__init__ does not call a superclass, hence RecipeIngredients
+    # must go first.
+    # Further, RecipeIngredients.__init__ overwrites the inputs dict provided
+    # by WSRTrecipe, so it must call super() before setting up inputs.
+    inputs = {} # No inputs to add to defaults
+    def __init__(self):
+        """
+        Subclasses should define their own parameters, but remember to call
+        this __init__() method to include the required defaults.
+        """
+        super(BaseRecipe, self).__init__()
+        self.error = Event()
+        self.error.clear()
+
+    @property
+    def __file__(self):
+        """
+        Provides the file name of the currently executing recipe.
+        """
+        import inspect
+        full_location = os.path.abspath(inspect.getsourcefile(self.__class__))
+        # DANGER WILL ROBINSON!
+        # On the lofar cluster frontend (lfe001), home directories are in
+        # /data/users, but on the nodes they are in /home. This workaround
+        # means things work like one might expect for now, but this is not a
+        # good long-term solution.
+        return full_location.replace('/data/users', '/home')
+
+    def _setup_logging(self):
+        """
+        Set up logging.
+
+        We always produce a log file and log to stdout. The name of the file
+        and the logging format can be set in the pipeline configuration file.
+        """
+        try:
+            logfile = self.config.get("logging", "log_file")
+        except:
+            logfile = os.path.join(
+                self.config.get("layout", "job_directory"), 'logs/pipeline.log'
+            )
+
+        try:
+            format = self.config.get("logging", "format", raw=True)
+        except:
+            format = "%(asctime)s %(levelname)-7s %(name)s: %(message)s"
+
+        try:
+            datefmt = self.config.get("logging", "datefmt", raw=True)
+        except:
+            datefmt = "%Y-%m-%d %H:%M:%S"
+
+        try:
+            os.makedirs(os.path.dirname(logfile))
+        except OSError, failure:
+            if failure.errno != errno.EEXIST:
+                raise
+
+        stream_handler = logging.StreamHandler(sys.stdout)
+        file_handler = logging.FileHandler(logfile)
+
+        formatter = logging.Formatter(format, datefmt)
+
+        stream_handler.setFormatter(formatter)
+        file_handler.setFormatter(formatter)
+        self.logger.addHandler(stream_handler)
+        self.logger.addHandler(file_handler)
+
+    def run_task(self, configblock, datafiles=[], **kwargs):
+        """
+        A task is a combination of a recipe and a set of parameters.
+        Tasks can be predefined in the task file specified in the pipeline
+        configuration (default: tasks.cfg).
+
+        Here, we load a task configuration and execute it.
+        This is a "shorthand" version of
+        :meth:`lofarpipe.cuisine.WSRTrecipe.WSRTrecipe.cook_recipe`.
+        """
+        self.logger.info("Running task: %s" % (configblock,))
+
+        # Does the task definition exist?
+        try:
+            recipe = self.task_definitions.get(configblock, "recipe")
+        except NoSectionError:
+            raise PipelineException(
+                "%s not found -- check your task definitions" % configblock
+            )
+
+        # Build inputs dict.
+        # First, take details from caller.
+        inputs = LOFARinput(self.inputs)
+        inputs['args'] = datafiles
+
+        # Add parameters from the task file.
+        # Note that we neither need the recipe name nor any items from the
+        # DEFAULT config.
+        parameters = dict(self.task_definitions.items(configblock))
+        del parameters['recipe']
+        for key in dict(self.config.items("DEFAULT")).keys():
+            del parameters[key]
+        inputs.update(parameters)
+
+        # Update inputs with provided kwargs, if any.
+        inputs.update(kwargs)
+
+        # Default outputs dict.
+        outputs = LOFARoutput()
+
+        # Cook the recipe and return the results.
+        if self.cook_recipe(recipe, inputs, outputs):
+            self.logger.warn(
+                "%s reports failure (using %s recipe)" % (configblock, recipe)
+            )
+            raise PipelineRecipeFailed("%s failed" % configblock)
+        return outputs
+
+    def _read_config(self):
+        # If a config file hasn't been specified, use the default
+        if not self.inputs.has_key("config"):
+            # Possible config files, in order of preference:
+            conf_locations = (
+                os.path.join(sys.path[0], 'pipeline.cfg'),
+                os.path.join(os.path.expanduser('~'), '.pipeline.cfg')
+            )
+            for path in conf_locations:
+                if os.access(path, os.R_OK):
+                    self.inputs["config"] = path
+                    break
+            if not self.inputs.has_key("config"):
+                raise PipelineException("Configuration file not found")
+
+        config = ConfigParser({
+            "job_name": self.inputs["job_name"],
+            "start_time": self.inputs["start_time"],
+            "cwd": os.getcwd()
+        })
+        config.read(self.inputs["config"])
+        return config
+
+    def go(self):
+        """
+        This is where the work of the recipe gets done.
+        Subclasses should define their own go() method, but remember to call
+        this one to perform necessary initialisation.
+        """
+        # Every recipe needs a job identifier
+        if not self.inputs.has_key("job_name"):
+            raise PipelineException("Job undefined")
+
+        if not self.inputs.has_key("start_time"):
+            import datetime
+            self.inputs["start_time"] = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+        self.logger.debug("Pipeline start time: %s" % self.inputs['start_time'])
+
+        # Config is passed in from spawning recipe. But if this is the start
+        # of a pipeline, it won't have one.
+        if not hasattr(self, "config"):
+            self.config = self._read_config()
+
+        # Ensure we have a runtime directory
+        if not self.inputs.has_key('runtime_directory'):
+            self.inputs["runtime_directory"] = self.config.get(
+                "DEFAULT", "runtime_directory"
+            )
+        else:
+            self.config.set('DEFAULT', 'runtime_directory', self.inputs['runtime_directory'])
+        if not os.access(self.inputs['runtime_directory'], os.F_OK):
+            raise IOError, "Runtime directory doesn't exist"
+
+        # ...and task files, if applicable
+        if not self.inputs.has_key("task_files"):
+            try:
+                self.inputs["task_files"] = utilities.string_to_list(
+                    self.config.get('DEFAULT', "task_files")
+                )
+            except NoOptionError:
+                self.inputs["task_files"] = []
+        self.task_definitions = ConfigParser(self.config.defaults())
+        self.task_definitions.read(self.inputs["task_files"])
+
+        try:
+            self.recipe_path = [
+                os.path.join(root, 'master') for root in utilities.string_to_list(
+                    self.config.get('DEFAULT', "recipe_directories")
+                )
+            ]
+        except NoOptionError:
+            self.recipe_path = []
+
+        # At this point, the recipe inputs must be complete. If not, exit.
+        if not self.inputs.complete():
+            raise PipelineException(
+                "Required inputs not available: %s" %
+                " ".join(self.inputs.missing())
+            )
+
+        # Only configure handlers if our parent is the root logger.
+        # Otherwise, our parent should have done it for us.
+        if isinstance(self.logger.parent, logging.RootLogger):
+            self._setup_logging()
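+
+#              Example (an illustrative sketch only, not used by the framework):
+#               a minimal recipe built on BaseRecipe. The recipe class name and
+#             its single input field are invented here purely for demonstration.
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    class example_recipe(BaseRecipe):
+        inputs = {
+            'message': ingredient.StringField(
+                '--message', help="Text to log", default="hello"
+            )
+        }
+
+        def go(self):
+            super(example_recipe, self).go()  # config, logging, task definitions
+            self.logger.info(self.inputs['message'])
+            return 0
+
+    # main() is assumed here to be provided by WSRTrecipe: it parses the
+    # standard options (job name, configuration file, ...) and then calls go().
+    sys.exit(example_recipe().main())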
diff --git a/CEP/Pipeline/framework/lofarpipe/support/clusterdesc.py b/CEP/Pipeline/framework/lofarpipe/support/clusterdesc.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbefb0a8b5aa79dab0fa014b90153515e95b178f
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/clusterdesc.py
@@ -0,0 +1,78 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                     Cluster description handler
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+import os.path
+import lofar.parameterset
+from lofarpipe.support.lofarexceptions import ClusterError
+
+class ClusterDesc(object):
+    """
+    Wrap a clusterdesc file, providing a more convenient, Pythonic interface
+    for accessing its contents.
+    """
+    def __init__(self, filename):
+        self.filename = filename
+        self.parameterset = lofar.parameterset.parameterset(self.filename)
+        self.name = self.parameterset.getString('ClusterName')
+        self.keylist = []
+        with open(filename, 'r') as file:
+            for line in file.readlines():
+                if len(line.strip()) == 0 or line.strip()[0] == '#': continue
+                self.keylist.append(line.split('=')[0].strip())
+        try:
+            subclusters = self.parameterset.get("SubClusters").expand().getStringVector()
+            self.subclusters = [
+                ClusterDesc(
+                    os.path.join(os.path.dirname(self.filename), subcluster)
+                )
+                for subcluster in subclusters
+            ]
+        except RuntimeError:
+            self.subclusters = []
+
+    def get(self, key, recursive=True):
+        values = []
+        if key in self.keylist:
+            values.extend(self.parameterset.get(key).expand().getStringVector())
+        if recursive:
+            for subcluster in self.subclusters:
+                values.extend(subcluster.get(key, recursive=True))
+        return values
+
+    def keys(self, recursive=True):
+        values = self.keylist[:]
+        if recursive:
+            for subcluster in self.subclusters:
+                values.extend(subcluster.keys(recursive=True))
+        return list(set(values))
+
+    def __str__(self):
+        return "ClusterDesc: " + self.name
+
+def get_compute_nodes(clusterdesc):
+    """
+    Return a list of all compute nodes defined (under the key "Compute.Nodes")
+    in the ClusterDesc class object clusterdesc.
+    """
+    try:
+        return clusterdesc.get('Compute.Nodes')
+    except:
+        raise ClusterError("Unable to find compute nodes in clusterdesc.")
+
+def get_head_node(clusterdesc):
+    """
+    Return a single-element list containing the first head node defined under
+    the key "Head.Nodes" in the ClusterDesc class object clusterdesc, even if
+    several head nodes are defined.
+    """
+    try:
+        return [clusterdesc.get('Head.Nodes')[0]]
+    except:
+        raise ClusterError("Unable to find head node in clusterdesc.")
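+
+#            Example (an illustrative sketch only): inspect a clusterdesc file
+#            given on the command line. The path is whatever the user supplies;
+#                       nothing here is specific to a particular LOFAR cluster.
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    import sys
+    clusterdesc = ClusterDesc(sys.argv[1])
+    print "Cluster:      ", clusterdesc.name
+    print "Head node:    ", get_head_node(clusterdesc)[0]
+    print "Compute nodes:", ", ".join(get_compute_nodes(clusterdesc))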
diff --git a/CEP/Pipeline/framework/lofarpipe/support/clusterhandler.py b/CEP/Pipeline/framework/lofarpipe/support/clusterhandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..5317045fa5715a2ecea2e9997996e8a30d2ef7db
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/clusterhandler.py
@@ -0,0 +1,127 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                        Management of IPython cluster processes
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+import shlex
+import subprocess
+import threading
+import logging
+import os
+import time
+
+from contextlib import contextmanager
+
+from lofarpipe.support.lofarexceptions import ClusterError
+from lofarpipe.support.clusterdesc import ClusterDesc
+from lofarpipe.support.clusterdesc import get_compute_nodes, get_head_node
+
+class ClusterHandler(object):
+    """
+    ClusterHandler provides a convenient interface for setting up and tearing
+    down an IPython cluster -- engines & controller -- over a network topology
+    described by a clusterdesc file.
+    """
+    def __init__(self, config, logger=None):
+        if not logger:
+            logging.basicConfig()
+            self.logger = logging.getLogger()
+        else:
+            self.logger = logger
+        clusterdesc = ClusterDesc(config.get('cluster', 'clusterdesc'))
+        self.head_node = get_head_node(clusterdesc)[0]
+        self.compute_nodes = get_compute_nodes(clusterdesc)
+        self.script_path = config.get('deploy', 'script_path')
+        self.config = config
+
+    def start_cluster(self, nproc=""):
+        # Optional nproc argument specifies number of engines per node
+        self.__start_controller()
+        self.__start_engines(nproc)
+
+    def stop_cluster(self):
+        self.__stop_controller()
+        self.__stop_engines()
+
+    def __execute_ssh(self, host, command):
+        ssh_cmd = shlex.split("ssh -x %s -- %s" % (host, command))
+        subprocess.check_call(ssh_cmd)
+        self.logger.info("  * %s" % (host))
+
+    def __multinode_ssh(self, nodes, command):
+        ssh_connections = [
+            threading.Thread(
+                target = self.__execute_ssh,
+                args = (node, command)
+            ) for node in nodes
+        ]
+        [thread.start() for thread in ssh_connections]
+        [thread.join() for thread in ssh_connections]
+
+    def __start_controller(self):
+        self.logger.info("Starting controller:")
+        controlpath = self.config.get('DEFAULT', 'runtime_directory')
+        controller_ppath = self.config.get('deploy', 'controller_ppath')
+        # Check that there isn't an existing pidfile
+        if os.path.isfile(os.path.join(controlpath, "ipc.pid")):
+            raise ClusterError("Controller already running")
+        # Before starting, ensure that the old engine.furl isn't lying about
+        # to cause confusion
+        try:
+            os.unlink(os.path.join(controlpath, 'engine.furl'))
+        except OSError:
+            pass
+        self.__execute_ssh(self.head_node, "bash %s/ipcontroller.sh %s start %s" % (self.script_path, controlpath, controller_ppath))
+        # Wait until an engine.furl file has been created before moving on to
+        # start engines etc.
+        while not os.path.isfile(os.path.join(controlpath, 'engine.furl')):
+            time.sleep(1)
+        self.logger.info("done.")
+
+    def __stop_controller(self):
+        self.logger.info("Stopping controller:")
+        controlpath = self.config.get('DEFAULT', 'runtime_directory')
+        controller_ppath = self.config.get('deploy', 'controller_ppath')
+        self.__execute_ssh(self.head_node, "bash %s/ipcontroller.sh %s stop %s" % (self.script_path, controlpath, controller_ppath))
+        os.unlink(os.path.join(controlpath, 'engine.furl'))
+        self.logger.info("done.")
+
+    def __start_engines(self, nproc):
+        self.logger.info("Starting engines:")
+        controlpath = self.config.get('DEFAULT', 'runtime_directory')
+        engine_ppath = self.config.get('deploy', 'engine_ppath')
+        engine_lpath = self.config.get('deploy', 'engine_lpath')
+        with open(os.path.join(controlpath, 'engine.furl')) as furlfile:
+            furl = furlfile.readline().strip()
+        command = "bash %s/ipengine.sh %s start %s %s %s %s" % (self.script_path, controlpath, engine_ppath, engine_lpath, furl, str(nproc))
+        self.__multinode_ssh(self.compute_nodes, command)
+        self.logger.info("done.")
+
+    def __stop_engines(self):
+        self.logger.info("Stopping engines:")
+        controlpath = self.config.get('DEFAULT', 'runtime_directory')
+        command= "bash %s/ipengine.sh %s stop" % (self.script_path, controlpath)
+        self.__multinode_ssh(self.compute_nodes, command)
+        self.logger.info("done.")
+
+@contextmanager
+def ipython_cluster(config, logger, nproc=""):
+    """
+    Provides a context in which an IPython cluster is available. Designed for
+    use within LOFAR pipeline recipes.
+
+    The optional nproc argument specifies the number of engines which will be
+    started per compute node.
+    """
+    cluster_handler = ClusterHandler(config, logger)
+    cluster_handler.start_cluster(nproc)
+    # If an exception is raised during the pipeline run, make sure we catch it
+    # and shut the cluster down cleanly.
+    try:
+        yield
+    finally:
+        cluster_handler.stop_cluster()
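+
+#       Example (an illustrative sketch only): bring up an IPython cluster for
+#    the duration of a block of work. "pipeline.cfg" is a placeholder; it must
+#      provide the cluster, deploy and DEFAULT options read by ClusterHandler.
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    from ConfigParser import SafeConfigParser
+
+    logging.basicConfig(level=logging.INFO)
+    config = SafeConfigParser()
+    config.read("pipeline.cfg")  # placeholder configuration file
+    with ipython_cluster(config, logging.getLogger(), nproc="8"):
+        # Controller and engines are running inside this block; they are torn
+        # down again when the block exits, even if an exception is raised.
+        pass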
diff --git a/CEP/Pipeline/framework/lofarpipe/support/clusterlogger.py b/CEP/Pipeline/framework/lofarpipe/support/clusterlogger.py
new file mode 100644
index 0000000000000000000000000000000000000000..191ef1c12f1f7b83c1d6f1b97032b21d7d9fc622
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/clusterlogger.py
@@ -0,0 +1,38 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                         Network logging system
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from contextlib import contextmanager
+
+import socket
+import threading
+
+from lofarpipe.support.jobserver import JobSocketReceiver
+
+@contextmanager
+def clusterlogger(
+    logger,
+    host=None,
+    port=0
+):
+    """
+    Provides a context in which a network logging server is available. Note
+    that the logging server is just a JobSocketReceiver with no jobpool or
+    error flag.
+
+    Yields a host name & port to which log messages can be sent.
+    """
+    if not host:
+        host = socket.gethostname()
+    logserver = JobSocketReceiver(logger, {}, threading.Event(), host=host, port=port)
+    logserver.start()
+    try:
+        yield logserver.server_address
+    except KeyboardInterrupt:
+        logserver.stop()
+        raise
+    logserver.stop()
+    [handler.flush() for handler in logger.handlers]
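+
+#    Example (an illustrative sketch only): run a network log receiver for the
+#     duration of a block. Remote jobs would attach a logging.SocketHandler to
+#          the yielded (host, port) pair, as lofarpipe.support.lofarnode does.
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    import logging
+    logging.basicConfig(level=logging.DEBUG)
+    logger = logging.getLogger("clusterlogger.demo")
+    with clusterlogger(logger) as (host, port):
+        logger.info("Log server listening on %s:%d" % (host, port))
+        # ... dispatch node jobs here, telling them to log to host:port ...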
diff --git a/CEP/Pipeline/framework/lofarpipe/support/control.py b/CEP/Pipeline/framework/lofarpipe/support/control.py
new file mode 100644
index 0000000000000000000000000000000000000000..953496aae83dd598bb300b6151afcac6abf4687d
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/control.py
@@ -0,0 +1,45 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                        Pipeline control recipe
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from lofarpipe.support.stateful import StatefulRecipe
+from lofarpipe.support.lofarexceptions import PipelineException
+
+#                                             Standalone Pipeline Control System
+# ------------------------------------------------------------------------------
+
+class control(StatefulRecipe):
+    """
+    Basic pipeline control framework.
+
+    Define a pipeline by subclassing and providing a body for the
+    :meth:`pipeline_logic` method.
+
+    This class provides little by itself, but can be specialised to provide,
+    for example, a MAC/SAS interface.
+    """
+    inputs = {}
+    def pipeline_logic(self):
+        """
+        Define pipeline logic here in subclasses
+        """
+        raise NotImplementedError
+
+    def go(self):
+        super(control, self).go()
+
+        self.logger.info(
+            "LOFAR Pipeline (%s) starting." %
+            (self.name,)
+        )
+
+        try:
+            self.pipeline_logic()
+        except PipelineException, message:
+            self.logger.error(message)
+            return 1
+
+        return 0
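+
+#       Example (an illustrative sketch only): a trivial pipeline built on the
+#      control framework. The task names are placeholders for entries in the
+#      pipeline's task definition file; main() is assumed to be inherited from
+#                                                   WSRTrecipe via BaseRecipe.
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    import sys
+
+    class demo_pipeline(control):
+        def pipeline_logic(self):
+            # Each run_task() call (see BaseRecipe.run_task) looks up a recipe
+            # and its parameters in the task definition file and cooks it.
+            self.run_task("first_task", self.inputs['args'])
+            self.run_task("second_task", self.inputs['args'])
+
+    sys.exit(demo_pipeline().main())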
diff --git a/CEP/Pipeline/framework/lofarpipe/support/group_data.py b/CEP/Pipeline/framework/lofarpipe/support/group_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..8eaaba58eca75d4ca3e1aa512394f515debef1e5
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/group_data.py
@@ -0,0 +1,99 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                              Group data into appropriate chunks for processing
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from collections import defaultdict
+import subprocess
+
+from lofar.parameterset import parameterset
+
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.clusterdesc import get_compute_nodes
+from lofarpipe.support.parset import Parset
+
+def group_files(logger, clusterdesc, node_directory, group_size, filenames):
+        """
+        Group a list of files into blocks suitable for simultaneous
+        processing, such that a limited number of processes run on any given
+        host at a time.
+
+        The directory node_directory on each compute node specified in
+        clusterdesc is searched for any of the files listed in filenames. A
+        generator is returned; on each iteration it yields no more than
+        group_size files per node.
+        """
+        # Given a limited number of processes per node, the first task is to
+        # partition up the data for processing.
+        logger.debug('Listing data on nodes')
+        data = {}
+        for node in get_compute_nodes(clusterdesc):
+            logger.debug("Node: %s" % (node))
+            exec_string = ["ssh", node, "--", "find",
+                node_directory,
+                "-maxdepth 1",
+                "-print0"
+                ]
+            logger.debug("Executing: %s" % (" ".join(exec_string)))
+            my_process = subprocess.Popen(exec_string, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            sout, serr = my_process.communicate()
+            data[node] = sout.split('\x00')
+            data[node] = utilities.group_iterable(
+                [element for element in data[node] if element in filenames],
+                group_size,
+            )
+
+        # Now produce an iterator which steps through the various chunks of
+        # data, yielding each chunk in turn for processing.
+        data_iterator = utilities.izip_longest(*list(data.values()))
+        for data_chunk in data_iterator:
+            to_process = []
+            for node_data in data_chunk:
+                if node_data: to_process.extend(node_data)
+            yield to_process
+
+def gvds_iterator(gvds_file, nproc=4):
+    """
+    Reads a GVDS file.
+
+    Provides a generator which successively returns the contents of the GVDS
+    file in the form (host, filename, vds), in chunks suitable for processing
+    across the cluster: that is, no more than nproc files per host at a time.
+    """
+    parset = Parset(gvds_file)
+
+    data = defaultdict(list)
+    for part in range(parset.getInt('NParts')):
+        host = parset.getString("Part%d.FileSys" % part).split(":")[0]
+        file = parset.getString("Part%d.FileName" % part)
+        vds  = parset.getString("Part%d.Name" % part)
+        data[host].append((file, vds))
+
+    for host, values in data.iteritems():
+        data[host] = utilities.group_iterable(values, nproc)
+
+    while True:
+        yieldable = []
+        for host, values in data.iteritems():
+            try:
+                for filename, vds in values.next():
+                    yieldable.append((host, filename, vds))
+            except StopIteration:
+                pass
+        if len(yieldable) == 0:
+            raise StopIteration
+        else:
+            yield yieldable
+
+def load_data_map(filename):
+    """
+    Load a mapping of filename <-> compute node from a parset on disk.
+    """
+    datamap = Parset(filename)
+    data = []
+    for host in datamap:
+        for filename in datamap.getStringVector(host):
+            data.append((host, filename))
+    return data
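+
+#    Example (an illustrative sketch only): step through a GVDS file given on
+#    the command line in chunks of at most four files per host.
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    import sys
+    for chunk in gvds_iterator(sys.argv[1], nproc=4):
+        # Each chunk is a list of (host, filename, vds) tuples, with no more
+        # than nproc entries per host -- one round of node jobs.
+        for host, filename, vds in chunk:
+            print host, filename, vds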
diff --git a/CEP/Pipeline/framework/lofarpipe/support/ipython.py b/CEP/Pipeline/framework/lofarpipe/support/ipython.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbc33f1b4820d0a35dc37de75b5bd00b02f85bd5
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/ipython.py
@@ -0,0 +1,56 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                   Extensions to IPython system
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+from ConfigParser import NoSectionError
+from IPython.kernel.task import StringTask
+from IPython.kernel import client as IPclient
+from lofarpipe.support.lofarexceptions import ClusterError
+
+class LOFARTask(StringTask):
+    """
+    Extended version of IPython's StringTask, allowing external
+    arguments for dependency resolution.
+    """
+    def __init__(self, expression, pull=None, push=None,
+        clear_before=False, clear_after=False, retries=0,
+        recovery_task=None, depend=None, dependargs=None
+    ):
+        self.dependargs = dependargs
+        return super(LOFARTask, self).__init__(
+            expression, pull, push, clear_before, clear_after,
+            retries, recovery_task, depend
+        )
+
+    def check_depend(self, properties):
+        """
+        Calls self.depend(properties, self.dependargs)
+        to see if a task should be run.
+        """
+        if self.depend is not None:
+            return self.depend(properties, self.dependargs)
+        else:
+            return True
+
+class IPythonRecipeMixIn(object):
+    """
+    Mix-in for recipes to provide access to an IPython cluster.
+    """
+    def _get_cluster(self):
+        """
+        Return task and multiengine clients connected to the running
+        pipeline's IPython cluster.
+        """
+        self.logger.info("Connecting to IPython cluster")
+        try:
+            tc  = IPclient.TaskClient(self.config.get('cluster', 'task_furl'))
+            mec = IPclient.MultiEngineClient(self.config.get('cluster', 'multiengine_furl'))
+        except NoSectionError:
+            self.logger.error("Cluster not defined in configuration")
+            raise ClusterError
+        except:
+            self.logger.error("Unable to initialise cluster")
+            raise ClusterError
+        return tc, mec
diff --git a/CEP/Pipeline/framework/lofarpipe/support/jobserver.py b/CEP/Pipeline/framework/lofarpipe/support/jobserver.py
new file mode 100644
index 0000000000000000000000000000000000000000..138bfd61d61f84f388c2d54f14474ad5639214cb
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/jobserver.py
@@ -0,0 +1,168 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                          Network services for sending/receiving job parameters
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from contextlib import contextmanager
+
+import signal
+import threading
+import struct
+import socket
+import select
+import logging
+import logging.handlers
+import Queue
+import SocketServer
+import cPickle as pickle
+
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.utilities import spawn_process
+
+class JobStreamHandler(SocketServer.StreamRequestHandler):
+    """
+    Networked job server.
+
+    This will listen for:
+
+    * GET <jobid>           -- reply with a list of all job arguments
+    * PUT <jobid> <results> -- receive and unpickle job results
+    * Pickled log records
+
+    Log records are logged using whatever local logger is supplied to the
+    SocketReceiver.
+    """
+    def handle(self):
+        """
+        Each request is expected to be a 4-byte length followed by either a
+        GET/PUT request or a pickled LogRecord.
+        """
+        while True:
+            chunk = self.request.recv(4)
+            try:
+                slen = struct.unpack(">L", chunk)[0]
+            except:
+                break
+            chunk = self.connection.recv(slen)
+            while len(chunk) < slen:
+                chunk = chunk + self.connection.recv(slen - len(chunk))
+            input_msg = chunk.split(" ", 2)
+            try:
+                # Can we handle this message type?
+                if input_msg[0] == "GET":
+                    self.send_arguments(int(input_msg[1]))
+                elif input_msg[0] == "PUT":
+                    self.read_results(input_msg[1], input_msg[2])
+                else:
+                    self.handle_log_record(chunk)
+            except:
+                # Otherwise, fail.
+                self.server.error.set()
+                self.server.logger.error("Protocol error; received %s" % chunk)
+                self.server.logger.error("Aborting.")
+
+    def send_arguments(self, job_id):
+        job_id = int(job_id)
+        self.server.logger.debug(
+            "Request for job %d from %s" %
+            (job_id, self.request.getpeername())
+        )
+        args = self.server.jobpool[job_id].arguments
+        pickled_args = pickle.dumps(args)
+        length = struct.pack(">L", len(pickled_args))
+        self.request.send(length + pickled_args)
+
+    def read_results(self, job_id, pickled_results):
+        job_id = int(job_id)
+        self.server.logger.debug(
+            "Results for job %d submitted by %s" %
+            (job_id, self.request.getpeername())
+        )
+        results = pickle.loads(pickled_results)
+        self.server.jobpool[job_id].results = results
+
+    def handle_log_record(self, chunk):
+        record = logging.makeLogRecord(pickle.loads(chunk))
+        self.server.queue.put_nowait(record)
+
+class JobSocketReceiver(SocketServer.ThreadingTCPServer):
+    """
+    Simple TCP socket-based job dispatch and results collection as well as
+    network logging.
+    """
+    def __init__(
+        self,
+        logger,
+        jobpool,
+        error,
+        host=None,
+        port=logging.handlers.DEFAULT_TCP_LOGGING_PORT
+    ):
+        if not host:
+            host = socket.gethostname()
+        SocketServer.ThreadingTCPServer.__init__(self, (host, port), JobStreamHandler)
+        self.abort = False
+        self.timeout = 1
+        self.queue = Queue.Queue()
+        self.logger = logger
+        self.jobpool = jobpool
+        self.error = error
+
+    def start(self):
+        # Log messages are received in one thread, and appended to an instance
+        # of Queue.Queue. Another thread picks messages off the queue and
+        # sends them to the log handlers. We keep the latter thread running
+        # until the queue is empty, thereby avoiding the problem of falling
+        # out of the logger threads before all log messages have been handled.
+        def loop_in_thread():
+            while True:
+                rd, wr, ex = select.select(
+                    [self.socket.fileno()], [], [], self.timeout
+                )
+                if rd:
+                    self.handle_request()
+                elif self.abort:
+                    break
+
+        def log_from_queue():
+            while True:
+                try:
+                    record = self.queue.get(True, 1)
+                    # Manually check the level against the pipeline's root logger.
+                    # Not sure this should be necessary, but it seems to work...
+                    if self.logger.isEnabledFor(record.levelno):
+                        self.logger.handle(record)
+                except Queue.Empty:
+                    if self.abort:
+                        break
+
+        self.runthread = threading.Thread(target=loop_in_thread)
+        self.logthread = threading.Thread(target=log_from_queue)
+        self.runthread.start()
+        self.logthread.start()
+
+    def stop(self):
+        self.abort = True
+        self.runthread.join()
+        self.logthread.join()
+        self.server_close()
+
+@contextmanager
+def job_server(logger, jobpool, error):
+    """
+    Provides a context in which job dispatch is available.
+
+    Yields a host name & port which clients can connect to for job details.
+    """
+    jobserver = JobSocketReceiver(logger, jobpool, error, port=0)
+    jobserver.start()
+    try:
+        yield jobserver.server_address
+    except KeyboardInterrupt:
+        jobserver.stop()
+        raise
+    jobserver.stop()
+    [handler.flush() for handler in logger.handlers]
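+
+#    Example (an illustrative sketch only): serve one job's arguments over
+#    TCP. In the framework the jobpool is filled with job objects elsewhere;
+#    _FakeJob below is a stand-in exposing only the two attributes the
+#    handler actually uses (arguments and results).
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.DEBUG)
+    logger = logging.getLogger("jobserver.demo")
+
+    class _FakeJob(object):
+        def __init__(self, arguments):
+            self.arguments = arguments
+            self.results = None
+
+    jobpool = {0: _FakeJob(["/path/to/input.MS"])}
+    with job_server(logger, jobpool, threading.Event()) as (host, port):
+        logger.info("Job server on %s:%d" % (host, port))
+        # A node-side client (e.g. LOFARnodeTCP in lofarnode.py) would now
+        # connect, send "GET 0", run the job, and report back with "PUT 0".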
diff --git a/CEP/Pipeline/framework/lofarpipe/support/lofarexceptions.py b/CEP/Pipeline/framework/lofarpipe/support/lofarexceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..2df127fe0d975f153d026726d486ac82f7656c44
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/lofarexceptions.py
@@ -0,0 +1,28 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                                     Exceptions
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+class ExecutableMissing(Exception):
+    pass
+
+class PipelineException(Exception):
+    pass
+
+class PipelineRecipeFailed(PipelineException):
+    pass
+
+class PipelineReceipeNotFound(PipelineException):
+    pass
+
+class PipelineQuit(PipelineException):
+    """
+    If this exception is raised during a pipeline run, we skip over all
+    subsequent steps and exit cleanly.
+    """
+    pass
+
+class ClusterError(PipelineException):
+    pass
diff --git a/CEP/Pipeline/framework/lofarpipe/support/lofaringredient.py b/CEP/Pipeline/framework/lofarpipe/support/lofaringredient.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d3fddc16de5d2fde46325f66b1f78d208c5a8f9
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/lofaringredient.py
@@ -0,0 +1,353 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                                    Ingredients
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import os
+from optparse import make_option
+from UserDict import DictMixin
+
+from lofarpipe.cuisine.ingredient import WSRTingredient
+from lofarpipe.support.utilities import string_to_list, is_iterable
+
+#       These are currently only used by lofarrecipe.run_task to provide default
+#              input and output dicts based on copying metadata from the parent.
+# ------------------------------------------------------------------------------
+class LOFARinput(WSRTingredient):
+    """
+    All LOFAR pipeline ingredients are required to provide a few basic
+    parameters:
+
+    * job_name
+    * runtime_directory
+    * config
+    * task_files
+    * dry_run
+    * start_time
+
+    These are defined in the RecipeIngredients class; all options (but not
+    arguments) defined there are required.
+    """
+    def __init__(self, defaults):
+        super(LOFARinput, self).__init__(self)
+        for param in RecipeIngredients.inputs.iterkeys():
+            if param != "args" and defaults.has_key(param):
+                self[param] = defaults[param]
+
+class LOFARoutput(WSRTingredient):
+    """
+    LOFARoutput makes no changes to WSRTingredient.
+    """
+    pass
+
+class Field(object):
+    """
+    Fields provide validation and type checking of input/output.
+
+    Unlimited user-defined fields are possible; they should all derive from
+    this class.
+    """
+    def __init__(self, *opts, **attrs):
+        self.optionstrings = opts
+        if attrs.has_key("help"):
+            self.help = attrs['help']
+        else:
+            self.help = ""
+        if attrs.has_key("default"):
+            self.default = attrs['default']
+        elif attrs.has_key("optional") and attrs["optional"]:
+            self.optional = True
+
+    def is_valid(self, value):
+        """
+        Check whether ``value`` is a valid value for this field.
+
+        This must be defined in subclasses.
+
+        :param value: value to be checked
+        :rtype: bool
+        """
+        raise NotImplementedError
+
+    def coerce(self, value):
+        """
+        Try to convert value into the appropriate type for this sort of field.
+        Results should be checked with ``is_valid()``.
+
+        :param value: value to be coerced
+        :rtype: coerced value
+        """
+        return value
+
+    def generate_option(self, name):
+        """
+        Generate an :mod:`optparse` option.
+
+        :param name: Destination
+        :rtype: :class:`optparse.Option`
+        """
+        if hasattr(self, "default"):
+            help = self.help + " [Default: %s]" % str(self.default)
+        elif hasattr(self, "optional"):
+            help = self.help + " [Optional]"
+        else:
+            help = self.help
+        return make_option(help=help, dest=name, *self.optionstrings)
+
+class StringField(Field):
+    """
+    A Field which accepts any string as its value.
+    """
+    def is_valid(self, value):
+        return isinstance(value, str)
+
+class IntField(Field):
+    """
+    A Field which accepts any int as its value.
+    """
+    def is_valid(self, value):
+        return isinstance(value, int)
+
+    def coerce(self, value):
+        try:
+            return int(value)
+        except:
+            return value
+
+class FloatField(Field):
+    """
+    A Field which accepts any float as its value.
+    """
+    def is_valid(self, value):
+        return isinstance(value, float)
+
+    def coerce(self, value):
+        try:
+            return float(value)
+        except:
+            return value
+
+class FileField(Field):
+    """
+    A Field which accepts the name of an extant file.
+    """
+    def is_valid(self, value):
+        return os.path.exists(str(value))
+
+class ExecField(Field):
+    """
+    A Field which accepts the name of an executable file.
+    """
+    def is_valid(self, value):
+        return os.access(value, os.X_OK)
+
+class DirectoryField(Field):
+    """
+    A Field which accepts the name of an extant directory.
+    """
+    def is_valid(self, value):
+        return os.path.isdir(str(value))
+
+    def coerce(self, value):
+        try:
+            os.makedirs(str(value))
+        except:
+            pass
+        finally:
+            return value
+
+class BoolField(Field):
+    """
+    A Field which accepts a bool.
+    """
+    def is_valid(self, value):
+        return isinstance(value, bool)
+
+    def coerce(self, value):
+        if value == "False" or value == "None" or value == "":
+            return False
+        elif value == "True":
+            return True
+        else:
+            return value
+
+class ListField(Field):
+    """
+    A Field which accepts a non-string iterable (i.e., a list or tuple).
+    """
+    def is_valid(self, value):
+        return not isinstance(value, str) and is_iterable(value)
+
+    def coerce(self, value):
+        if isinstance(value, str):
+            return string_to_list(value)
+        else:
+            return value
+
+class DictField(Field):
+    """
+    A Field which accepts a dict.
+    """
+    def is_valid(self, value):
+        return isinstance(value, dict)
+
+class FileList(ListField):
+    """
+    A Field which accepts a list of extant filenames.
+    """
+    def is_valid(self, value):
+        if super(FileList, self).is_valid(value) and \
+        not False in [os.path.exists(file) for file in value]:
+            return True
+        else:
+            return False
+
+#                                            The magic that makes it all work...
+#                      RecipeIngredients should be mixed in to any recipes which
+#             need to deal with input or output. BaseRecipe already includes it,
+#                             so that will almost always be the case by default.
+# ------------------------------------------------------------------------------
+
+class LOFARingredient(DictMixin):
+    """
+    LOFARingredient provides dict-like access to a group of instances of
+    :class:`Field`.  If a field is defined which does not have a value set,
+    but which does have a default, that is returned.
+    """
+    def __init__(self, fields):
+        self._fields = fields
+        self._values = {}
+
+    def __getitem__(self, key):
+        # If we don't have the value for this key, but we do have a field with
+        # a valid default, return that.
+        if (
+            not self._values.has_key(key) and
+            self._fields.has_key(key) and
+            hasattr(self._fields[key], "default")
+        ):
+            field = self._fields[key]
+            value = field.coerce(field.default)
+            if not field.is_valid(value):
+                raise TypeError(
+                    "%s is an invalid value for %s %s" %
+                    (str(value), type(field).__name__, key)
+                )
+        elif self._values.has_key(key):
+            value = self._values[key]
+        else:
+            raise KeyError(key)
+        return value
+
+    def __setitem__(self, key, value):
+        if key in self._fields:
+            field = self._fields[key]
+            converted_value = field.coerce(value)
+            if not field.is_valid(converted_value):
+                raise TypeError(
+                    "%s is an invalid value for %s %s" %
+                    (str(value), type(field).__name__, key)
+                )
+            self._values[key] = converted_value
+        else:
+            raise TypeError("Ingredient %s not defined" % key)
+
+    def keys(self):
+        # We want to return a list of everything we have a value for. That's
+        # everything in _values, plus things in _fields which have a default.
+        return list(
+            set(self._values.keys()).union(
+                set(k for k, v in self._fields.items() if hasattr(v, "default"))
+            )
+        )
+
+    def make_options(self):
+        return [value.generate_option(key) for key, value in self._fields.iteritems()]
+
+    def missing(self):
+        return [
+            key for key in self._fields.iterkeys()
+            if not self._values.has_key(key)
+            and not hasattr(self._fields[key], "optional")
+            and not hasattr(self._fields[key], "default")
+        ]
+
+    def complete(self):
+        return False if self.missing() else True
+
+    def update(self, args, **kwargs):
+        for key, value in args.iteritems():
+            self._values[key] = value
+        for key, value in kwargs.iteritems():
+            self._values[key] = value
+
+class RecipeIngredientsMeta(type):
+    """
+    This metaclass ensures that the appropriate instances of :class:`Field`
+    are available in the inputs of every LOFAR recipe.
+    """
+    def __init__(cls, name, bases, ns):
+        # Inputs are inherited from the superclass.
+        # Need to do some gymnastics here, as we don't want to update the
+        # superclass's _infields -- thus we replace it and copy the contents.
+        new_inputs = {}
+        if hasattr(cls, "_infields"):
+            new_inputs.update(cls._infields)
+        if ns.has_key("inputs"):
+            new_inputs.update(ns["inputs"])
+        cls._infields = new_inputs
+
+        # Outputs are not inherited.
+        if ns.has_key('outputs'):
+            cls._outfields = ns['outputs']
+
+class RecipeIngredients(object):
+    """
+    All LOFAR recipes ultimately inherit from this. It provides the basic
+    ingredient structure, as well as the default fields which are available in
+    every recipe.
+    """
+    __metaclass__ = RecipeIngredientsMeta
+
+    inputs = {
+        'job_name': StringField(
+            '-j', '--job-name',
+            help="Job name"
+        ),
+        'runtime_directory': FileField(
+            '-r', '--runtime-directory',
+            help="Runtime directory"
+        ),
+        'config': FileField(
+            '-c', '--config',
+            help="Configuration file"
+        ),
+        'task_files': FileList(
+            '--task-file',
+            help="Task definition file"
+        ),
+        'start_time': StringField(
+            '--start-time',
+            help="[Expert use] Pipeline start time"
+        ),
+        'dry_run': BoolField(
+            '-n', '--dry-run',
+            help="Dry run",
+            default=False
+        ),
+        'args': ListField(
+            '--args', help="Args", default=[]
+        )
+    }
+
+    outputs = {}
+
+    def __init__(self):
+        super(RecipeIngredients, self).__init__()
+        #                  Must run the following *after* WSRTrecipe.__init__().
+        # ----------------------------------------------------------------------
+        self.inputs = LOFARingredient(self._infields)
+        self.outputs = LOFARingredient(self._outfields)
+        self.optionparser.add_options(self.inputs.make_options())
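+
+#      Example (an illustrative sketch only): how Field coercion, validation
+#      and defaults behave inside a LOFARingredient. The field names here are
+#      invented for demonstration.
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    fields = {
+        'nproc': IntField('--nproc', help="Processes per node", default=8),
+        'flags': ListField('--flags', help="Extra flags", default=[]),
+    }
+    settings = LOFARingredient(fields)
+    settings['nproc'] = "4"      # coerced from str to int by IntField
+    print settings['nproc'] + 1  # -> 5
+    print settings['flags']      # -> [] (falls back to the field's default)
+    print settings.missing()     # -> [] (every field has a value or a default)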
diff --git a/CEP/Pipeline/framework/lofarpipe/support/lofarnode.py b/CEP/Pipeline/framework/lofarpipe/support/lofarnode.py
new file mode 100644
index 0000000000000000000000000000000000000000..1886ec867813d5a55c13f00be13ab23307b116c2
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/lofarnode.py
@@ -0,0 +1,119 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                            Compute node system
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import os
+import socket
+import struct
+import platform
+import logging
+import logging.handlers
+import cPickle as pickle
+
+def run_node(*args):
+    """
+    Run on node to automatically locate, instantiate and execute the
+    correct LOFARnode class.
+    """
+    import imp
+    control_script = getattr(
+        imp.load_module(recipename, *imp.find_module(recipename, [nodepath])),
+        recipename
+    )
+    return control_script(loghost=loghost, logport=logport).run_with_logging(*args)
+
+class LOFARnode(object):
+    """
+    Base class for node jobs called through IPython or directly via SSH.
+
+    Sets up TCP based logging.
+    """
+    def __init__(
+        self,
+        loghost=None,
+        logport=logging.handlers.DEFAULT_TCP_LOGGING_PORT
+    ):
+        self.logger = logging.getLogger(
+            'node.%s.%s' % (platform.node(), self.__class__.__name__)
+        )
+        self.logger.setLevel(logging.DEBUG)
+        self.loghost = loghost
+        self.logport = int(logport)
+        self.outputs = {}
+
+    def run_with_logging(self, *args):
+        """
+        Calls the run() method, ensuring that the logging handler is added
+        and removed properly.
+        """
+        if self.loghost:
+            my_tcp_handler = logging.handlers.SocketHandler(self.loghost, self.logport)
+            self.logger.addHandler(my_tcp_handler)
+        try:
+            return self.run(*args)
+        finally:
+            if self.loghost:
+                my_tcp_handler.close()
+                self.logger.removeHandler(my_tcp_handler)
+
+    def run(self):
+        # Override in subclass.
+        raise NotImplementedError
+
+class LOFARnodeTCP(LOFARnode):
+    """
+    This node script extends :class:`~lofarpipe.support.lofarnode.LOFARnode`
+    to receive instructions via TCP from a
+    :class:`~lofarpipe.support.jobserver.JobSocketReceiver`.
+    """
+    def __init__(self, job_id, host, port):
+        self.job_id, self.host, self.port = int(job_id), host, int(port)
+        self.__fetch_arguments()
+        super(LOFARnodeTCP, self).__init__(self.host, self.port)
+
+    def run_with_stored_arguments(self):
+        """
+        After fetching arguments remotely, use them to run the standard
+        run_with_logging() method.
+        """
+        returnvalue = self.run_with_logging(*self.arguments)
+        self.__send_results()
+        return returnvalue
+
+    def __fetch_arguments(self):
+        """
+        Connect to a remote job dispatch server (an instance of
+        jobserver.JobSocketReceive) and obtain all the details necessary to
+        run this job.
+        """
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        try:
+            s.connect((self.host, self.port))
+        except Exception, e:
+            print "Could not connect to %s:%s (got %s)" % (self.host, str(self.port), str(e))
+            raise
+        message = "GET %d" % self.job_id
+        s.send(struct.pack(">L", len(message)) + message)
+        chunk = s.recv(4)
+        slen = struct.unpack(">L", chunk)[0]
+        chunk = s.recv(slen)
+        while len(chunk) < slen:
+            chunk += s.recv(slen - len(chunk))
+        self.arguments = pickle.loads(chunk)
+
+    def __send_results(self):
+        """
+        Send the contents of self.outputs to the originating job dispatch
+        server.
+        """
+        message = "PUT %d %s" % (self.job_id, pickle.dumps(self.outputs))
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        try:
+            s.connect((self.host, int(self.port)))
+        except Exception, e:
+            print "Could not connect to %s:%s (got %s)" % (self.host, str(self.port), str(e))
+            raise
+        s.send(struct.pack(">L", len(message)) + message)
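+
+#    Example (an illustrative sketch only): a node-side script built on
+#    LOFARnodeTCP. The class name and run() signature are invented; the
+#    command-line convention (job id, host, port) is an assumption about how
+#    the dispatching side starts node scripts, chosen to match what
+#    __fetch_arguments() and __send_results() above expect to talk to.
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    import sys
+
+    class example_node(LOFARnodeTCP):
+        def run(self, infile):
+            # Arguments arrive from the job server; results go back the same way.
+            self.logger.info("Processing %s" % infile)
+            self.outputs['ok'] = True
+            return 0
+
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(example_node(jobid, jobhost, jobport).run_with_stored_arguments())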
diff --git a/CEP/Pipeline/framework/lofarpipe/support/lofarrecipe.py b/CEP/Pipeline/framework/lofarpipe/support/lofarrecipe.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4431dbfcb6b9734bad7c84cecce8b209699513a
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/lofarrecipe.py
@@ -0,0 +1,13 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                           IPython and RemoteCommand all in one
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.ipython import IPythonRecipeMixIn
+from lofarpipe.support.stateful import BaseRecipe
+
+class LOFARrecipe(BaseRecipe, IPythonRecipeMixIn, RemoteCommandRecipeMixIn):
+    pass
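+
+#      Example (an illustrative sketch only): a recipe combining both mix-ins.
+#      Whether it uses the IPython cluster (via self._get_cluster(), which
+#      needs the cluster furls from the configuration) or direct remote
+#      commands is up to its go() method; main() is assumed to come from
+#      WSRTrecipe via BaseRecipe.
+# ------------------------------------------------------------------------------
+if __name__ == '__main__':
+    import sys
+
+    class example(LOFARrecipe):
+        def go(self):
+            super(example, self).go()
+            tc, mec = self._get_cluster()  # IPython task and multi-engine clients
+            self.logger.info("IPython cluster clients ready")
+            return 0
+
+    sys.exit(example().main())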
diff --git a/CEP/Pipeline/framework/lofarpipe/support/mac.py b/CEP/Pipeline/framework/lofarpipe/support/mac.py
new file mode 100644
index 0000000000000000000000000000000000000000..7890b27f552fbac1149bafd1030502166d43b162
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/mac.py
@@ -0,0 +1,202 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                       Pipeline MAC integration
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import threading
+import collections
+
+from lofarpipe.support.control import control
+from lofarpipe.support.lofarexceptions import PipelineException, PipelineQuit
+
+#                                            Required by MAC EventPort Interface
+# ------------------------------------------------------------------------------
+
+from ep.control import *
+from ep.control import OK as controlOK
+
+#                                                           Integration with MAC
+# ------------------------------------------------------------------------------
+
+class MAC_control(control):
+    """
+    This extends the control framework to interface with MAC.
+    """
+    def __init__(self):
+        super(MAC_control, self).__init__()
+        self.optionparser.add_option('--controllername')
+        self.optionparser.add_option('--servicemask')
+        self.optionparser.add_option('--targethost')
+        self.optionparser.add_option('--treeid')
+
+    def pipeline_logic(self):
+        """
+        Define pipeline logic in subclasses.
+        """
+        raise NotImplementedError
+
+    def run_task(self, configblock, datafiles=[]):
+        self.logger.info( "Waiting for run state...")
+        self.state['run'].wait()
+        self.logger.info( "Running.")
+        self.logger.debug("Quit is %s" % (str(self.state['quit'].isSet())))
+        if self.state['quit'].isSet():
+            self.logger.info("Pipeline instructed to quit; bailing out")
+            raise PipelineQuit
+        try:
+            super(MAC_control, self).run_task(configblock, datafiles)
+        except PipelineException, message:
+            self.logger.error(message)
+#            raise PipelineQuit
+
+    def go(self):
+        #     Pipeline logic proceeds as in a standard recipe in its own thread
+        #                          MAC control takes place in a separate thread
+        # ---------------------------------------------------------------------
+        super(MAC_control, self).go()
+
+        self.logger.info(
+            "LOFAR Pipeline (%s) starting." %
+            (self.name,)
+        )
+
+        self.state = {
+            'run':      threading.Event(),
+            'quit':     threading.Event(),
+            'pause':    threading.Event(),
+            'finished': threading.Event()
+        }
+
+        control_thread = threading.Thread(target=self.control_loop)
+        pipeline_thread = threading.Thread(target=self.pipeline_logic)
+
+        pipeline_thread.setDaemon(True)
+        control_thread.start()
+        pipeline_thread.start()
+        control_thread.join()
+        self.logger.info("Control loop finished; shutting down")
+        return 0
+
+    def control_loop(self):
+        """
+        Loop until the pipeline finishes, receiving and responding to messages
+        sent by MAC.
+        """
+        #                                             Connect to the MAC server
+        # ---------------------------------------------------------------------
+        try:
+            my_interface = ControllerPort_Interface(
+                self.inputs['servicemask'], self.inputs['targethost']
+            )
+        except:
+            self.logger.info("Control interface not connected; quitting")
+            self.state['quit'].set()
+            self.state['run'].set()
+            return
+        my_interface.send_event(
+            ControlConnectEvent(self.inputs['controllername'])
+        )
+
+        #                    Buffer events received from the EventPort interface
+        # ----------------------------------------------------------------------
+        class ReceiverThread(threading.Thread):
+            def __init__(self, interface, logger):
+                super(ReceiverThread, self).__init__()
+                self.interface = interface
+                self.logger = logger
+                self.event_queue = collections.deque()
+                self.active = True
+            def run(self):
+                while self.active:
+                    self.event_queue.append(self.interface.receive_event())
+                    self.logger.debug("Got a new event")
+            def next_event(self):
+                try:
+                    return self.event_queue.popleft()
+                except IndexError:
+                    return None
+        event_receiver = ReceiverThread(my_interface, self.logger)
+        event_receiver.setDaemon(True)
+        event_receiver.start()
+        controllername = self.inputs['controllername']
+
+        #           The main control loop continues until the pipeline finishes
+        # ---------------------------------------------------------------------
+        while True:
+            #                               Handle any events received from MAC
+            # -----------------------------------------------------------------
+            current_event = event_receiver.next_event()
+
+            if isinstance(current_event, ControlConnectedEvent):
+                self.logger.debug("Received ConnectedEvent")
+            elif isinstance(current_event, ControlClaimEvent):
+                self.logger.debug("Received ClaimEvent")
+                my_interface.send_event(
+                    ControlClaimedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlPrepareEvent):
+                self.logger.debug("Received PrepareEvent")
+                my_interface.send_event(
+                    ControlPreparedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlSuspendEvent):
+                self.logger.debug("Received SuspendEvent")
+                self.logger.debug("Clearing run state; pipeline must pause")
+                self.state['run'].clear()
+                my_interface.send_event(
+                    ControlSuspendedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlResumeEvent):
+                self.logger.debug("Received ResumeEvent")
+                self.logger.debug("Setting run state: pipeline may run")
+                self.state['run'].set()
+                my_interface.send_event(
+                    ControlResumedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlReleaseEvent):
+                self.logger.debug("Received ReleaseEvent")
+                my_interface.send_event(
+                    ControlReleasedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlQuitEvent):
+                self.logger.debug("Received QuitEvent")
+                self.logger.debug("Setting quit state: pipeline must exit")
+                self.state['quit'].set()
+                self.state['run'].set()
+                my_interface.send_event(
+                    ControlQuitedEvent(
+                        controllername,
+                        self.inputs['treeid'],
+                        controlOK,
+                        "no error"
+                    )
+                )
+            elif isinstance(current_event, ControlResyncEvent):
+                self.logger.debug("Received ResyncEvent")
+                my_interface.send_event(
+                    ControlResyncedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlScheduleEvent):
+                self.logger.debug("Received ScheduleEvent")
+                my_interface.send_event(
+                    ControlScheduledEvent(controllername, controlOK)
+                )
+
+            #                  Shut everything down if the pipeline is finished
+            # -----------------------------------------------------------------
+            if self.state['finished'].isSet():
+                self.logger.debug("Got finished state: control loop exiting")
+                my_interface.send_event(
+                    ControlQuitedEvent(
+                        controllername,
+                        self.inputs['treeid'],
+                        controlOK,
+                        "pipeline finished"
+                    )
+                )
+                event_receiver.active = False
+                break
+
+            self.logger.debug("Control looping...")
+            time.sleep(1)
diff --git a/CEP/Pipeline/framework/lofarpipe/support/parset.py b/CEP/Pipeline/framework/lofarpipe/support/parset.py
new file mode 100644
index 0000000000000000000000000000000000000000..dee9ae3643f20dc119ebc166ee323299ea8104b1
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/parset.py
@@ -0,0 +1,114 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                          Parameterset Handling
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import os
+from tempfile import mkstemp
+from contextlib import contextmanager
+
+from lofar.parameterset import parameterset
+
+class Parset(parameterset):
+    """
+    This wraps lofar.parameterset to provide a convenient means of iterating
+    over the parameterset's keys.
+
+    It should be replaced (eventually) by rolling this functionality directly
+    into the C++ implementation.
+    """
+    def __init__(self, filename=None, caseInsensitive=False):
+        super(Parset, self).__init__(filename, caseInsensitive)
+        self.keys = []
+        if filename:
+            self._append_file(filename)
+
+    def add(self, key, value):
+        super(Parset, self).add(key, value)
+        self.keys.append(key)
+
+    def adoptFile(self, filename, prefix=''):
+        super(Parset, self).adoptFile(filename, prefix)
+        self._append_file(filename, prefix)
+
+    def clear(self):
+        super(Parset, self).clear()
+        self.keys = []
+
+    def remove(self, key):
+        super(Parset, self).remove(key)
+        self.keys.remove(key)
+
+    def replace(self, key, value):
+        super(Parset, self).replace(key, value)
+        if key not in self.keys:
+            self.keys.append(key)
+
+    def subtractSubset(self, baseKey):
+        super(Parset, self).subtractSubset(baseKey)
+        self.keys = [
+            key for key in self.keys if not key.startswith(baseKey)
+        ]
+
+    def makeSubset(self, baseKey, prefix=None):
+        newps = Parset()
+        for key in self.keys:
+            if key[:len(baseKey)] == baseKey:
+                if prefix:
+                    newkey = key.replace(baseKey, prefix)
+                else:
+                    newkey = key
+                newps.add(newkey, self[key].get())
+        return newps
+
+    def addStringVector(self, key, vector):
+        super(Parset, self).add(key, "[ %s ]" % ", ".join(vector))
+        self.keys.append(key)
+
+    def _append_file(self, filename, prefix=''):
+        # Record the key (the text before the first "=") of every line.
+        parset_file = open(filename, 'r')
+        for line in parset_file:
+            key = line.split("=")[0].strip()
+            if key:
+                self.keys.append(prefix + key)
+        parset_file.close()
+
+    def __iter__(self):
+        return iter(self.keys)
+
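+# Illustrative sketch of the iteration support this wrapper adds (comments
+# only; "example.parset" is a hypothetical filename):
+#
+#   ps = Parset("example.parset")
+#   for key in ps:
+#       print key, ps[key].get()
+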
+def get_parset(parset_filename):
+    """
+    Returns an instance of Parset with the given file loaded.
+    """
+    return Parset(parset_filename)
+
+def patch_parset(parset, data, output_dir=None):
+    """
+    Generate a parset file by adding the contents of the data dictionary to
+    the specified parset object. Write it to file, and return the filename.
+    """
+    temp_parset = get_parset(parset)
+    for key, value in data.iteritems():
+        temp_parset.replace(key, value)
+    fd, output = mkstemp(dir=output_dir)
+    temp_parset.writeFile(output)
+    os.close(fd)
+    return output
+
+@contextmanager
+def patched_parset(parset, data, output_dir=None, unlink=True):
+    """
+    Wrap patch_parset() in a contextmanager which removes the generated parset
+    when it finishes.
+
+    Set ``unlink`` to ``False`` to facilitate debugging -- this leaves the
+    patched parset in place for manual checking if required.
+    """
+    filename = patch_parset(parset, data, output_dir)
+    try:
+        yield filename
+    finally:
+        if unlink: os.unlink(filename)
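+
+# Illustrative usage sketch (comments only; the parset path, key and
+# run_executable() are hypothetical):
+#
+#   with patched_parset("/path/to/input.parset", {"msout": "out.MS"}) as fname:
+#       run_executable(fname)
+#   # fname is deleted on exit; pass unlink=False to keep it for debugging.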
diff --git a/CEP/Pipeline/framework/lofarpipe/support/pipelinelogging.py b/CEP/Pipeline/framework/lofarpipe/support/pipelinelogging.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c53f7ab1298d876da8e7ed3c8a8a43144e18db2
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/pipelinelogging.py
@@ -0,0 +1,269 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                               Logging routines
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+from contextlib import contextmanager
+from string import Template
+
+import os
+import time
+import resource
+import threading
+import logging
+import re
+
+class SearchPattern(object):
+    """
+    Match the contents of LogRecords against a regular expression, keeping
+    track of matching records.
+    """
+    def __init__(self, pattern):
+        self.pattern = re.compile(pattern)
+        self.zero()
+
+    def check(self, record):
+        """
+        If the message attached to LogRecords record matches our pattern,
+        store the record.
+        """
+        if self.pattern.search(record.getMessage()):
+            self.results.append(record)
+
+    def zero(self):
+        """
+        Reset our list of stored messages.
+        """
+        self.results = []
+
+class SearchPatterns(dict):
+    """
+    A dictionary of SearchPattern objects.
+
+    When a new entry is appended, it's automatically compiled into a
+    SearchPattern. Other access patterns are as for a dictionary.
+    """
+    def __init__(self):
+        # We only support "bare" init, ie no arguments.
+        super(SearchPatterns, self).__init__()
+
+    def __setitem__(self, name, pattern):
+        # Compile supplied string to a SearchPattern and add it to our
+        # dictionary.
+        super(SearchPatterns, self).__setitem__(name, SearchPattern(pattern))
+
+    def check(self, record):
+        """
+        Check the supplied LogRecord against all registered SearchPattern
+        objects.
+        """
+        for pattern in self.itervalues():
+            pattern.check(record)
+
+    def zero(self, name):
+        """
+        Zero the counter on a given SearchPattern.
+        """
+        self[name].zero()
+
+    def zero_all(self):
+        """
+        Zero the counter on all SearchPatterns registered.
+        """
+        for name in self.iterkeys():
+            self.zero(name)
+
+class SearchingLogger(logging.Logger):
+    """
+    A SearchingLogger will act as a normal logger object, forwarding
+    LogRecords to the appropriate handlers. In addition, it will check the
+    LogRecord against a SearchPatterns object and save any useful results.
+    """
+    def __init__(self, *args, **kwargs):
+        logging.Logger.__init__(self, *args, **kwargs)
+        self.searchpatterns = SearchPatterns()
+
+    def handle(self, record):
+        logging.Logger.handle(self, record)
+        self.searchpatterns.check(record)
+
+def getSearchingLogger(name):
+    """
+    Return an instance of SearchingLogger with the given name.
+
+    Equivalent to logging.getLogger, but returns a SearchingLogger.
+    """
+    old_class = logging.getLoggerClass()
+    logging.setLoggerClass(SearchingLogger)
+    try:
+        return logging.getLogger(name)
+    finally:
+        logging.setLoggerClass(old_class)
+
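+# Minimal usage sketch (comments only; the logger name and pattern are
+# hypothetical):
+#
+#   logger = getSearchingLogger("pipeline.example")
+#   logger.searchpatterns["failures"] = r"FAILED"
+#   # ...log messages pass through the logger as normal...
+#   matching_records = logger.searchpatterns["failures"].results
+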
+def log_file(filename, logger, killswitch):
+    """
+    Do the equivalent of tail -f on filename -- ie, watch it for updates --
+    and send any lines written to the file to the logger.
+
+    killswitch is an instance of threading.Event: when set, we bail out of the
+    loop.
+
+    :param filename: Full path to file to watch
+    :param logger: Logger to which to send updates
+    :param killswitch: instance of :class:`threading.Event` -- stop watching file when set
+    """
+    if not os.path.exists(filename):
+        open(filename, 'w').close()
+    with open(filename, 'r') as f:
+        while not killswitch.isSet():
+            line = f.readline()
+            if not line:
+                f.seek(0, 2)
+                time.sleep(1)
+            else:
+                logger.debug(line.strip())
+
+class LogCatcher(object):
+    """
+    Sets up a context in which we can catch logs from individual pipeline
+    processes in a file, then send them to the pipeline logger.
+
+    This provides the basic mechanism, but requires subclassing to define
+    self.log_prop and self.log_prop_filename (the name & contents of the log
+    configuration file).
+    """
+    def __init__(self):
+        raise NotImplementedError
+
+    def __enter__(self):
+        log_filename = os.path.join(
+            self.working_dir, "pipeline_process.log"
+        )
+        with open(self.log_prop_filename, 'w') as log_prop_file:
+            log_prop_file.write(
+                self.log_prop.substitute(log_filename=log_filename)
+            )
+        local_logger = logging.getLogger(self.logger_name)
+        self.killswitch = threading.Event()
+        self.logging_thread = threading.Thread(
+            target=log_file,
+            args=(log_filename, local_logger, self.killswitch)
+        )
+        self.logging_thread.start()
+        return local_logger
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        time.sleep(2)   # Wait in case any remaining data is to be flushed to log
+        self.killswitch.set()
+        self.logging_thread.join()
+
+class CatchLog4CPlus(LogCatcher):
+    """
+    Implement a LogCatcher for log4cplus (as used by most LOFAR pipeline
+    tools).
+    """
+    def __init__(self, working_dir, logger_name, executable_name):
+        self.log_prop = Template("""
+log4cplus.rootLogger=DEBUG, FILE
+log4cplus.logger.TRC=TRACE9
+
+log4cplus.appender.FILE=log4cplus::RollingFileAppender
+log4cplus.appender.FILE.File=$log_filename
+log4cplus.appender.FILE.ImmediateFlush=true
+log4cplus.appender.FILE.MaxFileSize=10MB
+#log4cplus.appender.FILE.MaxBackupIndex=1
+log4cplus.appender.FILE.layout=log4cplus::PatternLayout
+log4cplus.appender.FILE.layout.ConversionPattern=%l [%-3p] - %m%n
+        """)
+        self.log_prop_filename = os.path.join(
+            working_dir, executable_name + ".log_prop"
+        )
+        self.working_dir = working_dir
+        self.logger_name = logger_name
+
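+# Usage sketch (comments only; the paths, logger name and run_executable()
+# are hypothetical). On entry a log_prop file is written and a thread tails
+# the process log into the named logger until the block exits:
+#
+#   with CatchLog4CPlus("/data/scratch/job", "pipeline.node42", "NDPPP"):
+#       run_executable()
+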
+class CatchLog4CXX(LogCatcher):
+    """
+    Implement a LogCatcher for log4cxx (as used by ASKAP tools, eg cimager).
+    """
+    def __init__(self, working_dir, logger_name):
+        self.log_prop = Template("""
+log4j.rootLogger=DEBUG, FILE
+
+log4j.appender.FILE=org.apache.log4j.RollingFileAppender
+log4j.appender.FILE.File=$log_filename
+log4j.appender.FILE.ImmediateFlush=true
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%l [%-3p] - %m%n
+        """)
+        self.log_prop_filename = os.path.join(
+            working_dir, "askap.log_cfg"
+        )
+        self.working_dir = working_dir
+        self.logger_name = logger_name
+
+@contextmanager
+def log_time(logger):
+    """
+    Send information about the processing time used by code in this context to
+    the specified logger.
+
+    :param logger: logger to which timing information should be sent.
+    """
+    def get_rusage():
+        return [
+            x + y for x, y in zip(
+                resource.getrusage(resource.RUSAGE_CHILDREN),
+                resource.getrusage(resource.RUSAGE_SELF)
+            )
+        ]
+
+    start_time = time.time()
+    start_rusage = get_rusage()
+    try:
+        yield
+    finally:
+        total_rusage = [x - y for x, y in zip(get_rusage(), start_rusage)]
+        logger.info(
+            "Total time %.4fs; user time: %.4fs; system time: %.4fs" % (
+                time.time() - start_time, total_rusage[0], total_rusage[1]
+            )
+        )
+        logger.debug(
+            "Start time was %.4fs; end time was %.4fs" % (
+                start_time, time.time()
+            )
+        )
+
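+# Minimal usage sketch (comments only; do_work() is hypothetical):
+#
+#   with log_time(logger):
+#       do_work()
+#   # -> "Total time ...s; user time: ...s; system time: ...s" at INFO level
+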
+def log_process_output(process_name, sout, serr, logger):
+    """
+    Log stdout/stderr from a process if they contain anything interesting --
+    some line-noise produced by many CEP executables is stripped.
+
+    :param process_name: Name to be used for logging purposes
+    :param sout: Standard out to log (string)
+    :param serr: Standard error to log (string)
+    :param logger: Logger to which messages should be sent
+
+    The ``sout`` and ``serr`` params are intended to be used with the output
+    of :meth:`subprocess.Popen.communicate`, but any string-a-like should
+    work.
+    """
+    #     These lines are never logged, since they don't tell us anything useful
+    # --------------------------------------------------------------------------
+    excludepatterns = (
+        "Debug: registered context Global=0\n",
+        "tcgetattr: Invalid argument\n",
+    )
+
+    for pattern in excludepatterns:
+        sout = sout.replace(pattern, "")
+        serr = serr.replace(pattern, "")
+
+    if len(sout.strip()) > 0:
+        logger.debug("%s stdout: %s" % (process_name, sout))
+    if len(serr.strip()) > 0:
+        logger.debug("%s stderr: %s" % (process_name, serr))
diff --git a/CEP/Pipeline/framework/lofarpipe/support/remotecommand.py b/CEP/Pipeline/framework/lofarpipe/support/remotecommand.py
new file mode 100644
index 0000000000000000000000000000000000000000..3570565b30a35e7fff495f600f5b1e24ee9c7ec4
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/remotecommand.py
@@ -0,0 +1,307 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                           Run a remote command
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from collections import defaultdict
+from threading import BoundedSemaphore
+
+import re
+import os
+import signal
+import threading
+import time
+
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.utilities import spawn_process
+from lofarpipe.support.jobserver import job_server
+
+# By default, Linux allocates lots more memory than we need(?) for a new stack
+# frame. When multiplexing lots of threads, that will cause memory issues.
+threading.stack_size(1048576)
+
+class ParamikoWrapper(object):
+    """
+    Sends an SSH command to a host using paramiko, then emulates a Popen-like
+    interface so that we can pass it back to pipeline recipes.
+    """
+    def __init__(self, paramiko_client, command):
+        self.returncode = None
+        self.client = paramiko_client
+        self.chan = paramiko_client.get_transport().open_session()
+        self.chan.get_pty()
+        self.chan.exec_command(command)
+        self.stdout = self.chan.makefile('rb', -1)
+        self.stderr = self.chan.makefile_stderr('rb', -1)
+
+    def communicate(self):
+        if not self.returncode:
+            self.returncode = self.chan.recv_exit_status()
+        stdout = "\n".join(line.strip() for line in self.stdout.readlines()) + "\n"
+        stderr = "\n".join(line.strip() for line in self.stdout.readlines()) + "\n"
+        return stdout, stderr
+
+    def poll(self):
+        if not self.returncode and self.chan.exit_status_ready():
+            self.returncode = self.chan.recv_exit_status()
+        return self.returncode
+
+    def wait(self):
+        if not self.returncode:
+            self.returncode = self.chan.recv_exit_status()
+        return self.returncode
+
+    def kill(self):
+        self.chan.close()
+
+def run_remote_command(config, logger, host, command, env, arguments=None):
+    """
+    Run command on host, passing it arguments from the arguments list and
+    exporting key/value pairs from env (a dictionary).
+
+    Returns an object with poll() and communicate() methods, similar to
+    subprocess.Popen.
+
+    This is a generic interface to potentially multiple ways of running
+    commands (SSH, mpirun, etc). The appropriate method is chosen from the
+    config block supplied (with SSH as a fallback).
+    """
+    try:
+        method = config.get('remote', 'method')
+    except:
+        method = None
+
+    if method=="paramiko":
+        try:
+            key_filename = config.get('remote', 'key_filename')
+        except:
+            key_filename = None
+        return run_via_paramiko(logger, host, command, env, arguments, key_filename)
+    elif method=="mpirun":
+        return run_via_mpirun(logger, host, command, env, arguments)
+    else:
+        return run_via_ssh(logger, host, command, env, arguments)
+
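+# A pipeline.cfg fragment selecting the paramiko method might look like this
+# (illustrative; the key_filename value is hypothetical):
+#
+#   [remote]
+#   method = paramiko
+#   key_filename = /home/pipeline/.ssh/id_rsa
+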
+def run_via_mpirun(logger, host, command, environment, arguments):
+    """
+    Dispatch a remote command via mpirun.
+
+    Return a Popen object pointing at the MPI command, to which we add a kill
+    method for shutting down the connection if required.
+    """
+    logger.debug("Dispatching command to %s with mpirun" % host)
+    mpi_cmd = ["/usr/bin/mpirun", "-host", host]
+    for key in environment.keys():
+        mpi_cmd.extend(["-x", key])
+    mpi_cmd.append("--")
+    mpi_cmd.extend(command.split()) # command is split into (python, script)
+    mpi_cmd.extend(str(arg) for arg in arguments)
+    env = os.environ.copy()
+    env.update(environment)
+    process = spawn_process(mpi_cmd, logger, env=env)
+    # mpirun should be killed with a SIGTERM to enable it to shut down the
+    # remote command.
+    process.kill = lambda : os.kill(process.pid, signal.SIGTERM)
+    return process
+
+def run_via_ssh(logger, host, command, environment, arguments):
+    """
+    Dispatch a remote command via SSH.
+
+    We return a Popen object pointing at the SSH session, to which we add a
+    kill method for shutting down the connection if required.
+    """
+    logger.debug("Dispatching command to %s with ssh" % host)
+    ssh_cmd = ["ssh", "-n", "-tt", "-x", host, "--", "/bin/sh", "-c"]
+
+    commandstring = ["%s=%s" % (key, value) for key, value in environment.items()]
+    commandstring.append(command)
+    commandstring.extend(re.escape(str(arg)) for arg in arguments)
+    ssh_cmd.append('"' + " ".join(commandstring) + '"')
+    process = spawn_process(ssh_cmd, logger)
+    process.kill = lambda : os.kill(process.pid, signal.SIGKILL)
+    return process
+
+def run_via_paramiko(logger, host, command, environment, arguments, key_filename):
+    """
+    Dispatch a remote command via paramiko.
+
+    We return an instance of ParamikoWrapper.
+    """
+    logger.debug("Dispatching command to %s with paramiko" % host)
+    import paramiko
+    client = paramiko.SSHClient()
+    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    client.connect(host, key_filename=key_filename)
+    commandstring = ["%s=%s" % (key, value) for key, value in environment.items()]
+    commandstring.append(command)
+    commandstring.extend(re.escape(str(arg)) for arg in arguments)
+    return ParamikoWrapper(client, " ".join(commandstring))
+
+class ProcessLimiter(defaultdict):
+    """
+    Provide a dictionary-like structure of bounded semaphores with arbitrary
+    keys.
+
+    This gives a convenient way to keep tabs on the number of simultaneous
+    jobs running on a given host.
+
+    :param nproc: Bound value for semaphore (ie, maximum number of jobs)
+    :type nproc: integer or none
+    """
+    def __init__(self, nproc=None):
+        if nproc:
+            super(ProcessLimiter, self).__init__(
+                lambda: BoundedSemaphore(int(nproc))
+            )
+        else:
+            class Unlimited(object):
+                """
+                Dummy semaphore for the unlimited case.
+                Acquire and release always succeed.
+                """
+                def acquire(self):
+                    return True
+                def release(self):
+                    return True
+            super(ProcessLimiter, self).__init__(Unlimited)
+
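+# Illustrative sketch (comments only; "node01" is a hypothetical hostname):
+# allow at most two simultaneous jobs per host.
+#
+#   limiter = ProcessLimiter(2)
+#   limiter["node01"].acquire()
+#   try:
+#       pass  # run the job
+#   finally:
+#       limiter["node01"].release()
+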
+class ComputeJob(object):
+    """
+    Container for information about a job to be dispatched to a compute node.
+
+    :param host: Target host for job
+    :param command: Full path to command to be run on target host
+    :param arguments: List of arguments which will be passed to command
+    """
+    def __init__(self, host, command, arguments=[]):
+        self.host = host
+        self.command = command
+        self.arguments = arguments
+        self.results = {}
+
+    def dispatch(self, logger, config, limiter, id, jobhost, jobport, error,
+                 killswitch):
+        """
+        Dispatch this job to the relevant compute node.
+
+        Note that error is an instance of threading.Event, which will be set
+        if the remote job fails for some reason.
+        """
+        self.id = id
+        limiter[self.host].acquire()
+        try:
+            if killswitch.isSet():
+                logger.debug("Shutdown in progress: not starting remote job")
+                error.set()
+                return 1
+            process = run_remote_command(
+                config,
+                logger,
+                self.host,
+                self.command,
+                {
+                    "PYTHONPATH": config.get('deploy', 'engine_ppath'),
+                    "LD_LIBRARY_PATH": config.get('deploy', 'engine_lpath')
+                },
+                arguments=[id, jobhost, jobport]
+            )
+            # Wait for process to finish. In the meantime, if the killswitch
+            # is set (by an exception in the main thread), forcibly kill our
+            # job off.
+            while process.poll() is None:
+                if killswitch.isSet():
+                    process.kill()
+                else:
+                    time.sleep(1)
+            sout, serr = process.communicate()
+
+            serr = serr.replace("Connection to %s closed.\r\n" % self.host, "")
+            log_process_output("Remote command", sout, serr, logger)
+        except Exception, e:
+            logger.exception("Failed to run remote process %s (%s)" % (self.command, str(e)))
+            error.set()
+            return 1
+        finally:
+            limiter[self.host].release()
+        if process.returncode != 0:
+            logger.error(
+                "Remote process %s failed (status: %d)" % (self.command, process.returncode)
+            )
+            error.set()
+        return process.returncode
+
+def threadwatcher(threadpool, logger, killswitch):
+    """
+    Start and watch a pool of threads. If an exception is thrown during
+    processing, set the killswitch so that all threads can shut down cleanly,
+    then join all the threads to wait for them to finish.
+
+    :param threadpool: Pool of threads to handle
+    :param logger: Logger
+    :type logger: logging.Logger or descendant
+    :param killswitch: Indication for threads to abort
+    :type killswitch: threading.Event
+    """
+    # If we receive a SIGTERM, shut down processing. Signal handlers are
+    # called with (signum, frame) arguments, so wrap killswitch.set().
+    signal.signal(signal.SIGTERM, lambda signum, frame: killswitch.set())
+    try:
+        # Start all the threads, but don't just join them, as that
+        # blocks all exceptions in the main thread. Instead, we wake
+        # up every second to handle exceptions.
+        [thread.start() for thread in threadpool]
+        logger.info("Waiting for compute threads...")
+
+        while any(thread.isAlive() for thread in threadpool):
+            time.sleep(1)
+    except:
+        # If something throws an exception (normally a KeyboardInterrupt,
+        # ie ctrl-c), set the kill switch to tell the compute threads to
+        # terminate, then wait for them.
+        logger.warn("Processing interrupted: shutting down")
+        killswitch.set()
+    finally:
+        # Always make sure everything has finished. Note that if an exception
+        # is thrown before all the threads have started, they will not all be
+        # alive (and hence not join()-able).
+        [thread.join() for thread in threadpool if thread.isAlive()]
+
+class RemoteCommandRecipeMixIn(object):
+    """
+    Mix-in for recipes to dispatch jobs using the remote command mechanism.
+    """
+    def _schedule_jobs(self, jobs, max_per_node=None):
+        """
+        Schedule a series of compute jobs. Blocks until completion.
+
+        :param jobs: iterable of :class:`~lofarpipe.support.remotecommand.ComputeJob` to be scheduled
+        :param max_per_node: maximum number of simultaneous jobs on any given node
+        :type max_per_node: integer or none
+        :rtype: dict mapping integer job id to :class:`~lofarpipe.support.remotecommand.ComputeJob`
+        """
+        threadpool = []
+        jobpool = {}
+        limiter = ProcessLimiter(max_per_node)
+        killswitch = threading.Event()
+
+        if max_per_node:
+            self.logger.info("Limiting to %d simultaneous jobs/node" % max_per_node)
+
+        with job_server(self.logger, jobpool, self.error) as (jobhost, jobport):
+            self.logger.debug("Job dispatcher at %s:%d" % (jobhost, jobport))
+            for job_id, job in enumerate(jobs):
+                jobpool[job_id] = job
+                threadpool.append(
+                    threading.Thread(
+                        target=job.dispatch,
+                        args=(
+                            self.logger, self.config, limiter, job_id,
+                            jobhost, jobport, self.error, killswitch
+                        )
+                    )
+                )
+            threadwatcher(threadpool, self.logger, killswitch)
+        return jobpool
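+
+# Illustrative recipe fragment (comments only; the node script path, host and
+# MeasurementSet names are hypothetical):
+#
+#   class example(BaseRecipe, RemoteCommandRecipeMixIn):
+#       def go(self):
+#           super(example, self).go()
+#           jobs = [
+#               ComputeJob("node01", "/path/to/node_script.py",
+#                          arguments=["obs.MS"])
+#           ]
+#           self._schedule_jobs(jobs, max_per_node=4)
+#           return 1 if self.error.isSet() else 0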
diff --git a/CEP/Pipeline/framework/lofarpipe/support/stateful.py b/CEP/Pipeline/framework/lofarpipe/support/stateful.py
new file mode 100644
index 0000000000000000000000000000000000000000..90e8bc3d97b962bd93c7ea5e55da3a44694ec4bb
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/stateful.py
@@ -0,0 +1,96 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                          Stateful LOFAR Recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from functools import wraps
+
+import os.path
+import cPickle
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.lofarexceptions import PipelineException
+
+def stateful(run_task):
+    @wraps(run_task)
+    def wrapper(self, configblock, datafiles=[], **kwargs):
+        try:
+            my_state = self.completed.pop()
+        except (AttributeError, IndexError):
+            my_state = ('','')
+
+        if configblock == my_state[0]:
+            # We have already run this task and stored its state, or...
+            self.logger.info("Task already exists in saved state; skipping")
+            return my_state[1]
+        elif my_state[0] != '':
+            # There is a stored task, but it doesn't match this one, or...
+            self.logger.error("Stored state does not match pipeline definition; bailing out")
+            raise PipelineException("Stored state does not match pipeline definition")
+        else:
+            # We need to run this task now.
+            outputs = run_task(self, configblock, datafiles, **kwargs)
+            self.state.append((configblock, outputs))
+            self._save_state()
+            return outputs
+    return wrapper
+
+class StatefulRecipe(BaseRecipe):
+    """
+    Enables recipes to save and restore state.
+
+    This is used exactly as :class:`~lofarpipe.support.baserecipe.BaseRecipe`,
+    but will write a ``statefile`` in the job directory, recording the current
+    state of the pipeline after each recipe completes. If the pipeline is
+    interrupted, it can automatically resume where it left off.
+
+    To reset the pipeline and start from the beginning again, just remove the
+    ``statefile``.
+    """
+    inputs = {} # No non-default inputs
+    def __init__(self):
+        super(StatefulRecipe, self).__init__()
+        self.state = []
+        self.completed = []
+
+    def _save_state(self):
+        """
+        Dump pipeline state to file.
+        """
+        statefile = open(
+            os.path.join(
+                self.config.get('layout', 'job_directory'),
+                'statefile'
+            ),
+        'w')
+        state = [self.inputs, self.state]
+        cPickle.dump(state, statefile)
+        statefile.close()
+
+    def go(self):
+        super(StatefulRecipe, self).go()
+        statefile = os.path.join(
+            self.config.get('layout', 'job_directory'),
+            'statefile'
+        )
+        try:
+            statefile = open(statefile, 'r')
+            inputs, self.state = cPickle.load(statefile)
+            statefile.close()
+
+            # What's the correct thing to do if inputs differ from the saved
+            # state? start_time will always change.
+            for key, value in inputs.iteritems():
+                if key != "start_time" and self.inputs[key] != value:
+                    raise PipelineException(
+                        "Input %s (%s) differs from saved state (%s)" %
+                        (key, str(self.inputs[key]), inputs[key])
+                    )
+
+            self.completed = list(reversed(self.state))
+        except (IOError, EOFError):
+            # Couldn't load state
+            self.completed = []
+
+    run_task = stateful(BaseRecipe.run_task)
diff --git a/CEP/Pipeline/framework/lofarpipe/support/utilities.py b/CEP/Pipeline/framework/lofarpipe/support/utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..09cbd93ccf399a222183fdcabf04f65f678693a5
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/utilities.py
@@ -0,0 +1,231 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                               Utility routines
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+from subprocess import Popen, CalledProcessError, PIPE
+from itertools import islice, repeat, chain, izip
+from contextlib import closing, contextmanager
+from time import sleep
+from random import randint
+
+import os
+import errno
+import shutil
+import subprocess
+
+from lofarpipe.support.pipelinelogging import log_process_output
+
+#                                                                  Compatibility
+#                               The following used to be defined in this module;
+#                        they are now included so as not to break existing code.
+# ------------------------------------------------------------------------------
+from lofarpipe.support.pipelinelogging import log_time
+from lofarpipe.support.parset import get_parset, patch_parset
+
+#                                                File and Directory Manipulation
+# ------------------------------------------------------------------------------
+
+def get_mountpoint(path):
+    """
+    Return the path to the mount point containing the given path.
+
+    :param path: Path to check
+    """
+    return path if os.path.ismount(path) else get_mountpoint(
+        os.path.abspath(os.path.join(path, os.pardir))
+    )
+
+def create_directory(dirname):
+    """
+    Recursively create a directory, without failing if it already exists.
+    """
+    try:
+        os.makedirs(dirname)
+    except OSError, failure:
+        if failure.errno != errno.EEXIST:
+            raise
+
+#                                                    IPython Dependency Checking
+# ------------------------------------------------------------------------------
+
+def build_available_list(listname):
+    """
+    This can be pushed to an IPython engine and run there to generate a list
+    of data which is locally stored (and hence available for processing).
+    """
+    import os
+    from IPython.kernel.engineservice import get_engine
+    available = [
+        filename for filename in filenames if os.access(filename, os.R_OK)
+    ]
+    properties = get_engine(id).properties
+    properties[listname] = available
+
+def clear_available_list(listname):
+    """
+    Delete lists of locally stored data to free up resources on the engine.
+    """
+    from IPython.kernel.engineservice import get_engine
+    del(get_engine(id).properties[listname])
+
+def check_for_path(properties, dependargs):
+    """
+    Run on IPython engine to check for the existence of a given path in the
+    locally available data, as recorded by build_available_list().
+
+    Used for dependency checking when scheduling jobs.
+    """
+    try:
+        return dependargs[0] in properties[dependargs[1]]
+    except NameError:
+        return False
+
+#                                                       Iterators and Generators
+# ------------------------------------------------------------------------------
+
+def is_iterable(obj):
+    """
+    Return True if the given object is iterable, else False.
+    """
+    try:
+        iter(obj)
+    except:
+        return False
+    else:
+        return True
+
+try:
+    from itertools import izip_longest
+except ImportError:
+    def izip_longest(*args, **kwds):
+        """
+        This code is lifted from the Python 2.6 manual, since izip_longest
+        isn't available in the 2.5 standard library. This simplified copy
+        ignores the fillvalue keyword and always pads with None:
+        izip_longest('ABCD', 'xy') --> ('A', 'x') ('B', 'y') ('C', None) ('D', None)
+        """
+        fillvalue = None
+        def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
+            yield counter()         # yields the fillvalue, or raises IndexError
+        fillers = repeat(fillvalue)
+        iters = [chain(it, sentinel(), fillers) for it in args]
+        try:
+            for tup in izip(*iters):
+                yield tup
+        except IndexError:
+            pass
+
+def group_iterable(iterable, size):
+    """
+    Group the iterable into tuples of given size.  Returns a generator.
+
+    Example:
+    >>> for x in group_iterable([1,2,3,4,5], 3): print x
+    (1, 2, 3)
+    (4, 5)
+    """
+    return (
+        filter(lambda x: x is not None, x)
+        for x in izip_longest(
+            *[islice(iterable, n, None, size) for n in xrange(size)]
+        )
+    )
+
+#                                                                  Miscellaneous
+# ------------------------------------------------------------------------------
+
+def read_initscript(logger, filename, shell="/bin/sh"):
+    """
+    Return a dict of the environment after sourcing the given script in a shell.
+    """
+    if not os.path.exists(filename):
+        logger.warn("Environment initialisation script not found!")
+        return {}
+    else:
+        logger.debug("Reading environment from %s" % filename)
+        p = subprocess.Popen(
+            ['. %s ; env' % (filename)],
+            shell=True,
+            executable=shell,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            close_fds=True
+        )
+        so, se = p.communicate()
+        environment = [x.split('=', 1) for x in so.strip().split('\n')]
+        environment = filter(lambda x: len(x) == 2, environment)
+        return dict(environment)
+
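+# Usage sketch (comments only; the init script path and command are
+# hypothetical):
+#
+#   env = read_initscript(logger, "/opt/lofar/lofarinit.sh")
+#   process = spawn_process(["NDPPP", "ndppp.parset"], logger, env=env)
+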
+def string_to_list(my_string):
+    """
+    Convert a list-like string (as in pipeline.cfg) to a list of values.
+    """
+    return [x.strip() for x in my_string.strip('[] ').split(',') if x.strip()]
+
+def spawn_process(cmd, logger, cwd=None, env=None, max_tries=2, max_timeout=30):
+    """
+    Tries to spawn a process.
+
+    If it hits an OSError (eg, due to lack of memory or too many open files),
+    it will retry up to max_tries times, waiting a random interval of up to
+    max_timeout seconds between attempts.
+
+    If successful, the process object is returned. Otherwise, we eventually
+    propagate the exception.
+    """
+    trycounter = 0
+    while True:
+        try:
+            process = Popen(
+                cmd, cwd=cwd, env=env, stdin=PIPE, stdout=PIPE, stderr=PIPE
+            )
+        except OSError, e:
+            logger.warn(
+                "Failed to spawn external process %s (%s)" % (" ".join(cmd), str(e))
+            )
+            if trycounter < max_tries:
+                timeout = randint(1, max_timeout)
+                logger.warn(
+                    "Retrying in %d seconds (%d more retries)." %
+                    (timeout, max_tries - trycounter - 1)
+                )
+                trycounter += 1
+                sleep(timeout)
+            else:
+                raise
+        else:
+            break
+    return process
+
+def catch_segfaults(cmd, cwd, env, logger, max=1, cleanup=lambda: None):
+    """
+    Run cmd in cwd with env, sending output to logger.
+
+    If it segfaults, retry up to max times.
+    """
+    tries = 0
+    while tries <= max:
+        if tries > 0:
+            logger.debug("Retrying...")
+        logger.debug("Running: %s" % (' '.join(cmd),))
+        process = spawn_process(cmd, logger, cwd, env)
+        sout, serr = process.communicate()
+        log_process_output(cmd[0], sout, serr, logger)
+        if process.returncode == 0:
+            break
+        elif process.returncode == -11:
+            logger.warn("%s process segfaulted!" % cmd[0])
+            cleanup()
+            tries += 1
+            continue
+        else:
+            raise CalledProcessError(
+                process.returncode, cmd[0]
+            )
+    if tries > max:
+        logger.error("Too many segfaults from %s; aborted" % (cmd[0]))
+        raise CalledProcessError(process.returncode, cmd[0])
+    return process
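+
+# Illustrative sketch (comments only; the command, working directory and
+# environment are hypothetical):
+#
+#   process = catch_segfaults(["rficonsole", "obs.MS"], working_dir, env,
+#                             logger, max=2)
+#   # Returns only after a clean exit; segfaults are retried up to twice,
+#   # and any other non-zero exit raises CalledProcessError.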
diff --git a/CEP/Pipeline/framework/lofarpipe/tests/__init__.py b/CEP/Pipeline/framework/lofarpipe/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/CEP/Pipeline/framework/lofarpipe/tests/lofaringredient.py b/CEP/Pipeline/framework/lofarpipe/tests/lofaringredient.py
new file mode 100644
index 0000000000000000000000000000000000000000..32e5e81af9d46e88cfea18b4634801058b7aa8a5
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/tests/lofaringredient.py
@@ -0,0 +1,216 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                             Tests: ingredients
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+import unittest
+import os
+
+class StringFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.StringField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import StringField
+        self.stringfield = StringField(default="a string")
+
+    def test_validator(self):
+        """
+        Check that strings are correctly regarded as valid, and integers
+        aren't.
+        """
+        self.assertFalse(self.stringfield.is_valid(1))
+        self.assertTrue(self.stringfield.is_valid("1"))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.stringfield.default, "a string")
+
+class IntFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.IntField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import IntField
+        self.intfield = IntField(default=1)
+
+    def test_validator(self):
+        """
+        Check that integers are correctly regarded as valid, and strings
+        aren't.
+        """
+        self.assertFalse(self.intfield.is_valid("1"))
+        self.assertTrue(self.intfield.is_valid(1))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.intfield.default, 1)
+
+    def test_coerce(self):
+        """
+        Check that a string is correctly coerced to an integer.
+        """
+        self.assertEqual(self.intfield.coerce("1"), 1)
+
+class FloatFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.FloatField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import FloatField
+        self.floatfield = FloatField(default=1.0)
+
+    def test_validator(self):
+        """
+        Check that floats are correctly regarded as valid, and strings
+        aren't.
+        """
+        self.assertFalse(self.floatfield.is_valid("1"))
+        self.assertTrue(self.floatfield.is_valid(1.0))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.floatfield.default, 1.0)
+
+    def test_coerce(self):
+        """
+        Check that a string is correctly coerced to a float.
+        """
+        self.assertEqual(self.floatfield.coerce("1"), 1.0)
+
+class FileFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.FileField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import FileField
+        self.filefield = FileField(default='/')
+
+    def test_validator(self):
+        """
+        Integers are not valid as filenames, and certainly don't exist on
+        disk.
+
+        ``/`` should, though.
+        """
+        self.assertFalse(self.filefield.is_valid(1))
+        self.assertTrue(self.filefield.is_valid("/"))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.filefield.default, "/")
+
+class ExecFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.ExecField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import ExecField
+        self.execfield = ExecField(default='/bin/ls')
+
+    def test_validator(self):
+        """
+        ``/etc/passwd`` should always exist as a file on disk, but not be
+        executable.
+
+        ``/bin/ls`` should always exist, and must be executable.
+        """
+        self.assertFalse(self.execfield.is_valid("/etc/passwd"))
+        self.assertTrue(self.execfield.is_valid("/bin/ls"))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.execfield.default, "/bin/ls")
+
+class DirectoryFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.DirectoryField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import DirectoryField
+        self.directoryfield = DirectoryField(default='/tmp')
+
+    def test_validator(self):
+        """
+        An integer is not a valid directory.
+
+        ``/tmp`` should always be valid.
+        """
+        self.assertFalse(self.directoryfield.is_valid(1))
+        self.assertTrue(self.directoryfield.is_valid("/tmp"))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.directoryfield.default, "/tmp")
+
+    def test_coerce(self):
+        """
+        Coercing a create-able directory name should cause it to exist. We
+        should always be able to write in ``/tmp``.
+        """
+        self.directoryfield.coerce("/tmp/foo")
+        self.assertTrue(os.path.exists("/tmp/foo"))
+
+class LOFARIngredientTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.LOFARingredient`
+    """
+    def setUp(self):
+        """
+        An instance of
+        :class:`~lofarpipe.support.lofaringredient.LOFARingredient` is defined
+        which contains three instances of
+        :class:`~lofarpipe.support.lofaringredient.StringField`.
+        """
+        from lofarpipe.support.lofaringredient import StringField
+        from lofarpipe.support.lofaringredient import LOFARingredient
+        f = StringField(default="foo")
+        g = StringField()
+        h = StringField(default=1)
+        self.lofaringredient = LOFARingredient({"f": f, "g": g, "h": h})
+
+    def test_keys(self):
+        """
+        ``self.lofaringredient`` should contain keys for the two fields
+        which have default parameters, but not for the one which is unset.
+        """
+        self.assertEqual(len(self.lofaringredient.keys()), 2)
+        self.assertRaises(KeyError, lambda: self.lofaringredient['g'])
+
+    def test_values(self):
+        """
+        Prior to setting, the value of the fields should be equal to
+        the default value.
+        """
+        self.assertEqual(self.lofaringredient['f'], "foo")
+
+    def test_set(self):
+        """
+        When set, the value of the fields should be equal to the new value.
+        """
+        self.lofaringredient['g'] = "bar"
+        self.assertEqual(self.lofaringredient['g'], "bar")
+
+    def test_bad_values(self):
+        """
+        Unacceptable values should raise an exception.
+        """
+        self.assertRaises(TypeError, lambda: self.lofaringredient['h'])
+        self.lofaringredient['h'] = "bar"
+        self.assertEqual(self.lofaringredient['h'], "bar")
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/CEP/Pipeline/framework/setup.py b/CEP/Pipeline/framework/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ef0cc273a5153c245fb7c54d64ceb0fbf4b0180
--- /dev/null
+++ b/CEP/Pipeline/framework/setup.py
@@ -0,0 +1,16 @@
+from distutils.core import setup
+
+setup(
+    name="Pipeline Framework",
+    version="0.1.dev",
+    packages=[
+        'lofarpipe',
+        'lofarpipe.cuisine',
+        'lofarpipe.support',
+        'lofarpipe.tests',
+    ],
+    description="LOFAR Pipeline System",
+    author="John Swinbank",
+    author_email="swinbank@transientskp.org",
+    url="http://www.transientskp.org/",
+)
diff --git a/CEP/Pipeline/mac/Makefile b/CEP/Pipeline/mac/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..62c5e49d3ab0b2394c7eb06e36b79da3047f241f
--- /dev/null
+++ b/CEP/Pipeline/mac/Makefile
@@ -0,0 +1,45 @@
+LOFARROOT = /home/swinbank/Work/LOFAR
+
+MACPRTSRC = $(LOFARROOT)/MAC/MACIO/src/EventPort.cc
+MACEVTSRC = $(LOFARROOT)/MAC/MACIO/src/GCF_Event.cc
+MACINC    = -I$(LOFARROOT)/MAC/MACIO/include
+
+COMMONSRC = $(LOFARROOT)/LCS/Common/src/SymbolTable.cc $(LOFARROOT)/LCS/Common/src/AddressTranslator.cc $(LOFARROOT)/LCS/Common/src/hexdump.cc $(LOFARROOT)/LCS/Common/src/SystemUtil.cc $(LOFARROOT)/LCS/Common/src/StringUtil.cc $(LOFARROOT)/LCS/Common/src/Backtrace.cc $(LOFARROOT)/LCS/Common/src/Net/Socket.cc $(LOFARROOT)/LCS/Common/src/LofarLogger.cc
+COMMONINC = -I$(LOFARROOT)/LCS/Common/include
+
+APLINC    = -I$(LOFARROOT)/MAC/APL/APLCommon/include
+GCFINC    = -I$(LOFARROOT)/MAC/GCF/TM/include
+
+LOG4CPLUS = -I/data/sys/opt/LofIm/external/log4cplus/builds/log4cplus-1.0.2-3/include -L/data/sys/opt/LofIm/external/log4cplus/builds/log4cplus-1.0.2-3/lib/ -llog4cplus
+BPYTHON   = -I/usr/include/python2.5 -lboost_python -lpython2.5
+
+OUTPUTDIR = ep
+
+default: ep_interface ep_echo ep_control
+
+ep_interface: sb_protocol
+	g++ -fPIC -shared -o $(OUTPUTDIR)/interface/_ep_interface.so src/ep_interface.cc src/SB_Protocol.cc -I./include $(MACPRTSRC) $(MACEVTSRC) $(COMMONSRC) $(LOFARCONF) $(COMMONINC) $(MACINC) $(LOG4CPLUS) $(BPYTHON) -lbfd
+
+ep_echo: echo_protocol ep_interface
+	g++ -fPIC -shared -o $(OUTPUTDIR)/echo/_ep_echo.so src/ep_echo.cc src/eventwrappers.cc src/Echo_Protocol.cc -I./include $(MACEVTSRC) $(COMMONSRC) $(LOFARCONF) $(COMMONINC) $(MACINC) $(LOG4CPLUS) $(BPYTHON) -lbfd
+
+ep_control: controller_protocol ep_interface
+	g++ -fPIC -shared -o $(OUTPUTDIR)/control/_ep_control.so src/ep_control.cc src/Controller_Protocol.cc -I./include $(MACEVTSRC) $(COMMONSRC) $(LOFARCONF) $(COMMONINC) $(MACINC) $(APLINC) $(GCFINC) $(LOG4CPLUS) $(BPYTHON) -lbfd
+
+sb_protocol:
+	autogen -L $(LOFARROOT)/MAC/MACIO/autogen/ $(LOFARROOT)/MAC/MACIO/src/SB_Protocol.prot
+	mv -f SB_Protocol.cc  src/
+	mv -f SB_Protocol.ph include/
+	
+echo_protocol:
+	autogen -L $(LOFARROOT)/MAC/MACIO/autogen/ $(LOFARROOT)/MAC/MACIO/test/Echo_Protocol.prot
+	mv -f Echo_Protocol.cc  src/
+	mv -f Echo_Protocol.ph include/
+
+controller_protocol:
+	autogen -L $(LOFARROOT)/MAC/MACIO/autogen/ $(LOFARROOT)/MAC/APL/APLCommon/src/Controller_Protocol.prot
+	mv -f Controller_Protocol.cc  src/
+	mv -f Controller_Protocol.ph include/
+
+clean:
+	\rm -f src/Echo_Protocol.cc src/SB_Protocol.cc src/Controller_Protocol.cc include/Echo_Protocol.ph include/Controller_Protocol.ph include/SB_Protocol.ph ep/interface/_ep_interface.so ep/echo/_ep_echo.so ep/control/_ep_control.so
diff --git a/CEP/Pipeline/mac/README b/CEP/Pipeline/mac/README
new file mode 100644
index 0000000000000000000000000000000000000000..34d893d45e60393b2282a9d003bcfe4abebdabf2
--- /dev/null
+++ b/CEP/Pipeline/mac/README
@@ -0,0 +1 @@
+Simple demonstration of the Echo Protocol over an EventPort using Python.
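+
+A minimal round-trip, assuming the extension modules have been built with the
+accompanying Makefile and an EchoServer is reachable, looks roughly like this:
+
+    import ep.echo                       # pulls in ep.interface first
+    port = ep.echo.EchoPort_Interface()  # connects to "EchoServer:test"
+    ping = ep.echo.EchoPingEvent()       # ping_time defaults to "now"
+    port.send_event(ping)
+    echo = port.receive_event()          # an EchoEchoEvent
+    print echo.ping_time, echo.echo_time, echo.seqnr
+
+See test_ep.py for a fuller set of examples.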
diff --git a/CEP/Pipeline/mac/ep/__init__.py b/CEP/Pipeline/mac/ep/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/CEP/Pipeline/mac/ep/control/__init__.py b/CEP/Pipeline/mac/ep/control/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed2ec0c75b7d3a97ea921bdc8e374db97591ac76
--- /dev/null
+++ b/CEP/Pipeline/mac/ep/control/__init__.py
@@ -0,0 +1,69 @@
+# Note that we require GenericEvent, so ep.interface must be available first
+# We'll use it to create a customised EventPort interface
+from ep.interface import EventPort_Interface, EventNotFoundException
+
+# Module level constants
+
+# Protocol ID
+from _ep_control import PROTOCOL
+
+# Result/error states
+from _ep_control import OK, LOST_CONN
+
+# Event signals
+from _ep_control import CONTROL_STARTED
+from _ep_control import CONTROL_CONNECT
+from _ep_control import CONTROL_CONNECTED
+from _ep_control import CONTROL_RESYNC
+from _ep_control import CONTROL_RESYNCED
+from _ep_control import CONTROL_SCHEDULE
+from _ep_control import CONTROL_SCHEDULED
+from _ep_control import CONTROL_CLAIM
+from _ep_control import CONTROL_CLAIMED
+from _ep_control import CONTROL_PREPARE
+from _ep_control import CONTROL_PREPARED
+from _ep_control import CONTROL_RESUME
+from _ep_control import CONTROL_RESUMED
+from _ep_control import CONTROL_SUSPEND
+from _ep_control import CONTROL_SUSPENDED
+from _ep_control import CONTROL_RELEASE
+from _ep_control import CONTROL_RELEASED
+from _ep_control import CONTROL_QUIT
+from _ep_control import CONTROL_QUITED
+
+# Events we might receive
+from _ep_control import ControlConnectedEvent
+from _ep_control import ControlClaimEvent
+from _ep_control import ControlPrepareEvent
+from _ep_control import ControlResumeEvent
+from _ep_control import ControlSuspendEvent
+from _ep_control import ControlReleaseEvent
+from _ep_control import ControlQuitEvent
+from _ep_control import ControlResyncEvent
+from _ep_control import ControlScheduleEvent
+
+# Events we can send
+from _ep_control import ControlConnectEvent
+from _ep_control import ControlResyncedEvent
+from _ep_control import ControlClaimedEvent
+from _ep_control import ControlPreparedEvent
+from _ep_control import ControlResumedEvent
+from _ep_control import ControlSuspendedEvent
+from _ep_control import ControlReleasedEvent
+from _ep_control import ControlQuitedEvent
+from _ep_control import ControlScheduledEvent
+
+class ControllerPort_Interface(EventPort_Interface):
+    def __init__(self, servicemask, hostname):
+        event_mapping = {
+            CONTROL_CONNECTED: ControlConnectedEvent,
+            CONTROL_RESYNC:    ControlResyncEvent,
+            CONTROL_SCHEDULE:  ControlScheduleEvent,
+            CONTROL_CLAIM:     ControlClaimEvent,
+            CONTROL_PREPARE:   ControlPrepareEvent,
+            CONTROL_RESUME:    ControlResumeEvent,
+            CONTROL_SUSPEND:   ControlSuspendEvent,
+            CONTROL_RELEASE:   ControlReleaseEvent,
+            CONTROL_QUIT:      ControlQuitEvent
+        }
+        super(ControllerPort_Interface, self).__init__(servicemask, PROTOCOL, event_mapping, hostname)
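+
+# A minimal sketch of how this interface might be used; the service mask and
+# hostname below are placeholders, not values defined by this module:
+#
+#   port = ControllerPort_Interface("ServiceMask", "localhost")
+#   event = port.receive_event()   # raises EventNotFoundException if the
+#                                  # signal is not present in event_mapping
+#   if isinstance(event, ControlClaimEvent):
+#       port.send_event(ControlClaimedEvent(event.controller_name, OK))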
diff --git a/CEP/Pipeline/mac/ep/echo/__init__.py b/CEP/Pipeline/mac/ep/echo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d36d33440782c986c66b63fffc76a2063578d386
--- /dev/null
+++ b/CEP/Pipeline/mac/ep/echo/__init__.py
@@ -0,0 +1,17 @@
+# We'll create a customized EventPort interface
+# Note that we require GenericEvent, so ep.interface must be available first
+from ep.interface import EventPort_Interface, EventNotFoundException
+
+# Relevant protocol & event names
+from _ep_echo import PROTOCOL
+from _ep_echo import ECHO, PING, CLOCK
+
+# Events we can handle
+from _ep_echo import EchoPingEvent, EchoEchoEvent
+
+class EchoPort_Interface(EventPort_Interface):
+    def __init__(self):
+        event_mapping = {
+            ECHO: EchoEchoEvent
+        }
+        super(EchoPort_Interface, self).__init__("EchoServer:test", PROTOCOL, event_mapping)
diff --git a/CEP/Pipeline/mac/ep/interface/__init__.py b/CEP/Pipeline/mac/ep/interface/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9218a05c33805dcfa79cc5f6cfd77f3e7d5f0622
--- /dev/null
+++ b/CEP/Pipeline/mac/ep/interface/__init__.py
@@ -0,0 +1,22 @@
+from _ep_interface import *
+
+class EventPortException(Exception):
+    pass
+
+class EventNotFoundException(EventPortException):
+    pass
+
+class EventPort_Interface(EP_Interface):
+
+    def __init__(self, servicemask, protocol, mapping, hostname='localhost'):
+        super(EventPort_Interface, self).__init__(servicemask, protocol, hostname)
+        self.__event_mapping = mapping
+
+    def receive_event(self):
+        gev = super(EventPort_Interface, self).receive_event()
+        try:
+            return self.__event_mapping[gev.signal](gev)
+        except KeyError:
+            raise EventNotFoundException
+
+
diff --git a/CEP/Pipeline/mac/include/controlwrappers.h b/CEP/Pipeline/mac/include/controlwrappers.h
new file mode 100644
index 0000000000000000000000000000000000000000..eb0c7f595d510ea7260f7f377cc621ea1e185cd4
--- /dev/null
+++ b/CEP/Pipeline/mac/include/controlwrappers.h
@@ -0,0 +1,313 @@
+#ifndef EP_CONTROLWRAPPERS_H
+#define EP_CONTROLWRAPPERS_H
+
+/*!
+  \file controlwrappers.h
+  \ingroup pipeline
+*/
+
+#include "Controller_Protocol.ph"
+#include "eventwrappers.h"
+
+typedef LOFAR::TYPES::uint16 uint16;
+typedef LOFAR::TYPES::uint32 uint32;
+
+// === Sendable messages ========================================================
+
+/*!
+  \class CONTROLConnectEventWrapper
+  \ingroup pipeline
+*/
+class CONTROLConnectEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLConnectEvent* my_event;
+public:
+    CONTROLConnectEventWrapper(std::string cntlrName) {
+        this->my_event = new CONTROLConnectEvent;
+        this->my_event->cntlrName = cntlrName;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLConnectEvent* get_event_ptr() { return this->my_event; }
+};
+
+/*!
+  \class CONTROLResyncedEventWrapper
+  \ingroup pipeline
+*/
+class CONTROLResyncedEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLResyncedEvent* my_event;
+public:
+    CONTROLResyncedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLResyncedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLResyncedEvent* get_event_ptr() { return this->my_event; }
+};
+
+/*!
+  \class CONTROLClaimedEventWrapper
+  \ingroup pipeline
+*/
+class CONTROLClaimedEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLClaimedEvent* my_event;
+public:
+    CONTROLClaimedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLClaimedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLClaimedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLPreparedEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLPreparedEvent* my_event;
+public:
+    CONTROLPreparedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLPreparedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLPreparedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLScheduledEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLScheduledEvent* my_event;
+public:
+    CONTROLScheduledEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLScheduledEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLScheduledEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLResumedEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLResumedEvent* my_event;
+public:
+    CONTROLResumedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLResumedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLResumedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLSuspendedEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLSuspendedEvent* my_event;
+public:
+    CONTROLSuspendedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLSuspendedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLSuspendedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLReleasedEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLReleasedEvent* my_event;
+public:
+    CONTROLReleasedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLReleasedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLReleasedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLQuitedEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLQuitedEvent* my_event;
+public:
+    CONTROLQuitedEventWrapper(std::string cntlrName, uint32 treeID, uint16 result, std::string errorMsg) {
+        this->my_event = new CONTROLQuitedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+        this->my_event->treeID = treeID;
+        this->my_event->errorMsg = errorMsg;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    uint32 get_treeID() { return this->my_event->treeID; }
+    std::string get_errorMsg() { return this->my_event->errorMsg; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLQuitedEvent* get_event_ptr() { return this->my_event; }
+};
+
+// Receivable messages
+
+// First the simple: connected, claim, prepare, resume, suspend, release, quit
+class CONTROLConnectedEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLConnectedEvent* my_event;
+public:
+    CONTROLConnectedEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLConnectedEvent(*event_ptr);
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLConnectedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLClaimEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLClaimEvent* my_event;
+public:
+    CONTROLClaimEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLClaimEvent(*event_ptr);
+    }
+    CONTROLClaimEventWrapper();
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLClaimEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLPrepareEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLPrepareEvent* my_event;
+public:
+    CONTROLPrepareEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLPrepareEvent(*event_ptr);
+    }
+    CONTROLPrepareEventWrapper();
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLPrepareEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLResumeEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLResumeEvent* my_event;
+public:
+    CONTROLResumeEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLResumeEvent(*event_ptr);
+    }
+    CONTROLResumeEventWrapper();
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLResumeEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLSuspendEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLSuspendEvent* my_event;
+public:
+    CONTROLSuspendEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLSuspendEvent(*event_ptr);
+    }
+    CONTROLSuspendEventWrapper();
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLSuspendEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLReleaseEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLReleaseEvent* my_event;
+public:
+    CONTROLReleaseEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLReleaseEvent(*event_ptr);
+    }
+    CONTROLReleaseEventWrapper();
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLReleaseEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLQuitEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLQuitEvent* my_event;
+public:
+    CONTROLQuitEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLQuitEvent(*event_ptr);
+    }
+    CONTROLQuitEventWrapper();
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLQuitEvent* get_event_ptr() { return this->my_event; }
+};
+
+// ...then the more awkward: resync, schedule.
+
+class CONTROLResyncEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLResyncEvent* my_event;
+public:
+    CONTROLResyncEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLResyncEvent(*event_ptr);
+    }
+    CONTROLResyncEventWrapper();
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_curState() { return this->my_event->curState; }
+    std::string get_hostname() { return this->my_event->hostname; }
+
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLResyncEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLScheduleEventWrapper : public GenericEventWrapper {
+private:
+    CONTROLScheduleEvent* my_event;
+public:
+    CONTROLScheduleEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLScheduleEvent(*event_ptr);
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    time_t get_startTime() { return this->my_event->startTime; }
+    time_t get_stopTime() { return this->my_event->stopTime; }
+
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLScheduleEvent* get_event_ptr() { return this->my_event; }
+};
+
+
+#endif
+
diff --git a/CEP/Pipeline/mac/include/echowrappers.h b/CEP/Pipeline/mac/include/echowrappers.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ae0ee3ac68e5a36c5f54ae622940ea5a19134f2
--- /dev/null
+++ b/CEP/Pipeline/mac/include/echowrappers.h
@@ -0,0 +1,46 @@
+#ifndef EP_ECHOWRAPPERS_H
+#define EP_ECHOWRAPPERS_H
+
+/*!
+	\file echowrappers.h
+	\ingroup pipeline
+*/
+
+#include "Echo_Protocol.ph"
+#include "eventwrappers.h"
+
+/*!
+	\class EchoPingEventWrapper
+	\ingroup pipeline
+*/
+class EchoPingEventWrapper : public GenericEventWrapper {
+private:
+    EchoPingEvent* my_event;
+public:
+    EchoPingEventWrapper();
+    double get_pt();
+    void set_pt(double);
+    LOFAR::TYPES::uint16 get_seqnr();
+    void set_seqnr(LOFAR::TYPES::uint16);
+    virtual LOFAR::TYPES::uint16 get_signal() { return this->my_event->signal; }
+    virtual EchoPingEvent* get_event_ptr() { return this->my_event; }
+};
+
+/*!
+	\class EchoEchoEventWrapper
+	\ingroup pipeline
+*/
+class EchoEchoEventWrapper : public GenericEventWrapper {
+private:
+    EchoEchoEvent* my_event;
+public:
+    EchoEchoEventWrapper(LOFAR::MACIO::GCFEvent*);
+    EchoEchoEventWrapper(GenericEventWrapper&);
+    double get_pt();
+    double get_et();
+    LOFAR::TYPES::uint16 get_seqnr();
+    virtual LOFAR::TYPES::uint16 get_signal() { return this->my_event->signal; }
+    virtual EchoEchoEvent* get_event_ptr() { return this->my_event; }
+};
+
+#endif
diff --git a/CEP/Pipeline/mac/include/ep_interface.h b/CEP/Pipeline/mac/include/ep_interface.h
new file mode 100644
index 0000000000000000000000000000000000000000..29e38c922ac40963c7a4fb2ebb4bc68ed9de6215
--- /dev/null
+++ b/CEP/Pipeline/mac/include/ep_interface.h
@@ -0,0 +1,42 @@
+#ifndef EP_INTERFACE_H
+#define EP_INTERFACE_H
+
+/*!
+  \file ep_interface.h
+  \ingroup pipeline
+*/
+
+#include <boost/python.hpp>
+#include <lofar_config.h>
+#include <Common/LofarLogger.h>
+#include <MACIO/EventPort.h>
+#include <string>
+
+#include "eventwrappers.h"
+
+/*!
+  \class EP_Interface
+  \ingroup pipeline
+  \brief Event Port Interface
+*/
+class EP_Interface {
+private:
+    LOFAR::MACIO::EventPort* my_EventPort;
+public:
+    EP_Interface(std::string servicename, short protocol_id, std::string host = "") {
+        this->my_EventPort = new LOFAR::MACIO::EventPort(servicename, false, protocol_id, host, true);
+    }
+    GenericEventWrapper* receive_event() {
+        LOFAR::MACIO::GCFEvent* ackPtr;
+        Py_BEGIN_ALLOW_THREADS
+        ackPtr = my_EventPort->receive();
+        Py_END_ALLOW_THREADS
+        return new GenericEventWrapper(ackPtr);
+    }
+    void send_event(GenericEventWrapper* wrapped_event) {
+        this->my_EventPort->send(wrapped_event->get_event_ptr());
+        }
+    ~EP_Interface() { delete this->my_EventPort; }
+};
+
+#endif
diff --git a/CEP/Pipeline/mac/include/eventwrappers.h b/CEP/Pipeline/mac/include/eventwrappers.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ce4913d198d604061100d8ea7cdedb3e3ae44fc
--- /dev/null
+++ b/CEP/Pipeline/mac/include/eventwrappers.h
@@ -0,0 +1,32 @@
+#ifndef EP_EVENTWRAPPERS_H
+#define EP_EVENTWRAPPERS_H
+
+/*!
+	\file eventwrappers.h
+	\ingroup pipeline
+*/
+
+#include <lofar_config.h>
+#include <Common/LofarLogger.h>
+#include <sys/time.h>
+
+/*!
+	\class GenericEventWrapper
+	\ingroup pipeline
+	\brief Interface definition class for a generic event
+*/
+class GenericEventWrapper {
+private:
+    LOFAR::MACIO::GCFEvent* my_event;
+public:
+    GenericEventWrapper() {
+        this->my_event = new LOFAR::MACIO::GCFEvent;
+    }
+    GenericEventWrapper(LOFAR::MACIO::GCFEvent* event_ptr) {
+        this->my_event = event_ptr;
+    }
+    virtual LOFAR::TYPES::uint16 get_signal() { return this->my_event->signal; }
+    virtual LOFAR::MACIO::GCFEvent* get_event_ptr() { return my_event;}
+};
+
+#endif
diff --git a/CEP/Pipeline/mac/include/lofar_config.h b/CEP/Pipeline/mac/include/lofar_config.h
new file mode 100644
index 0000000000000000000000000000000000000000..d1d5a8a5a8618c059ed7d0fcfae03dbe0376216e
--- /dev/null
+++ b/CEP/Pipeline/mac/include/lofar_config.h
@@ -0,0 +1,167 @@
+/* $Id$ */
+
+/*!
+	\file lofar_config.h
+	\ingroup pipeline
+*/
+
+/*-------------------------------------------------------------------------*\
+|     Defines for the presence or absence of (system) header files          |
+\*-------------------------------------------------------------------------*/
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <net/ethernet.h> header file. */
+#define HAVE_NET_ETHERNET_H 1
+
+/* Define to 1 if you have the <netinet/in.h> header file. */
+#define HAVE_NETINET_IN_H 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#define HAVE_SYS_MMAN_H 1
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#define HAVE_SYS_RESOURCE_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/timepps.h> header file. */
+/* #undef HAVE_SYS_TIMEPPS_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+
+/*-------------------------------------------------------------------------*\
+|     Defines for the presence or absence of (system) types                 |
+\*-------------------------------------------------------------------------*/
+
+/* Define if `long long' is supported */
+#define HAVE_LONG_LONG 1
+
+/* Define if `uint' is supported */
+#define HAVE_UINT 1
+
+/* Define if `ulong' is supported */
+#define HAVE_ULONG 1
+
+/* Define if `ushort' is supported */
+#define HAVE_USHORT 1
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+   significant byte first (like Motorola and SPARC, unlike Intel). */
+/* #undef WORDS_BIGENDIAN */
+
+
+/*-------------------------------------------------------------------------*\
+|     Defines for the presence or absence of (system) libraries             |
+\*-------------------------------------------------------------------------*/
+
+/* Define if AIPS++ is installed */
+#define HAVE_AIPSPP
+
+/* Define if libbfd is available */
+#define HAVE_BFD 1
+
+/* Define if BG/L MPICH is installed */
+/* #undef HAVE_BGLMPICH */
+
+/* Define if Blitz is installed */
+/* #undef HAVE_BLITZ */
+
+/* Define if BOOST is installed */
+#define HAVE_BOOST 1
+
+/* Define if BOOST component regex is installed */
+#define HAVE_BOOST_REGEX 1
+
+/* Define if BOOST component date_time is installed */
+#define HAVE_BOOST_DATE_TIME 1
+
+/* Define if FFTW2 is installed */
+/* #undef HAVE_FFTW2 */
+
+/* Define if FFTW3 is installed */
+/* #undef HAVE_FFTW3 */
+
+/* Define if LAM is installed */
+/* #undef HAVE_LAM */
+
+/* Define if LOG4CPLUS is installed */
+#define HAVE_LOG4CPLUS 1
+
+/* Define if LOG4CXX is installed */
+/* #undef HAVE_LOG4CXX */
+
+/* Define if we have an MPI implementation installed */
+/* #undef HAVE_MPI */
+
+/* Define if MASS is installed */ 
+/* #undef HAVE_MASS */
+
+/* Define if MPICH is installed */
+/* #undef HAVE_MPICH */
+
+/* Define if libpqxx is installed */
+#define HAVE_PQXX
+
+/* Define if PVSS is installed */
+/* #undef HAVE_PVSS */
+
+/* Define if using Rational Purify */
+/* #undef HAVE_PURIFY */
+
+/* Define if readline is installed */
+#define HAVE_READLINE 1
+
+/* Define if ScaMPI is installed */
+/* #undef HAVE_SCAMPI */
+
+/* Defined if shared memory is used */
+#define HAVE_SHMEM 1
+
+
+/*-------------------------------------------------------------------------*\
+|  Defines for the presence or absence of (system) functions                |
+\*-------------------------------------------------------------------------*/
+
+/* Define to __PRETTY_FUNCTION__, __FUNCTION__, or "<unknown>" */
+#define AUTO_FUNCTION_NAME __PRETTY_FUNCTION__
+
+/* Define to 1 if you have the `backtrace' function. */
+#define HAVE_BACKTRACE 1
+
+/* Define to 1 if you have the `cplus_demangle' function. */
+/* #undef HAVE_CPLUS_DEMANGLE */
+
+/* Define to 1 if you have a declaration for the `basename' function. */
+/* #undef HAVE_DECL_BASENAME */
diff --git a/CEP/Pipeline/mac/mac_wrapper/pipeline_wrapper.py b/CEP/Pipeline/mac/mac_wrapper/pipeline_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef749b66383a0ac619e78e80f31e73b2dc62355f
--- /dev/null
+++ b/CEP/Pipeline/mac/mac_wrapper/pipeline_wrapper.py
@@ -0,0 +1,63 @@
+#!/usr/bin/python
+
+import sys, os
+import datetime
+import subprocess
+from ConfigParser import SafeConfigParser as ConfigParser
+from lofar.parameterset import parameterset
+from lofarpipe.support.utilities import create_directory, string_to_list
+
+# USER EDITABLE PARAMETERS ----------------------------------------------------
+
+# NB: task file is defined in configuration file, not here.
+pipeline_definition = '/please/provide/path/to/sip.py'
+config_file         = '/please/provide/path/to/pipeline.cfg'
+
+# Set up environment for pipeline run
+pipeline_environment = {
+    "PYTHONPATH": "/opt/pipeline/dependencies/lib/python2.5/site-packages:/opt/pipeline/framework/lib/python2.5/site-packages:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/lofar/lib/python2.5/site-packages:/opt/pythonlibs/lib/python/site-packages",
+    "LD_LIBRARY_PATH": "/opt/pipeline/dependencies/lib:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/casacore/lib:/opt/LofIm/daily/lofar/lib:/opt/wcslib/lib/:/opt/hdf5/lib:/opt/LofIm/daily/casarest/lib:/data/sys/opt/lofar/external/log4cplus/lib", 
+    "PATH": "/opt/pipeline/dependencies/bin:/home/swinbank/sw/bin:/opt/pipeline/dependencies/bin:/usr/local/bin:/usr/bin:/usr/X11R6/bin:/bin:/usr/games:/opt/LofIm/daily/casarest/bin:/opt/LofIm/daily/casarest/bin",
+}
+
+# -----------------------------------------------------------------------------
+
+# To ensure consistency in the configuration between this wrapper and the
+# pipeline, we will set the start time here.
+start_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+# We should always be called with standard command line arguments:
+# tree ID, parset, ... others?
+input_parset = parameterset(sys.argv[1])
+tree_id      = sys.argv[2] # check this!
+
+# Extract runtime, working, results directories from input parset
+runtime_directory = input_parset.getString("ObsSW.Observation.ObservationControl.PythonControl.runtimeDirectory")
+working_directory = input_parset.getString("ObsSW.Observation.ObservationControl.PythonControl.workingDirectory")
+results_directory = input_parset.getString("ObsSW.Observation.ObservationControl.PythonControl.resultDirectory")
+
+# Set up configuration for later processing stages
+config = ConfigParser({
+    "job_name": tree_id,
+    "cwd": os.getcwd(),
+    "start_time": start_time,
+})
+config.read(config_file)
+config.set('DEFAULT', 'runtime_directory', runtime_directory)
+config.set('DEFAULT', 'default_working_directory', working_directory)
+
+# Extract input file list from parset
+to_process = input_parset.getStringVector('ObsSW.Observation.DataProducts.measurementSets')
+
+# Read config file to establish location of parset directory to use
+parset_directory = config.get("layout", "parset_directory")
+create_directory(parset_directory)
+
+# For each task (currently only ndppp), extract and write parset
+tasks = ConfigParser(config.defaults())
+tasks.read(string_to_list(config.get("DEFAULT", "task_files")))
+ndppp_parset_location = tasks.get("ndppp", "parset")
+input_parset.makeSubset("ObsSW.Observation.ObservationControl.PythonControl.DPPP.").writeFile(ndppp_parset_location)
+
+# Run pipeline & wait for result
+subprocess.check_call(['python', pipeline_definition, '-j', tree_id, '-d', '--config', config_file, '--runtime-directory', runtime_directory, '--default-working-directory', working_directory, '--start-time', start_time])
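+
+# Expected invocation (an assumption based on the argument handling above, and
+# on the paths that must be filled in under "USER EDITABLE PARAMETERS"):
+#
+#   python pipeline_wrapper.py <parset file> <tree ID>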
diff --git a/CEP/Pipeline/mac/src/ep_control.cc b/CEP/Pipeline/mac/src/ep_control.cc
new file mode 100644
index 0000000000000000000000000000000000000000..9d94cbdec1a9e434898857161ab2bf9194a0d9b8
--- /dev/null
+++ b/CEP/Pipeline/mac/src/ep_control.cc
@@ -0,0 +1,131 @@
+#include "Controller_Protocol.ph"
+#include "controlwrappers.h"
+#include <boost/python.hpp>
+
+BOOST_PYTHON_MODULE(_ep_control)
+{
+    using namespace boost::python;
+    
+    // Module level attributes in Python
+    
+    // Protocol ID
+    scope().attr("PROTOCOL")          = (short) CONTROLLER_PROTOCOL;
+
+    // Possible errors
+    scope().attr("OK")                = (short) CONTROL_OK_ERR;
+    scope().attr("LOST_CONN")         = (short) CONTROL_LOST_CONN_ERR;
+
+    // Possible signals
+    scope().attr("CONTROL_STARTED")   = CONTROL_STARTED;
+    scope().attr("CONTROL_CONNECT")   = CONTROL_CONNECT;
+    scope().attr("CONTROL_CONNECTED") = CONTROL_CONNECTED;
+    scope().attr("CONTROL_RESYNC")    = CONTROL_RESYNC;
+    scope().attr("CONTROL_RESYNCED")  = CONTROL_RESYNCED;
+    scope().attr("CONTROL_SCHEDULE")  = CONTROL_SCHEDULE;
+    scope().attr("CONTROL_SCHEDULED") = CONTROL_SCHEDULED;
+    scope().attr("CONTROL_CLAIM")     = CONTROL_CLAIM;
+    scope().attr("CONTROL_CLAIMED")   = CONTROL_CLAIMED;
+    scope().attr("CONTROL_PREPARE")   = CONTROL_PREPARE;
+    scope().attr("CONTROL_PREPARED")  = CONTROL_PREPARED;
+    scope().attr("CONTROL_RESUME")    = CONTROL_RESUME;
+    scope().attr("CONTROL_RESUMED")   = CONTROL_RESUMED;
+    scope().attr("CONTROL_SUSPEND")   = CONTROL_SUSPEND;
+    scope().attr("CONTROL_SUSPENDED") = CONTROL_SUSPENDED;
+    scope().attr("CONTROL_RELEASE")   = CONTROL_RELEASE;
+    scope().attr("CONTROL_RELEASED")  = CONTROL_RELEASED;
+    scope().attr("CONTROL_QUIT")      = CONTROL_QUIT;
+    scope().attr("CONTROL_QUITED")    = CONTROL_QUITED;
+    scope().attr("CONTROL_COMMON")    = CONTROL_COMMON;
+
+    // Events
+
+    // These events may be sent
+    class_<CONTROLConnectEventWrapper, bases<GenericEventWrapper> >("ControlConnectEvent", init<std::string>())
+        .add_property("controller_name", &CONTROLConnectEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLScheduledEventWrapper, bases<GenericEventWrapper> >("ControlScheduledEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLScheduledEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLScheduledEventWrapper::get_result)
+    ;
+
+    class_<CONTROLResyncedEventWrapper, bases<GenericEventWrapper> >("ControlResyncedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLResyncedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLResyncedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLClaimedEventWrapper, bases<GenericEventWrapper> >("ControlClaimedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLClaimedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLClaimedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLPreparedEventWrapper, bases<GenericEventWrapper> >("ControlPreparedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLPreparedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLPreparedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLResumedEventWrapper, bases<GenericEventWrapper> >("ControlResumedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLResumedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLResumedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLSuspendedEventWrapper, bases<GenericEventWrapper> >("ControlSuspendedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLSuspendedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLSuspendedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLReleasedEventWrapper, bases<GenericEventWrapper> >("ControlReleasedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLReleasedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLReleasedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLQuitedEventWrapper, bases<GenericEventWrapper> >("ControlQuitedEvent", init<std::string, uint32, uint16, std::string>())
+        .add_property("controller_name", &CONTROLQuitedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLQuitedEventWrapper::get_result)
+        .add_property("treeID", &CONTROLQuitedEventWrapper::get_treeID)
+        .add_property("error_message", &CONTROLQuitedEventWrapper::get_errorMsg)
+    ;
+
+    // These events may be received
+    class_<CONTROLConnectedEventWrapper, bases<GenericEventWrapper> >("ControlConnectedEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLConnectedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLConnectedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLClaimEventWrapper, bases<GenericEventWrapper> >("ControlClaimEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLClaimEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLPrepareEventWrapper, bases<GenericEventWrapper> >("ControlPrepareEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLPrepareEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLResumeEventWrapper, bases<GenericEventWrapper> >("ControlResumeEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLResumeEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLSuspendEventWrapper, bases<GenericEventWrapper> >("ControlSuspendEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLSuspendEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLReleaseEventWrapper, bases<GenericEventWrapper> >("ControlReleaseEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLReleaseEventWrapper::get_cntlrName)
+    ;
+        
+    class_<CONTROLQuitEventWrapper, bases<GenericEventWrapper> >("ControlQuitEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLQuitEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLResyncEventWrapper, bases<GenericEventWrapper> >("ControlResyncEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLResyncEventWrapper::get_cntlrName)
+        .add_property("current_state", &CONTROLResyncEventWrapper::get_curState)
+        .add_property("hostname", &CONTROLResyncEventWrapper::get_hostname)
+    ;
+
+    class_<CONTROLScheduleEventWrapper, bases<GenericEventWrapper> >("ControlScheduleEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLScheduleEventWrapper::get_cntlrName)
+        .add_property("start_time", &CONTROLScheduleEventWrapper::get_startTime)
+        .add_property("stop_time", &CONTROLScheduleEventWrapper::get_stopTime)
+    ;
+
+}
diff --git a/CEP/Pipeline/mac/src/ep_echo.cc b/CEP/Pipeline/mac/src/ep_echo.cc
new file mode 100644
index 0000000000000000000000000000000000000000..bfc8488378191a73cfdc5a9dd9650f27a827ec5f
--- /dev/null
+++ b/CEP/Pipeline/mac/src/ep_echo.cc
@@ -0,0 +1,24 @@
+#include "Echo_Protocol.ph"
+#include "echowrappers.h"
+#include <boost/python.hpp>
+
+BOOST_PYTHON_MODULE(_ep_echo) {
+    using namespace boost::python;
+
+    // These are exposed as module-level attributes in Python
+    scope().attr("PROTOCOL") = (short) ECHO_PROTOCOL;
+    scope().attr("PING") = ECHO_PING;
+    scope().attr("ECHO") = ECHO_ECHO;
+    scope().attr("CLOCK") = ECHO_CLOCK;
+
+    class_<EchoPingEventWrapper, boost::noncopyable, bases<GenericEventWrapper> >("EchoPingEvent")
+        .add_property("ping_time", &EchoPingEventWrapper::get_pt, &EchoPingEventWrapper::set_pt)
+        .add_property("seqnr", &EchoPingEventWrapper::get_seqnr, &EchoPingEventWrapper::set_seqnr)
+    ;
+
+    class_<EchoEchoEventWrapper, bases<GenericEventWrapper> >("EchoEchoEvent", init<GenericEventWrapper&>())
+        .add_property("ping_time", &EchoEchoEventWrapper::get_pt)
+        .add_property("echo_time", &EchoEchoEventWrapper::get_et)
+        .add_property("seqnr", &EchoEchoEventWrapper::get_seqnr)
+    ;
+}
diff --git a/CEP/Pipeline/mac/src/ep_interface.cc b/CEP/Pipeline/mac/src/ep_interface.cc
new file mode 100644
index 0000000000000000000000000000000000000000..36e2bfac877cd73a432d760930a7501c72012350
--- /dev/null
+++ b/CEP/Pipeline/mac/src/ep_interface.cc
@@ -0,0 +1,17 @@
+#include "ep_interface.h"
+#include <boost/python.hpp>
+
+BOOST_PYTHON_MODULE(_ep_interface) {
+    using namespace boost::python;
+
+    // We export GenericEventWrapper here, but it is required by all the
+    // protocols. Hence, ep.interface must always be imported first.
+    class_<GenericEventWrapper>("GenericEvent")
+        .add_property("signal", &GenericEventWrapper::get_signal)
+    ;
+
+    class_<EP_Interface>("EP_Interface", "EP_Interface(ServiceMask, Protocol, Host=localhost)", init<std::string, short, optional<std::string> >())
+        .def("receive_event", &EP_Interface::receive_event, return_value_policy<manage_new_object>())
+        .def("send_event", &EP_Interface::send_event)
+    ;
+}
diff --git a/CEP/Pipeline/mac/src/eventwrappers.cc b/CEP/Pipeline/mac/src/eventwrappers.cc
new file mode 100644
index 0000000000000000000000000000000000000000..5542310e029780a1d9b2152fb4d5f2c9e352f715
--- /dev/null
+++ b/CEP/Pipeline/mac/src/eventwrappers.cc
@@ -0,0 +1,53 @@
+#include "echowrappers.h"
+
+//
+// Specific event types
+//
+
+// Ping event
+EchoPingEventWrapper::EchoPingEventWrapper() {
+    this->my_event = new EchoPingEvent;
+    timeval ping_time;
+    gettimeofday(&ping_time, 0);
+    this->my_event->ping_time = ping_time;
+}
+
+double EchoPingEventWrapper::get_pt() {
+    return 1.0 * this->my_event->ping_time.tv_sec + (this->my_event->ping_time.tv_usec / 1000000.0); 
+}
+
+void EchoPingEventWrapper::set_pt(double my_time) {
+    this->my_event->ping_time.tv_sec = (int) my_time;
+    this->my_event->ping_time.tv_usec = (int) (1000000.0 * (my_time - (int) my_time));
+}
+
+LOFAR::TYPES::uint16 EchoPingEventWrapper::get_seqnr() {
+    return this->my_event->seqnr;
+}
+
+void EchoPingEventWrapper::set_seqnr(LOFAR::TYPES::uint16 my_seqnr) {
+    this->my_event->seqnr = my_seqnr;
+}
+
+// Echo event
+EchoEchoEventWrapper::EchoEchoEventWrapper(LOFAR::MACIO::GCFEvent* my_event) {
+    this->my_event = new EchoEchoEvent(*my_event);
+}
+
+EchoEchoEventWrapper::EchoEchoEventWrapper(GenericEventWrapper& my_gev) {
+    LOFAR::MACIO::GCFEvent* event_ptr;
+    event_ptr = my_gev.get_event_ptr();
+    this->my_event = new EchoEchoEvent(*event_ptr);
+}
+
+double EchoEchoEventWrapper::get_pt() {
+    return 1.0 * this->my_event->ping_time.tv_sec + (this->my_event->ping_time.tv_usec / 1000000.0); 
+}
+
+double EchoEchoEventWrapper::get_et() {
+    return 1.0 * this->my_event->echo_time.tv_sec + (this->my_event->echo_time.tv_usec / 1000000.0); 
+}
+
+LOFAR::TYPES::uint16 EchoEchoEventWrapper::get_seqnr() {
+    return this->my_event->seqnr;
+}
diff --git a/CEP/Pipeline/mac/test_ep.py b/CEP/Pipeline/mac/test_ep.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8397d4caa41e93cc61084479eab7a11c91e2196
--- /dev/null
+++ b/CEP/Pipeline/mac/test_ep.py
@@ -0,0 +1,97 @@
+import unittest
+import ep.echo
+import ep.control
+import time
+
+class TestPingEvent(unittest.TestCase):
+    def setUp(self):
+        self.epe = ep.echo.EchoPingEvent()
+
+    def test_init_time(self):
+        self.assertTrue(self.epe.ping_time < time.time())
+
+    def test_change_time(self):
+        now = time.time()
+        self.epe.ping_time = now
+        self.assertAlmostEqual(self.epe.ping_time, now, 5)
+
+    def test_change_seqnr(self):
+        self.epe.seqnr = 10
+        self.assertEqual(self.epe.seqnr, 10)
+
+class TestReceiveEcho(unittest.TestCase):
+    def setUp(self):
+        interface = ep.echo.EchoPort_Interface()
+        self.epe = ep.echo.EchoPingEvent()
+        interface.send_event(self.epe)
+        self.eee = interface.receive_event()
+
+    def test_ping_time(self):
+        self.assertEqual(self.epe.ping_time, self.eee.ping_time)
+
+    def test_seqnr(self):
+        self.assertEqual(self.epe.seqnr, self.eee.seqnr)
+
+    def test_long_ping(self):
+        self.assertTrue(self.eee.echo_time > self.eee.ping_time)
+
+class TestControllerSendables(unittest.TestCase):
+    def test_control_started(self):
+        event = ep.control.ControlStartedEvent("controller name", True)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertTrue(event.successful)
+        self.assertEqual(event.signal, ep.control.CONTROL_STARTED)
+
+    def test_control_connected(self):
+        event = ep.control.ControlConnectedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_CONNECTED)
+
+    def test_control_resynced(self):
+        event = ep.control.ControlResyncedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_RESYNCED)
+
+    def test_control_claimed(self):
+        event = ep.control.ControlClaimedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_CLAIMED)
+
+    def test_control_prepared(self):
+        event = ep.control.ControlPreparedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_PREPARED)
+
+    def test_control_resumed(self):
+        event = ep.control.ControlResumedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_RESUMED)
+
+    def test_control_suspended(self):
+        event = ep.control.ControlSuspendedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_SUSPENDED)
+
+    def test_control_released(self):
+        event = ep.control.ControlReleasedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_RELEASED)
+
+    def test_control_quited(self):
+        event = ep.control.ControlQuitedEvent("controller name", 1, ep.control.OK, "no error")
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.treeID, 1)
+        self.assertEqual(event.error_message, "no error")
+        self.assertEqual(event.signal, ep.control.CONTROL_QUITED)
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/CEP/Pipeline/recipes/examples/master/example.py b/CEP/Pipeline/recipes/examples/master/example.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd9e2777d8e2cdb0a025d070d24e405275454b7e
--- /dev/null
+++ b/CEP/Pipeline/recipes/examples/master/example.py
@@ -0,0 +1,60 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                                 Example recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+import subprocess
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.lofaringredient import ExecField, StringField
+from lofarpipe.support.pipelinelogging import log_process_output
+
+class example(BaseRecipe):
+    inputs = {
+        'executable': ExecField(
+            '--executable',
+            help="Command to run",
+            default="/bin/ls"
+        )
+    }
+
+    outputs = {
+        'stdout': StringField()
+    }
+
+    def go(self):
+        self.logger.info("Starting example recipe run")
+        super(example, self).go()
+
+        self.logger.info("This is a log message")
+
+        my_process = subprocess.Popen(
+            [
+                self.inputs['executable']
+            ],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
+        )
+        sout, serr = my_process.communicate()
+        self.outputs['stdout'] = sout
+        log_process_output(
+            self.inputs['executable'],
+            sout,
+            serr,
+            self.logger
+        )
+
+        if my_process.returncode == 0:
+            return 0
+        else:
+            self.logger.warn(
+                "Return code (%d) is not 0." % my_process.returncode
+            )
+            return 1
+
+
+if __name__ == '__main__':
+    sys.exit(example().main())
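+
+# When run standalone, the recipe takes the standard framework options used
+# elsewhere in this change (for instance -j <job name> and --config
+# <pipeline.cfg>) in addition to its own --executable flag; the values below
+# are placeholders:
+#
+#   python example.py -j example_job --config pipeline.cfg --executable /bin/ls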
diff --git a/CEP/Pipeline/recipes/examples/master/example_parallel.py b/CEP/Pipeline/recipes/examples/master/example_parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb2131cb4fd21d80b70015941984b05bb0c14095
--- /dev/null
+++ b/CEP/Pipeline/recipes/examples/master/example_parallel.py
@@ -0,0 +1,25 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                    Example recipe with simple job distribution
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+
+class example_parallel(BaseRecipe, RemoteCommandRecipeMixIn):
+    def go(self):
+        super(example_parallel, self).go()
+        node_command = "python %s" % (self.__file__.replace("master", "nodes"))
+        job = ComputeJob("localhost", node_command, arguments=["example_argument"])
+        self._schedule_jobs([job])
+        if self.error.isSet():
+            return 1
+        else:
+            return 0
+
+if __name__ == "__main__":
+    sys.exit(example_parallel().main())
diff --git a/CEP/Pipeline/recipes/examples/nodes/example_parallel.py b/CEP/Pipeline/recipes/examples/nodes/example_parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..85239c3b2c2d2dfe1b26fae38dc8087f16ed5c5a
--- /dev/null
+++ b/CEP/Pipeline/recipes/examples/nodes/example_parallel.py
@@ -0,0 +1,20 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                Example of a simple node script
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+
+class example_parallel(LOFARnodeTCP):
+    def run(self, *args):
+        for arg in args:
+            self.logger.info("Received %s as argument" % str(arg))
+        return 0
+
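+# When dispatched by the master recipe, this script is handed a job identifier
+# plus the host and port of the master's job server on the command line;
+# run_with_stored_arguments() is then expected to fetch the job's arguments
+# over that connection before calling run().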
+if __name__ == "__main__":
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(example_parallel(jobid, jobhost, jobport).run_with_stored_arguments())
+
diff --git a/CEP/Pipeline/recipes/sip/master/bbs.py b/CEP/Pipeline/recipes/sip/master/bbs.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ee952631bfd1fe6e125c800f81fcc96e4001b87
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/bbs.py
@@ -0,0 +1,415 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                BBS (BlackBoard Selfcal) recipe
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from contextlib import closing
+import psycopg2, psycopg2.extensions
+import subprocess
+import sys
+import os
+import threading
+import tempfile
+import shutil
+import time
+import signal
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.group_data import gvds_iterator
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.remotecommand import run_remote_command
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.jobserver import job_server
+from lofarpipe.support.lofaringredient import LOFARoutput, LOFARinput
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+
+class bbs(BaseRecipe):
+    """
+    The bbs recipe coordinates running BBS on a group of MeasurementSets. It
+    runs both GlobalControl and KernelControl; as yet, SolverControl has not
+    been integrated.
+
+    The recipe will also run the sourcedb and parmdb recipes on each of the
+    input MeasurementSets.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'control_exec': ingredient.ExecField(
+            '--control-exec',
+            dest="control_exec",
+            help="BBS Control executable"
+        ),
+        'kernel_exec': ingredient.ExecField(
+            '--kernel-exec',
+            dest="kernel_exec",
+            help="BBS Kernel executable"
+        ),
+        'initscript': ingredient.FileField(
+            '--initscript',
+            dest="initscript",
+            help="Initscript to source (ie, lofarinit.sh)"
+        ),
+        'parset': ingredient.FileField(
+            '-p', '--parset',
+            dest="parset",
+            help="BBS configuration parset"
+        ),
+        'key': ingredient.StringField(
+            '--key',
+            dest="key",
+            help="Key to identify BBS session"
+        ),
+        'db_host': ingredient.StringField(
+            '--db-host',
+            dest="db_host",
+            help="Database host with optional port"
+        ),
+        'db_user': ingredient.StringField(
+            '--db-user',
+            dest="db_user",
+            help="Database user"
+        ),
+        'db_name': ingredient.StringField(
+            '--db-name',
+            dest="db_name",
+            help="Database name"
+        ),
+        'makevds': ingredient.ExecField(
+            '--makevds',
+            help="makevds executable",
+            default="/opt/LofIm/daily/lofar/bin/makevds"
+        ),
+        'combinevds': ingredient.ExecField(
+            '--combinevds',
+            help="combinevds executable",
+            default="/opt/LofIm/daily/lofar/bin/combinevds"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        ),
+        'makesourcedb': ingredient.ExecField(
+            '--makesourcedb',
+            help="makesourcedb executable",
+            default="/opt/LofIm/daily/lofar/bin/makesourcedb"
+        ),
+        'parmdbm': ingredient.ExecField(
+            '--parmdbm',
+            help="parmdbm executable",
+            default="/opt/LofIm/daily/lofar/bin/parmdbm"
+        ),
+        'skymodel': ingredient.FileField(
+            '-s', '--skymodel',
+            dest="skymodel",
+            help="Input sky catalogue"
+        )
+    }
+
+    def go(self):
+        self.logger.info("Starting BBS run")
+        super(bbs, self).go()
+
+        #             Generate source and parameter databases for all input data
+        # ----------------------------------------------------------------------
+        inputs = LOFARinput(self.inputs)
+        inputs['args'] = self.inputs['args']
+        inputs['executable'] = self.inputs['parmdbm']
+        outputs = LOFARoutput(self.inputs)
+        if self.cook_recipe('parmdb', inputs, outputs):
+            self.logger.warn("parmdb reports failure")
+            return 1
+        inputs['args'] = self.inputs['args']
+        inputs['executable'] = self.inputs['makesourcedb']
+        inputs['skymodel'] = self.inputs['skymodel']
+        outputs = LOFARoutput(self.inputs)
+        if self.cook_recipe('sourcedb', inputs, outputs):
+            self.logger.warn("sourcedb reports failure")
+            return 1
+
+        #              Build a GVDS file describing all the data to be processed
+        # ----------------------------------------------------------------------
+        self.logger.debug("Building VDS file describing all data for BBS")
+        vds_file = os.path.join(
+            self.config.get("layout", "job_directory"),
+            "vds",
+            "bbs.gvds"
+        )
+        inputs = LOFARinput(self.inputs)
+        inputs['args'] = self.inputs['args']
+        inputs['gvds'] = vds_file
+        inputs['unlink'] = False
+        inputs['makevds'] = self.inputs['makevds']
+        inputs['combinevds'] = self.inputs['combinevds']
+        inputs['nproc'] = self.inputs['nproc']
+        inputs['directory'] = os.path.dirname(vds_file)
+        outputs = LOFARoutput(self.inputs)
+        if self.cook_recipe('new_vdsmaker', inputs, outputs):
+            self.logger.warn("new_vdsmaker reports failure")
+            return 1
+        self.logger.debug("BBS GVDS is %s" % (vds_file,))
+
+
+        #      Iterate over groups of subbands divided up for convenient cluster
+        #         processing -- ie, no more than nproc subbands per compute node
+        # ----------------------------------------------------------------------
+        for to_process in gvds_iterator(vds_file, int(self.inputs["nproc"])):
+            #               to_process is a list of (host, filename, vds) tuples
+            # ------------------------------------------------------------------
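+            # zip(*to_process) transposes the list of tuples into three
+            # parallel lists: hosts, MS filenames and VDS files.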
+            hosts, ms_names, vds_files = map(list, zip(*to_process))
+
+            #             The BBS session database should be cleared for our key
+            # ------------------------------------------------------------------
+            self.logger.debug(
+                "Cleaning BBS database for key %s" % (self.inputs["key"])
+            )
+            with closing(
+                psycopg2.connect(
+                    host=self.inputs["db_host"],
+                    user=self.inputs["db_user"],
+                    database=self.inputs["db_name"]
+                )
+            ) as db_connection:
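+                # Autocommit, so the DELETE below takes effect without an
+                # explicit commit.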
+                db_connection.set_isolation_level(
+                    psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
+                )
+                with closing(db_connection.cursor()) as db_cursor:
+                    db_cursor.execute(
+                        "DELETE FROM blackboard.session WHERE key=%s",
+                        (self.inputs["key"],)
+                    )
+
+            #     BBS GlobalControl requires a GVDS file describing all the data
+            #          to be processed. We assemble that from the separate parts
+            #                                         already available on disk.
+            # ------------------------------------------------------------------
+            self.logger.debug("Building VDS file describing data for BBS run")
+            vds_dir = tempfile.mkdtemp()
+            vds_file = os.path.join(vds_dir, "bbs.gvds")
+            combineproc = utilities.spawn_process(
+                [
+                    self.inputs['combinevds'],
+                    vds_file,
+                ] + vds_files,
+                self.logger
+            )
+            sout, serr = combineproc.communicate()
+            log_process_output(self.inputs['combinevds'], sout, serr, self.logger)
+            if combineproc.returncode != 0:
+                raise subprocess.CalledProcessError(
+                    combineproc.returncode, self.inputs['combinevds']
+                )
+
+            #      Construct a parset for BBS GlobalControl by patching the GVDS
+            #           file and database information into the supplied template
+            # ------------------------------------------------------------------
+            self.logger.debug("Building parset for BBS control")
+            bbs_parset = utilities.patch_parset(
+                self.inputs['parset'],
+                {
+                    'Observation': vds_file,
+                    'BBDB.Key': self.inputs['key'],
+                    'BBDB.Name': self.inputs['db_name'],
+                    'BBDB.User': self.inputs['db_user'],
+                    'BBDB.Host': self.inputs['db_host'],
+    #                'BBDB.Port': self.inputs['db_name'],
+                }
+            )
+            self.logger.debug("BBS control parset is %s" % (bbs_parset,))
+
+            try:
+                #        When one of our processes fails, we set the killswitch.
+                #      Everything else will then come crashing down, rather than
+                #                                         hanging about forever.
+                # --------------------------------------------------------------
+                self.killswitch = threading.Event()
+                self.killswitch.clear()
+                signal.signal(
+                    signal.SIGTERM, lambda signum, frame: self.killswitch.set()
+                )
+
+                #                           GlobalControl runs in its own thread
+                # --------------------------------------------------------------
+                run_flag = threading.Event()
+                run_flag.clear()
+                bbs_control = threading.Thread(
+                    target=self._run_bbs_control,
+                    args=(bbs_parset, run_flag)
+                )
+                bbs_control.start()
+                run_flag.wait()    # Wait for control to start before proceeding
+
+                #      We run BBS KernelControl on each compute node by directly
+                #                             invoking the node script using SSH
+                #      Note that we use a job_server to send out job details and
+                #           collect logging information, so we define a bunch of
+                #    ComputeJobs. However, we need more control than the generic
+                #     ComputeJob.dispatch method supplies, so we'll control them
+                #                                          with our own threads.
+                # --------------------------------------------------------------
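+                # The node script lives in a parallel "nodes" tree, so its path
+                # is derived from this module's own path by substituting
+                # "master" with "nodes".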
+                command = "python %s" % (self.__file__.replace('master', 'nodes'))
+                env = {
+                    "LOFARROOT": utilities.read_initscript(self.logger, self.inputs['initscript'])["LOFARROOT"],
+                    "PYTHONPATH": self.config.get('deploy', 'engine_ppath'),
+                    "LD_LIBRARY_PATH": self.config.get('deploy', 'engine_lpath')
+                }
+                jobpool = {}
+                bbs_kernels = []
+                with job_server(self.logger, jobpool, self.error) as (jobhost, jobport):
+                    self.logger.debug("Job server at %s:%d" % (jobhost, jobport))
+                    for job_id, details in enumerate(to_process):
+                        host, file, vds = details
+                        jobpool[job_id] = ComputeJob(
+                            host, command,
+                            arguments=[
+                                self.inputs['kernel_exec'],
+                                self.inputs['initscript'],
+                                file,
+                                self.inputs['key'],
+                                self.inputs['db_name'],
+                                self.inputs['db_user'],
+                                self.inputs['db_host']
+                            ]
+                        )
+                        bbs_kernels.append(
+                            threading.Thread(
+                                target=self._run_bbs_kernel,
+                                args=(host, command, env, job_id,
+                                    jobhost, str(jobport)
+                                )
+                            )
+                        )
+                    self.logger.info("Starting %d threads" % len(bbs_kernels))
+                    [thread.start() for thread in bbs_kernels]
+                    self.logger.debug("Waiting for all kernels to complete")
+                    [thread.join() for thread in bbs_kernels]
+
+
+                #         When GlobalControl finishes, our work here is done
+                # ----------------------------------------------------------
+                self.logger.info("Waiting for GlobalControl thread")
+                bbs_control.join()
+            finally:
+                os.unlink(bbs_parset)
+                shutil.rmtree(vds_dir)
+                if self.killswitch.isSet():
+                    #  If killswitch is set, then one of our processes failed so
+                    #                                   the whole run is invalid
+                    # ----------------------------------------------------------
+                    return 1
+
+        return 0
+
+    def _run_bbs_kernel(self, host, command, env, *arguments):
+        """
+        Run command with arguments on the specified host using ssh. Return its
+        return code.
+
+        The resultant process is monitored for failure; see
+        _monitor_process() for details.
+        """
+        try:
+            bbs_kernel_process = run_remote_command(
+                self.config,
+                self.logger,
+                host,
+                command,
+                env,
+                arguments=arguments
+            )
+        except Exception, e:
+            self.logger.exception("BBS Kernel failed to start")
+            self.killswitch.set()
+            return 1
+        result = self._monitor_process(bbs_kernel_process, "BBS Kernel on %s" % host)
+        sout, serr = bbs_kernel_process.communicate()
+        serr = serr.replace("Connection to %s closed.\r\n" % host, "")
+        log_process_output("SSH session (BBS kernel)", sout, serr, self.logger)
+        return result
+
+    def _run_bbs_control(self, bbs_parset, run_flag):
+        """
+        Run BBS Global Control and wait for it to finish. Return its return
+        code.
+        """
+        env = utilities.read_initscript(self.logger, self.inputs['initscript'])
+        self.logger.info("Running BBS GlobalControl")
+        working_dir = tempfile.mkdtemp()
+        with CatchLog4CPlus(
+            working_dir,
+            self.logger.name + ".GlobalControl",
+            os.path.basename(self.inputs['control_exec'])
+        ):
+            with utilities.log_time(self.logger):
+                try:
+                    bbs_control_process = utilities.spawn_process(
+                        [
+                            self.inputs['control_exec'],
+                            bbs_parset,
+                            "0"
+                        ],
+                        self.logger,
+                        cwd=working_dir,
+                        env=env
+                    )
+                    # _monitor_process() needs a convenient kill() method.
+                    bbs_control_process.kill = lambda : os.kill(bbs_control_process.pid, signal.SIGKILL)
+                except OSError, e:
+                    self.logger.error("Failed to spawn BBS Control (%s)" % str(e))
+                    self.killswitch.set()
+                    return 1
+                finally:
+                    run_flag.set()
+
+            returncode = self._monitor_process(
+                bbs_control_process, "BBS Control"
+            )
+            sout, serr = bbs_control_process.communicate()
+        shutil.rmtree(working_dir)
+        log_process_output(
+            self.inputs['control_exec'], sout, serr, self.logger
+        )
+        return returncode
+
+    def _monitor_process(self, process, name="Monitored process"):
+        """
+        Monitor a process for successful exit. If it fails, set the kill
+        switch, so everything else gets killed too. If the kill switch is set,
+        then kill this process off.
+
+        Name is an optional parameter used only for identification in logs.
+        """
+        while True:
+            try:
+                returncode = process.poll()
+                if returncode is None:                   # Process still running
+                    time.sleep(1)
+                elif returncode != 0:                           # Process broke!
+                    self.logger.warn(
+                        "%s returned code %d; aborting run" % (name, returncode)
+                    )
+                    self.killswitch.set()
+                    break
+                else:                                   # Process exited cleanly
+                    self.logger.info("%s clean shutdown" % (name))
+                    break
+                if self.killswitch.isSet():        # Other process failed; abort
+                    self.logger.warn("Killing %s" % (name))
+                    process.kill()
+                    returncode = process.wait()
+                    break
+            except:
+                # An exception here is likely a ctrl-c or similar. Whatever it
+                # is, we bail out.
+                self.killswitch.set()
+        return returncode
+
+if __name__ == '__main__':
+    sys.exit(bbs().main())
diff --git a/CEP/Pipeline/recipes/sip/master/cimager.py b/CEP/Pipeline/recipes/sip/master/cimager.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a04f9aecd05a56d6d9b6405ecbf53b6aee5f2b4
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/cimager.py
@@ -0,0 +1,310 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                  cimager (ASKAP imager) recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from contextlib import contextmanager
+
+import os
+import sys
+import time
+import threading
+import collections
+import subprocess
+import tempfile
+import signal
+
+from pyrap.quanta import quantity
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.pipelinelogging import log_time, log_process_output
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.remotecommand import ProcessLimiter
+from lofarpipe.support.remotecommand import run_remote_command
+from lofarpipe.support.remotecommand import threadwatcher
+from lofarpipe.support.parset import Parset
+from lofarpipe.support.parset import get_parset
+from lofarpipe.support.parset import patched_parset, patch_parset
+from lofarpipe.support.utilities import spawn_process
+from lofarpipe.support.lofarexceptions import PipelineException
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.lofaringredient import LOFARoutput, LOFARinput
+
+class ParsetTypeField(ingredient.StringField):
+    """
+    Input field which accepts the string values either "cimager" or
+    "mwimager". Enables specification of type of parset supplied to the
+    cimager recipe.
+    """
+    def is_valid(self, value):
+        if value == "cimager" or value == "mwimager":
+            return True
+        else:
+            return False
+
+
+class cimager(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Provides a convenient, pipeline-based mechanism of running the cimager on
+    a dataset.
+
+    Can ingest either an MWimager-style parset, converting to cimager format
+    as required, or a cimager parset directly.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'imager_exec': ingredient.ExecField(
+            '--imager-exec',
+            help="cimager executable"
+        ),
+        'convert_exec': ingredient.ExecField(
+            '--convert-exec',
+            help="convertimagerparset executable"
+        ),
+        'parset': ingredient.FileField(
+            '--parset',
+            help="Imager configuration parset (mwimager or cimager format)"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        ),
+        'timestep': ingredient.FloatField(
+            '--timestep',
+            help="If non-zero, multiple images will be made, each using timestep seconds of data",
+            default=0.0
+        ),
+        'results_dir': ingredient.DirectoryField(
+            '--results-dir',
+            help="Directory in which resulting images will be placed",
+        ),
+        'parset_type': ParsetTypeField(
+            '--parset-type',
+            default="mwimager",
+            help="cimager or mwimager"
+        ),
+        'makevds': ingredient.ExecField(
+            '--makevds',
+            help="makevds executable",
+            default="/opt/LofIm/daily/lofar/bin/makevds"
+        ),
+        'combinevds': ingredient.ExecField(
+            '--combinevds',
+            help="combinevds executable",
+            default="/opt/LofIm/daily/lofar/bin/combinevds"
+        )
+    }
+
+    outputs = {
+        'images': ingredient.ListField()
+    }
+
+    def go(self):
+        self.logger.info("Starting cimager run")
+        super(cimager, self).go()
+        self.outputs['images'] = []
+
+        #              Build a GVDS file describing all the data to be processed
+        # ----------------------------------------------------------------------
+        self.logger.debug("Building VDS file describing all data for cimager")
+        gvds_file = os.path.join(
+            self.config.get("layout", "job_directory"),
+            "vds",
+            "cimager.gvds"
+        )
+        inputs = LOFARinput(self.inputs)
+        inputs['args'] = self.inputs['args']
+        inputs['gvds'] = gvds_file
+        inputs['unlink'] = False
+        inputs['makevds'] = self.inputs['makevds']
+        inputs['combinevds'] = self.inputs['combinevds']
+        inputs['nproc'] = self.inputs['nproc']
+        inputs['directory'] = os.path.dirname(gvds_file)
+        outputs = LOFARoutput(self.inputs)
+        if self.cook_recipe('new_vdsmaker', inputs, outputs):
+            self.logger.warn("new_vdsmaker reports failure")
+            return 1
+        self.logger.debug("cimager GVDS is %s" % (gvds_file,))
+
+        #                            Read data for processing from the GVDS file
+        # ----------------------------------------------------------------------
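+        # Each Part's FileSys entry is prefixed with the host name
+        # ("host:..."); only the host is needed to schedule the job.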
+        parset = Parset(gvds_file)
+
+        data = []
+        for part in range(parset.getInt('NParts')):
+            host = parset.getString("Part%d.FileSys" % part).split(":")[0]
+            vds  = parset.getString("Part%d.Name" % part)
+            data.append((host, vds))
+
+        #                                 Divide data into timesteps for imaging
+        #          timesteps is a list of (start, end, results directory) tuples
+        # ----------------------------------------------------------------------
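+        # Illustrative example: a 3600 s observation imaged with timestep=1200
+        # yields three (start, end, directory) tuples, each results directory
+        # being named after its start time.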
+        timesteps = []
+        results_dir = self.inputs['results_dir']
+        if self.inputs['timestep'] == 0:
+            self.logger.info("No timestep specified; imaging all data")
+            timesteps = [(None, None, results_dir)]
+        else:
+            self.logger.info("Using timestep of %s s" % self.inputs['timestep'])
+            gvds = get_parset(gvds_file)
+            start_time = quantity(gvds['StartTime'].get()).get('s').get_value()
+            end_time = quantity(gvds['EndTime'].get()).get('s').get_value()
+            step = float(self.inputs['timestep'])
+            while start_time < end_time:
+                timesteps.append(
+                    (
+                        start_time, start_time+step,
+                        os.path.join(results_dir, str(start_time))
+                    )
+                )
+                start_time += step
+
+        #                          Run each cimager process in a separate thread
+        # ----------------------------------------------------------------------
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        for label, timestep in enumerate(timesteps):
+            self.logger.info("Processing timestep %d" % label)
+            jobs = []
+            parsets = []
+            start_time, end_time, resultsdir = timestep
+            for host, vds in data:
+                vds_data = Parset(vds)
+                frequency_range = [
+                    vds_data.getDoubleVector("StartFreqs")[0],
+                    vds_data.getDoubleVector("EndFreqs")[-1]
+                ]
+                parsets.append(
+                    self.__get_parset(
+                        os.path.basename(vds_data.getString('FileName')).split('.')[0],
+                        vds_data.getString("FileName"),
+                        str(frequency_range),
+                        vds_data.getString("Extra.FieldDirectionType"),
+                        vds_data.getStringVector("Extra.FieldDirectionRa")[0],
+                        vds_data.getStringVector("Extra.FieldDirectionDec")[0],
+                        'True', # cimager bug: non-restored image unusable
+                    )
+                )
+                jobs.append(
+                    ComputeJob(
+                        host, command,
+                        arguments=[
+                            self.inputs['imager_exec'],
+                            vds,
+                            parsets[-1],
+                            resultsdir,
+                            start_time,
+                            end_time
+                        ]
+                    )
+                )
+            self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+            for parset in parsets:
+                parset = Parset(parset)
+                image_names = parset.getStringVector("Cimager.Images.Names")
+                self.outputs['images'].extend(image_names)
+            [os.unlink(parset) for parset in parsets]
+
+        #                Check if we recorded a failing process before returning
+        # ----------------------------------------------------------------------
+        if self.error.isSet():
+            self.logger.warn("Failed imager process detected")
+            return 1
+        else:
+            return 0
+
+    def __get_parset(
+        self, name, dataset, frequency, ms_dir_type,
+        ms_dir_ra, ms_dir_dec, restore
+    ):
+        def convert_mwimager_parset(parset):
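+            # Patch the observation-specific values into a temporary copy of
+            # the mwimager-style parset, then run the external converter to
+            # write a cimager-format parset into the job directory.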
+            try:
+                with patched_parset(
+                    parset,
+                    {
+                        'dataset': dataset,
+                        'Images.frequency': frequency,
+                        'msDirType': ms_dir_type,
+                        'msDirRa': ms_dir_ra,
+                        'msDirDec': ms_dir_dec,
+                        'restore': restore # cimager bug: non-restored image unusable
+                    }
+                ) as cimager_parset:
+                    fd, converted_parset = tempfile.mkstemp(
+                        dir=self.config.get("layout", "job_directory")
+                    )
+                    convert_process = spawn_process(
+                        [
+                            self.inputs['convert_exec'],
+                            cimager_parset,
+                            converted_parset
+                        ],
+                        self.logger
+                    )
+                    os.close(fd)
+                    sout, serr = convert_process.communicate()
+                    log_process_output(self.inputs['convert_exec'], sout, serr, self.logger)
+                    if convert_process.returncode != 0:
+                        raise subprocess.CalledProcessError(
+                            convert_process.returncode, self.inputs['convert_exec']
+                        )
+                    return converted_parset
+            except OSError, e:
+                self.logger.error("Failed to spawn convertimagerparset (%s)" % str(e))
+                raise
+            except subprocess.CalledProcessError, e:
+                self.logger.error(str(e))
+                raise
+
+        def populate_cimager_parset(parset):
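+            # Give each image defined in the cimager parset a name unique to
+            # this dataset, copy its per-image settings across, and patch in
+            # the dataset, frequency and direction before writing the result
+            # out as a new parset in the job directory.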
+            input_parset = Parset(parset)
+            patch_dictionary = {
+                'Cimager.dataset': dataset,
+                'Cimager.restore': restore
+            }
+            image_names = []
+            for image_name in input_parset.getStringVector('Cimager.Images.Names'):
+                image_names.append("%s_%s" % (image_name, name))
+                subset = input_parset.makeSubset(
+                    "Cimager.Images.%s" % image_name,
+                    "Cimager.Images.%s" % image_names[-1]
+                )
+                patch_dictionary[
+                    "Cimager.Images.%s.frequency" % image_names[-1]
+                ] = frequency
+                patch_dictionary[
+                    "Cimager.Images.%s.direction" % image_names[-1]
+                ] = "[ %s,%s,%s ]" % (ms_dir_ra, ms_dir_dec, ms_dir_type)
+                for key in subset:
+                    patch_dictionary[key] = subset[key].get()
+            input_parset.subtractSubset('Cimager.Images.image')
+            for key in input_parset:
+                patch_dictionary[key] = input_parset[key].get()
+            patch_dictionary['Cimager.Images.Names'] = "[ %s ]" % ", ".join(image_names)
+            return patch_parset(
+                None, patch_dictionary,
+                self.config.get("layout", "job_directory")
+            )
+
+        try:
+            if self.inputs['parset_type'] == "mwimager":
+                cimager_parset = convert_mwimager_parset(self.inputs['parset'])
+            elif self.inputs['parset_type'] == "cimager":
+                cimager_parset = populate_cimager_parset(self.inputs['parset'])
+        except Exception, e:
+            self.logger.exception("Failed to generate imager parset")
+            raise
+
+        return cimager_parset
+
+if __name__ == '__main__':
+    sys.exit(cimager().main())
diff --git a/CEP/Pipeline/recipes/sip/master/count_timesteps.py b/CEP/Pipeline/recipes/sip/master/count_timesteps.py
new file mode 100644
index 0000000000000000000000000000000000000000..04311bb409e5421dc17ccdfde9794211f4030c63
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/count_timesteps.py
@@ -0,0 +1,62 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                             Return total length of observation
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import os
+import sys
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.remotecommand import ComputeJob
+
+class count_timesteps(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Accept a list of baselines (in the format used by NDPPP logging).
+
+    Flag them in all MeasurementSets.
+    """
+    inputs = {
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+    outputs = {
+        'start_time': ingredient.FloatField(),
+        'end_time': ingredient.FloatField()
+    }
+
+    def go(self):
+        self.logger.info("Starting count_timesteps run")
+        super(count_timesteps, self).go()
+
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host, ms in data:
+            jobs.append(
+                ComputeJob(
+                    host, command, arguments=[ms]
+                )
+            )
+        jobs = self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
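+        # Each node job reports the start and end time of its MeasurementSet;
+        # the observation as a whole spans the earliest start to the latest end.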
+        self.outputs['start_time'] = min(job.results['start_time'] for job in jobs.itervalues())
+        self.outputs['end_time'] = max(job.results['end_time'] for job in jobs.itervalues())
+
+        if self.error.isSet():
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(count_timesteps().main())
diff --git a/CEP/Pipeline/recipes/sip/master/datamapper.py b/CEP/Pipeline/recipes/sip/master/datamapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f4a636dfd685f19914dbf2376195ec13a6d3269
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/datamapper.py
@@ -0,0 +1,81 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                 Map subbands on storage nodes to compute nodes
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+import os.path
+from itertools import cycle
+from collections import defaultdict
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.clusterdesc import ClusterDesc, get_compute_nodes
+from lofarpipe.support.parset import Parset
+import lofarpipe.support.lofaringredient as ingredient
+
+class datamapper(BaseRecipe):
+    """
+    Parses a list of filenames and attempts to map them to appropriate compute
+    nodes (ie, which can access the files) on the LOFAR CEP cluster. Mapping
+    by filename in this way is fragile, but is the best we can do for now.
+
+    **Arguments**
+
+    None.
+    """
+    inputs = {
+        'mapfile': ingredient.StringField(
+            '--mapfile',
+            help="Full path (including filename) of mapfile to produce (clobbered if exists)"
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField(
+            help="Full path (including filename) of generated mapfile"
+        )
+    }
+
+    def go(self):
+        self.logger.info("Starting datamapper run")
+        super(datamapper, self).go()
+
+        #      We build lists of compute-nodes per cluster and data-per-cluster,
+        #          then match them up to schedule jobs in a round-robin fashion.
+        # ----------------------------------------------------------------------
+        clusterdesc = ClusterDesc(self.config.get('cluster', "clusterdesc"))
+        if clusterdesc.subclusters:
+            available_nodes = dict(
+                (cl.name, cycle(get_compute_nodes(cl)))
+                for cl in clusterdesc.subclusters
+            )
+        else:
+            available_nodes = {
+                clusterdesc.name: cycle(get_compute_nodes(clusterdesc))
+            }
+
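+        # Assign each file to the next compute node (round-robin, via cycle())
+        # in the subcluster named by element [2] of the path split on
+        # os.path.sep.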
+        data = defaultdict(list)
+        for filename in self.inputs['args']:
+            subcluster = filename.split(os.path.sep)[2]
+            try:
+                host = available_nodes[subcluster].next()
+            except KeyError, key:
+                self.logger.error("%s is not a known cluster" % str(key))
+                raise
+
+            data[host].append(filename)
+
+        #                                 Dump the generated mapping to a parset
+        # ----------------------------------------------------------------------
+        parset = Parset()
+        for host, filenames in data.iteritems():
+            parset.addStringVector(host, filenames)
+
+        parset.writeFile(self.inputs['mapfile'])
+        self.outputs['mapfile'] = self.inputs['mapfile']
+
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(datamapper().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/casapy.py b/CEP/Pipeline/recipes/sip/master/deprecated/casapy.py
new file mode 100644
index 0000000000000000000000000000000000000000..36332ca328d61bd9b6e3ece6ee19f0a159a6280f
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/casapy.py
@@ -0,0 +1,186 @@
+from __future__ import with_statement
+import sys, os
+
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.lofaringredient import LOFARoutput, LOFARinput
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.group_data import group_files
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.clusterdesc import ClusterDesc
+
+def run_casapy(infile, parset, start_time, end_time, increment):
+    # Run on engine to process data with Casapy
+    from lofarrecipe.nodes.casapy import casapy_node
+    return casapy_node(loghost=loghost, logport=logport).run(
+        infile,
+        parset,
+        start_time,
+        end_time,
+        increment
+    )
+
+class casapy(LOFARrecipe):
+    def __init__(self):
+        super(casapy, self).__init__()
+        self.optionparser.add_option(
+            '--executable',
+            dest="executable",
+            help="CASApy executable"
+        )
+        self.optionparser.add_option(
+            '-p', '--parset',
+            dest="parset",
+            help="Parset containing configuration for CASAPY"
+        )
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+        self.optionparser.add_option(
+            '-t', '--increment',
+            dest="increment",
+            help="Length of each image in seconds"
+        )
+        self.optionparser.add_option(
+            '-g', '--g(v)ds-file',
+            dest="gvds",
+            help="G(V)DS file describing data to be processed"
+        )
+        self.optionparser.add_option(
+            '--makevds-exec',
+            dest="makevds_exec",
+            help="makevds executable"
+        )
+        self.optionparser.add_option(
+            '--combinevds-exec',
+            dest="combinevds_exec",
+            help="combinevds executable"
+        )
+        self.optionparser.add_option(
+            '--max-bands-per-node',
+            dest="max_bands_per_node",
+            help="Maximum number of subbands to farm out to a given cluster node",
+            default="8"
+        )
+
+    def go(self):
+        self.logger.info("Starting CASApy run")
+        super(casapy, self).go()
+
+        job_directory = self.config.get("layout", "job_directory")
+
+        # Connect to the IPython cluster and initialise it with
+        # the functions we need.
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                run_casapy=run_casapy,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        # Use build_available_list() to determine which SBs are available
+        # on each engine; we use this for dependency resolution later.
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (
+            self.inputs['job_name'], self.__class__.__name__
+        )
+        mec.push(dict(filenames=self.inputs['args']))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+
+        clusterdesc = ClusterDesc(
+            self.config.get('cluster', 'clusterdesc')
+        )
+
+        for data_group in group_files(
+            self.logger,
+            clusterdesc,
+            os.path.join(self.inputs['working_directory'], self.inputs['job_name']),
+            int(self.inputs['max_bands_per_node']),
+            self.inputs['args']
+        ):
+            self.logger.debug("Processing: " + str(data_group))
+            self.logger.info("Calling vdsmaker")
+            inputs = LOFARinput(self.inputs)
+            inputs['directory'] = self.config.get('layout', 'vds_directory')
+            inputs['gvds'] = self.inputs['gvds']
+            inputs['args'] = data_group
+            inputs['makevds'] = self.inputs['makevds_exec']
+            inputs['combinevds'] = self.inputs['combinevds_exec']
+            outputs = LOFARoutput()
+            if self.cook_recipe('vdsmaker', inputs, outputs):
+                self.logger.warn("vdsmaker reports failure")
+                return 1
+
+
+            gvds = utilities.get_parset(
+                os.path.join(
+                    self.config.get('layout', 'vds_directory'), self.inputs['gvds']
+                )
+            )
+            start_time = gvds['StartTime']
+            end_time = gvds['EndTime']
+            self.inputs['increment'] = int(self.inputs['increment'])
+
+            # clusterlogger context manager accepts networked logging
+            # from compute nodes.
+            with clusterlogger(self.logger) as (loghost, logport):
+                # Timer for total casapy job execution
+                with utilities.log_time(self.logger):
+                    self.logger.debug("Logging to %s:%d" % (loghost, logport))
+                    tasks = []
+                    # Iterate over SB names, building and scheduling a casapy job
+                    # for each one.
+                    for ms_name in data_group:
+                        task = LOFARTask(
+                            "result = run_casapy(infile, parset, start_time, end_time, increment)",
+                            push=dict(
+                                infile=ms_name,
+                                parset=self.inputs['parset'],
+                                start_time=start_time,
+                                end_time=end_time,
+                                increment=self.inputs['increment'],
+                                loghost=loghost,
+                                logport=logport
+                            ),
+                            pull="result",
+                            depend=utilities.check_for_path,
+                            dependargs=(ms_name, available_list)
+                        )
+                        self.logger.info("Scheduling processing of %s" % (ms_name,))
+                        if self.inputs['dry_run'] == "False":
+                            self.inputs['dry_run'] = False
+                        if not self.inputs['dry_run']:
+                            tasks.append(tc.run(task))
+                        else:
+                            self.logger.info("Dry run: scheduling skipped")
+
+                    # Wait for all jobs to finish
+                    self.logger.info("Waiting for all CASApy tasks to complete")
+                    tc.barrier(tasks)
+
+            failure = False
+            for task in tasks:
+                ##### Print failing tasks?
+                ##### Abort if all tasks failed?
+                res = tc.get_task_result(task)
+                if res.failure:
+                    self.logger.warn("Task %s failed" % (task))
+                    self.logger.warn(res)
+                    self.logger.warn(res.failure.getTraceback())
+                    failure = True
+
+        if failure:
+            return 1
+#            self.outputs['data'] = outnames
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(casapy().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/collector.py b/CEP/Pipeline/recipes/sip/master/deprecated/collector.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d87ba4845cc05c6d8d337ca594292520dadaf9a
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/collector.py
@@ -0,0 +1,140 @@
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.clusterdesc import ClusterDesc, get_compute_nodes
+import lofarpipe.support.utilities as utilities
+import pyrap.images
+from skim.main import run as create_hdf5
+import os, os.path, glob, subprocess, sys, numpy
+import shutil, errno, re, logging, imp
+
+class collector(LOFARrecipe):
+    """
+    Collect images into results directory.
+    Convert to fits files.
+    Average.
+
+    Outstanding issue: breaks if the results directory is already
+    populated and the --clobber option isn't set.
+    """
+
+    def __init__(self):
+        super(collector, self).__init__()
+        self.optionparser.add_option(
+            '--image-re',
+            dest="image_re",
+            help="Regular expression to match CASA image names",
+        )
+        self.optionparser.add_option(
+            '--working-directory',
+            dest="working_directory",
+            help="Working directory containing images on compute nodes",
+        )
+        self.optionparser.add_option(
+            '--image2fits',
+            dest="image2fits",
+            help="Location of image2fits tool (from casacore)"
+        )
+        self.optionparser.add_option(
+            '--averaged-name',
+            dest="averaged_name",
+            help="Base filename for averaged images"
+        )
+
+    def go(self):
+        self.logger.info("Starting data collector run")
+        super(collector, self).go()
+
+        clusterdesc = ClusterDesc(
+            self.config.get('cluster', 'clusterdesc')
+        )
+        results_dir = self.config.get('layout', 'results_directory')
+        try:
+            os.makedirs(results_dir)
+        except OSError, failure:
+            if failure.errno != errno.EEXIST:
+                raise
+
+        self.logger.debug("Copying CASA images to to %s"  % (results_dir))
+        for node in get_compute_nodes(clusterdesc):
+            self.logger.debug("Node: %s" % (node))
+            try:
+                exec_string = [
+                            "ssh",
+                            node,
+                            "--",
+                            "cp",
+                            "-r",
+                            "%s/%s/%s" % (
+                                self.inputs['working_directory'],
+                                self.inputs['job_name'],
+                                self.inputs['image_re']
+                            ),
+                            results_dir
+                    ]
+                self.logger.info(exec_string)
+                subprocess.check_call(exec_string, close_fds=True)
+            except subprocess.CalledProcessError:
+                self.logger.warn("No images moved from %s" % (node))
+
+        image_names = glob.glob("%s/%s" % (results_dir, self.inputs['image_re']))
+        if len(image_names) > 0:
+            self.logger.info("Averaging results")
+            result = reduce(
+                numpy.add,
+                (pyrap.images.image(file).getdata() for file in image_names)
+            ) / len(image_names)
+
+            self.logger.info("Writing averaged files")
+            averaged_file = os.path.join(
+                        self.config.get('layout', 'results_directory'),
+                        self.inputs['averaged_name']
+            )
+            # Output for the averaged image.
+            # Use the coordinate system from SB0.
+            output = pyrap.images.image(
+                averaged_file + ".img", values=result,
+                coordsys=pyrap.images.image(image_names[0]).coordinates()
+            )
+            self.logger.info("Wrote: %s" % (averaged_file + ".img",))
+            output.tofits(averaged_file + ".fits")
+            self.logger.info("Wrote: %s" % (averaged_file + ".fits",))
+            self.outputs['data'] = (averaged_file + ".fits",)
+        else:
+            self.logger.info("No images found; not averaging")
+            self.outputs['data'] = None
+
+        self.logger.info("Generating FITS files")
+        fits_files = []
+        for filename in image_names:
+            self.logger.debug(filename)
+            subband = re.search('(SB\d+)', os.path.basename(filename)).group()
+            output = os.path.join(
+                self.config.get('layout', 'results_directory'),
+                "%s.fits" % (subband)
+            )
+            fits_files.append(output)
+            subprocess.check_call(
+                [
+                    self.inputs['image2fits'],
+                    'in=%s' % (filename),
+                    'out=%s' % (output)
+                ],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                close_fds=True
+            )
+
+        self.logger.info("Creating HDF5 file")
+        hdf5logger = logging.getLogger(self.logger.name + ".hdf5")
+        hdf5logger.setLevel(logging.INFO)
+        create_hdf5(
+            self.config.get('layout', 'job_directory'),
+            self.inputs['start_time'],
+            hdf5logger
+        )
+
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(collector().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/colmaker.py b/CEP/Pipeline/recipes/sip/master/deprecated/colmaker.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf4bf423b56f1fe255964a9769d867bc18074b4c
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/colmaker.py
@@ -0,0 +1,67 @@
+from __future__ import with_statement
+import sys, os, tempfile
+
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.clusterlogger import clusterlogger
+
+def make_columns(file):
+    from lofarrecipe.nodes.colmaker import makecolumns_node
+    return makecolumns_node(loghost=loghost, logport=logport).run(file)
+
+class colmaker(LOFARrecipe):
+    """
+    Add imaging columns to inputs using pyrap.
+    """
+    def go(self):
+        super(colmaker, self).go()
+
+        ms_names = self.inputs['args']
+
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                make_columns=make_columns,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        # Build VDS files for each of the newly created MeasurementSets
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (self.inputs['job_name'], "colmaker")
+        mec.push(dict(filenames=ms_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+        clusterdesc = self.config.get('cluster', 'clusterdesc')
+        tasks = []
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            for ms_name in ms_names:
+                task = LOFARTask(
+                    "result = make_columns(ms_name)",
+                    push=dict(
+                        ms_name=ms_name,
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result",
+                    depend=utilities.check_for_path,
+                    dependargs=(ms_name, available_list)
+                )
+                self.logger.info("Scheduling processing of %s" % (ms_name,))
+                tasks.append(tc.run(task))
+            self.logger.info("Waiting for all colmaker tasks to complete")
+            tc.barrier(tasks)
+        for task in tasks:
+            res = tc.get_task_result(task)
+            if res.failure:
+                print res.failure
+
+        mec.execute("clear_available_list(\"%s\")" % (available_list,))
+
+if __name__ == '__main__':
+    sys.exit(colmaker().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/copier.py b/CEP/Pipeline/recipes/sip/master/deprecated/copier.py
new file mode 100644
index 0000000000000000000000000000000000000000..47434b3d24299d347c3e0623a700ac779733645d
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/copier.py
@@ -0,0 +1,50 @@
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+import lofarpipe.support.utilities as utilities
+import os.path
+
+class copier(LOFARrecipe):
+    """
+    Copy files to compute nodes.
+    """
+    def __init__(self):
+        super(copier, self).__init__()
+        self.optionparser.add_option(
+            '--destination',
+            dest="destination",
+            help="Destination directory on compute nodes"
+        )
+
+    def go(self):
+        self.logger.info("Starting copier run")
+        super(copier, self).go()
+
+        tc, mec = self._get_cluster()
+
+        mec.execute('import shutil')
+
+        self.logger.info("Compiling list of output destinations")
+        destinations = [
+            os.path.join(
+                self.inputs['destination'],
+                os.path.basename(file)
+            )
+            for file in self.inputs['args']
+        ]
+        self.logger.debug(destinations)
+
+        self.logger.info("Copying files on cluster")
+        try:
+            tc.map(
+                lambda x: shutil.copytree(x[0], x[1]),
+                zip(self.inputs['args'], destinations)
+            )
+        except Exception, e:
+            self.logger.exception('Failed to copy files on cluster')
+            return 1
+
+        self.outputs['ms_names'] = destinations
+
+        return 0
+
+
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/dppp.py b/CEP/Pipeline/recipes/sip/master/deprecated/dppp.py
new file mode 100644
index 0000000000000000000000000000000000000000..71795699c6748670c743efe5d6e532caf7c73edc
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/dppp.py
@@ -0,0 +1,151 @@
+from __future__ import with_statement
+import sys, os
+
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.lofarnode import run_node
+
+class dppp(LOFARrecipe):
+    def __init__(self):
+        super(dppp, self).__init__()
+        self.optionparser.add_option(
+            '--executable',
+            dest="executable",
+            help="DPPP executable"
+        )
+        self.optionparser.add_option(
+            '--initscript',
+            dest="initscript",
+            help="DPPP initscript"
+        )
+        self.optionparser.add_option(
+            '-p', '--parset',
+            dest="parset",
+            help="Parset containing configuration for DPPP"
+        )
+        self.optionparser.add_option(
+            '--suffix',
+            dest="suffix",
+            default=".dppp",
+            help="Suffix to add to trimmed data (default: overwrite existing)"
+        )
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+        self.optionparser.add_option(
+            '--data-start-time',
+            help="Start time to be passed to DPPP (optional)",
+        )
+        self.optionparser.add_option(
+            '--data-end-time',
+            help="End time to be passed to DPPP (optional)",
+        )
+        self.optionparser.add_option(
+            '--nthreads',
+            help="Number of threads per (N)DPPP process",
+            default="2"
+        )
+
+
+    def go(self):
+        self.logger.info("Starting DPPP run")
+        super(dppp, self).go()
+
+        job_directory = self.config.get("layout", "job_directory")
+        ms_names = self.inputs['args']
+
+        # Connect to the IPython cluster and initialise it with
+        # the functions we need.
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                run_dppp=run_node,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.debug("Pushed functions to cluster")
+
+        # Use build_available_list() to determine which SBs are available
+        # on each engine; we use this for dependency resolution later.
+        self.logger.debug("Building list of data available on engines")
+        available_list = "%s%s" % (
+            self.inputs['job_name'], self.__class__.__name__
+        )
+        mec.push(dict(filenames=ms_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+        self.logger.debug("Data lists available. Starting processing loop.")
+
+        # clusterlogger context manager accepts networked logging
+        # from compute nodes.
+        with clusterlogger(self.logger) as (loghost, logport):
+            # Timer for total DPPP job execution
+            with utilities.log_time(self.logger):
+                self.logger.debug("Logging to %s:%d" % (loghost, logport))
+                tasks = []
+                outnames = []
+                # Iterate over SB names, building and scheduling a DPPP job
+                # for each one.
+                for ms_name in ms_names:
+                    outnames.append(
+                        os.path.join(
+                            self.inputs['working_directory'],
+                            self.inputs['job_name'],
+                            os.path.basename(ms_name) + self.inputs['suffix']
+                        )
+                    )
+                    task = LOFARTask(
+                        "result = run_dppp(ms_name, ms_outname, parset, executable, initscript, start_time, end_time, nthreads)",
+                        push=dict(
+                            recipename=self.name,
+                            nodepath=os.path.dirname(self.__file__.replace('master', 'nodes')),
+                            ms_name=ms_name,
+                            ms_outname=outnames[-1],
+                            parset=self.inputs['parset'],
+                            executable=self.inputs['executable'],
+                            initscript=self.inputs['initscript'],
+                            start_time=self.inputs['data_start_time'],
+                            end_time=self.inputs['data_end_time'],
+                            nthreads=self.inputs['nthreads'],
+                            loghost=loghost,
+                            logport=logport
+                        ),
+                        pull="result",
+                        depend=utilities.check_for_path,
+                        dependargs=(ms_name, available_list)
+                    )
+                    self.logger.info("Scheduling processing of %s" % (ms_name,))
+                    if self.inputs['dry_run'] == "False":
+                        self.inputs['dry_run'] = False
+                    if not self.inputs['dry_run']:
+                        tasks.append((tc.run(task), ms_name))
+                    else:
+                        self.logger.info("Dry run: scheduling skipped")
+
+                # Wait for all jobs to finish
+                self.logger.debug("Waiting for all DPPP tasks to complete")
+                tc.barrier([task for task, subband in tasks])
+
+        failure = False
+        for task, subband in tasks:
+            ##### Print failing tasks?
+            ##### Abort if all tasks failed?
+            res = tc.get_task_result(task)
+            if res.failure:
+                self.logger.warn("Task %s failed (processing %s)" % (task, subband))
+                self.logger.warn(res)
+                self.logger.warn(res.failure.getTraceback())
+                failure = True
+        if failure:
+            return 1
+        self.outputs['data'] = outnames
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(dppp().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/dummy_echo_parallel.py b/CEP/Pipeline/recipes/sip/master/deprecated/dummy_echo_parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..efab83835737bdb885355683247885354bac5ac8
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/dummy_echo_parallel.py
@@ -0,0 +1,63 @@
+from __future__ import with_statement
+import sys, os
+
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.lofarnode import run_node
+
+class dummy_echo_parallel(LOFARrecipe):
+    def __init__(self):
+        super(dummy_echo_parallel, self).__init__()
+        self.optionparser.add_option(
+            '--executable',
+            dest="executable",
+            help="Executable to be run (ie, dummy_echo.sh)",
+            default="/home/swinbank/sw/bin/dummy_echo.sh"
+        )
+
+    def go(self):
+        self.logger.info("Starting dummy_echo run")
+        super(dummy_echo_parallel, self).go()
+
+        # Connect to the IPython cluster and initialise it with the functions
+        # we need.
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                run_dummy_echo=run_node,
+            )
+        )
+        self.logger.info("Cluster initialised")
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            self.logger.debug("Logging to %s:%d" % (loghost, logport))
+            tasks = [] # this will be a list of scheduled jobs
+            for filename in self.inputs['args']:
+                task = LOFARTask(
+                    "result = run_dummy_echo(filename, executable)",
+                    push = dict(
+                        recipename=self.name,
+                        nodepath=os.path.dirname(self.__file__.replace('master', 'nodes')),
+                        filename=filename,
+                        executable=self.inputs['executable'],
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result"
+                )
+                self.logger.info("Scheduling processing of %s" % (filename))
+                tasks.append(tc.run(task))
+            self.logger.info("Waiting for all dummy_echo tasks to complete")
+            tc.barrier(tasks)
+
+        for task in tasks:
+            result = tc.get_task_result(task)
+            if result.failure:
+                self.logger.warn(result)
+                self.logger.warn(result.failure.getTraceback())
+
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(dummy_echo_parallel().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/excluder.py b/CEP/Pipeline/recipes/sip/master/deprecated/excluder.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c023ea6c117773a905780ff48c7002942146a43
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/excluder.py
@@ -0,0 +1,18 @@
+import sys
+from lofarpipe.support.pyraprunner import pyraprunner
+from lofarpipe.support.utilities import string_to_list
+
+class excluder(pyraprunner):
+    def __init__(self):
+        super(excluder, self).__init__()
+        self.optionparser.add_option(
+            '--station',
+            dest="station",
+            help="Name of stations to exclude (e.g. DE001LBA)"
+        )
+
+    def _generate_arguments(self):
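+        # Build a quoted, comma-separated station list to pass to the node script.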
+        return "\"%s\"" % ('\", \"'.join(string_to_list(self.inputs['station'])))
+
+if __name__ == '__main__':
+    sys.exit(excluder().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/flagger.py b/CEP/Pipeline/recipes/sip/master/deprecated/flagger.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a303aead787752d566d5cc79fe19062ddec66f6
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/flagger.py
@@ -0,0 +1,68 @@
+from __future__ import with_statement
+import sys, os
+from lofarpipe.support.pyraprunner import pyraprunner
+
+# Quick-and-dirty tool to read the ASCII source lists used as input by BBS.
+# The format for these doesn't seem to be well specified: see the
+# makesourcedb tool, which vaguely refers to a format string that may
+# contain spaces, commas, ...
+# We'll do our best.
+
+class Source(dict):
+    pass
+
+class SourceList(list):
+    def __init__(self, filename):
+        # Default format if we can't read one from the file
+        format = (
+            "Name", "Type", "Ra", "Dec", "I", "Q", "U", "V",
+            "ReferenceFrequency='60e6'", "SpectralIndexDegree='0'",
+            "SpectralIndex:0='0.0'", "Major", "Minor", "Phi"
+        )
+        with open(filename, 'r') as file:
+            try:
+                # Maybe the first line is a comma-separated format string...
+                first_line = file.readline().strip()
+                if first_line.split()[-1] == "format":
+                    format = map(str.strip, first_line[3:-10].split(","))
+                else:
+                    raise
+            except:
+                # ...or maybe not.
+                file.seek(0)
+            for line in file:
+                if len(line.strip()) == 0 or line.strip()[0] == '#': continue
+                data = map(str.strip, line.split(','))
+                self.append(Source(zip(format, data)))
+
+class flagger(pyraprunner):
+    def __init__(self):
+        super(flagger, self).__init__()
+        self.optionparser.add_option(
+            '-s', '--skymodel',
+            dest="skymodel",
+            help="initial sky model (in makesourcedb format)"
+        )
+        self.optionparser.add_option(
+            '--n-factor',
+            dest="n_factor",
+            type="float",
+            help="Custom factor for flagging threshold"
+        )
+
+    def _generate_arguments(self):
+        self.inputs['skymodel'] = os.path.join(
+            self.config.get("layout", "parset_directory"),
+            self.inputs['skymodel']
+        )
+        self.logger.info("Using %s for %s skymodel" %
+            (self.inputs['skymodel'], "flagger")
+        )
+        if not os.access(self.inputs['skymodel'], os.R_OK):
+            raise IOError("Sky model %s is not readable" % (self.inputs['skymodel'],))
+
+        sl = SourceList(self.inputs['skymodel'])
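+        # Flagging threshold: n_factor times the total Stokes I flux in the sky model.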
+        return float(self.inputs['n_factor']) * sum(float(s['I']) for s in sl)
+
+if __name__ == '__main__':
+    sys.exit(flagger().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/mwimager.py b/CEP/Pipeline/recipes/sip/master/deprecated/mwimager.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec28e97b68b37daaba1183ec02518a2e23523234
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/mwimager.py
@@ -0,0 +1,214 @@
+from __future__ import with_statement
+import sys, os, tempfile, glob, subprocess, itertools
+from contextlib import closing
+from lofarpipe.support.clusterdesc import ClusterDesc
+
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.lofaringredient import LOFARinput, LOFARoutput
+from lofarpipe.support.group_data import group_files
+import lofarpipe.support.utilities as utilities
+
+class mwimager(LOFARrecipe):
+    def __init__(self):
+        super(mwimager, self).__init__()
+        self.optionparser.add_option(
+            '--executable',
+            dest="executable",
+            help="Executable to be run (ie, mwimager script)"
+        )
+        self.optionparser.add_option(
+            '--initscript',
+            dest="initscript",
+            help="Initscript to source (ie, lofarinit.sh)"
+        )
+        self.optionparser.add_option(
+            '-g', '--g(v)ds-file',
+            dest="gvds",
+            help="G(V)DS file describing data to be processed"
+        )
+        self.optionparser.add_option(
+            '-p', '--parset',
+            dest="parset",
+            help="MWImager configuration parset"
+        )
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+        self.optionparser.add_option(
+            '--log',
+            dest="log",
+            help="Log file"
+        )
+        self.optionparser.add_option(
+            '--askapsoft-path',
+            dest="askapsoft_path",
+            help="Path to cimager.sh"
+        )
+        self.optionparser.add_option(
+            '--casa',
+            dest="casa",
+            help="Use the CASA lwimager",
+            action="store_true"
+        )
+        self.optionparser.add_option(
+            '--makevds-exec',
+            dest="makevds_exec",
+            help="makevds executable"
+        )
+        self.optionparser.add_option(
+            '--combinevds-exec',
+            dest="combinevds_exec",
+            help="combinevds executable"
+        )
+        self.optionparser.add_option(
+            '--max-bands-per-node',
+            dest="max_bands_per_node",
+            help="Maximum number of subbands to farm out to a given cluster node",
+            default="8"
+        )
+
+    def go(self):
+        self.logger.info("Starting MWImager run")
+        super(mwimager, self).go()
+
+        clusterdesc = ClusterDesc(
+            self.config.get('cluster', 'clusterdesc')
+        )
+
+        self.outputs["data"] = []
+
+        # Given a limited number of processes per node, the first task is to
+        # partition up the data for processing.
+        for iteration, data_group in enumerate(group_files(
+            self.logger,
+            clusterdesc,
+            os.path.join(self.inputs['working_directory'], self.inputs['job_name']),
+            int(self.inputs['max_bands_per_node']),
+            self.inputs['args']
+        )):
+            self.logger.info("Calling vdsmaker")
+            vds_file = os.path.join(
+                self.config.get("layout", "vds_directory"), self.inputs['gvds']
+            )
+            self.run_task('vdsmaker', data_group, gvds=vds_file, unlink=False)
+
+            # Patch GVDS filename into parset
+            self.logger.debug("Setting up MWImager configuration")
+            temp_parset_filename = utilities.patch_parset(
+                self.inputs['parset'],
+                {
+                    'dataset': os.path.join(
+                        self.config.get('layout', 'vds_directory'), self.inputs['gvds']
+                    )
+                },
+                self.config.get('layout', 'parset_directory')
+            )
+
+            # Individual subband logs go in a temporary directory
+            # to be sorted out later.
+            log_root = os.path.join(tempfile.mkdtemp(), self.inputs['log'])
+            self.logger.debug("Logs dumped with root %s" % (log_root))
+
+            # Initscript for basic LOFAR utilities
+            env = utilities.read_initscript(self.inputs['initscript'])
+            # Also add the path for cimager.sh
+            env['PATH'] = "%s:%s" % (self.inputs['askapsoft_path'], env['PATH'])
+
+            # For the overall MWImager log
+            log_location = "%s/%s" % (
+                self.config.get('layout', 'log_directory'),
+                self.inputs['log']
+            )
+            self.logger.debug("Logging to %s" % (log_location))
+
+            mwimager_cmd = [
+                self.inputs['executable'],
+                temp_parset_filename,
+                self.config.get('cluster', 'clusterdesc'),
+                os.path.join(
+                    self.inputs['working_directory'],
+                    self.inputs['job_name']
+                ),
+                log_root
+            ]
+            if self.inputs['casa'] is True or self.inputs['casa'] == "True":
+                mwimager_cmd.insert(1, '-casa')
+            try:
+                self.logger.info("Running MWImager")
+                self.logger.debug("Executing: %s" % " ".join(mwimager_cmd))
+                if not self.inputs['dry_run']:
+                    with utilities.log_time(self.logger):
+                        with closing(open(log_location + '-' + str(iteration), 'w')) as log:
+                            result = subprocess.check_call(
+                                mwimager_cmd,
+                                env=env,
+                                stdout=log,
+                                stderr=log,
+                                close_fds=True
+                            )
+                else:
+                    self.logger.info("Dry run: execution skipped")
+                    result = 0
+            except subprocess.CalledProcessError:
+                self.logger.exception("Call to mwimager failed")
+                result = 1
+            finally:
+                os.unlink(temp_parset_filename)
+
+            # Now parse the log files to:
+            # 1: find the name of the images that have been written
+            # 2: save the logs in appropriate places
+            # This is ugly!
+            self.logger.info("Parsing logfiles")
+            for log_file in glob.glob("%s%s" % (log_root, "*")):
+                self.logger.debug("Processing %s" % (log_file))
+                ms_name, image_name = "", ""
+                with closing(open(log_file)) as file:
+                    for line in file.xreadlines():
+                        if 'Cimager.Images.Names' in line.strip():
+                            try:
+                                image_name = line.strip().split("=")[1].lstrip("['").rstrip("]'")
+                                break
+                            except IndexError:
+                                pass
+                    file.seek(0)
+                    for line in file.xreadlines():
+                        split_line = line.split('=')
+                        if split_line[0] == "Cimager.dataset":
+                            ms_name = os.path.basename(split_line[1].rstrip())
+                            break
+                if not image_name:
+                    self.logger.info("Couldn't identify image for %s "% (log_file))
+                else:
+                    self.logger.debug("Found image: %s" % (image_name))
+                    self.outputs["data"].append(image_name)
+                if not ms_name:
+                    self.logger.info("Couldn't identify file for %s" % (log_file))
+                else:
+                    destination = "%s/%s/%s" % (
+                        self.config.get('layout', 'log_directory'),
+                        ms_name,
+                        self.inputs['log']
+                    )
+                    self.logger.debug(
+                        "Moving logfile %s to %s" % (log_file, destination)
+                    )
+                    utilities.move_log(log_file, destination)
+            try:
+                self.logger.debug("Removing temporary log directory")
+                os.rmdir(os.path.dirname(log_root))
+            except OSError, failure:
+                self.logger.info("Failed to remove temporary directory")
+                self.logger.debug(failure)
+                try:
+                    utilities.move_log(os.path.dirname(log_root), log_location)
+                except:
+                    pass
+
+        return result
+
+if __name__ == '__main__':
+    sys.exit(mwimager().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/pyraprunner.py b/CEP/Pipeline/recipes/sip/master/deprecated/pyraprunner.py
new file mode 100644
index 0000000000000000000000000000000000000000..e09b556020895b04049f4be2721ed8f99beba788
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/pyraprunner.py
@@ -0,0 +1,98 @@
+from __future__ import with_statement
+import sys, os
+
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.lofarnode import run_node
+
+class pyraprunner(LOFARrecipe):
+    """
+    Provides the basic infrastructure for applying a pyrap-based filter to
+    data on the cluster, distributed using an IPython task client.
+    """
+    def __init__(self):
+        super(pyraprunner, self).__init__()
+        self.optionparser.add_option(
+            '--suffix',
+            dest="suffix",
+            help="Suffix to add to trimmed data (default: overwrite existing)"
+        )
+
+    def _generate_arguments(self):
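+        # Subclasses override this to supply extra arguments (as a string)
+        # for the remote function call; the default adds none.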
+        return ''
+
+    def go(self):
+        super(pyraprunner, self).go()
+
+        ms_names = self.inputs['args']
+
+        tc, mec = self._get_cluster()
+        function_name = self.__class__.__name__ + "_remote"
+        mec.push_function(
+            {
+                function_name: run_node,
+                "build_available_list": utilities.build_available_list,
+                "clear_available_list": utilities.clear_available_list
+            }
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (
+            self.inputs['job_name'], self.__class__.__name__
+        )
+        mec.push(dict(filenames=ms_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            self.logger.debug("Logging to %s:%d" % (loghost, logport))
+            tasks = []
+            outnames = []
+            for ms_name in ms_names:
+                outnames.append(ms_name + self.inputs['suffix'])
+                execute_string = "result = %s(ms_name, \"%s\", %s)" % (
+                    function_name, outnames[-1], self._generate_arguments()
+                )
+                task = LOFARTask(
+                    execute_string,
+                    push=dict(
+                        recipename=self.name,
+                        nodepath=os.path.dirname(self.__file__.replace('master', 'nodes')),
+                        ms_name=ms_name,
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result",
+                    depend=utilities.check_for_path,
+                    dependargs=(ms_name, available_list)
+                )
+                self.logger.info("Scheduling processing of %s" % (ms_name,))
+                tasks.append(tc.run(task))
+            self.logger.info(
+                "Waiting for all %s tasks to complete" %
+                (self.__class__.__name__)
+            )
+            tc.barrier(tasks)
+
+
+        failure = False
+        for task in tasks:
+            res = tc.get_task_result(task)
+            if res.failure:
+                self.logger.warn("Task %s failed" % (task))
+                self.logger.warn(res)
+                self.logger.warn(res.failure.getTraceback())
+                failure = True
+        if failure:
+            return 1
+
+        mec.execute("clear_available_list(\"%s\")" % (available_list,))
+        self.outputs['data'] = outnames
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(pyraprunner().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/qcheck.py b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck.py
new file mode 100644
index 0000000000000000000000000000000000000000..567f6873875c4829bd49a8b234c335a6d4b2535a
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck.py
@@ -0,0 +1,91 @@
+from __future__ import with_statement
+import sys, os
+
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.clusterlogger import clusterlogger
+
+def run_qcheck(infile, pluginlist, outputdir):
+    from lofarrecipe.nodes.qcheck import qcheck_node
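+    # loghost and logport are not parameters here; they are pushed to the
+    # engine along with the task (see the push dict below) and looked up there.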
+    return qcheck_node(loghost=loghost, logport=logport).run(
+        infile,
+        pluginlist,
+        outputdir
+    )
+
+class qcheck(LOFARrecipe):
+    def __init__(self):
+        super(qcheck, self).__init__()
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+        self.optionparser.add_option(
+            '--plugins',
+            dest="plugins",
+            help="[Expert use] Quality check plugins"
+        )
+
+    def go(self):
+        super(qcheck, self).go()
+        self.logger.info("Quality check system starting")
+
+        self.outputs['data'] = [
+            os.path.join(
+                self.inputs['working_directory'], self.inputs['job_name'], filename
+            )
+            for filename in self.inputs['args']
+        ]
+        plugins = utilities.string_to_list(self.inputs['plugins'])
+        self.logger.info("Using plugins: %s" % (str(plugins)))
+
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                run_qcheck=run_qcheck,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (self.inputs['job_name'], "qcheck")
+        mec.push(dict(filenames=self.outputs['data']))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+        clusterdesc = self.config.get('cluster', 'clusterdesc')
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            self.logger.debug("Logging to %s:%d" % (loghost, logport))
+            tasks = []
+            for image_name in self.outputs['data']:
+                task = LOFARTask(
+                    "result = run_qcheck(infile, pluginlist, outputdir)",
+                    push=dict(
+                        infile=image_name,
+                        pluginlist=plugins,
+                        outputdir=self.config.get('layout', 'results_directory'),
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result",
+                    depend=utilities.check_for_path,
+                    dependargs=(image_name, available_list)
+                )
+                self.logger.info("Scheduling processing of %s" % (image_name,))
+                tasks.append(tc.run(task))
+            self.logger.info("Waiting for all qcheck tasks to complete")
+            tc.barrier(tasks)
+
+            for task in tasks:
+                tc.get_task_result(task)
+
+            mec.execute("clear_available_list(\"%s\")" % (available_list,))
+            self.logger.info("qcheck done")
+
+if __name__ == '__main__':
+    sys.exit(eval(os.path.splitext(os.path.basename(sys.argv[0]))[0])().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/README b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/README
new file mode 100644
index 0000000000000000000000000000000000000000..e7152d38e659459637ff431db2feff799e5cb698
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/README
@@ -0,0 +1,4 @@
+Simple image quality check, designed for use in (deprecated) pipeline qcheck
+recipe.
+
+Original by Evert Rol.
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/__init__.py b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/qcheck.py b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/qcheck.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb99a3cf6425562cadc98111eb8d51a3faf4b873
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/qcheck.py
@@ -0,0 +1,200 @@
+from pyrap import tables as ptables
+import numpy
+import logging
+import sys
+import os.path
+import pylab
+from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+from matplotlib.figure import Figure
+
+
+DEFAULTS = {}
+DEFAULTS['fieldnames'] = ['logtable', 'coords', 'units']
+DEFAULTS['colnames'] = ['map']
+IMAGEDIMS = {'y': 3, 'x': 4, 'polarization': 2, 'channel': 1}
+
+"""
+
+TODO:
+
+  - check for NaNs
+
+"""
+
+
+
+def check_basics(image, loggers):
+    logger = loggers["main"]
+    assert image.ndim == 5, "image does not have 5 dimensions"
+
+
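+# Iteratively sigma-clip an image: keep pixels between mean + clip[0]*sigma
+# and mean + clip[1]*sigma, recursing niter times.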
+def clip_image(image, niter=0, clip=(-3, 3)):
+    if niter > 0:
+        mean = image.mean()
+        sigma = numpy.sqrt(image.var())
+        return clip_image(image[(image > mean+clip[0]*sigma)&(image < mean+clip[1]*sigma)],
+                   niter=niter-1, clip=clip)
+    return image
+
+
+def check_stats(image, filename, loggers, plot=False):
+    BINCOUNTS = 1000
+    MINNBINS = 20
+    CLIPCOUNTS = 0.001
+    NSIGMA = 0.1
+    NMAXSIGMA = 10.0
+    POLAXIS = ['I', 'Q', 'U', 'V']
+    if plot:
+        figure = Figure()
+        canvas = FigureCanvas(figure)
+    npols = image.shape[IMAGEDIMS['polarization']]
+    nchannels = image.shape[IMAGEDIMS['channel']]
+    nsubplotrows = nsubplotcols = int(numpy.sqrt(npols))
+    if nsubplotcols * nsubplotrows < npols:
+        nsubplotrows += 1
+    for npol in range(npols):
+        if plot:
+            axes = figure.add_subplot(nsubplotrows, nsubplotcols, npol+1)
+        for nchan in range(nchannels):
+            twodimage = image[0, nchan, npol, :, :]
+            flatimage = twodimage.flatten()
+            mean = flatimage.mean()
+            variance = flatimage.var()
+            stddev = numpy.sqrt(variance)
+            median = numpy.median(flatimage)
+            imgmin, imgmax = min(flatimage), max(flatimage)
+            loggers["main"].info("%d.%d.minimum = %.4e" % (npol+1, nchan+1,
+                                                        imgmin))
+            loggers["main"].info("%d.%d.maximum = %.4e" % (npol+1, nchan+1, 
+                                                        imgmax))
+            loggers["main"].info("%d.%d.mean = %.4e" % (npol+1, nchan+1, mean))
+            loggers["main"].info("%d.%d.median = %.4e" % (
+                    npol+1, nchan+1, median))
+            loggers["main"].info("%d.%d.Standard deviation = %.4e" % (
+                    npol+1, nchan+1,stddev))
+            # Keep only the bins with a minimum number of counts,
+            # so we can 'fit' a Gaussian distribution to calculate the mode
+            nbins = (int(flatimage.size/BINCOUNTS)
+                     if flatimage.size > 1e5 else MINNBINS)
+            counts, bins = numpy.histogram(flatimage, nbins)
+            clipped = {}
+            clipped['indices'] = counts > max(counts)*CLIPCOUNTS
+            clipped['counts'] = counts[clipped['indices']]
+            clipped['bins'] = bins[clipped['indices']]
+            if plot:
+                axes.plot(bins[numpy.invert(clipped['indices'])],
+                          counts[numpy.invert(clipped['indices'])], 'ob')
+            clippedimage = flatimage[(flatimage >= min(clipped['bins'])) &
+                                     (flatimage <= max(clipped['bins']))]
+            nbins = (int(clippedimage.size/BINCOUNTS)
+                     if clippedimage.size > 1e5 else MINNBINS)
+            counts, bins = numpy.histogram(clippedimage, nbins)
+            bins = (bins[0:-1] + bins[1:])/2.  # new behaviour in numpy 1.2
+            mode = sum(bins * counts)/sum(counts)
+            width = (numpy.sqrt(abs(sum( (bins - mode)**2 * counts) /
+                                    sum(counts))))
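+            # width: counts-weighted RMS deviation from the mode, used as the
+            # Gaussian width when plotting below.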
+            loggers["main"].info("%d.%d.mode = %.4e" % (npol+1, nchan+1, mode))
+            # Also calculate the statistics for a clipped image, ie
+            # only the background (no sources)
+            clippedimage = clip_image(flatimage, niter=3, clip=(-1, 1))
+            mean = clippedimage.mean()
+            variance = clippedimage.var()
+            stddev = numpy.sqrt(variance)
+            median = numpy.median(clippedimage)
+            imgmin, imgmax = min(clippedimage), max(clippedimage)
+            loggers["main"].info("%d.%d.background-minimum = %.4e" % (
+                    npol+1, nchan+1, imgmin))
+            loggers["main"].info("%d.%d.background-maximum = %.4e" % (
+                    npol+1, nchan+1, imgmax))
+            loggers["main"].info("%d.%d.background-mean = %.4e" % (
+                    npol+1, nchan+1, mean))
+            loggers["main"].info("%d.%d.background-median = %.4e" % (
+                    npol+1, nchan+1, median))
+            loggers["main"].info("%d.%d.background-stddev = %.4e" % (
+                    npol+1, nchan+1, stddev))
+            # Verify that mode, background mean & background median are within
+            # a few background sigma from each other:
+            if abs(mean-median) > NSIGMA*stddev:
+                loggers["warn"].warn(
+                        " Background mean and background median are more "
+                        "than %.1f standard deviations different" % NSIGMA)
+            if abs(mean-mode) > NSIGMA*stddev:
+                loggers["warn"].warn(
+                    " Mode and background mean are more than %.1f "
+                    "standard deviations different" % NSIGMA)
+            if abs(mode-median) > NSIGMA*stddev:
+                loggers["warn"].warn(
+                    " Mode and background median are more than %.1f "
+                    "standard deviations different" % NSIGMA)
+            if imgmax < 0:
+                loggers["warn"].warn(" Background maximum is negative")
+            if imgmin > 0:
+                loggers["warn"].warn(" Background minimum is positive")
+            if imgmax > NMAXSIGMA*stddev:
+                loggers["warn"].warn(
+                    " Background maximum is more than %.1f times the "
+                    "standard deviation" % NMAXSIGMA)
+            if imgmin < -NMAXSIGMA*stddev:
+                loggers["warn"].warn(
+                    " Background minimum is less than -%.1f times the "
+                    "standard deviation" % NMAXSIGMA)
+    
+            if plot:
+                axes.plot(bins, counts, 'ob')
+                axes.plot(bins, max(counts) * numpy.exp(-(bins-mode)**2 /
+                                                         (2 * width**2)), '-g')
+    if plot:
+        canvas.print_figure(plot)
+
+    
+def setup_logging(logfile):
+    loggers = {'main': logging.getLogger('main'),
+               'warn': logging.getLogger('warn')}
+    handlers = {'main': logging.FileHandler(logfile, mode="w"),
+                'warn': logging.StreamHandler()}
+    formatters = {'main': logging.Formatter("%(message)s"),
+                  'warn': logging.Formatter("%(levelname)s: %(message)s")}
+    handlers['main'].setFormatter(formatters['main'])
+    handlers['warn'].setFormatter(formatters['warn'])
+    loggers['main'].addHandler(handlers['main'])
+    loggers['warn'].addHandler(handlers['warn'])
+    loggers['main'].setLevel(logging.INFO)
+    loggers['warn'].setLevel(logging.WARNING) # warnings only
+    return loggers
+
+    
+def run(filename, logfile=None, plot=False, outputdir=False, loggers=False):
+    if not logfile:
+        logfile = filename + "_stats.log"
+    if not isinstance(plot, basestring):
+        plot = filename + "_histo.pdf"
+    if outputdir:
+        plot = os.path.join(outputdir, os.path.basename(plot))
+    if not loggers:
+        loggers = setup_logging(logfile)
+    try:
+        table = ptables.table(filename, ack=False)
+    except RuntimeError:  # pyrap is just a wrapper around C++, so no proper exceptions are thrown
+        loggers['main'].error("Error: image %s not properly opened" % filename)
+        return
+    names = {}
+    for part in ('col', 'field'):
+        partname = part + 'names'
+        names[part] = table.__getattribute__(partname)()
+        for defaultname in DEFAULTS[partname]:
+            if defaultname not in names[part]:
+                # use 'warn' logger instead? 
+                # But script can't continue with this fault,
+                # so should quit
+                raise KeyError("%s not in %snames" % (defaultname, part))
+    imgcol = table.col('map')
+    image = imgcol.getcol()
+    check_basics(image, loggers)
+    check_stats(image, filename, loggers, plot=plot)
+
+
+if __name__ == '__main__':
+    args = sys.argv[1:]
+    if len(args) != 1:
+        sys.exit(1)
+    run(args[0], plot=True)
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/sextractor.py b/CEP/Pipeline/recipes/sip/master/deprecated/sextractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4f566c383a23c7107a3807fc3c280563acb1e97
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/sextractor.py
@@ -0,0 +1,92 @@
+import sys, os
+
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.lofaringredient import LOFARinput, LOFARoutput
+from lofarpipe.support.ipython import LOFARTask
+import lofarpipe.support.utilities as utilities
+
+from tkp_lib.dataset import DataSet
+
+def sextract(image, dataset):
+    # Run on engine to source extract
+    from lofarrecipe.nodes.sextractor import sextract
+    return sextract(image, dataset)
+
+class sextractor(LOFARrecipe):
+    def __init__(self):
+        super(sextractor, self).__init__()
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+
+    def go(self):
+        self.logger.info("Starting source extraction run")
+        super(sextractor, self).go()
+
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                sextract=sextract,
+                build_available_list=utilities.build_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        # We read the GVDS file to find the names of all the data files we're
+        # going to process, then push this list out to the engines so they can
+        # let us know which we have available
+        image_names = [
+            "%s/%s" % (self._input_or_default('working_directory'), image)
+            for image in self.inputs['args']
+        ]
+
+        # Construct list of available files on engines
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (self.inputs['job_name'], "sextractor")
+        mec.push(dict(filenames=image_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+
+        tasks = []
+
+        dataset = DataSet(self.inputs['job_name'])
+
+        for image_name in image_names:
+            task = LOFARTask(
+                "result = sextract(image_name, dataset)",
+                push=dict(
+                    image_name=image_name,
+                    dataset=dataset,
+                ),
+                pull="result",
+                depend=utilities.check_for_path,
+                dependargs=(image_name, available_list)
+            )
+            self.logger.info("Scheduling processing of %s" % (image_name,))
+            tasks.append(tc.run(task))
+        self.logger.info("Waiting for all source extraction tasks to complete")
+        tc.barrier(tasks)
+        for task in tasks:
+            ##### Print failing tasks?
+            ##### Abort if all tasks failed?
+            res = tc.get_task_result(task)
+            self.logger.info(res)
+            if res.failure:
+                print res.failure
+
+        mec.push_function(
+            dict(
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        # Save space on engines by clearing out old file lists
+        mec.execute("clear_available_list(\"%s\")" % (available_list,))
+
+        self.logger.info("Source extraction done")
+
+if __name__ == '__main__':
+    sys.exit(sextractor().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/simple_se.py b/CEP/Pipeline/recipes/sip/master/deprecated/simple_se.py
new file mode 100644
index 0000000000000000000000000000000000000000..57fdec55ff2bb2040d00ca98329df8f6645783dc
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/simple_se.py
@@ -0,0 +1,122 @@
+from __future__ import with_statement
+from contextlib import closing
+
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+import lofarpipe.support.utilities as utilities
+import os, os.path, glob, subprocess, sys, numpy, shutil, errno, re
+
+# SE tools
+from tkp_lib.dataset   import DataSet
+from tkp_lib.image     import ImageData
+from tkp_lib.accessors import FitsFile
+from tkp_lib.dbplots   import plotAssocCloudByXSource
+from tkp_lib.dbregion  import createRegionByImage
+import tkp_lib.database as database
+
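+# For each extracted source in an image, list any associated catalogue
+# source and the association distance in arcseconds.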
+associations = """
+SELECT
+    x1.xtrsrcid, x1.ra, x1.decl, x1.i_peak, x1.i_int, c.catname, c1.ra, c1.decl, a1.assoc_distance_arcsec
+FROM
+    extractedsources x1
+LEFT OUTER JOIN
+    assoccatsources a1 ON x1.xtrsrcid = a1.xtrsrc_id
+LEFT OUTER JOIN
+    catalogedsources c1 ON a1.assoc_catsrc_id = c1.catsrcid
+LEFT OUTER JOIN
+    catalogs c ON c.catid = c1.cat_id
+WHERE
+    image_id = %d
+ORDER BY
+    x1.I_Peak;
+"""
+
+class simple_se(LOFARrecipe):
+    """
+    Run source extraction on FITS images on the front-end.
+    Dump ds9 region files of found sources & WENSS sources.
+    Dump text file of associations with catalogue sources.
+
+    Designed to be run e.g. on an averaged image at the end of a pipeline run.
+    """
+
+    def __init__(self):
+        super(simple_se, self).__init__()
+        self.optionparser.add_option(
+            '--detected-regions',
+            dest="detected_regions",
+            help="Filename for region file of local detections",
+            default="detected.reg"
+        )
+        self.optionparser.add_option(
+            '--wenss-regions',
+            dest="wenss_regions",
+            help="Filename for region file of WENSS detections",
+            default="wenss.reg"
+        )
+        self.optionparser.add_option(
+            '--associations',
+            dest="associations",
+            help="Filename for association list",
+            default="association.list"
+        )
+
+    def go(self):
+        self.logger.info("Starting source identification")
+        super(simple_se, self).go()
+
+        ds_name = "%s-%s" % (self.inputs['job_name'], self.inputs['start_time'])
+        self.logger.info("Creating dataset %s" % (ds_name,))
+        dataset = DataSet(ds_name)
+        src_ids = []
+        for file in self.inputs['args']:
+            self.logger.info("Processing %s" % (file,))
+            image = ImageData(FitsFile(file), dataset=dataset)
+            self.logger.info("Running source finder")
+            sr = image.sextract(det=5, anl=2)
+            with closing(database.connection()) as con:
+                self.logger.debug("Saving results to database")
+                sr.savetoDB(con)
+                self.logger.info("Generating source associations")
+                database.assocXSrc2XSrc(image.id, con)
+                database.assocXSrc2Cat(image.id, con)
+                self.logger.info("Querying for region file")
+                createRegionByImage(image.id[0], con,
+                    os.path.join(
+                        os.path.dirname(file),
+                        self.inputs['detected_regions']
+                    ), logger=self.logger
+                )
+                with closing(con.cursor()) as cur:
+                    self.logger.info("Querying for association list")
+                    my_query = associations % (image.id)
+                    self.logger.debug(my_query)
+                    cur.execute(my_query)
+                    with open(
+                        os.path.join(
+                            os.path.dirname(file),
+                            self.inputs['associations']
+                        ),
+                        'w'
+                    ) as output_file:
+                        for line in cur.fetchall():
+                            output_file.write(str(line) + '\n')
+                            src_ids.append(line[0])
+
+        # Diagnostic plot for each extracted source
+        self.logger.info("Generating associations plots")
+        # Use set to uniqify the list of src_ids
+        src_ids = list(set(src_ids))
+        with closing(database.connection()) as con:
+            for src_id in src_ids:
+                self.logger.debug("Generating associations plot for src %d" % src_id)
+                plotAssocCloudByXSource(
+                    src_id, con, os.path.dirname(self.inputs['args'][0])
+                )
+
+            self.outputs['data'] = None
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(eval(os.path.splitext(os.path.basename(sys.argv[0]))[0])().main())
+
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/trimmer.py b/CEP/Pipeline/recipes/sip/master/deprecated/trimmer.py
new file mode 100644
index 0000000000000000000000000000000000000000..71b5b16e8c042f80d5d450e7092821bd8b30c38a
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/trimmer.py
@@ -0,0 +1,27 @@
+import sys
+from lofarpipe.support.pyraprunner import pyraprunner
+
+class trimmer(pyraprunner):
+    def __init__(self):
+        super(trimmer, self).__init__()
+        self.optionparser.add_option(
+            '--start-seconds',
+            dest="start_seconds",
+            type="float",
+            help="Seconds to trim from start of data"
+        )
+        self.optionparser.add_option(
+            '--end-seconds',
+            dest="end_seconds",
+            type="float",
+            help="Seconds to trim from end of data"
+        )
+
+    def _generate_arguments(self):
+        return "%f, %f" %  (
+            float(self.inputs['start_seconds']),
+            float(self.inputs['end_seconds'])
+        )
+
+if __name__ == '__main__':
+    sys.exit(trimmer().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/vdsmaker.py b/CEP/Pipeline/recipes/sip/master/deprecated/vdsmaker.py
new file mode 100644
index 0000000000000000000000000000000000000000..0df58c549ff590b4247f440e31c34a48c30817ff
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/vdsmaker.py
@@ -0,0 +1,175 @@
+from __future__ import with_statement
+import sys, os, tempfile, errno
+import subprocess
+
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.lofarnode import run_node
+
+class vdsmaker(LOFARrecipe):
+    def __init__(self):
+        super(vdsmaker, self).__init__()
+        self.optionparser.add_option(
+            '-g', '--gvds',
+            dest="gvds",
+            help="Output file name"
+        )
+        self.optionparser.add_option(
+            '--directory',
+            dest="directory",
+            help="Directory for output files"
+        )
+        self.optionparser.add_option(
+            '--makevds',
+            dest="makevds",
+            help="makevds executable",
+            default="/opt/LofIm/daily/lofar/bin/makevds"
+        )
+        self.optionparser.add_option(
+            '--combinevds',
+            dest="combinevds",
+            help="combinevds executable",
+            default="/opt/LofIm/daily/lofar/bin/combinevds"
+        )
+        self.optionparser.add_option(
+            '--unlink',
+            help="Unlink VDS files after combining",
+            default="True"
+        )
+
+    def go(self):
+        super(vdsmaker, self).go()
+
+        ms_names = self.inputs['args']
+        if self.inputs['unlink'] == "False":
+            self.inputs['unlink'] = False
+
+        try:
+            os.makedirs(self.inputs['directory'])
+        except OSError, failure:
+            if failure.errno != errno.EEXIST:
+                raise
+
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                make_vds=run_node,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        # Build VDS files for each of the newly created MeasurementSets
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (self.inputs['job_name'], "dppp-vds")
+        mec.push(dict(filenames=ms_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+        clusterdesc = self.config.get('cluster', 'clusterdesc')
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            self.logger.debug("Logging to %s:%d" % (loghost, logport))
+            tasks = []
+            vdsnames = []
+            for ms_name in ms_names:
+                vdsnames.append(
+                    "%s/%s.vds" % (self.inputs['directory'], os.path.basename(ms_name))
+                )
+                task = LOFARTask(
+                    "result = make_vds(ms_name, clusterdesc, vds_name, executable)",
+                    push=dict(
+                        recipename=self.name,
+                        nodepath=os.path.dirname(self.__file__.replace('master', 'nodes')),
+                        ms_name=ms_name,
+                        vds_name=vdsnames[-1],
+                        clusterdesc=clusterdesc,
+                        executable=self.inputs['makevds'],
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result",
+                    depend=utilities.check_for_path,
+                    dependargs=(ms_name, available_list)
+                )
+                self.logger.info("Scheduling processing of %s" % (ms_name,))
+                tasks.append(tc.run(task))
+            self.logger.info("Waiting for all makevds tasks to complete")
+            tc.barrier(tasks)
+
+        # Save space on engines by clearing out old file lists
+        mec.execute("clear_available_list(\"%s\")" % (available_list,))
+        failure = False
+        for task in tasks:
+            res = tc.get_task_result(task)
+            if res.failure:
+                self.logger.warn("Task %s failed" % (task))
+                self.logger.warn(res)
+                self.logger.warn(res.failure.getTraceback())
+                failure = True
+        if failure:
+            return 1
+
+        # Combine VDS files to produce GDS
+        self.logger.info("Combining VDS files")
+        executable = self.inputs['combinevds']
+        gvds_out = self.inputs['gvds']
+        try:
+            command = [executable, gvds_out] + vdsnames
+            combineproc = subprocess.Popen(
+                command,
+                close_fds=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE
+            )
+            sout, serr = combineproc.communicate()
+            if combineproc.returncode != 0:
+                raise subprocess.CalledProcessError(combineproc.returncode, command)
+            self.outputs['gvds'] = gvds_out
+        except subprocess.CalledProcessError, cpe:
+            self.logger.exception("combinevds failed with status %d: %s" % (cpe.returncode, serr))
+            failure = True
+        except OSError, failure:
+            self.logger.warn("Caught OSError")
+            try:
+                if failure.errno == errno.EMFILE:
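+                    # Too many open files: count and log the open descriptors
+                    # to help diagnose the leak.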
+                    count = 0
+                    for x in xrange(0, os.sysconf('SC_OPEN_MAX')):
+                        try:
+                            self.logger.debug("open file %d: %s" (x, str(os.fstat(x))))
+                            count += 1
+                        except:
+                            pass
+                    self.logger.info("Had %d open files" % (count,))
+                elif failure.errno == errno.ENOMEM:
+                    self.logger.info("Failed to run: %s" % str(command))
+                    import operator
+                    total = reduce(operator.add, (len(x) for x in command))
+                    self.logger.debug("Num args: %d, num characters: %d" % (len(command), total))
+                    try:
+                        p = subprocess.Popen(['free'], stdout=subprocess.PIPE)
+                        sout, serr = p.communicate()
+                        self.logger.debug(sout)
+                    except:
+                        self.logger.warn("Failed to spawn free")
+                    self.logger.exception(failure)
+                else:
+                    self.logger.exception(failure)
+            finally:
+                failure = True
+        finally:
+            if self.inputs["unlink"]:
+                self.logger.debug("Unlinking temporary files")
+                for file in vdsnames:
+                    os.unlink(file)
+            self.logger.info("vdsmaker done")
+        if failure:
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(vdsmaker().main())
diff --git a/CEP/Pipeline/recipes/sip/master/flag_baseline.py b/CEP/Pipeline/recipes/sip/master/flag_baseline.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fc16ab9c48f444f3231e4a39f2505ddfd2a41f1
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/flag_baseline.py
@@ -0,0 +1,87 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                        Baseline flagger recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from tempfile import mkstemp
+from cPickle import dump
+import sys, os
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.remotecommand import ComputeJob
+
+class flag_baseline(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Accept a list of baselines (in the format used by NDPPP logging).
+
+    Flag them in all MeasurementSets.
+    """
+    inputs = {
+        'baselines': ingredient.ListField(
+            '--baselines',
+            help="Baselines (in NDPPP format, eg 1&1)"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField()
+    }
+
+    def go(self):
+        self.logger.info("Starting flag_baseline run")
+        super(flag_baseline, self).go()
+
+        #       Serialise list of baselines to disk for compute nodes to pick up
+        # ----------------------------------------------------------------------
+        fd, baseline_filename = mkstemp(
+            dir=self.config.get("layout", "job_directory")
+        )
+        baseline_file = os.fdopen(fd, "w")
+        dump(self.inputs["baselines"], baseline_file)
+        baseline_file.close()
+
+        #                 try block ensures baseline_filename is always unlinked
+        # ----------------------------------------------------------------------
+        try:
+            #                       Load file <-> compute node mapping from disk
+            # ------------------------------------------------------------------
+            self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+            data = load_data_map(self.inputs['args'][0])
+
+            command = "python %s" % (self.__file__.replace('master', 'nodes'))
+            jobs = []
+            for host, ms in data:
+                jobs.append(
+                    ComputeJob(
+                        host, command,
+                        arguments=[
+                            ms,
+                            baseline_filename
+                        ]
+                    )
+                )
+            self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        finally:
+            os.unlink(baseline_filename)
+
+        if self.error.isSet():
+            return 1
+        else:
+            self.outputs['mapfile'] = self.inputs['args'][0]
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(flag_baseline().main())
diff --git a/CEP/Pipeline/recipes/sip/master/make_flaggable.py b/CEP/Pipeline/recipes/sip/master/make_flaggable.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecc1d7b9b58fd8820a48379ddee04cc20b03ca84
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/make_flaggable.py
@@ -0,0 +1,68 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                Make an MS flaggable; wraps makeFLAGwritable (but doesn't fork)
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import sys, os
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.remotecommand import ComputeJob
+
+class make_flaggable(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Update the storage manager on an MS to make the flag column writable.
+    """
+    inputs = {
+        'makeflagwritable': ingredient.ExecField(
+            '--makeFLAGwritable',
+            help="Path to makeFLAGwritable script",
+            default='/opt/LofIm/daily/lofar/bin/makeFLAGwritable'
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField()
+    }
+
+    def go(self):
+        self.logger.info("Starting make_flaggable run")
+        super(make_flaggable, self).go()
+
+        #                       Load file <-> compute node mapping from disk
+        # ------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host, ms in data:
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        ms,
+                        self.inputs['makeflagwritable']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        if self.error.isSet():
+            return 1
+        else:
+            self.outputs['mapfile'] = self.inputs['args'][0]
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(make_flaggable().main())
diff --git a/CEP/Pipeline/recipes/sip/master/new_dppp.py b/CEP/Pipeline/recipes/sip/master/new_dppp.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fb9148975e28d1b6b117d674c33a36317a0afe9
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/new_dppp.py
@@ -0,0 +1,172 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                         New DPPP recipe: fixed node allocation
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+from itertools import cycle
+from contextlib import nested
+from collections import defaultdict
+
+import collections
+import sys
+import os
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.parset import Parset
+
+class new_dppp(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Runs DPPP (either ``NDPPP`` or -- in the unlikely event it's required --
+    ``IDPPP``) on a number of MeasurementSets. This is used for compressing
+    and/or flagging data.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            help="The full path to the relevant DPPP executable"
+        ),
+        'initscript': ingredient.FileField(
+            '--initscript',
+            help="The full path to an (Bourne) shell script which will intialise the environment (ie, ``lofarinit.sh``)"
+        ),
+        'parset': ingredient.FileField(
+            '-p', '--parset',
+            help="The full path to a DPPP configuration parset. The ``msin`` and ``msout`` keys will be added by this recipe"
+        ),
+        'suffix': ingredient.StringField(
+            '--suffix',
+            default=".dppp",
+            help="Added to the input filename to generate the output filename"
+        ),
+        'working_directory': ingredient.StringField(
+            '-w', '--working-directory',
+            help="Working directory used on output nodes. Results will be written here"
+        ),
+        # NB times are read from the VDS file as strings
+        'data_start_time': ingredient.StringField(
+            '--data-start-time',
+            default="None",
+            help="Start time to be passed to DPPP; used to pad data"
+        ),
+        'data_end_time': ingredient.StringField(
+            '--data-end-time',
+            default="None",
+            help="End time to be passed to DPPP; used to pad data"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            default=8,
+            help="Maximum number of simultaneous processes per output node"
+        ),
+        'nthreads': ingredient.IntField(
+            '--nthreads',
+            default=2,
+            help="Number of threads per (N)DPPP process"
+        ),
+        'mapfile': ingredient.StringField(
+            '--mapfile',
+            help="Filename into which a mapfile describing the output data will be written"
+        ),
+        'clobber': ingredient.BoolField(
+            '--clobber',
+            default=False,
+            help="If ``True``, pre-existing output files will be removed before processing starts. If ``False``, the pipeline will abort if files already exist with the appropriate output filenames"
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField(
+            help="The full path to a mapfile describing the processed data"
+        ),
+        'fullyflagged': ingredient.ListField(
+            help="A list of all baselines which were completely flagged in any of the input MeasurementSets"
+        )
+    }
+
+
+    def go(self):
+        self.logger.info("Starting DPPP run")
+        super(new_dppp, self).go()
+
+        #                Keep track of "Total flagged" messages in the DPPP logs
+        # ----------------------------------------------------------------------
+        self.logger.searchpatterns["fullyflagged"] = "Fully flagged baselines"
+
+        #                            Load file <-> output node mapping from disk
+        # ----------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+
+        #       We can use the same node script as the "old" IPython dppp recipe
+        # ----------------------------------------------------------------------
+        command = "python %s" % (
+            self.__file__.replace('master', 'nodes').replace('new_dppp', 'dppp')
+        )
+        outnames = collections.defaultdict(list)
+        jobs = []
+        for host, ms in data:
+            outnames[host].append(
+                os.path.join(
+                    self.inputs['working_directory'],
+                    self.inputs['job_name'],
+                    os.path.basename(ms.rstrip('/')) + self.inputs['suffix']
+                )
+            )
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        ms,
+                        outnames[host][-1],
+                        self.inputs['parset'],
+                        self.inputs['executable'],
+                        self.inputs['initscript'],
+                        self.inputs['data_start_time'],
+                        self.inputs['data_end_time'],
+                        self.inputs['nthreads'],
+                        self.inputs['clobber']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        #                                  Log number of fully flagged baselines
+        # ----------------------------------------------------------------------
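+        #     Each matched log line is assumed to look something like
+        #     "Fully flagged baselines: 0&1; 0&2"; we split on ";" and strip
+        #     the leading text (and stray punctuation) from each pair before
+        #     counting it. The exact pair format is whatever DPPP emits.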
+        matches = self.logger.searchpatterns["fullyflagged"].results
+        self.logger.searchpatterns.clear() # finished searching
+        stripchars = "".join(set("Fully flagged baselines: "))
+        baselinecounter = defaultdict(lambda: 0)
+        for match in matches:
+            for pair in (
+                pair.strip(stripchars) for pair in match.getMessage().split(";")
+            ):
+                baselinecounter[pair] += 1
+        self.outputs['fullyflagged'] = baselinecounter.keys()
+
+        if self.error.isSet():
+            self.logger.warn("Failed DPPP process detected")
+            return 1
+        else:
+            parset = Parset()
+            for host, filenames in outnames.iteritems():
+                parset.addStringVector(host, filenames)
+            parset.writeFile(self.inputs['mapfile'])
+            self.outputs['mapfile'] = self.inputs['mapfile']
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(new_dppp().main())
diff --git a/CEP/Pipeline/recipes/sip/master/new_vdsmaker.py b/CEP/Pipeline/recipes/sip/master/new_vdsmaker.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f593a9de6d4344bf4fb04db82bef60d69934a93
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/new_vdsmaker.py
@@ -0,0 +1,140 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                     New vdsmaker recipe: fixed node allocation
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import sys
+import os
+import tempfile
+import errno
+import subprocess
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.pipelinelogging import log_process_output
+
+class new_vdsmaker(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Generate a GVDS file (and, optionally, individual VDS files per subband;
+    see the ``unlink`` input parameter) describing a collection of
+    MeasurementSets.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'gvds': ingredient.StringField(
+            '-g', '--gvds',
+            help="File name for output GVDS file"
+        ),
+        'directory': ingredient.DirectoryField(
+            '--directory',
+            help="Directory for output GVDS file"
+        ),
+        'makevds': ingredient.ExecField(
+            '--makevds',
+            help="Full path to makevds executable"
+        ),
+        'combinevds': ingredient.ExecField(
+            '--combinevds',
+            help="Full path to combinevds executable"
+        ),
+        'unlink': ingredient.BoolField(
+            '--unlink',
+            help="Unlink VDS files after combining",
+            default=True
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'gvds': ingredient.FileField()
+    }
+
+    def go(self):
+        super(new_vdsmaker, self).go()
+
+        #                           Load file <-> compute node mapping from disk
+        # ----------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+        command = "python %s" % (
+            self.__file__.replace('master', 'nodes').replace('new_vdsmaker', 'vdsmaker')
+        )
+        jobs = []
+        vdsnames = []
+        for host, ms in data:
+            vdsnames.append(
+                "%s/%s.vds" % (self.inputs['directory'], os.path.basename(ms.rstrip('/')))
+            )
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        ms,
+                        self.config.get('cluster', 'clusterdesc'),
+                        vdsnames[-1],
+                        self.inputs['makevds']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        if self.error.isSet():
+            self.logger.warn("Failed vdsmaker process detected")
+            return 1
+
+        # Combine VDS files to produce GDS
+        failure = False
+        self.logger.info("Combining VDS files")
+        executable = self.inputs['combinevds']
+        gvds_out = self.inputs['gvds']
+        try:
+            command = [executable, gvds_out] + vdsnames
+            combineproc = subprocess.Popen(
+                command,
+                close_fds=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE
+            )
+            sout, serr = combineproc.communicate()
+            log_process_output(executable, sout, serr, self.logger)
+            if combineproc.returncode != 0:
+                raise subprocess.CalledProcessError(combineproc.returncode, command)
+            self.outputs['gvds'] = gvds_out
+        except subprocess.CalledProcessError, cpe:
+            self.logger.exception("combinevds failed with status %d: %s" % (cpe.returncode, serr))
+            failure = True
+        except OSError, e:
+            self.logger.error("Failed to spawn combinevds (%s)" % str(e))
+            failure = True
+        finally:
+            if self.inputs["unlink"]:
+                self.logger.debug("Unlinking temporary files")
+                for file in vdsnames:
+                    os.unlink(file)
+            self.logger.info("vdsmaker done")
+        if failure:
+            self.logger.info("Failure was set")
+            return 1
+        elif not self.outputs.complete():
+            self.logger.info("Outputs incomplete")
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(new_vdsmaker().main())
diff --git a/CEP/Pipeline/recipes/sip/master/parmdb.py b/CEP/Pipeline/recipes/sip/master/parmdb.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcd8d3aa93081c3706db9f047828fbe9c425e069
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/parmdb.py
@@ -0,0 +1,115 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                                  parmdb recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import os
+import sys
+import subprocess
+import shutil
+import tempfile
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.pipelinelogging import log_process_output
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+
+template = """
+create tablename="%s"
+adddef Gain:0:0:Ampl  values=1.0
+adddef Gain:1:1:Ampl  values=1.0
+adddef Gain:0:0:Real  values=1.0
+adddef Gain:1:1:Real  values=1.0
+adddef DirectionalGain:0:0:Ampl  values=1.0
+adddef DirectionalGain:1:1:Ampl  values=1.0
+adddef DirectionalGain:0:0:Real  values=1.0
+adddef DirectionalGain:1:1:Real  values=1.0
+adddef AntennaOrientation values=5.497787144
+quit
+"""
+
+class parmdb(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Add a parameter database to input MeasurementSets.
+
+    This recipe is called by the :class:`bbs.bbs` recipe; it may also be used
+    standalone.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            help="Full path to parmdbm executable",
+            default="/opt/LofIm/daily/lofar/bin/parmdbm"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField()
+    }
+
+    def go(self):
+        self.logger.info("Starting parmdb run")
+        super(parmdb, self).go()
+
+        self.logger.info("Generating template parmdb")
+        pdbdir = tempfile.mkdtemp(
+            dir=self.config.get("layout", "job_directory")
+        )
+        pdbfile = os.path.join(pdbdir, 'instrument')
+
+        try:
+            parmdbm_process = subprocess.Popen(
+                [self.inputs['executable']],
+                stdin=subprocess.PIPE,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE
+            )
+            sout, serr = parmdbm_process.communicate(template % pdbfile)
+            log_process_output("parmdbm", sout, serr, self.logger)
+        except OSError, e:
+            self.logger.error("Failed to spawn parmdbm: %s" % str(e))
+            return 1
+
+        #                     try-finally block to always remove temporary files
+        # ----------------------------------------------------------------------
+        try:
+            #                       Load file <-> compute node mapping from disk
+            # ------------------------------------------------------------------
+            self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+            data = load_data_map(self.inputs['args'][0])
+
+            command = "python %s" % (self.__file__.replace('master', 'nodes'))
+            jobs = []
+            for host, ms in data:
+                jobs.append(
+                    ComputeJob(host, command, arguments=[ms, pdbfile])
+                )
+            self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        finally:
+            self.logger.debug("Removing template parmdb")
+            shutil.rmtree(pdbdir, ignore_errors=True)
+
+        if self.error.isSet():
+            self.logger.warn("Detected failed parmdb job")
+            return 1
+        else:
+            self.outputs['mapfile'] = self.inputs['args'][0]
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(parmdb().main())
diff --git a/CEP/Pipeline/recipes/sip/master/rficonsole.py b/CEP/Pipeline/recipes/sip/master/rficonsole.py
new file mode 100644
index 0000000000000000000000000000000000000000..a87a9d4dd17bb259eef82cbb60f0563045298aa2
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/rficonsole.py
@@ -0,0 +1,127 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                  rficonsole (AOflagger) recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+from contextlib import nested
+from collections import defaultdict
+
+import sys
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.group_data import load_data_map
+
+class rficonsole(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    The rficonsole recipe runs the rficonsole executable (flagger) across one
+    or more MeasurementSets.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            default="/opt/LofIm/daily/lofar/bin/rficonsole",
+            help="Full path to rficonsole executable"
+        ),
+        'strategy': ingredient.FileField(
+            '--strategy',
+            help="Full path to RFI strategy file",
+            optional=True
+        ),
+        'indirect_read': ingredient.BoolField(
+            '--indirect-read',
+            default=False,
+            help="Indirect baseline reader: re-write MS for efficiency"
+        ),
+        'skip_flagged': ingredient.BoolField(
+            '--skip-flagged',
+            default=True,
+            help="Ignore any MeasurementSet which has been flagged completely"
+        ),
+        'working_dir': ingredient.StringField(
+            '--working-dir',
+            default='/tmp',
+            help="Temporary rficonsole products are stored under this root on each of the remote machines. This directory should therefore be writable on each machine, but need not be shared across hosts"
+        ),
+        'nthreads': ingredient.IntField(
+            '--nthreads',
+            default=8,
+            help="Number of threads per rficonsole process"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            default=1,
+            help="Maximum number of simultaneous processes per node"
+        ),
+        'nmeasurementsets': ingredient.IntField(
+            '--nmeasurementsets',
+            optional=True,
+            help="Maximum number of MeasurementSets processed by a single rficonsole process"
+        ),
+    }
+
+    def go(self):
+        self.logger.info("Starting rficonsole run")
+        super(rficonsole, self).go()
+
+        #                           Load file <-> compute node mapping from disk
+        # ----------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+        #        Jobs being dispatched to each host are arranged in a dict. Each
+        #           entry in the dict is a list of lists of filenames to process.
+        # ----------------------------------------------------------------------
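+        #      For example, with nmeasurementsets=2 the structure might end up
+        #      as {'lce003': [['a.MS', 'b.MS'], ['c.MS']]} (names invented).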
+        hostlist = defaultdict(lambda: list([[]]))
+        for host, filename in data:
+            if (
+                self.inputs.has_key('nmeasurementsets') and
+                len(hostlist[host][-1]) >= self.inputs['nmeasurementsets']
+            ):
+                hostlist[host].append([filename])
+            else:
+                hostlist[host][-1].append(filename)
+
+        if self.inputs.has_key('strategy'):
+            strategy = self.inputs['strategy']
+        else:
+            strategy = None
+
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host, file_lists in hostlist.iteritems():
+            for file_list in file_lists:
+                jobs.append(
+                    ComputeJob(
+                        host, command,
+                        arguments=[
+                            self.inputs['executable'],
+                            self.inputs['nthreads'],
+                            strategy,
+                            self.inputs['indirect_read'],
+                            self.inputs['skip_flagged'],
+                            self.inputs['working_dir']
+                        ] + file_list
+                    )
+                )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        if self.error.isSet():
+            self.logger.warn("Failed rficonsole process detected")
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(rficonsole().main())
diff --git a/CEP/Pipeline/recipes/sip/master/skymodel.py b/CEP/Pipeline/recipes/sip/master/skymodel.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cfc667ba8223877b31abc3d56abda163dcff747
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/skymodel.py
@@ -0,0 +1,198 @@
+from __future__ import with_statement
+from contextlib import closing
+
+import sys
+
+import monetdb.sql as db
+from monetdb.sql import Error as Error
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+
+header_line = """\
+#(Name, Type, Ra, Dec, I, Q, U, V, MajorAxis, MinorAxis, Orientation, ReferenceFrequency='60e6', SpectralIndexDegree='0', SpectralIndex:0='0.0', SpectralIndex:1='0.0') = format
+"""
+
+query_skymodel = """
+SELECT t0.catsrcname, t0.src_type, ra2bbshms(t0.ra), decl2bbsdms(t0.decl), t0.i, t0.q, t0.u, t0.v, t0.MajorAxis, t0.MinorAxis, t0.Orientation, t0.ReferenceFrequency, t0.SpectralIndexDegree, t0.SpectralIndex_0
+FROM (
+    SELECT CAST(
+        TRIM(c1.catsrcname) AS VARCHAR(20)
+    ) AS catsrcname,
+    CASE WHEN c1.pa IS NULL
+        THEN CAST('POINT' AS VARCHAR(20))
+        ELSE CAST('GAUSSIAN' AS VARCHAR(20))
+    END AS src_type,
+    CAST(c1.ra AS VARCHAR(20)) AS ra,
+    CAST(c1.decl AS VARCHAR(20)) AS decl,
+    CAST(c1.i_int_avg AS VARCHAR(20)) AS i,
+    CAST(0 AS VARCHAR(20)) AS q,
+    CAST(0 AS VARCHAR(20)) AS u,
+    CAST(0 AS VARCHAR(20)) AS v,
+    CASE WHEN c1.pa IS NULL
+        THEN CAST('' AS VARCHAR(20))
+        ELSE CASE WHEN c1.major IS NULL
+            THEN CAST('' AS VARCHAR(20))
+            ELSE CAST(c1.major AS varchar(20))
+        END
+    END AS MajorAxis,
+    CASE WHEN c1.pa IS NULL
+        THEN CAST('' AS VARCHAR(20))
+        ELSE CASE WHEN c1.minor IS NULL
+            THEN CAST('' AS VARCHAR(20))
+            ELSE CAST(c1.minor AS varchar(20))
+        END
+    END AS MinorAxis,
+    CASE WHEN c1.pa IS NULL
+        THEN CAST('' AS VARCHAR(20))
+        ELSE CAST(c1.pa AS varchar(20))
+    END AS Orientation,
+    CAST(c1.freq_eff AS VARCHAR(20)) AS ReferenceFrequency,
+    CASE WHEN si.spindx_degree IS NULL
+        THEN CAST('' AS VARCHAR(20))
+        ELSE CAST(si.spindx_degree AS VARCHAR(20))
+    END AS SpectralIndexDegree,
+    CASE WHEN si.spindx_degree IS NULL
+        THEN CASE WHEN si.c0 IS NULL
+            THEN CAST(0 AS varchar(20))
+            ELSE CAST(si.c0 AS varchar(20))
+        END
+        ELSE CASE WHEN si.c0 IS NULL
+            THEN CAST('' AS varchar(20))
+            ELSE CAST(si.c0 AS varchar(20))
+        END
+    END AS SpectralIndex_0,
+    CASE WHEN si.c1 IS NULL
+        THEN CAST('' AS varchar(20))
+        ELSE CAST(si.c1 AS varchar(20))
+    END AS SpectralIndex_1
+    FROM catalogedsources c1
+    LEFT OUTER JOIN spectralindices si ON c1.catsrcid = si.catsrc_id
+        WHERE c1.cat_id BETWEEN %s AND %s
+        AND c1.ra BETWEEN %s AND %s
+        AND c1.decl BETWEEN %s AND %s
+        AND c1.i_int_avg > %s
+) t0
+"""
+
+query_central = """
+SELECT
+    catsrcname, i_int
+FROM
+    nearestneighborincat(%s,%s,'%s')
+"""
+
+
+class skymodel(BaseRecipe):
+    """
+    Extract basic sky model information from database
+    """
+    inputs = {
+        'db_host': ingredient.StringField(
+            '--db-host',
+            help="Host with MonetDB database instance",
+            default="ldb001"
+        ),
+        'db_port': ingredient.IntField(
+            '--db-port',
+            help="Host with MonetDB database instance",
+            default=50000
+        ),
+        'db_dbase': ingredient.StringField(
+            '--db-dbase',
+            help="Database name",
+            default="gsm"
+        ),
+        'db_user': ingredient.StringField(
+            '--db-user',
+            help="Database user",
+            default="gsm"
+        ),
+        'db_password': ingredient.StringField(
+            '--db-password',
+            help="Database password",
+            default="msss"
+        ),
+        'ra': ingredient.FloatField(
+            '--ra',
+            help='RA of image centre (degrees)'
+        ),
+        'dec': ingredient.FloatField(
+            '--dec',
+            help='Dec of image centre (degrees)'
+        ),
+        'search_size': ingredient.FloatField(
+            '--search-size',
+            help='Distance to search in each of RA/dec (degrees)'
+        ),
+        'min_flux': ingredient.FloatField(
+            '--min-flux',
+            help="Integrated flus threshold, in Jy, for source selection"
+        ),
+        'skymodel_file': ingredient.StringField(
+            '--skymodel-file',
+            help="Output file for BBS-format sky model definition"
+        )
+    }
+
+    outputs = {
+        'source_name': ingredient.StringField(),
+        'source_flux': ingredient.FloatField()
+    }
+
+    def go(self):
+        self.logger.info("Building sky model")
+        super(skymodel, self).go()
+
+        ra_min = self.inputs['ra'] - self.inputs['search_size']
+        ra_max = self.inputs['ra'] + self.inputs['search_size']
+        dec_min = self.inputs['dec'] - self.inputs['search_size']
+        dec_max = self.inputs['dec'] + self.inputs['search_size']
+
+        try:
+            with closing(
+                db.connect(
+                    hostname=self.inputs["db_host"],
+                    port=int(self.inputs["db_port"]),
+                    database=self.inputs["db_dbase"],
+                    username=self.inputs["db_user"],
+                    password=self.inputs["db_password"]
+                )
+            ) as db_connection:
+                with closing(db_connection.cursor()) as db_cursor:
+                    db_cursor.execute(
+                        query_central % (float(self.inputs['ra']), float(self.inputs['dec']), "VLSS")
+                    )
+                    self.outputs["source_name"], self.outputs["source_flux"] = db_cursor.fetchone()
+                    self.logger.info("Central source is %s; flux %f" %
+                        (self.outputs["source_name"], self.outputs["source_flux"])
+                    )
+                    db_cursor.execute(
+                        query_skymodel % (
+                            4, 4, # Only using VLSS for now
+                            float(ra_min),
+                            float(ra_max),
+                            float(dec_min),
+                            float(dec_max),
+                            float(self.inputs['min_flux'])
+                        )
+                    )
+                    results = db_cursor.fetchall()
+
+        except db.Error, my_error:
+            self.logger.warn("Failed to build sky model: %s " % (my_error))
+            return 1
+
+        try:
+            with open(self.inputs['skymodel_file'], 'w') as file:
+                file.write(header_line)
+                file.writelines(", ".join(line) + ",\n" for line in results)
+        except Exception, e:
+            self.logger.warn("Failed to write skymodel file")
+            self.logger.warn(str(e))
+            return 1
+
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(skymodel().main())
diff --git a/CEP/Pipeline/recipes/sip/master/sourcedb.py b/CEP/Pipeline/recipes/sip/master/sourcedb.py
new file mode 100644
index 0000000000000000000000000000000000000000..b75da9c7b5ca7e7f144fbf1765251971122b0d98
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/sourcedb.py
@@ -0,0 +1,80 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                                sourcedb recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import os
+import sys
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.remotecommand import ComputeJob
+
+class sourcedb(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Add a source database to input MeasurementSets.
+
+    This recipe is called by the :class:`bbs.bbs` recipe; it may also be used
+    standalone.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            help="Full path to makesourcedb executable",
+            default="/opt/LofIm/daily/lofar/bin/makesourcedb"
+        ),
+        'skymodel': ingredient.FileField(
+            '-s', '--skymodel',
+            dest="skymodel",
+            help="Input sky catalogue"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField()
+    }
+
+    def go(self):
+        self.logger.info("Starting sourcedb run")
+        super(sourcedb, self).go()
+
+        #                           Load file <-> compute node mapping from disk
+        # ----------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host, ms in data:
+            jobs.append(
+                ComputeJob(
+                    host, command, arguments=[
+                        self.inputs['executable'], ms, self.inputs['skymodel']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        if self.error.isSet():
+            return 1
+        else:
+            self.outputs['mapfile'] = self.inputs['args'][0]
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(sourcedb().main())
diff --git a/CEP/Pipeline/recipes/sip/master/storagemapper.py b/CEP/Pipeline/recipes/sip/master/storagemapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..3aa90a67fbbe22c2eea0345536ce45322f7247af
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/storagemapper.py
@@ -0,0 +1,63 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                        Generate a mapfile for processing data on storage nodes
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import os.path
+import sys
+from collections import defaultdict
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.parset import Parset
+from lofarpipe.support.utilities import create_directory
+import lofarpipe.support.lofaringredient as ingredient
+
+class storagemapper(BaseRecipe):
+    """
+    Parses a list of filenames and generates a mapfile suitable for processing
+    on storage nodes.
+
+    **Arguments**
+
+    None.
+    """
+    inputs = {
+        'mapfile': ingredient.StringField(
+            '--mapfile',
+            help="Full path (including filename) of mapfile to produce (clobbered if exists)"
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField(
+            help="Full path (including filename) of generated mapfile"
+        )
+    }
+
+    def go(self):
+        self.logger.info("Starting storagemapper run")
+        super(storagemapper, self).go()
+
+        #                          We read the storage node name out of the path
+        #     and append the local filename (ie, on the storage node) to the map
+        # ----------------------------------------------------------------------
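+        #   A minimal sketch, assuming paths of the form
+        #   /net/sub3/lse003/data4/L2010_12345/SB000.MS: path element 3
+        #   ("lse003") is taken as the host, and everything after it
+        #   ("/data4/L2010_12345/SB000.MS") as the node-local filename.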
+        data = defaultdict(list)
+        for filename in self.inputs['args']:
+            host = filename.split(os.path.sep)[3]
+            data[host].append(filename.split(host)[-1])
+
+        #                                 Dump the generated mapping to a parset
+        # ----------------------------------------------------------------------
+        parset = Parset()
+        for host, filenames in data.iteritems():
+            parset.addStringVector(host, filenames)
+
+        create_directory(os.path.dirname(self.inputs['mapfile']))
+        parset.writeFile(self.inputs['mapfile'])
+        self.outputs['mapfile'] = self.inputs['mapfile']
+
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(storagemapper().main())
diff --git a/CEP/Pipeline/recipes/sip/master/vdsreader.py b/CEP/Pipeline/recipes/sip/master/vdsreader.py
new file mode 100644
index 0000000000000000000000000000000000000000..968c62231fbb561f64117a6f8725df06e71e5605
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/vdsreader.py
@@ -0,0 +1,69 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                  vdsreader recipe: extract filenames + metadata from GVDS file
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.utilities import get_parset
+
+
+class vdsreader(BaseRecipe):
+    """
+    Read a GVDS file and return a list of the MS filenames referenced therein
+    together with selected metadata.
+
+    **Arguments**
+
+    None.
+    """
+    inputs = {
+        'gvds': ingredient.FileField(
+            '-g', '--gvds',
+            help="GVDS file to process"
+        )
+    }
+
+    outputs = {
+        'data': ingredient.ListField(help="List of MeasurementSet paths"),
+        'start_time': ingredient.StringField(help="Start time of observation"),
+        'end_time': ingredient.StringField(help="End time of observation"),
+        'pointing': ingredient.DictField(help="Observation pointing direction")
+    }
+
+    def go(self):
+        self.logger.info("Starting vdsreader run")
+        super(vdsreader, self).go()
+
+        try:
+            gvds = get_parset(self.inputs['gvds'])
+        except:
+            self.logger.error("Unable to read G(V)DS file")
+            raise
+
+        self.logger.info("Building list of measurementsets")
+        ms_names = [
+            gvds.getString("Part%d.FileName" % (part_no,))
+            for part_no in xrange(gvds.getInt("NParts"))
+        ]
+        self.logger.debug(ms_names)
+
+        self.outputs['data'] = ms_names
+        try:
+            self.outputs['start_time'] = gvds.getString('StartTime')
+            self.outputs['end_time'] = gvds.getString('EndTime')
+        except:
+            self.logger.warn("Failed to read start/end time from GVDS file")
+        try:
+            self.outputs['pointing'] = {
+                'type': gvds.getStringVector('Extra.FieldDirectionType')[0],
+                'dec': gvds.getStringVector('Extra.FieldDirectionDec')[0],
+                'ra': gvds.getStringVector('Extra.FieldDirectionRa')[0]
+            }
+        except:
+            self.logger.warn("Failed to read pointing information from GVDS file")
+        return 0
diff --git a/CEP/Pipeline/recipes/sip/nodes/bbs.py b/CEP/Pipeline/recipes/sip/nodes/bbs.py
new file mode 100644
index 0000000000000000000000000000000000000000..25fbeb4c86de4392fd2b19403926257a6c30c84b
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/bbs.py
@@ -0,0 +1,101 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                  BBS (BlackBoard Selfcal) node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import Popen, CalledProcessError, PIPE, STDOUT
+from tempfile import mkstemp, mkdtemp
+import os
+import sys
+import shutil
+
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import read_initscript
+from lofarpipe.support.utilities import get_mountpoint
+from lofarpipe.support.utilities import log_time
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.parset import Parset
+
+class bbs(LOFARnodeTCP):
+    #                      Handles running a single BBS kernel on a compute node
+    # --------------------------------------------------------------------------
+    def run(
+        self, executable, initscript, infile, key, db_name, db_user, db_host
+    ):
+        #                           executable: path to KernelControl executable
+        #                           initscript:             path to lofarinit.sh
+        #                               infile:    MeasurementSet for processing
+        #       key, db_name, db_user, db_host:   database connection parameters
+        # ----------------------------------------------------------------------
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            #        Build a configuration parset specifying database parameters
+            #                                                     for the kernel
+            # ------------------------------------------------------------------
+            self.logger.debug("Setting up kernel parset")
+            filesystem = "%s:%s" % (os.uname()[1], get_mountpoint(infile))
+            fd, parset_filename = mkstemp()
+            kernel_parset = Parset()
+            for parset_key, value in {
+                "ObservationPart.Filesystem": filesystem,
+                "ObservationPart.Path": infile,
+                "BBDB.Key": key,
+                "BBDB.Name": db_name,
+                "BBDB.User": db_user,
+                "BBDB.Host": db_host,
+                "ParmLog": "",
+                "ParmLoglevel": "",
+                "ParmDB.Sky": os.path.join(infile, "sky"),
+                "ParmDB.Instrument": os.path.join(infile, "instrument")
+            }.iteritems():
+                kernel_parset.add(parset_key, value)
+            kernel_parset.writeFile(parset_filename)
+            os.close(fd)
+            self.logger.debug("Parset written to %s" % (parset_filename,))
+
+
+            #                                                     Run the kernel
+            #               Catch & log output from the kernel logger and stdout
+            # ------------------------------------------------------------------
+            working_dir = mkdtemp()
+            env = read_initscript(self.logger, initscript)
+            try:
+                cmd = [executable, parset_filename, "0"]
+                self.logger.debug("Executing BBS kernel")
+                with CatchLog4CPlus(
+                    working_dir,
+                    self.logger.name + "." + os.path.basename(infile),
+                    os.path.basename(executable),
+                ):
+                    bbs_kernel_process = Popen(
+                        cmd, stdout=PIPE, stderr=PIPE, cwd=working_dir
+                    )
+                    sout, serr = bbs_kernel_process.communicate()
+                log_process_output("BBS kernel", sout, serr, self.logger)
+                if bbs_kernel_process.returncode != 0:
+                    raise CalledProcessError(
+                        bbs_kernel_process.returncode, executable
+                    )
+            except CalledProcessError, e:
+                self.logger.error(str(e))
+                return 1
+            finally:
+                os.unlink(parset_filename)
+                shutil.rmtree(working_dir)
+            return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(bbs(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/cimager.py b/CEP/Pipeline/recipes/sip/nodes/cimager.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b1d715c61f61c18b801399586793372f8e85181
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/cimager.py
@@ -0,0 +1,138 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                         ASKAPsoft cimager node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import Popen, CalledProcessError, PIPE, STDOUT
+from tempfile import mkdtemp
+import os
+import sys
+import shutil
+
+from pyrap.quanta import quantity
+from pyrap.tables import table
+
+from lofarpipe.support.pipelinelogging import CatchLog4CXX
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.parset import Parset, patch_parset, get_parset
+
+class cimager(LOFARnodeTCP):
+    #                 Handles running a single cimager process on a compute node
+    # --------------------------------------------------------------------------
+    def run(self, imager_exec, vds, parset, resultsdir, start_time, end_time):
+        #       imager_exec:                          path to cimager executable
+        #               vds:           VDS file describing the data to be imaged
+        #            parset:                                imager configuration
+        #        resultsdir:                         place resulting images here
+        #        start_time:                        )    time range to be imaged
+        #          end_time:                        )   in seconds (may be None)
+        # ----------------------------------------------------------------------
+        with log_time(self.logger):
+            self.logger.info("Processing %s" % (vds,))
+
+            #    Bail out if destination exists (can thus resume a partial run).
+            #                                            Should be configurable?
+            # ------------------------------------------------------------------
+            parset_data = Parset(parset)
+            image_names = parset_data.getStringVector("Cimager.Images.Names")
+            for image_name in image_names:
+                outputfile = os.path.join(resultsdir, image_name + ".restored")
+                self.logger.info(outputfile)
+                if os.path.exists(outputfile):
+                    self.logger.info("Image already exists: aborting.")
+                    return 0
+            try:
+                working_dir = mkdtemp()
+
+                #   If a time range has been specified, copy that section of the
+                #                                  input MS and only image that.
+                # --------------------------------------------------------------
+                query = []
+                if start_time:
+                    self.logger.debug("Start time is %s" % start_time)
+                    start_time = quantity(float(start_time), 's')
+                    query.append("TIME > %f" % start_time.get('s').get_value())
+                if end_time:
+                    self.logger.debug("End time is %s" % end_time)
+                    end_time = quantity(float(end_time), 's')
+                    query.append("TIME < %f" % end_time.get('s').get_value())
+                query = " AND ".join(query)
+                if query:
+                    #                             Select relevant section of MS.
+                    # ----------------------------------------------------------
+                    self.logger.debug("Query is %s" % query)
+                    output = os.path.join(working_dir, "timeslice.MS")
+                    vds_parset = get_parset(vds)
+                    t = table(vds_parset.getString("FileName"))
+                    t.query(query, name=output)
+                    #       Patch updated information into imager configuration.
+                    # ----------------------------------------------------------
+                    parset = patch_parset(parset,
+                        {
+                            'Cimager.dataset': output
+                        }
+                    )
+                else:
+                    self.logger.debug("No time range selected")
+
+                self.logger.debug("Running cimager")
+                with CatchLog4CXX(
+                    working_dir,
+                    self.logger.name + "." + os.path.basename(vds)
+                ):
+                    cimager_process = Popen(
+                        [imager_exec, "-inputs", parset],
+                        stdout=PIPE, stderr=PIPE, cwd=working_dir
+                    )
+                    sout, serr = cimager_process.communicate()
+                log_process_output("cimager", sout, serr, self.logger)
+                if cimager_process.returncode != 0:
+                    raise CalledProcessError(
+                        cimager_process.returncode, imager_exec
+                    )
+
+                #        Dump the resulting images in the pipeline results area.
+                #    I'm not aware of a foolproof way to predict the image names
+                #                that will be produced, so we read them from the
+                #                      parset and add standard cimager prefixes.
+                # --------------------------------------------------------------
+                parset_data = Parset(parset)
+                image_names = parset_data.getStringVector("Cimager.Images.Names")
+                prefixes = [
+                    "image", "psf", "residual", "weights", "sensitivity"
+                ]
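+                #   For an image named e.g. "image.i.sb0" (name invented),
+                #   cimager is expected to have written image.i.sb0, psf.i.sb0,
+                #   residual.i.sb0, weights.i.sb0, sensitivity.i.sb0 and, if
+                #   restoring, image.i.sb0.restored; each is moved to resultsdir.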
+                self.logger.debug("Copying images to %s" % resultsdir)
+                for image_name in image_names:
+                    for prefix in prefixes:
+                        filename = image_name.replace("image", prefix, 1)
+                        shutil.move(
+                            os.path.join(working_dir, filename),
+                            os.path.join(resultsdir, filename)
+                        )
+                    if parset_data.getBool('Cimager.restore'):
+                        shutil.move(
+                            os.path.join(working_dir, image_name + ".restored"),
+                            os.path.join(resultsdir, image_name + ".restored")
+                        )
+            except CalledProcessError, e:
+                self.logger.error(str(e))
+                return 1
+            finally:
+                shutil.rmtree(working_dir)
+                if query:
+                    #                     If we created a new parset, delete it.
+                    # ----------------------------------------------------------
+                    os.unlink(parset)
+            return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(cimager(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/count_timesteps.py b/CEP/Pipeline/recipes/sip/nodes/count_timesteps.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbe77474176a7d5ea13a7ac54dc8dabf344c75d0
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/count_timesteps.py
@@ -0,0 +1,47 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                           count_timesteps node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+from __future__ import with_statement
+import os.path
+import sys
+
+from pyrap.tables import taql
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+
+
+class count_timesteps(LOFARnodeTCP):
+    """
+    Return the first and last values in the TIME column.
+    """
+    def run(self, infile):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            try:
+                self.outputs['start_time'] = taql(
+                    "CALC MIN([SELECT TIME from %s])" % infile
+                )[0]
+                self.outputs['end_time'] = taql(
+                    "CALC MAX([SELECT TIME from %s])" % infile
+                )[0]
+            except Exception, e:
+                self.logger.error(str(e))
+                return 1
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(count_timesteps(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/casapy.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/casapy.py
new file mode 100644
index 0000000000000000000000000000000000000000..8478e836537d3b1d1c050236c2a7cdb40771a541
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/casapy.py
@@ -0,0 +1,68 @@
+# Python standard library
+from __future__ import with_statement
+from contextlib import closing
+from subprocess import check_call, CalledProcessError
+from dateutil.parser import parse as parse_date
+from datetime import timedelta
+import os.path, tempfile, shutil, time
+
+from pipeline.support.lofarnode import LOFARnode
+from pipeline.support.utilities import patch_parset, create_directory, log_time
+from pipeline.support.lofarexceptions import ExecutableMissing
+import pipeline.support.utilities as utilities
+
+CASA_DATE_FORMAT = "%Y/%m/%d/%H:%M:%S.000"
+
+class casapy_node(LOFARnode):
+    def run(self, infile, parset, start_time, end_time, increment):
+        # Time execution of this job
+        with log_time(self.logger):
+            self.logger.info("Processing %s" % (infile,))
+
+            start_time = parse_date(start_time)
+            end_time   = parse_date(end_time)
+
+            self.logger.debug("Start time: %s, end time: %s" % (str(start_time), str(end_time)))
+            increment = timedelta(0, increment)
+
+            process_start = start_time
+            while process_start < end_time:
+                process_end = process_start + increment
+                if process_end > end_time:
+                    td = end_time - process_start
+                    self.logger.info(
+                        "Note: final image is %.3f seconds long" % (
+                            td.days * 86400 + td.seconds + td.microseconds / 1e6
+                        )
+                    )
+                    process_end = end_time
+                time_range = "\'%s~%s\'" % (
+                    process_start.strftime(CASA_DATE_FORMAT),
+                    process_end.strftime(CASA_DATE_FORMAT)
+                )
+                self.logger.debug("Now processing %s" % (time_range))
+
+                tmp_parset_filename = patch_parset(
+                    parset, {
+                        'Selection.timerange': time_range,
+                        'Images.name': '-' + str(int(time.mktime(process_start.timetuple()))),
+                        'dataset': infile
+                    }
+                )
+
+                try:
+                    result = check_call([
+                        os.path.expanduser('~rol/sw/bin/casapy'),
+                        tmp_parset_filename,
+                    ])
+                except CalledProcessError, e:
+                    self.logger.error(str(e))
+                    self.logger.error("Failed dataset was %s %s" % (infile, time_range))
+                    raise Exception
+                finally:
+                    # Clean up temporary files.
+                    os.unlink(tmp_parset_filename)
+
+                process_start += increment
+
+            return result
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/colmaker.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/colmaker.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a2a43cdf7324e929f46a4022e1a84cd1e8aadf7
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/colmaker.py
@@ -0,0 +1,17 @@
+from __future__ import with_statement
+import pyrap.tables
+
+from pipeline.support.lofarnode import LOFARnode
+from pipeline.support.utilities import log_time
+
+class makecolumns_node(LOFARnode):
+    """
+    Add imaging columns to a given MS using pyrap.
+    """
+    def run(self, file):
+        with log_time(self.logger):
+            self.logger.info("Processing: %s" % (file))
+            try:
+                pyrap.tables.addImagingColumns(file)
+            except ValueError:
+                self.logger.debug('Add imaging columns failed: already exist?')
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/dummy_echo_parallel.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/dummy_echo_parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..c165e51626d0d4497dc397c6424e440cd18a5f44
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/dummy_echo_parallel.py
@@ -0,0 +1,14 @@
+import subprocess
+from lofarpipe.support.lofarnode import LOFARnode
+
+class dummy_echo_parallel(LOFARnode):
+    def run(self, filename, executable):
+        self.logger.info("Processing %s" % (filename))
+        execute = [executable, filename]
+
+        my_process = subprocess.Popen(execute, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        sout, serr = my_process.communicate()
+        self.logger.info("stdout: " + sout)
+        self.logger.info("stderr: " + serr)
+
+        return filename
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/excluder.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/excluder.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d212e9bee656f306ea0a46928d1186630970c83
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/excluder.py
@@ -0,0 +1,27 @@
+from __future__ import with_statement
+from pyrap.tables import table
+
+from lofarpipe.support.lofarnode import LOFARnode
+from lofarpipe.support.utilities import log_time
+
+class excluder(LOFARnode):
+    """
+    Remove data from the given station from the input MS.
+    """
+    def run(self, input, output, *stations):
+        try:
+            t = table(input)
+        except Exception, e:
+            self.logger.error(str(e))
+            raise e
+        try:
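+            # The ANTENNA keyword refers to the subtable as "Table: <path>";
+            # the second token is the path we need to open.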
+            a = table(t.getkeyword('ANTENNA').split()[1])
+            station_ids = [a.getcol('NAME').index(name) for name in stations]
+            selection = t.query(
+                "ANTENNA1 not in %s and ANTENNA2 not in %s" %
+                (str(station_ids), str(station_ids))
+            )
+            selection.copy(output, deep=True).close()
+        except Exception, e:
+            self.logger.error(str(e))
+            raise e
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/flagger.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/flagger.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5a6c49d2c6a72fc2f723049abef84f94aa8647b
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/flagger.py
@@ -0,0 +1,31 @@
+from __future__ import with_statement
+from pyrap.tables import table
+import numpy
+
+from lofarpipe.support.lofarnode import LOFARnode
+from lofarpipe.support.utilities import log_time
+
+class flagger(LOFARnode):
+    """
+    Flag out CORRECTED_DATA greater than some maximum value.
+    """
+    def run(self, input, output, max_value):
+        with log_time(self.logger):
+            self.logger.info("Processing: %s" % (input))
+            try:
+                t = table(input)
+                t2 = t.copy(output, deep=True)
+                t2.close()
+                t = table(output, readonly=False)
+            except Exception, e:
+                self.logger.error(str(e))
+                raise e
+            try:
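+                # Flag every row whose first-channel amplitude exceeds
+                # max_value; the FLAG shape written below assumes a single
+                # channel with four polarisations.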
+                for i, data in enumerate(t.getcol('CORRECTED_DATA')):
+                    if max([abs(val) for val in data[0]]) > max_value:
+                        t.putcell('FLAG', i, numpy.array([[True, True, True, True]]))
+                        t.putcell('FLAG_ROW', i, True)
+                t.close()
+            except Exception, e:
+                self.logger.error(str(e))
+                raise e
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/qcheck.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/qcheck.py
new file mode 100644
index 0000000000000000000000000000000000000000..da2c5f6ad73e1c23ee43b89b64e136462b05f42b
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/qcheck.py
@@ -0,0 +1,51 @@
+from __future__ import with_statement
+import os, imp, logging, errno
+
+from pipeline.support.lofarnode import LOFARnode
+from pipeline.support.utilities import log_time
+
+class qcheck_node(LOFARnode):
+    """
+    Run quality check modules on an image.
+    """
+    def run(self, infile, pluginlist, outputdir):
+        with log_time(self.logger):
+            self.logger.info("Processing: %s" % (infile))
+
+            try:
+                os.makedirs(outputdir)
+            except OSError, failure:
+                if failure.errno != errno.EEXIST:
+                    raise
+
+            file_handler = logging.FileHandler(os.path.join(
+                    outputdir,
+                    os.path.basename(infile) + ".qcheck.log"
+                ),
+                mode='w'
+            )
+            file_handler.setFormatter(logging.Formatter("%(message)s"))
+            file_logger = logging.getLogger('main')
+            file_logger.addHandler(file_handler)
+            file_logger.setLevel(logging.INFO)
+            pipeline_logger = logging.getLogger(self.logger.name + "." + os.path.basename(infile))
+            pipeline_logger.setLevel(logging.WARN)
+
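+            # Plugins are handed two loggers: 'main' writes a full report to
+            # the per-image log file, 'warn' surfaces problems in the
+            # pipeline log.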
+            loggers = {'main': file_logger, 'warn': pipeline_logger}
+
+            for plugin in pluginlist:
+                try:
+                    qcheck = imp.load_source('qcheck', plugin)
+                except ImportError:
+                    self.logger.warn("Quality check module (%s) not found" % (plugin))
+                    continue
+                try:
+                    qcheck.run(infile, outputdir=outputdir, loggers=loggers)
+                except Exception, e:
+                    self.logger.warn("Quality check failed on %s" % (infile))
+                    self.logger.exception(str(e))
+
+            # Tidy up for the next image
+            file_handler.flush()
+            loggers['main'].removeHandler(file_handler)
+
+        return 0
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/sextractor.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/sextractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf87b51e9f75d3eeab8309eef21545e343771a3a
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/sextractor.py
@@ -0,0 +1,59 @@
+# Python standard library
+from __future__ import with_statement
+from contextlib import closing
+from subprocess import check_call
+from tempfile import mkdtemp
+from ConfigParser import SafeConfigParser as ConfigParser
+from shutil import rmtree
+import os.path
+import platform
+
+# Root directory for config file
+from pipeline import __path__ as config_path
+
+from tkp_lib.accessors import FitsFile
+from tkp_lib.image import ImageData
+from tkp_lib.database import connection
+
+from tkp_lib.dataset import DataSet
+
+def sextract(filename, dataset):
+    raise NotImplementedError
+    # Hack around MonetDB concurrency issues(!)
+    import time, random
+    time.sleep(random.randint(0,60))
+
+    try:
+        config = ConfigParser()
+        config.read("%s/pipeline.cfg" % (config_path[0],))
+        image2fits = config.get('sextractor', 'image2fits')
+
+        tempdir = mkdtemp(dir='/data/swinbank')
+        fitsfile = os.path.join(tempdir, os.path.basename(filename) + ".fits")
+
+        command_line = [image2fits, "in=%s" % (os.path.basename(filename)), "out=%s" % (fitsfile)]
+        cwd = os.path.dirname(filename)
+
+        check_call(
+            command_line,
+            cwd=os.path.dirname(filename),
+            close_fds=True
+        )
+
+        image = ImageData(FitsFile(fitsfile), dataset=dataset)
+    except Exception, inst:
+        return "ERROR: %s on %s, %s" % (str((type(inst))), platform.node(), fitsfile)
+
+    sr = image.sextract()
+    with closing(connection()) as con:
+        sr.savetoDB(con)
+    
+    rmtree(tempdir)
+    return "%s found %d sources" % (filename, len(sr))
+
+if __name__ == "__main__":
+    from sys import argv
+    dataset = DataSet("command line")
+    try:
+        sextract(argv[1], dataset)
+    except:
+        print "Usage: sextractor [filename]"
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/trimmer.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/trimmer.py
new file mode 100644
index 0000000000000000000000000000000000000000..391a76ec89fb2986bdc057040263f5faba9a06e9
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/trimmer.py
@@ -0,0 +1,32 @@
+from __future__ import with_statement
+from pyrap.tables import table
+
+from lofarpipe.support.lofarnode import LOFARnode
+from lofarpipe.support.utilities import log_time
+
+class trimmer(LOFARnode):
+    """
+    Remove data from the start and/or end of a MeasurementSet.
+    """
+    def run(self, input, output, start_seconds, end_seconds):
+        # Remove data from the start and/or end of a MeasurementSet.
+        copy_columns = ",".join([
+            'UVW', 'FLAG', 'FLAG_CATEGORY', 'WEIGHT', 'SIGMA', 'ANTENNA1',
+            'ANTENNA2', 'ARRAY_ID', 'DATA_DESC_ID', 'EXPOSURE', 'FEED1', 'FEED2',
+            'FIELD_ID', 'FLAG_ROW', 'INTERVAL', 'OBSERVATION_ID', 'PROCESSOR_ID',
+            'SCAN_NUMBER', 'STATE_ID', 'TIME', 'TIME_CENTROID', 'DATA',
+            'WEIGHT_SPECTRUM'
+        ])
+        try:
+            t = table(input)
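+            # Keep only rows more than start_seconds after the first timestamp
+            # and more than end_seconds before the last one.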
+            selection = t.query(
+                "TIME > %.16f AND TIME < %.16f" % (
+                    t.getcol('TIME')[0] + float(start_seconds),
+                    t.getcol('TIME')[-1] - float(end_seconds)
+                ),
+                columns=copy_columns
+            )
+            selection.copy(output, deep=True)
+        except Exception, e:
+            self.logger.error(str(e))
+            raise e
diff --git a/CEP/Pipeline/recipes/sip/nodes/dppp.py b/CEP/Pipeline/recipes/sip/nodes/dppp.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2f6fc27da64f2dd4c4d5d2fbbcbbf752b2ea36d
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/dppp.py
@@ -0,0 +1,122 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                       DPPP (Data Pre-Processing Pipeline) node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import CalledProcessError
+from logging import getLogger
+import sys
+import os.path
+import tempfile
+import shutil
+
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+from lofarpipe.support.pipelinelogging import log_time
+from lofarpipe.support.utilities import patch_parset
+from lofarpipe.support.utilities import read_initscript
+from lofarpipe.support.utilities import create_directory
+from lofarpipe.support.utilities import catch_segfaults
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.lofarexceptions import ExecutableMissing
+
+class dppp(LOFARnodeTCP):
+    def run(
+        self, infile, outfile, parset, executable, initscript,
+        start_time, end_time, nthreads, clobber
+    ):
+        # Time execution of this job
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            if clobber == "True":
+                self.logger.info("Checking for and removing previous output")
+                shutil.rmtree(outfile, ignore_errors=True)
+
+            self.logger.debug("Time interval: %s %s" % (start_time, end_time))
+
+            #                                             Initialise environment
+            #                 Limit number of threads used, per request from GvD
+            # ------------------------------------------------------------------
+            env = read_initscript(self.logger, initscript)
+            if nthreads == "None": nthreads = 1
+            self.logger.debug("Using %s threads for NDPPP" % nthreads)
+            env['OMP_NUM_THREADS'] = str(nthreads)
+
+            #    If the input and output filenames are the same, DPPP should not
+            #       write a new MS, but rather update the existing one in-place.
+            #              This is achieved by setting msout to an empty string.
+            # ------------------------------------------------------------------
+            if outfile == infile:
+                outfile = "\"\""
+            else:
+                create_directory(os.path.dirname(outfile))
+
+            #       Patch the parset with the correct input/output MS names and,
+            #                                   if available, start & end times.
+            #                            The uselogger option enables log4cplus.
+            # ------------------------------------------------------------------
+            patch_dictionary = {
+                'msin': infile,
+                'msout': outfile,
+                'uselogger': 'True'
+            }
+            if start_time and start_time != "None":
+                patch_dictionary['msin.starttime'] = start_time
+            if end_time and end_time != "None":
+                patch_dictionary['msin.endtime'] = end_time
+            try:
+                temp_parset_filename = patch_parset(parset, patch_dictionary)
+            except Exception, e:
+                self.logger.error(e)
+                return 1
+
+            try:
+                # Create the scratch directory first so that the finally
+                # clause can always clean it up.
+                working_dir = tempfile.mkdtemp()
+                if not os.access(executable, os.X_OK):
+                    raise ExecutableMissing(executable)
+
+                cmd = [executable, temp_parset_filename, '1']
+
+                with CatchLog4CPlus(
+                    working_dir,
+                    self.logger.name + "." + os.path.basename(infile),
+                    os.path.basename(executable),
+                ) as logger:
+                    #    Catch NDPPP segfaults (a regular occurrence), and retry
+                    # ----------------------------------------------------------
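+                    # On retry, remove any partially-written output MS; when
+                    # updating in place there is nothing to clean up.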
+                    if outfile != infile:
+                        cleanup_fn = lambda : shutil.rmtree(outfile, ignore_errors=True)
+                    else:
+                        cleanup_fn = lambda : None
+                    catch_segfaults(
+                        cmd, working_dir, env, logger, cleanup=cleanup_fn
+                    )
+            except ExecutableMissing, e:
+                self.logger.error("%s not found" % (e.args[0]))
+                return 1
+            except CalledProcessError, e:
+                #        CalledProcessError isn't properly propagated by IPython
+                # --------------------------------------------------------------
+                self.logger.error(str(e))
+                return 1
+            except Exception, e:
+                self.logger.error(str(e))
+                return 1
+            finally:
+                os.unlink(temp_parset_filename)
+                shutil.rmtree(working_dir)
+
+            return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(dppp(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/flag_baseline.py b/CEP/Pipeline/recipes/sip/nodes/flag_baseline.py
new file mode 100644
index 0000000000000000000000000000000000000000..16ce3cd68423c379f63bc5549164d1e66dc868ac
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/flag_baseline.py
@@ -0,0 +1,78 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                             flag_baseline node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+from __future__ import with_statement
+from cPickle import load
+import os.path
+import sys
+
+from pyrap.tables import taql, table
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+
+
+class flag_baseline(LOFARnodeTCP):
+    """
+    Completely flag a series of baselines in a MeasurementSet.
+    """
+    def run(self, infile, baseline_filename):
+        """
+        baseline_filename points to a file containing a pickled array of
+        antenna pairs.
+        """
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            if not os.path.exists(baseline_filename):
+                self.logger.error(
+                    "baseline file %s not found" % (baseline_filename)
+                )
+                return 1
+
+            with open(baseline_filename) as file:
+                baselines = load(file)
+
+            antenna1, antenna2 = [], []
+            for baseline in baselines:
+                ant1, ant2 = baseline.split("&")
+                antenna1.append(int(ant1))
+                antenna2.append(int(ant2))
+
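+            # Flag rows whose (ANTENNA1, ANTENNA2) pair matches one of the
+            # listed baselines; TaQL's any() applies the comparison per
+            # element of the two lists.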
+            cmd = "UPDATE %s SET FLAG=True WHERE any(ANTENNA1=%s and ANTENNA2=%s)" % \
+                (infile, str(antenna1), str(antenna2))
+            self.logger.info("Running TaQL: " + cmd)
+
+            try:
+                taql(cmd)
+            except Exception, e:
+                self.logger.warn(str(e))
+                return 1
+
+            # QUICK HACK: Also flag last timestep
+            t = table(infile)
+            maxtime = t.getcol('TIME').max()
+            t.close()
+            cmd = "UPDATE %s SET FLAG=True WHERE TIME=%f" % (infile, maxtime)
+            self.logger.info("Running TaQL: " + cmd)
+            try:
+                taql(cmd)
+            except Exception, e:
+                self.logger.warn(str(e))
+                return 1
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(flag_baseline(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/make_flaggable.py b/CEP/Pipeline/recipes/sip/nodes/make_flaggable.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd5048caf48c4e4b0b5113795fc4d7e8082610bd
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/make_flaggable.py
@@ -0,0 +1,45 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                            make_flaggable node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+from __future__ import with_statement
+import os.path
+import sys
+import imp
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+
+
+class make_flaggable(LOFARnodeTCP):
+    def run(self, infile, makeFLAGwritable):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            if not os.path.exists(makeFLAGwritable):
+                self.logger.error(
+                    "file %s not found" % (makeFLAGwritable)
+                )
+                return 1
+
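+            # Load the supplied script as a module and call its
+            # makeFlagWritable() on the MS, presumably so that the FLAG
+            # column can be rewritten in place.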
+            try:
+                mFw_module = imp.load_source('mFw_module', makeFLAGwritable)
+                mFw_module.makeFlagWritable(infile, '')
+            except Exception, e:
+                self.logger.warn(str(e))
+                return 1
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(make_flaggable(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/parmdb.py b/CEP/Pipeline/recipes/sip/nodes/parmdb.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f65a80392b92b952489c1a891facffcb38e673a
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/parmdb.py
@@ -0,0 +1,31 @@
+from __future__ import with_statement
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+import shutil, os.path
+import sys
+
+class parmdb(LOFARnodeTCP):
+    def run(self, infile, pdb):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            output = os.path.join(infile, os.path.basename(pdb))
+
+            # Remove any old parmdb database
+            shutil.rmtree(output, ignore_errors=True)
+
+            # And copy the new one into place
+            shutil.copytree(pdb, output)
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(parmdb(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/rficonsole.py b/CEP/Pipeline/recipes/sip/nodes/rficonsole.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9869abff65bbdb1049ff4244dc07d70d82163a0
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/rficonsole.py
@@ -0,0 +1,73 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                    rficonsole (AOflagger) node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import CalledProcessError
+import sys
+import os.path
+import shutil
+import tempfile
+
+from lofarpipe.support.pipelinelogging import log_time
+from lofarpipe.support.utilities import catch_segfaults
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.lofarexceptions import ExecutableMissing
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+
+class rficonsole(LOFARnodeTCP):
+    def run(self, executable, nthreads, strategy, indirect, skip_flagged, wd, *infiles):
+        with log_time(self.logger):
+            self.logger.info("Processing %s" % " ".join(infiles))
+
+            try:
+                if not os.access(executable, os.X_OK):
+                    raise ExecutableMissing(executable)
+
+                working_dir = tempfile.mkdtemp(dir=wd)
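+                # Build the rficonsole command line: -j sets the thread count,
+                # and the optional flags below are passed through on request.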
+                cmd = [executable, "-j", str(nthreads)]
+                if strategy:
+                    if os.path.exists(strategy):
+                        cmd.extend(["-strategy", strategy])
+                    else:
+                        raise Exception("Strategy definition not available")
+                if indirect:
+                    cmd.extend(["-indirect-read"])
+                if skip_flagged:
+                    cmd.extend(["-skip-flagged"])
+                cmd.extend(infiles)
+                with CatchLog4CPlus(
+                    working_dir,
+                    self.logger.name,
+                    os.path.basename(executable)
+                ) as logger:
+                    catch_segfaults(cmd, working_dir, None, logger)
+            except ExecutableMissing, e:
+                self.logger.error("%s not found" % (e.args[0]))
+                return 1
+            except CalledProcessError, e:
+                self.logger.error(str(e))
+                return 1
+            except Exception, e:
+                self.logger.exception(e)
+                return 1
+            finally:
+                # Try to clean up the working directory, but don't worry if
+                # it fails -- it might not have been created before the
+                # exception was thrown.
+                try:
+                    shutil.rmtree(working_dir)
+                except:
+                    pass
+
+            return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(rficonsole(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/sourcedb.py b/CEP/Pipeline/recipes/sip/nodes/sourcedb.py
new file mode 100644
index 0000000000000000000000000000000000000000..220fdb5f594016e55f4afe23a97cb79d3617ea6e
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/sourcedb.py
@@ -0,0 +1,52 @@
+from __future__ import with_statement
+from subprocess import Popen, CalledProcessError, PIPE, STDOUT
+import shutil
+import os.path
+import tempfile
+import sys
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+from lofarpipe.support.utilities import catch_segfaults
+
+
+class sourcedb(LOFARnodeTCP):
+    def run(self, executable, infile, catalogue):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            output = os.path.join(infile, "sky")
+
+            # Remove any old sky database
+            shutil.rmtree(output, ignore_errors=True)
+
+            working_dir = tempfile.mkdtemp()
+            try:
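+                # "format=<" asks the sourcedb builder (normally makesourcedb)
+                # to read the catalogue format from the input file itself.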
+                cmd = [executable, "format=<", "in=%s" % (catalogue), "out=%s" % (output)]
+                with CatchLog4CPlus(
+                    working_dir,
+                    self.logger.name + "." + os.path.basename(infile),
+                    os.path.basename(executable)
+                ) as logger:
+                    catch_segfaults(cmd, working_dir, None, logger)
+            except CalledProcessError, e:
+                # CalledProcessError isn't properly propagated by IPython.
+                # Temporary workaround...
+                self.logger.error(str(e))
+                return 1
+            finally:
+                shutil.rmtree(working_dir)
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(sourcedb(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/vdsmaker.py b/CEP/Pipeline/recipes/sip/nodes/vdsmaker.py
new file mode 100644
index 0000000000000000000000000000000000000000..2152b1c414fa4bb5ff30a1013849a1d82ba1462c
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/vdsmaker.py
@@ -0,0 +1,50 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                                  vdsmaker node
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import Popen, CalledProcessError, PIPE, STDOUT
+import os
+import sys
+
+from lofarpipe.support.lofarexceptions import ExecutableMissing
+from lofarpipe.support.utilities import create_directory, log_time
+from lofarpipe.support.utilities import catch_segfaults
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+
+class vdsmaker(LOFARnodeTCP):
+    """
+    Make a VDS file for the input MS in a specified location.
+    """
+    def run(self, infile, clusterdesc, outfile, executable):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            try:
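+                # The executable (typically makevds) is invoked as:
+                #   <executable> <clusterdesc> <input MS> <output VDS>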
+                if not os.access(executable, os.X_OK):
+                    raise ExecutableMissing(executable)
+                cmd = [executable, clusterdesc, infile, outfile]
+                result = catch_segfaults(cmd, None, None, self.logger).returncode
+                self.outputs["result"] = result
+            except ExecutableMissing, e:
+                self.logger.error("%s not found" % (e.args[0]))
+                return 1
+            except CalledProcessError, e:
+                # CalledProcessError isn't properly propagated by IPython.
+                # Temporary workaround...
+                self.logger.error(str(e))
+                return 1
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(vdsmaker(jobid, jobhost, jobport).run_with_stored_arguments())