From 4d303b0aaaf4e18d2e543d27adb283a0e510cd0a Mon Sep 17 00:00:00 2001
From: Marcel Loose <loose@astron.nl>
Date: Fri, 18 Feb 2011 16:53:25 +0000
Subject: [PATCH] Bug 1626: Import of Pipeline Framework version 1.0 from USG
 repository.

---
 .gitattributes                                |  48 ++
 CEP/Pipeline/deploy/fabfile.py                |  62 ++
 CEP/Pipeline/deploy/ipcontroller.sh           |  31 +
 CEP/Pipeline/deploy/ipengine.sh               |  69 ++
 CEP/Pipeline/deploy/start_cluster.py          |  36 +
 CEP/Pipeline/deploy/stop_cluster.py           |  33 +
 .../examples/definition/dummy/pipeline.cfg    |  13 +
 .../examples/definition/dummy/pipeline.py     |  12 +
 .../docs/examples/definition/dummy/tasks.cfg  |   3 +
 .../definition/dummy_parallel/pipeline.cfg    |  25 +
 .../definition/dummy_parallel/pipeline.py     |  12 +
 .../definition/dummy_parallel/tasks.cfg       |   3 +
 .../definition/sip2/parsets/mwimager.parset   |  24 +
 .../sip2/parsets/ndppp.1.initial.parset       |  31 +
 .../sip2/parsets/ndppp.1.postbbs.parset       |  26 +
 .../sip2/parsets/uv-plane-cal-beam.parset     |  37 +
 .../examples/definition/sip2/pipeline.cfg     |  28 +
 .../docs/examples/definition/sip2/sip.py      | 177 +++++
 .../docs/examples/definition/sip2/tasks.cfg   |  57 ++
 .../examples/definition/sip2/to_process.py    | 247 ++++++
 .../docs/examples/model_parsets/README        |   4 +
 .../docs/examples/model_parsets/bbs.parset    |  49 ++
 .../docs/examples/model_parsets/bbs.skymodel  |   2 +
 .../docs/examples/model_parsets/casapy.parset |  29 +
 .../docs/examples/model_parsets/dppp1.parset  |  18 +
 .../docs/examples/model_parsets/dppp2.parset  |  18 +
 .../docs/examples/model_parsets/dppp3.parset  |  18 +
 .../docs/examples/model_parsets/dppp4.parset  |  18 +
 .../examples/model_parsets/mwimager.parset    |  25 +
 .../model_parsets/mwimager.pulsar.parset      |  25 +
 .../docs/examples/model_parsets/ndppp.parset  |  27 +
 CEP/Pipeline/docs/notes/2010-11-15-grid.rst   | 140 ++++
 .../notes/2010-12-08-handover_discussion.rst  | 100 +++
 CEP/Pipeline/docs/pulsar_demo/1-basic_fn.py   |  26 +
 CEP/Pipeline/docs/pulsar_demo/2-lofarnode.py  |  34 +
 CEP/Pipeline/docs/pulsar_demo/3-logging.py    |  47 ++
 CEP/Pipeline/docs/pulsar_demo/4-helpers.py    |  54 ++
 CEP/Pipeline/docs/pulsar_demo/5-recipe.py     |  12 +
 .../docs/pulsar_demo/6-remotecommand.py       |  52 ++
 .../docs/pulsar_demo/7-ingredients.py         |  68 ++
 .../docs/pulsar_demo/8-ingredients2.py        |  78 ++
 CEP/Pipeline/docs/pulsar_demo/9-config.py     |  68 ++
 CEP/Pipeline/docs/pulsar_demo/intro.rst       | 113 +++
 CEP/Pipeline/docs/sphinx/Makefile             |  75 ++
 .../docs/sphinx/source/.static/lofar.ico      | Bin 0 -> 4286 bytes
 .../docs/sphinx/source/author/index.rst       | 731 ++++++++++++++++++
 CEP/Pipeline/docs/sphinx/source/conf.py       | 243 ++++++
 .../docs/sphinx/source/developer/index.rst    |  88 +++
 .../sphinx/source/developer/lofarpipe.rst     |  19 +
 .../source/developer/lofarpipe/cuisine.rst    |  62 ++
 .../source/developer/lofarpipe/support.rst    |  19 +
 .../lofarpipe/support/distribution.rst        |  70 ++
 .../lofarpipe/support/ingredients.rst         |  53 ++
 .../developer/lofarpipe/support/logging.rst   | 112 +++
 .../developer/lofarpipe/support/recipes.rst   |  49 ++
 .../developer/lofarpipe/support/utility.rst   |  98 +++
 .../source/developer/lofarpipe/tests.rst      |  17 +
 .../developer/lofarpipe/tests/ingredients.rst |   6 +
 .../docs/sphinx/source/developer/todo.rst     |  61 ++
 CEP/Pipeline/docs/sphinx/source/index.rst     |  92 +++
 CEP/Pipeline/docs/sphinx/source/logo.jpg      | Bin 0 -> 12323 bytes
 .../source/overview/dependencies/index.rst    |  72 ++
 .../sphinx/source/overview/overview/index.rst |  91 +++
 .../source/overview/pipeline-flowchart.odg    | Bin 0 -> 15891 bytes
 .../source/overview/pipeline-flowchart.png    | Bin 0 -> 85797 bytes
 .../sphinx/source/pipelines/sip/index.rst     |  24 +
 .../source/pipelines/sip/quickstart/index.rst | 210 +++++
 .../source/pipelines/sip/recipes/bbs.rst      |   8 +
 .../source/pipelines/sip/recipes/cimager.rst  |  11 +
 .../pipelines/sip/recipes/datamapper.rst      |   8 +
 .../source/pipelines/sip/recipes/dppp.rst     |   8 +
 .../source/pipelines/sip/recipes/index.rst    |  23 +
 .../source/pipelines/sip/recipes/parmdb.rst   |   8 +
 .../pipelines/sip/recipes/rficonsole.rst      |   8 +
 .../source/pipelines/sip/recipes/sip.rst      |  42 +
 .../source/pipelines/sip/recipes/sourcedb.rst |   8 +
 .../pipelines/sip/recipes/storagemapper.rst   |   8 +
 .../source/pipelines/sip/recipes/vdsmaker.rst |   6 +
 .../pipelines/sip/recipes/vdsreader.rst       |   8 +
 CEP/Pipeline/docs/sphinx/source/todo.rst      |   7 +
 .../sphinx/source/user/installation/index.rst |  15 +
 .../source/user/installation/installation.rst |  69 ++
 .../source/user/installation/quickstart.rst   | 155 ++++
 .../source/user/usage/configuration.rst       | 115 +++
 .../docs/sphinx/source/user/usage/index.rst   |  17 +
 .../docs/sphinx/source/user/usage/jobs.rst    |   7 +
 .../docs/sphinx/source/user/usage/layout.rst  |  51 ++
 .../docs/sphinx/source/user/usage/running.rst | 108 +++
 .../docs/sphinx/source/user/usage/tasks.rst   |  43 ++
 CEP/Pipeline/framework/lofarpipe/__init__.py  |   0
 .../framework/lofarpipe/cuisine/COPYING       | 340 ++++++++
 .../framework/lofarpipe/cuisine/README        |   8 +
 .../framework/lofarpipe/cuisine/WSRTrecipe.py | 366 +++++++++
 .../framework/lofarpipe/cuisine/__init__.py   |   0
 .../framework/lofarpipe/cuisine/cook.py       | 264 +++++++
 .../framework/lofarpipe/cuisine/files.py      |  19 +
 .../framework/lofarpipe/cuisine/ingredient.py |   7 +
 .../framework/lofarpipe/cuisine/job_parser.py |  68 ++
 .../framework/lofarpipe/cuisine/message.py    |  78 ++
 .../framework/lofarpipe/cuisine/parset.py     |  87 +++
 .../framework/lofarpipe/cuisine/pipeline.py   | 324 ++++++++
 .../cuisine/pipeline_manager_config.py        |   2 +
 .../framework/lofarpipe/support/__init__.py   |   0
 .../framework/lofarpipe/support/baserecipe.py | 231 ++++++
 .../lofarpipe/support/clusterdesc.py          |  78 ++
 .../lofarpipe/support/clusterhandler.py       | 127 +++
 .../lofarpipe/support/clusterlogger.py        |  38 +
 .../framework/lofarpipe/support/control.py    |  45 ++
 .../framework/lofarpipe/support/group_data.py |  99 +++
 .../framework/lofarpipe/support/ipython.py    |  56 ++
 .../framework/lofarpipe/support/jobserver.py  | 168 ++++
 .../lofarpipe/support/lofarexceptions.py      |  28 +
 .../lofarpipe/support/lofaringredient.py      | 353 +++++++++
 .../framework/lofarpipe/support/lofarnode.py  | 119 +++
 .../lofarpipe/support/lofarrecipe.py          |  13 +
 .../framework/lofarpipe/support/mac.py        | 202 +++++
 .../framework/lofarpipe/support/parset.py     | 114 +++
 .../lofarpipe/support/pipelinelogging.py      | 269 +++++++
 .../lofarpipe/support/remotecommand.py        | 307 ++++++++
 .../framework/lofarpipe/support/stateful.py   |  96 +++
 .../framework/lofarpipe/support/utilities.py  | 231 ++++++
 .../framework/lofarpipe/tests/__init__.py     |   0
 .../lofarpipe/tests/lofaringredient.py        | 216 ++++++
 CEP/Pipeline/framework/setup.py               |  16 +
 CEP/Pipeline/mac/Makefile                     |  45 ++
 CEP/Pipeline/mac/README                       |   1 +
 CEP/Pipeline/mac/ep/__init__.py               |   0
 CEP/Pipeline/mac/ep/control/__init__.py       |  69 ++
 CEP/Pipeline/mac/ep/echo/__init__.py          |  17 +
 CEP/Pipeline/mac/ep/interface/__init__.py     |  22 +
 CEP/Pipeline/mac/include/controlwrappers.h    | 313 ++++++++
 CEP/Pipeline/mac/include/echowrappers.h       |  46 ++
 CEP/Pipeline/mac/include/ep_interface.h       |  42 +
 CEP/Pipeline/mac/include/eventwrappers.h      |  32 +
 CEP/Pipeline/mac/include/lofar_config.h       | 167 ++++
 .../mac/mac_wrapper/pipeline_wrapper.py       |  63 ++
 CEP/Pipeline/mac/src/ep_control.cc            | 131 ++++
 CEP/Pipeline/mac/src/ep_echo.cc               |  24 +
 CEP/Pipeline/mac/src/ep_interface.cc          |  17 +
 CEP/Pipeline/mac/src/eventwrappers.cc         |  53 ++
 CEP/Pipeline/mac/test_ep.py                   |  97 +++
 .../recipes/examples/master/example.py        |  60 ++
 .../examples/master/example_parallel.py       |  25 +
 .../examples/nodes/example_parallel.py        |  20 +
 CEP/Pipeline/recipes/sip/master/bbs.py        | 415 ++++++++++
 CEP/Pipeline/recipes/sip/master/cimager.py    | 310 ++++++++
 .../recipes/sip/master/count_timesteps.py     |  62 ++
 CEP/Pipeline/recipes/sip/master/datamapper.py |  81 ++
 .../recipes/sip/master/deprecated/casapy.py   | 186 +++++
 .../sip/master/deprecated/collector.py        | 140 ++++
 .../recipes/sip/master/deprecated/colmaker.py |  67 ++
 .../recipes/sip/master/deprecated/copier.py   |  50 ++
 .../recipes/sip/master/deprecated/dppp.py     | 151 ++++
 .../master/deprecated/dummy_echo_parallel.py  |  63 ++
 .../recipes/sip/master/deprecated/excluder.py |  18 +
 .../recipes/sip/master/deprecated/flagger.py  |  68 ++
 .../recipes/sip/master/deprecated/mwimager.py | 214 +++++
 .../sip/master/deprecated/pyraprunner.py      |  98 +++
 .../recipes/sip/master/deprecated/qcheck.py   |  91 +++
 .../sip/master/deprecated/qcheck/README       |   4 +
 .../sip/master/deprecated/qcheck/__init__.py  |   0
 .../sip/master/deprecated/qcheck/qcheck.py    | 200 +++++
 .../sip/master/deprecated/sextractor.py       |  92 +++
 .../sip/master/deprecated/simple_se.py        | 122 +++
 .../recipes/sip/master/deprecated/trimmer.py  |  27 +
 .../recipes/sip/master/deprecated/vdsmaker.py | 175 +++++
 .../recipes/sip/master/flag_baseline.py       |  87 +++
 .../recipes/sip/master/make_flaggable.py      |  68 ++
 CEP/Pipeline/recipes/sip/master/new_dppp.py   | 172 +++++
 .../recipes/sip/master/new_vdsmaker.py        | 140 ++++
 CEP/Pipeline/recipes/sip/master/parmdb.py     | 115 +++
 CEP/Pipeline/recipes/sip/master/rficonsole.py | 127 +++
 CEP/Pipeline/recipes/sip/master/skymodel.py   | 198 +++++
 CEP/Pipeline/recipes/sip/master/sourcedb.py   |  80 ++
 .../recipes/sip/master/storagemapper.py       |  63 ++
 CEP/Pipeline/recipes/sip/master/vdsreader.py  |  69 ++
 CEP/Pipeline/recipes/sip/nodes/bbs.py         | 101 +++
 CEP/Pipeline/recipes/sip/nodes/cimager.py     | 138 ++++
 .../recipes/sip/nodes/count_timesteps.py      |  47 ++
 .../recipes/sip/nodes/deprecated/casapy.py    |  68 ++
 .../recipes/sip/nodes/deprecated/colmaker.py  |  17 +
 .../nodes/deprecated/dummy_echo_parallel.py   |  14 +
 .../recipes/sip/nodes/deprecated/excluder.py  |  27 +
 .../recipes/sip/nodes/deprecated/flagger.py   |  31 +
 .../recipes/sip/nodes/deprecated/qcheck.py    |  51 ++
 .../sip/nodes/deprecated/sextractor.py        |  59 ++
 .../recipes/sip/nodes/deprecated/trimmer.py   |  32 +
 CEP/Pipeline/recipes/sip/nodes/dppp.py        | 122 +++
 .../recipes/sip/nodes/flag_baseline.py        |  78 ++
 .../recipes/sip/nodes/make_flaggable.py       |  45 ++
 CEP/Pipeline/recipes/sip/nodes/parmdb.py      |  31 +
 CEP/Pipeline/recipes/sip/nodes/rficonsole.py  |  73 ++
 CEP/Pipeline/recipes/sip/nodes/sourcedb.py    |  52 ++
 CEP/Pipeline/recipes/sip/nodes/vdsmaker.py    |  50 ++
 194 files changed, 15326 insertions(+)
 create mode 100644 CEP/Pipeline/deploy/fabfile.py
 create mode 100755 CEP/Pipeline/deploy/ipcontroller.sh
 create mode 100755 CEP/Pipeline/deploy/ipengine.sh
 create mode 100755 CEP/Pipeline/deploy/start_cluster.py
 create mode 100755 CEP/Pipeline/deploy/stop_cluster.py
 create mode 100644 CEP/Pipeline/docs/examples/definition/dummy/pipeline.cfg
 create mode 100644 CEP/Pipeline/docs/examples/definition/dummy/pipeline.py
 create mode 100644 CEP/Pipeline/docs/examples/definition/dummy/tasks.cfg
 create mode 100644 CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.cfg
 create mode 100644 CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.py
 create mode 100644 CEP/Pipeline/docs/examples/definition/dummy_parallel/tasks.cfg
 create mode 100644 CEP/Pipeline/docs/examples/definition/sip2/parsets/mwimager.parset
 create mode 100644 CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.initial.parset
 create mode 100644 CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.postbbs.parset
 create mode 100644 CEP/Pipeline/docs/examples/definition/sip2/parsets/uv-plane-cal-beam.parset
 create mode 100644 CEP/Pipeline/docs/examples/definition/sip2/pipeline.cfg
 create mode 100644 CEP/Pipeline/docs/examples/definition/sip2/sip.py
 create mode 100644 CEP/Pipeline/docs/examples/definition/sip2/tasks.cfg
 create mode 100644 CEP/Pipeline/docs/examples/definition/sip2/to_process.py
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/README
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/bbs.parset
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/bbs.skymodel
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/casapy.parset
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/dppp1.parset
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/dppp2.parset
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/dppp3.parset
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/dppp4.parset
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/mwimager.parset
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/mwimager.pulsar.parset
 create mode 100644 CEP/Pipeline/docs/examples/model_parsets/ndppp.parset
 create mode 100644 CEP/Pipeline/docs/notes/2010-11-15-grid.rst
 create mode 100644 CEP/Pipeline/docs/notes/2010-12-08-handover_discussion.rst
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/1-basic_fn.py
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/2-lofarnode.py
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/3-logging.py
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/4-helpers.py
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/5-recipe.py
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/6-remotecommand.py
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/7-ingredients.py
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/8-ingredients2.py
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/9-config.py
 create mode 100644 CEP/Pipeline/docs/pulsar_demo/intro.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/Makefile
 create mode 100644 CEP/Pipeline/docs/sphinx/source/.static/lofar.ico
 create mode 100644 CEP/Pipeline/docs/sphinx/source/author/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/conf.py
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/cuisine.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/distribution.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/ingredients.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/logging.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/recipes.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/utility.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests/ingredients.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/developer/todo.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/logo.jpg
 create mode 100644 CEP/Pipeline/docs/sphinx/source/overview/dependencies/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/overview/overview/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.odg
 create mode 100644 CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.png
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/quickstart/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/bbs.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/cimager.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/datamapper.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/dppp.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/parmdb.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/rficonsole.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sip.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sourcedb.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/storagemapper.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsmaker.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsreader.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/todo.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/user/installation/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/user/installation/installation.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/user/installation/quickstart.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/user/usage/configuration.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/user/usage/index.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/user/usage/jobs.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/user/usage/layout.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/user/usage/running.rst
 create mode 100644 CEP/Pipeline/docs/sphinx/source/user/usage/tasks.rst
 create mode 100644 CEP/Pipeline/framework/lofarpipe/__init__.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/COPYING
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/README
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/WSRTrecipe.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/__init__.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/cook.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/files.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/ingredient.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/job_parser.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/message.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/parset.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/pipeline.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/cuisine/pipeline_manager_config.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/__init__.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/baserecipe.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/clusterdesc.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/clusterhandler.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/clusterlogger.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/control.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/group_data.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/ipython.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/jobserver.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/lofarexceptions.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/lofaringredient.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/lofarnode.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/lofarrecipe.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/mac.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/parset.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/pipelinelogging.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/remotecommand.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/stateful.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/support/utilities.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/tests/__init__.py
 create mode 100644 CEP/Pipeline/framework/lofarpipe/tests/lofaringredient.py
 create mode 100644 CEP/Pipeline/framework/setup.py
 create mode 100644 CEP/Pipeline/mac/Makefile
 create mode 100644 CEP/Pipeline/mac/README
 create mode 100644 CEP/Pipeline/mac/ep/__init__.py
 create mode 100644 CEP/Pipeline/mac/ep/control/__init__.py
 create mode 100644 CEP/Pipeline/mac/ep/echo/__init__.py
 create mode 100644 CEP/Pipeline/mac/ep/interface/__init__.py
 create mode 100644 CEP/Pipeline/mac/include/controlwrappers.h
 create mode 100644 CEP/Pipeline/mac/include/echowrappers.h
 create mode 100644 CEP/Pipeline/mac/include/ep_interface.h
 create mode 100644 CEP/Pipeline/mac/include/eventwrappers.h
 create mode 100644 CEP/Pipeline/mac/include/lofar_config.h
 create mode 100644 CEP/Pipeline/mac/mac_wrapper/pipeline_wrapper.py
 create mode 100644 CEP/Pipeline/mac/src/ep_control.cc
 create mode 100644 CEP/Pipeline/mac/src/ep_echo.cc
 create mode 100644 CEP/Pipeline/mac/src/ep_interface.cc
 create mode 100644 CEP/Pipeline/mac/src/eventwrappers.cc
 create mode 100644 CEP/Pipeline/mac/test_ep.py
 create mode 100644 CEP/Pipeline/recipes/examples/master/example.py
 create mode 100644 CEP/Pipeline/recipes/examples/master/example_parallel.py
 create mode 100644 CEP/Pipeline/recipes/examples/nodes/example_parallel.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/bbs.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/cimager.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/count_timesteps.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/datamapper.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/casapy.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/collector.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/colmaker.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/copier.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/dppp.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/dummy_echo_parallel.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/excluder.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/flagger.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/mwimager.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/pyraprunner.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/qcheck.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/qcheck/README
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/qcheck/__init__.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/qcheck/qcheck.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/sextractor.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/simple_se.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/trimmer.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/deprecated/vdsmaker.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/flag_baseline.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/make_flaggable.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/new_dppp.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/new_vdsmaker.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/parmdb.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/rficonsole.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/skymodel.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/sourcedb.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/storagemapper.py
 create mode 100644 CEP/Pipeline/recipes/sip/master/vdsreader.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/bbs.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/cimager.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/count_timesteps.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/deprecated/casapy.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/deprecated/colmaker.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/deprecated/dummy_echo_parallel.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/deprecated/excluder.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/deprecated/flagger.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/deprecated/qcheck.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/deprecated/sextractor.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/deprecated/trimmer.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/dppp.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/flag_baseline.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/make_flaggable.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/parmdb.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/rficonsole.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/sourcedb.py
 create mode 100644 CEP/Pipeline/recipes/sip/nodes/vdsmaker.py

diff --git a/.gitattributes b/.gitattributes
index d4f4e455aaf..8634b2ef641 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -572,6 +572,54 @@ CEP/ParmDB/test/tsetupsourcedb.in_gds -text
 CEP/ParmDB/test/tsetupsourcedb.in_ms0.vds -text
 CEP/ParmDB/test/tsetupsourcedb.in_ms1.vds -text
 CEP/ParmDB/test/tsetupsourcedb.in_ms2.vds -text
+CEP/Pipeline/docs/examples/model_parsets/bbs.skymodel -text
+CEP/Pipeline/docs/notes/2010-11-15-grid.rst -text
+CEP/Pipeline/docs/notes/2010-12-08-handover_discussion.rst -text
+CEP/Pipeline/docs/pulsar_demo/intro.rst -text
+CEP/Pipeline/docs/sphinx/source/.static/lofar.ico -text
+CEP/Pipeline/docs/sphinx/source/author/index.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/index.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/cuisine.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/distribution.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/ingredients.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/logging.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/recipes.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/utility.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests/ingredients.rst -text
+CEP/Pipeline/docs/sphinx/source/developer/todo.rst -text
+CEP/Pipeline/docs/sphinx/source/index.rst -text
+CEP/Pipeline/docs/sphinx/source/logo.jpg -text svneol=unset#image/jpeg
+CEP/Pipeline/docs/sphinx/source/overview/dependencies/index.rst -text
+CEP/Pipeline/docs/sphinx/source/overview/overview/index.rst -text
+CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.odg -text
+CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.png -text svneol=unset#image/png
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/index.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/quickstart/index.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/bbs.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/cimager.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/datamapper.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/dppp.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/index.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/parmdb.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/rficonsole.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sip.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sourcedb.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/storagemapper.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsmaker.rst -text
+CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsreader.rst -text
+CEP/Pipeline/docs/sphinx/source/todo.rst -text
+CEP/Pipeline/docs/sphinx/source/user/installation/index.rst -text
+CEP/Pipeline/docs/sphinx/source/user/installation/installation.rst -text
+CEP/Pipeline/docs/sphinx/source/user/installation/quickstart.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/configuration.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/index.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/jobs.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/layout.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/running.rst -text
+CEP/Pipeline/docs/sphinx/source/user/usage/tasks.rst -text
 CMake/FindAskapSoft.cmake -text
 CMake/FindJNI.cmake -text
 CMake/FindValgrind.cmake -text
diff --git a/CEP/Pipeline/deploy/fabfile.py b/CEP/Pipeline/deploy/fabfile.py
new file mode 100644
index 00000000000..a9292ca727d
--- /dev/null
+++ b/CEP/Pipeline/deploy/fabfile.py
@@ -0,0 +1,62 @@
+from fabric.api import env, hosts, run, put, get, require
+from lofarpipe.support.clusterdesc import ClusterDesc
+from lofarpipe.support.clusterdesc import get_compute_nodes, get_head_node
+import os.path
+
+from ConfigParser import SafeConfigParser as ConfigParser
+
+# Support function
+def _get_config(filename):
+    if not os.path.isfile(filename):
+        raise IOError, "Configuration file not found"
+    config = ConfigParser()
+    config.read(filename)
+    return config
+
+# These functions actually do the work
+def head_node(configfile='~/.pipeline.cfg'):
+    clusterdesc = ClusterDesc(
+        _get_config(
+            os.path.expanduser(configfile)
+        ).get('cluster', 'clusterdesc')
+    )
+    env.hosts = get_head_node(clusterdesc)
+
+def compute_nodes(configfile='~/.pipeline.cfg'):
+    clusterdesc = ClusterDesc(
+        _get_config(
+            os.path.expanduser(configfile)
+        ).get('cluster', 'clusterdesc')
+    )
+    env.hosts = get_compute_nodes(clusterdesc)
+
+def start_controller(configfile='~/.pipeline.cfg'):
+    configfile = os.path.expanduser(configfile)
+    require('hosts', provided_by=[head_node])
+    controlpath = _get_config(configfile).get('DEFAULT', 'runtime_directory')
+    controller_ppath = _get_config(configfile).get('deploy', 'controller_ppath')
+    script_path = _get_config(configfile).get('deploy', 'script_path')
+    run("bash %s/ipcontroller.sh %s start %s" % (script_path, controlpath, controller_ppath), shell=False)
+
+def stop_controller(configfile='~/.pipeline.cfg'):
+    configfile = os.path.expanduser(configfile)
+    require('hosts', provided_by=[head_node])
+    controlpath = _get_config(configfile).get('DEFAULT', 'runtime_directory')
+    script_path = _get_config(configfile).get('deploy', 'script_path')
+    run("bash %s/ipcontroller.sh %s stop" % (script_path, controlpath), shell=False)
+
+def start_engine(configfile='~/.pipeline.cfg'):
+    configfile = os.path.expanduser(configfile)
+    require('hosts', provided_by=[compute_nodes])
+    controlpath = _get_config(configfile).get('DEFAULT', 'runtime_directory')
+    engine_ppath = _get_config(configfile).get('deploy', 'engine_ppath')
+    engine_lpath = _get_config(configfile).get('deploy', 'engine_lpath')
+    script_path = _get_config(configfile).get('deploy', 'script_path')
+    run("bash %s/ipengine.sh %s start %s %s" % (script_path, controlpath, engine_ppath, engine_lpath), shell=False)
+
+def stop_engine(configfile='~/.pipeline.cfg'):
+    configfile = os.path.expanduser(configfile)
+    require('hosts', provided_by=[compute_nodes])
+    controlpath = _get_config(configfile).get('DEFAULT', 'runtime_directory')
+    script_path = _get_config(configfile).get('deploy', 'script_path')
+    run("bash %s/ipengine.sh %s stop" % (script_path, controlpath), shell=False)
diff --git a/CEP/Pipeline/deploy/ipcontroller.sh b/CEP/Pipeline/deploy/ipcontroller.sh
new file mode 100755
index 00000000000..2ef38f548e1
--- /dev/null
+++ b/CEP/Pipeline/deploy/ipcontroller.sh
@@ -0,0 +1,31 @@
+CONTROLPATH=$1
+export PYTHONPATH=$3
+
+mkdir -p $CONTROLPATH
+
+PIDFILE=$CONTROLPATH/ipc.pid
+
+case "$2" in
+  start) 
+         if [ ! -f $PIDFILE ]; then
+             /sbin/start-stop-daemon --start -b -m --pidfile $PIDFILE \
+               --exec /opt/pipeline/dependencies/bin/ipcontroller -- -xy \
+               --engine-furl-file=$CONTROLPATH/engine.furl \
+               --task-furl-file=$CONTROLPATH/task.furl \
+               --multiengine-furl-file=$CONTROLPATH/multiengine.furl \
+               --logfile=$CONTROLPATH/ipc.log
+         else
+            echo "ipcontroller already running on `hostname`"
+         fi
+         ;;
+  stop)
+         /sbin/start-stop-daemon --stop --pidfile $PIDFILE
+         rm $PIDFILE
+         ;;
+  *)
+         echo "Usage: $0 <controlpath> {start|stop}" >&2
+         exit 1
+         ;;
+esac
+
+exit 0
diff --git a/CEP/Pipeline/deploy/ipengine.sh b/CEP/Pipeline/deploy/ipengine.sh
new file mode 100755
index 00000000000..21889095d49
--- /dev/null
+++ b/CEP/Pipeline/deploy/ipengine.sh
@@ -0,0 +1,69 @@
+CONTROLPATH=$1
+export PYTHONPATH=$3
+export LD_LIBRARY_PATH=$4
+FURL=$5
+NPROC=$6
+PIDPATH=$CONTROLPATH/engines/`hostname`
+
+mkdir -p $PIDPATH
+
+case "$2" in
+  start) 
+         if [ $FURL ]; then
+             FURLFILE=`mktemp`
+             TMPFURL=1
+             echo $FURL > $FURLFILE
+         else
+             FURLFILE=$CONTROLPATH/engine.furl
+         fi
+         if [ $NPROC ] && [ $NPROC -eq $NPROC ]; then
+            NPROC=$NPROC
+         else
+            NPROC=`cat /proc/cpuinfo | grep ^processor | wc -l`
+         fi
+         for PROC in `seq 1 $NPROC`
+            do
+                 PIDFILE=$PIDPATH/ipengine$PROC.pid
+                 if [ ! -f $PIDFILE ]; then
+                     /sbin/start-stop-daemon --start -b -m               \
+                       --pidfile $PIDFILE                                \
+                       --exec /opt/pipeline/dependencies/bin/ipengine -- \
+                       --furl-file=$FURLFILE --logfile=$PIDPATH/log
+                     start_time=`date +%s`
+                     while :
+                        do
+                            sleep 0.1
+                            PID=`cat $PIDFILE 2> /dev/null`
+                            grep "engine registration succeeded" $PIDPATH/log$PID.log > /dev/null 2>&1
+                            if [ $? -eq 0 ]; then
+                                break
+                            fi
+                            # Time out after 30 seconds
+                            end_time=`date +%s`
+                            if [ $(($end_time-$start_time)) -ge 30 ]; then
+                                break
+                            fi
+                        done
+#                     ps -p `cat $PIDFILE` > /dev/null || echo "ipengine failed to start on `hostname`"
+                else
+                    echo "ipengine already running on `hostname`"
+                fi
+            done
+         if [ $TMPFURL ]; then
+             rm $FURLFILE
+         fi
+         ;;
+  stop)
+         for PIDFILE in $PIDPATH/ipengine*.pid
+             do
+                 /sbin/start-stop-daemon --stop --pidfile $PIDFILE --oknodo
+                 rm $PIDFILE
+             done
+         ;;
+  *)
+         echo "Usage: $0 <controlpath> {start|stop}" >&2
+         exit 1
+         ;;
+esac
+
+exit 0
diff --git a/CEP/Pipeline/deploy/start_cluster.py b/CEP/Pipeline/deploy/start_cluster.py
new file mode 100755
index 00000000000..159abaf7833
--- /dev/null
+++ b/CEP/Pipeline/deploy/start_cluster.py
@@ -0,0 +1,36 @@
+#!/usr/bin/python
+
+"""
+Start IPython cluster.
+"""
+
+import sys, logging, os
+from optparse import OptionParser
+from ConfigParser import SafeConfigParser as ConfigParser
+from lofarpipe.support.clusterhandler import ClusterHandler
+
+parser = OptionParser()
+parser.add_option(
+    "--config", help="Pipeline configuration file", default="~/.pipeline.cfg"
+)
+parser.add_option(
+    "--num-engines", help="Number of engines per node", default=8
+)
+options, args = parser.parse_args()
+
+my_logger = logging.getLogger()
+stream_handler = logging.StreamHandler(sys.stdout)
+formatter = logging.Formatter(
+    "%(asctime)s %(levelname)-7s: %(message)s",
+    "%Y-%m-%d %H:%M:%S"
+)
+stream_handler.setFormatter(formatter)
+my_logger.addHandler(stream_handler)
+my_logger.setLevel(logging.DEBUG)
+
+config = ConfigParser()
+config.read(os.path.expanduser(options.config))
+
+clusterhandler = ClusterHandler(config)
+clusterhandler.start_cluster(options.num_engines)
+
diff --git a/CEP/Pipeline/deploy/stop_cluster.py b/CEP/Pipeline/deploy/stop_cluster.py
new file mode 100755
index 00000000000..35156167e67
--- /dev/null
+++ b/CEP/Pipeline/deploy/stop_cluster.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+
+"""
+Stop IPython cluster.
+"""
+
+import sys, logging, os
+from optparse import OptionParser
+from ConfigParser import SafeConfigParser as ConfigParser
+from lofarpipe.support.clusterhandler import ClusterHandler
+
+parser = OptionParser()
+parser.add_option(
+    "--config", help="Pipeline configuration file", default="~/.pipeline.cfg"
+)
+options, args = parser.parse_args()
+
+my_logger = logging.getLogger()
+stream_handler = logging.StreamHandler(sys.stdout)
+formatter = logging.Formatter(
+    "%(asctime)s %(levelname)-7s: %(message)s",
+    "%Y-%m-%d %H:%M:%S"
+)
+stream_handler.setFormatter(formatter)
+my_logger.addHandler(stream_handler)
+my_logger.setLevel(logging.DEBUG)
+
+config = ConfigParser()
+config.read(os.path.expanduser(options.config))
+
+clusterhandler = ClusterHandler(config)
+clusterhandler.stop_cluster()
+
diff --git a/CEP/Pipeline/docs/examples/definition/dummy/pipeline.cfg b/CEP/Pipeline/docs/examples/definition/dummy/pipeline.cfg
new file mode 100644
index 00000000000..754dc685c7c
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy/pipeline.cfg
@@ -0,0 +1,13 @@
+[DEFAULT]
+runtime_directory = /home/swinbank/Work/pipeline_runtime
+recipe_directories = [/opt/pipeline/recipes/]
+lofarroot =  /opt/LofIm/daily/lofar
+default_working_directory = /data/scratch/swinbank
+task_files = [%(cwd)s/tasks.cfg]
+
+[layout]
+job_directory = %(runtime_directory)s/jobs/%(job_name)s
+log_directory = %(job_directory)s/logs/%(start_time)s
+vds_directory = %(job_directory)s/vds
+parset_directory = %(job_directory)s/parsets
+results_directory = %(job_directory)s/results/%(start_time)s
diff --git a/CEP/Pipeline/docs/examples/definition/dummy/pipeline.py b/CEP/Pipeline/docs/examples/definition/dummy/pipeline.py
new file mode 100644
index 00000000000..2b667bd2c60
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy/pipeline.py
@@ -0,0 +1,12 @@
+"""
+This is a very simple pipeline definition for demonstration purposes only.
+"""
+import sys
+from lofarpipe.support.control import control
+
+class demo(control):
+    def pipeline_logic(self):
+        self.run_task("dummy_echo", self.inputs['args'])
+
+if __name__ == '__main__':
+    sys.exit(demo().main())
diff --git a/CEP/Pipeline/docs/examples/definition/dummy/tasks.cfg b/CEP/Pipeline/docs/examples/definition/dummy/tasks.cfg
new file mode 100644
index 00000000000..32dbceeed8b
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy/tasks.cfg
@@ -0,0 +1,3 @@
+[dummy_echo]
+recipe = dummy_echo
+
diff --git a/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.cfg b/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.cfg
new file mode 100644
index 00000000000..a788b895737
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.cfg
@@ -0,0 +1,25 @@
+[DEFAULT]
+runtime_directory = /home/swinbank/Work/pipeline_runtime
+recipe_directories = [/opt/pipeline/recipes/]
+lofarroot =  /opt/LofIm/daily/lofar
+default_working_directory = /data/scratch/swinbank
+task_files = [%(cwd)s/tasks.cfg]
+
+[layout]
+job_directory = %(runtime_directory)s/jobs/%(job_name)s
+log_directory = %(job_directory)s/logs/%(start_time)s
+vds_directory = %(job_directory)s/vds
+parset_directory = %(job_directory)s/parsets
+results_directory = %(job_directory)s/results/%(start_time)s
+
+[cluster]
+clustername = imaging
+clusterdesc = %(runtime_directory)s/sub3.clusterdesc
+task_furl = %(runtime_directory)s/task.furl
+multiengine_furl = %(runtime_directory)s/multiengine.furl
+
+[deploy]
+script_path = /opt/pipeline/framework/bin
+controller_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages:/opt/pipeline/framework/lib/python2.5/site-packages
+engine_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages/:/opt/pipeline/framework/lib/python2.5/site-packages:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/lofar/lib/python2.5/site-packages:/opt/pythonlibs/lib/python/site-packages
+engine_lpath = /opt/pipeline/dependencies/lib:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/casacore/lib:/opt/LofIm/daily/lofar/lib:/opt/wcslib/lib/:/opt/hdf5/lib
diff --git a/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.py b/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.py
new file mode 100644
index 00000000000..2b667bd2c60
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy_parallel/pipeline.py
@@ -0,0 +1,12 @@
+"""
+This is a very simple pipeline definition for demonstration purposes only.
+"""
+import sys
+from lofarpipe.support.control import control
+
+class demo(control):
+    def pipeline_logic(self):
+        self.run_task("dummy_echo", self.inputs['args'])
+
+if __name__ == '__main__':
+    sys.exit(demo().main())
diff --git a/CEP/Pipeline/docs/examples/definition/dummy_parallel/tasks.cfg b/CEP/Pipeline/docs/examples/definition/dummy_parallel/tasks.cfg
new file mode 100644
index 00000000000..1364e50e152
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/dummy_parallel/tasks.cfg
@@ -0,0 +1,3 @@
+[dummy_echo]
+recipe = dummy_echo_parallel
+
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/parsets/mwimager.parset b/CEP/Pipeline/docs/examples/definition/sip2/parsets/mwimager.parset
new file mode 100644
index 00000000000..c1ced0e8374
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/parsets/mwimager.parset
@@ -0,0 +1,24 @@
+Images.stokes = [I]
+Images.shape = [1024, 1024]
+Images.cellSize = [60, 60]
+Images.directionType = J2000
+Solver.type = Dirty
+Solver.algorithm = MultiScale
+Solver.niter = 1
+Solver.gain = 1.0
+Solver.verbose = True
+Solver.scales = [0, 3]
+Gridder.type = WProject
+Gridder.wmax = 1000
+Gridder.nwplanes = 40
+Gridder.oversample = 1
+Gridder.maxsupport = 4096
+Gridder.limitsupport = 0
+Gridder.cutoff = 0.001
+Gridder.nfacets = 1
+datacolumn = CORRECTED_DATA
+minUV = 1.0
+ncycles = 0
+restore = True
+restore_beam = [1, 1, 0]
+
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.initial.parset b/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.initial.parset
new file mode 100644
index 00000000000..3de6ef6b6ac
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.initial.parset
@@ -0,0 +1,31 @@
+uselogger = True
+msin.startchan = 8
+msin.nchan = 240
+msin.datacolumn = DATA     # is the default
+msout.datacolumn = DATA    # is the default
+
+steps = [preflag,flag1,avg1,flag2,avg2,count]   # if defined as [] the MS will be copied and NaN/infinite will be flagged
+
+
+preflag.type=preflagger    # This step will flag the autocorrelations. Note that they are not flagged by default by NDPPP
+preflag.corrtype=auto
+
+flag1.type=madflagger
+flag1.threshold=4
+flag1.freqwindow=31
+flag1.timewindow=5
+flag1.correlations=[0,3]   # only flag on XX and YY
+
+avg1.type = squash
+avg1.freqstep = 256        # it compresses the data by a factor of 256 in frequency
+avg1.timestep = 1          # is the default
+
+flag2.type=madflagger
+flag2.threshold=3
+flag2.timewindow=51
+
+avg2.type = squash
+avg2.timestep = 5          # it compresses the data by a factor of 5 in time
+
+count.type = counter
+count.showfullyflagged = True
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.postbbs.parset b/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.postbbs.parset
new file mode 100644
index 00000000000..c0176c4d6c0
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/parsets/ndppp.1.postbbs.parset
@@ -0,0 +1,26 @@
+msin.startchan = 0
+msin.nchan = 1
+msin.datacolumn = CORRECTED_DATA
+msout.datacolumn = CORRECTED_DATA
+
+steps = [clip1,flag3,flag4,flag5,flag6]   # if defined as [] the MS will be copied and NaN/infinite will be flagged
+
+clip1.type=preflagger
+clip1.amplmax=**inserted by pipeline**
+
+flag3.type=madflagger
+flag3.threshold=4
+flag3.timewindow=5
+flag3.correlations=[0,3]   # only flag on XX and YY
+
+flag4.type=madflagger
+flag4.threshold=3
+flag4.timewindow=25
+
+flag5.type=madflagger
+flag5.threshold=3
+flag5.timewindow=51
+
+flag6.type=madflagger
+flag6.threshold=3
+flag6.timewindow=71
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/parsets/uv-plane-cal-beam.parset b/CEP/Pipeline/docs/examples/definition/sip2/parsets/uv-plane-cal-beam.parset
new file mode 100644
index 00000000000..bcdc9bf4b43
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/parsets/uv-plane-cal-beam.parset
@@ -0,0 +1,37 @@
+Strategy.ChunkSize = 0
+Strategy.Steps = [solve, correct, subtract]
+
+Step.solve.Operation = SOLVE
+Step.solve.Model.Sources = []
+Step.solve.Model.Gain.Enable = T
+Step.solve.Model.Cache.Enable = T
+Step.solve.Solve.Parms = ["Gain:0:0:*","Gain:1:1:*"]
+Step.solve.Solve.CellSize.Freq = 0
+Step.solve.Solve.CellSize.Time = 1
+Step.solve.Solve.CellChunkSize = 10
+Step.solve.Solve.Options.MaxIter = 20
+Step.solve.Solve.Options.EpsValue = 1e-9
+Step.solve.Solve.Options.EpsDerivative = 1e-9
+Step.solve.Solve.Options.ColFactor = 1e-9
+Step.solve.Solve.Options.LMFactor = 1.0
+Step.solve.Solve.Options.BalancedEqs = F
+Step.solve.Solve.Options.UseSVD = T
+Step.solve.Model.Beam.Enable = T
+Step.solve.Model.Beam.StationConfig.Name = LBA_OUTER
+Step.solve.Model.Beam.StationConfig.Path = /home/zwieten/StationConfig/
+Step.solve.Model.Beam.Element.Type = HAMAKER_LBA
+
+Step.correct.Operation = CORRECT
+Step.correct.Model.Sources = [**central_source**]
+Step.correct.Model.Gain.Enable = T
+Step.correct.Output.Column = CORRECTED_DATA
+Step.correct.Model.Beam.Enable = T
+Step.correct.Model.Beam.StationConfig.Name = LBA_OUTER
+Step.correct.Model.Beam.StationConfig.Path = /home/zwieten/StationConfig/
+Step.correct.Model.Beam.Element.Type = HAMAKER_LBA
+
+Step.subtract.Operation = SUBTRACT
+Step.subtract.Output.Column = SUBTRACTED_DATA
+Step.subtract.Model.Sources = [**central_source**]
+Step.subtract.Model.Gain.Enable = F
+Step.subtract.Model.Beam.Enable = F
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/pipeline.cfg b/CEP/Pipeline/docs/examples/definition/sip2/pipeline.cfg
new file mode 100644
index 00000000000..2abb670c25b
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/pipeline.cfg
@@ -0,0 +1,28 @@
+[DEFAULT]
+runtime_directory = /home/swinbank/Work/pipeline_runtime_full
+recipe_directories = [/opt/pipeline/recipes]
+task_files = [%(cwd)s/tasks.cfg]
+
+[layout]
+job_directory = %(runtime_directory)s/jobs/%(job_name)s
+
+[cluster]
+clusterdesc = /home/swinbank/cdesc/full.clusterdesc
+task_furl = %(runtime_directory)s/task.furl
+multiengine_furl = %(runtime_directory)s/multiengine.furl
+
+[deploy]
+script_path = /opt/pipeline/framework/bin
+controller_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages:/opt/pipeline/framework/lib/python2.5/site-packages
+engine_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages/:/opt/pipeline/framework/lib/python2.5/site-packages:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/lofar/lib/python2.5/site-packages:/opt/pythonlibs/lib/python/site-packages
+engine_lpath = /opt/pipeline/dependencies/lib:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/casacore/lib:/opt/LofIm/daily/lofar/lib:/opt/wcslib/lib/:/opt/hdf5/lib
+
+[logging]
+log_file = %(runtime_directory)s/jobs/%(job_name)s/logs/%(start_time)s/pipeline.log
+format = %(asctime)s %(levelname)-7s %(name)s: %(message)s
+datefmt = %Y-%m-%d %H:%M:%S
+
+# Expert users only need to venture here...
+#[remote]
+#method = paramiko
+#key_filename = /home/swinbank/.ssh/id_rsa.pipeline.pub
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/sip.py b/CEP/Pipeline/docs/examples/definition/sip2/sip.py
new file mode 100644
index 00000000000..d76f36b0340
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/sip.py
@@ -0,0 +1,177 @@
+"""
+This is a model imaging pipeline definition.
+
+Although it should be runnable as it stands, the user is encouraged to copy it
+to a job directory and customise it as appropriate for the particular task at
+hand.
+"""
+from __future__ import with_statement
+from contextlib import closing
+from itertools import repeat
+import sys
+import os
+
+from pyrap.quanta import quantity
+
+from lofarpipe.support.control import control
+from lofarpipe.support.utilities import log_time
+from lofarpipe.support.parset import patched_parset
+from lofarpipe.support.lofaringredient import ListField
+
+class sip(control):
+    outputs = {
+        'images': ListField()
+    }
+
+    def pipeline_logic(self):
+        from to_process import datafiles # datafiles is a list of MS paths.
+        with log_time(self.logger):
+            # Build a map of compute node <-> data location on storage nodes.
+            storage_mapfile = self.run_task(
+                "datamapper", datafiles
+            )['mapfile']
+
+            # Produce a GVDS file describing the data on the storage nodes.
+            self.run_task('vdsmaker', storage_mapfile)
+
+            # Read metadata (start, end times, pointing direction) from GVDS.
+            vdsinfo = self.run_task("vdsreader")
+
+            # NDPPP reads the data from the storage nodes, according to the
+            # map. It returns a new map, describing the location of data on
+            # the compute nodes.
+            ndppp_results = self.run_task(
+                "ndppp",
+                storage_mapfile,
+                parset=os.path.join(
+                    self.config.get("layout", "parset_directory"),
+                    "ndppp.1.initial.parset"
+                ),
+                data_start_time=vdsinfo['start_time'],
+                data_end_time=vdsinfo['end_time']
+            )
+
+            # Remove baselines which have been fully-flagged in any individual
+            # subband.
+            compute_mapfile = self.run_task(
+                "flag_baseline",
+                ndppp_results['mapfile'],
+                baselines=ndppp_results['fullyflagged']
+            )['mapfile']
+
+            # Build a sky model ready for BBS & return the name & flux of the
+            # central source.
+            ra = quantity(vdsinfo['pointing']['ra']).get_value('deg')
+            dec = quantity(vdsinfo['pointing']['dec']).get_value('deg')
+            central = self.run_task(
+                "skymodel", ra=ra, dec=dec, search_size=2.5
+            )
+
+            # Patch the name of the central source into the BBS parset for
+            # subtraction.
+            with patched_parset(
+                self.task_definitions.get("bbs", "parset"),
+                {
+                    'Step.correct.Model.Sources': "[ \"%s\" ]" % (central["source_name"]),
+                    'Step.subtract.Model.Sources': "[ \"%s\" ]" % (central["source_name"])
+                }
+            ) as bbs_parset:
+                # BBS modifies data in place, so the map produced by NDPPP
+                # remains valid.
+                self.run_task("bbs", compute_mapfile, parset=bbs_parset)
+
+            # Now run NDPPP on the output of BBS, in two passes: once on
+            # CORRECTED_DATA and once on SUBTRACTED_DATA, with three
+            # iterations each. Clip anything at more than 5 times the flux
+            # of the central source.
+            with patched_parset(
+                os.path.join(
+                    self.config.get("layout", "parset_directory"),
+                    "ndppp.1.postbbs.parset"
+                ),
+                {
+                    "clip1.amplmax": str(5 * central["source_flux"])
+                },
+                output_dir=self.config.get("layout", "parset_directory")
+            ) as corrected_ndppp_parset:
+                for i in repeat(None, 3):
+                    self.run_task(
+                        "ndppp",
+                        compute_mapfile,
+                        parset=corrected_ndppp_parset,
+                        suffix=""
+                    )
+
+            with patched_parset(
+                os.path.join(
+                    self.config.get("layout", "parset_directory"),
+                    "ndppp.1.postbbs.parset"
+                ),
+                {
+                    "msin.datacolumn": "SUBTRACTED_DATA",
+                    "msout.datacolumn": "SUBTRACTED_DATA",
+                    "clip1.amplmax": str(5 * central["source_flux"])
+                },
+                output_dir=self.config.get("layout", "parset_directory")
+            ) as subtracted_ndppp_parset:
+                for i in repeat(None, 3):
+                    self.run_task(
+                        "ndppp",
+                        compute_mapfile,
+                        parset=subtracted_ndppp_parset,
+                        suffix=""
+                    )
+
+            # Image CORRECTED_DATA.
+            self.logger.info("Imaging CORRECTED_DATA")
+
+            # Patch the pointing direction recorded in the VDS file into
+            # the parset for the cimager.
+            with patched_parset(
+                self.task_definitions.get("cimager", "parset"),
+                {
+                    'Images.ra': quantity(vdsinfo['pointing']['ra']).formatted("time"),
+                    'Images.dec': quantity(vdsinfo['pointing']['dec']).formatted("angle")
+                },
+                output_dir=self.config.get("layout", "parset_directory")
+
+            ) as imager_parset:
+                # And run cimager.
+                self.outputs['images'] = self.run_task(
+                    "cimager", compute_mapfile,
+                    parset=imager_parset,
+                    results_dir=os.path.join(
+                        self.config.get("layout", "results_directory"),
+                        "corrected"
+                    )
+                )['images']
+
+            # Image SUBTRACTED_DATA.
+            self.logger.info("Imaging SUBTRACTED_DATA")
+
+            # Patch the pointing direction recorded in the VDS file into
+            # the parset for the cimager, and change the column to be
+            # imaged.
+            with patched_parset(
+                self.task_definitions.get("cimager", "parset"),
+                {
+                    'Images.ra': quantity(vdsinfo['pointing']['ra']).formatted("time"),
+                    'Images.dec': quantity(vdsinfo['pointing']['dec']).formatted("angle"),
+                    'datacolumn': "SUBTRACTED_DATA"
+
+                },
+                output_dir=self.config.get("layout", "parset_directory")
+
+            ) as subtracted_imager_parset:
+                # And run cimager.
+                self.outputs['images'] = self.run_task(
+                    "cimager", compute_mapfile,
+                    parset=subtracted_imager_parset,
+                    results_dir=os.path.join(
+                        self.config.get("layout", "results_directory"),
+                        "subtracted"
+                    )
+                )['images']
+
+if __name__ == '__main__':
+    sys.exit(sip().main())
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/tasks.cfg b/CEP/Pipeline/docs/examples/definition/sip2/tasks.cfg
new file mode 100644
index 00000000000..0e652aa54ae
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/tasks.cfg
@@ -0,0 +1,57 @@
+[datamapper]
+recipe = datamapper
+mapfile = %(runtime_directory)s/jobs/%(job_name)s/parsets/storage_mapfile
+
+[vdsmaker]
+recipe = new_vdsmaker
+directory = %(runtime_directory)s/jobs/%(job_name)s/vds
+gvds = %(runtime_directory)s/jobs/%(job_name)s/vds/inputs.gvds
+makevds = %(lofarroot)s/bin/makevds
+combinevds = %(lofarroot)s/bin/combinevds
+
+[vdsreader]
+recipe = vdsreader
+gvds = %(runtime_directory)s/jobs/%(job_name)s/vds/inputs.gvds
+
+[skymodel]
+recipe = skymodel
+min_flux = 0.5
+skymodel_file = %(runtime_directory)s/jobs/%(job_name)s/parsets/bbs.skymodel
+
+[ndppp]
+recipe = new_dppp
+executable = %(lofarroot)s/bin/NDPPP
+initscript = %(lofarroot)s/lofarinit.sh
+working_directory = /data/scratch/swinbank
+dry_run = False
+mapfile = %(runtime_directory)s/jobs/%(job_name)s/parsets/compute_mapfile
+
+[flag_baseline]
+recipe = flag_baseline
+
+[bbs]
+recipe = bbs
+initscript = %(lofarroot)s/lofarinit.sh
+control_exec = %(lofarroot)s/bin/GlobalControl
+kernel_exec = %(lofarroot)s/bin/KernelControl
+parset = %(runtime_directory)s/jobs/%(job_name)s/parsets/uv-plane-cal-beam.parset
+key = bbs_%(job_name)s
+db_host = ldb001
+db_name = swinbank
+db_user = postgres
+
+[sourcedb]
+recipe = sourcedb
+executable = %(lofarroot)s/bin/makesourcedb
+skymodel = %(runtime_directory)s/jobs/%(job_name)s/parsets/bbs.skymodel
+
+[parmdb]
+recipe = parmdb
+executable = %(lofarroot)s/bin/parmdbm
+
+[cimager]
+recipe = cimager
+imager_exec = /opt/LofIm/daily/askapsoft/bin/cimager.sh
+convert_exec = /opt/LofIm/daily/lofar/bin/convertimagerparset
+parset = %(runtime_directory)s/jobs/%(job_name)s/parsets/mwimager.parset
+results_dir = %(runtime_directory)s/jobs/%(job_name)s/results/%(start_time)s
diff --git a/CEP/Pipeline/docs/examples/definition/sip2/to_process.py b/CEP/Pipeline/docs/examples/definition/sip2/to_process.py
new file mode 100644
index 00000000000..3b9d758c6eb
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/definition/sip2/to_process.py
@@ -0,0 +1,247 @@
+# Quick way of priming the pipeline with a list of datafiles to be processed.
+# Generate this file by, eg:
+#
+# $ obsid=L2010_08322; for i in `seq 0 7`; do ssh lce`printf %03d $((i*9+1))` ls /net/sub$((i+1))/lse*/data*/$obsid/* | grep SB >> to_process.py; done
+#
+# then tweak with your favourite text editor.
+
+datafiles = [
+'/net/sub1/lse001/data3/L2010_08567/SB0.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB10.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB11.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB12.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB13.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB14.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB15.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB16.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB1.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB2.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB3.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB4.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB5.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB6.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB7.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB8.MS',
+'/net/sub1/lse001/data3/L2010_08567/SB9.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB17.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB18.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB19.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB20.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB21.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB22.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB23.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB24.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB25.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB26.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB27.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB28.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB29.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB30.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB31.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB32.MS',
+'/net/sub1/lse002/data3/L2010_08567/SB33.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB34.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB35.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB36.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB37.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB38.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB39.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB40.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB41.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB42.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB43.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB44.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB45.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB46.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB47.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB48.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB49.MS',
+'/net/sub1/lse003/data3/L2010_08567/SB50.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB51.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB52.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB53.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB54.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB55.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB56.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB57.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB58.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB59.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB60.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB61.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB62.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB63.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB64.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB65.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB66.MS',
+'/net/sub3/lse007/data3/L2010_08567/SB67.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB68.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB69.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB70.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB71.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB72.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB73.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB74.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB75.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB76.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB77.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB78.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB79.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB80.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB81.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB82.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB83.MS',
+'/net/sub3/lse008/data3/L2010_08567/SB84.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB100.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB101.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB85.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB86.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB87.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB88.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB89.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB90.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB91.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB92.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB93.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB94.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB95.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB96.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB97.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB98.MS',
+'/net/sub3/lse009/data3/L2010_08567/SB99.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB102.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB103.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB104.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB105.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB106.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB107.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB108.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB109.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB110.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB111.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB112.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB113.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB114.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB115.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB116.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB117.MS',
+'/net/sub4/lse010/data3/L2010_08567/SB118.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB119.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB120.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB121.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB122.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB123.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB124.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB125.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB126.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB127.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB128.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB129.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB130.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB131.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB132.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB133.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB134.MS',
+'/net/sub4/lse011/data3/L2010_08567/SB135.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB136.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB137.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB138.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB139.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB140.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB141.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB142.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB143.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB144.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB145.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB146.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB147.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB148.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB149.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB150.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB151.MS',
+'/net/sub4/lse012/data3/L2010_08567/SB152.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB153.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB154.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB155.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB156.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB157.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB158.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB159.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB160.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB161.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB162.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB163.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB164.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB165.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB166.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB167.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB168.MS',
+'/net/sub6/lse016/data3/L2010_08567/SB169.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB170.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB171.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB172.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB173.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB174.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB175.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB176.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB177.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB178.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB179.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB180.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB181.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB182.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB183.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB184.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB185.MS',
+'/net/sub6/lse017/data3/L2010_08567/SB186.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB187.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB188.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB189.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB190.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB191.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB192.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB193.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB194.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB195.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB196.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB197.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB198.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB199.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB200.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB201.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB202.MS',
+'/net/sub6/lse018/data3/L2010_08567/SB203.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB204.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB205.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB206.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB207.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB208.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB209.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB210.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB211.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB212.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB213.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB214.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB215.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB216.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB217.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB218.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB219.MS',
+'/net/sub8/lse022/data3/L2010_08567/SB220.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB221.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB222.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB223.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB224.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB225.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB226.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB227.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB228.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB229.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB230.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB231.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB232.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB233.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB234.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB235.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB236.MS',
+'/net/sub8/lse023/data3/L2010_08567/SB237.MS',
+]
diff --git a/CEP/Pipeline/docs/examples/model_parsets/README b/CEP/Pipeline/docs/examples/model_parsets/README
new file mode 100644
index 00000000000..31f91bfeda8
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/README
@@ -0,0 +1,4 @@
+Model parsets for use in the standard imaging pipeline.
+
+These are collected here for reference only; this is not their permanent home.
+They will probably need to be customised to the particular job in question.
diff --git a/CEP/Pipeline/docs/examples/model_parsets/bbs.parset b/CEP/Pipeline/docs/examples/model_parsets/bbs.parset
new file mode 100644
index 00000000000..617e9953867
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/bbs.parset
@@ -0,0 +1,49 @@
+Strategy.Stations = []
+Strategy.InputColumn = DATA
+Strategy.TimeWindow = []
+Strategy.ChunkSize = 2500
+Strategy.UseSolver = F
+Strategy.Correlation.Selection = CROSS
+Strategy.Correlation.Type = []
+Strategy.Steps = [solve, subtract, correct]
+
+Step.solve.Baselines.Station1 = []
+Step.solve.Baselines.Station2 = []
+Step.solve.Model.Sources      = [3C196]
+Step.solve.Model.Gain.Enable  = T
+Step.solve.Correlation.Selection = CROSS
+Step.solve.Correlation.Type = []
+Step.solve.Operation        = SOLVE
+Step.solve.Output.Column    =
+Step.solve.Solve.Parms           = ["Gain:0:0:*", "Gain:1:1:*"]
+Step.solve.Solve.ExclParms       = []
+Step.solve.Solve.CalibrationGroups = []
+Step.solve.Solve.CellSize.Freq          = 0
+Step.solve.Solve.CellSize.Time          = 1
+Step.solve.Solve.CellChunkSize          = 25
+Step.solve.Solve.PropagateSolutions     = F
+Step.solve.Solve.Options.MaxIter        = 20
+Step.solve.Solve.Options.EpsValue       = 1e-9
+Step.solve.Solve.Options.EpsDerivative  = 1e-9
+Step.solve.Solve.Options.ColFactor      = 1e-9
+Step.solve.Solve.Options.LMFactor       = 1.0
+Step.solve.Solve.Options.BalancedEqs    = F
+Step.solve.Solve.Options.UseSVD         = T
+
+Step.subtract.Baselines.Station1 = []
+Step.subtract.Baselines.Station2 = []
+Step.subtract.Model.Sources      = [3C196]
+Step.subtract.Model.Gain.Enable  = T
+Step.subtract.Correlation.Selection = CROSS
+Step.subtract.Correlation.Type = []
+Step.subtract.Operation      = SUBTRACT
+Step.subtract.Output.Column  =
+
+Step.correct.Baselines.Station1 = []
+Step.correct.Baselines.Station2 = []
+Step.correct.Model.Sources      = []
+Step.correct.Model.Gain.Enable  = T
+Step.correct.Correlation.Selection = CROSS
+Step.correct.Correlation.Type = []
+Step.correct.Operation      = CORRECT
+Step.correct.Output.Column  = CORRECTED_DATA
diff --git a/CEP/Pipeline/docs/examples/model_parsets/bbs.skymodel b/CEP/Pipeline/docs/examples/model_parsets/bbs.skymodel
new file mode 100644
index 00000000000..a8fb5935573
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/bbs.skymodel
@@ -0,0 +1,2 @@
+# (Name, Type, Ra, Dec, I, Q, U, V, ReferenceFrequency='55.468e6', SpectralIndexDegree='0', SpectralIndex:0='0.0', SpectralIndex:1='0.0', Major, Minor, Phi) = format
+3C196, POINT, 08:13:36.062300,  +48.13.02.24900, 1.0
diff --git a/CEP/Pipeline/docs/examples/model_parsets/casapy.parset b/CEP/Pipeline/docs/examples/model_parsets/casapy.parset
new file mode 100644
index 00000000000..53794179c27
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/casapy.parset
@@ -0,0 +1,29 @@
+Images.stokes = [I] 
+Images.shape = [1024, 1024]
+Images.cellSize = [10, 10]
+Images.ra = 08h13m36.06 
+Images.dec = 48.13.02.25 
+Images.directionType = J2000 
+Solver.type = Dirty
+Solver.algorithm = Hogbom
+Solver.niter = 1
+Solver.gain = 1.0 
+Solver.verbose = True 
+Solver.scales = [0, 3] 
+Gridder.type = WProject
+Gridder.wmax = 50000
+Gridder.nwplanes = 5
+Gridder.oversample = 1
+Gridder.padding = 1
+Gridder.maxsupport = 1024
+Gridder.limitsupport = 0
+Gridder.cutoff = 0.001 
+Images.nfacets = 1
+Selection.select = True
+Selection.antenna = '*&*'
+weighting = natural
+datacolumn = CORRECTED_DATA
+minUV = 1.0 
+ncycles = 0 
+restore = True
+restore_beam = [10arcsec, 10arcsec, 0] 
diff --git a/CEP/Pipeline/docs/examples/model_parsets/dppp1.parset b/CEP/Pipeline/docs/examples/model_parsets/dppp1.parset
new file mode 100644
index 00000000000..96e4a2897b4
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/dppp1.parset
@@ -0,0 +1,18 @@
+allcolumns = False
+bandpass = 0
+fixed = 0
+flagger = 4
+algorithm = 0
+existing = True
+freqwindow = 31
+min = 0
+max = 0
+nchan = 240
+skipflags = True
+squasher = 1
+start = 8
+step = 240
+threshold = 3
+timewindow = 5
+timestep = 1
+clusterdesc =
diff --git a/CEP/Pipeline/docs/examples/model_parsets/dppp2.parset b/CEP/Pipeline/docs/examples/model_parsets/dppp2.parset
new file mode 100644
index 00000000000..db137577494
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/dppp2.parset
@@ -0,0 +1,18 @@
+allcolumns = False
+bandpass = 0
+fixed = 0
+flagger = 4
+algorithm = 0
+existing = True
+freqwindow = 1
+min = 0
+max = 0
+nchan = 240
+skipflags = True
+squasher = 0
+start = 8
+step = 240
+threshold = 3
+timewindow = 901
+timestep = 1
+clusterdesc =
diff --git a/CEP/Pipeline/docs/examples/model_parsets/dppp3.parset b/CEP/Pipeline/docs/examples/model_parsets/dppp3.parset
new file mode 100644
index 00000000000..678899a071f
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/dppp3.parset
@@ -0,0 +1,18 @@
+allcolumns = False
+bandpass = 0
+fixed = 0
+flagger = 4
+algorithm = 0
+existing = True
+freqwindow = 1
+min = 0
+max = 0
+nchan = 240
+skipflags = True
+squasher = 0
+start = 8
+step = 240
+threshold = 4
+timewindow = 901
+timestep = 1
+clusterdesc =
diff --git a/CEP/Pipeline/docs/examples/model_parsets/dppp4.parset b/CEP/Pipeline/docs/examples/model_parsets/dppp4.parset
new file mode 100644
index 00000000000..678899a071f
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/dppp4.parset
@@ -0,0 +1,18 @@
+allcolumns = False
+bandpass = 0
+fixed = 0
+flagger = 4
+algorithm = 0
+existing = True
+freqwindow = 1
+min = 0
+max = 0
+nchan = 240
+skipflags = True
+squasher = 0
+start = 8
+step = 240
+threshold = 4
+timewindow = 901
+timestep = 1
+clusterdesc =
diff --git a/CEP/Pipeline/docs/examples/model_parsets/mwimager.parset b/CEP/Pipeline/docs/examples/model_parsets/mwimager.parset
new file mode 100644
index 00000000000..760cf772696
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/mwimager.parset
@@ -0,0 +1,25 @@
+Images.stokes = [I] 
+Images.shape = [2048, 2048] 
+Images.cellSize = [10, 10] 
+Images.ra = 08h13m36.06 
+Images.dec = 48.13.02.25 
+Images.directionType = J2000 
+Solver.type = Dirty
+Solver.algorithm = MultiScale 
+Solver.niter = 1
+Solver.gain = 1.0 
+Solver.verbose = True 
+Solver.scales = [0, 3] 
+Gridder.type = WProject 
+Gridder.wmax = 66521
+Gridder.nwplanes = 257 
+Gridder.oversample = 1 
+Gridder.maxsupport = 4096
+Gridder.limitsupport = 4096
+Gridder.cutoff = 0.001 
+Gridder.nfacets = 1 
+datacolumn = CORRECTED_DATA
+minUV = 1.0 
+ncycles = 0 
+restore = False 
+restore_beam = [1, 1, 0] 
diff --git a/CEP/Pipeline/docs/examples/model_parsets/mwimager.pulsar.parset b/CEP/Pipeline/docs/examples/model_parsets/mwimager.pulsar.parset
new file mode 100644
index 00000000000..45a51e8290d
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/mwimager.pulsar.parset
@@ -0,0 +1,25 @@
+Images.stokes = [I] 
+Images.shape = [2048, 2048] 
+Images.cellSize = [10, 10] 
+Images.ra = 03h32m59.37 
+Images.dec = 54.34.43.57 
+Images.directionType = J2000 
+Solver.type = Dirty
+Solver.algorithm = MultiScale 
+Solver.niter = 1
+Solver.gain = 1.0 
+Solver.verbose = True 
+Solver.scales = [0, 3] 
+Gridder.type = WProject 
+Gridder.wmax = 10000
+Gridder.nwplanes = 257 
+Gridder.oversample = 1 
+Gridder.maxsupport = 400
+Gridder.limitsupport = 400
+Gridder.cutoff = 0.001 
+Gridder.nfacets = 1 
+datacolumn = DATA
+minUV = 1.0 
+ncycles = 0 
+restore = True
+restore_beam = [0.003, 0.003, 0] 
diff --git a/CEP/Pipeline/docs/examples/model_parsets/ndppp.parset b/CEP/Pipeline/docs/examples/model_parsets/ndppp.parset
new file mode 100644
index 00000000000..ce2559f3dd8
--- /dev/null
+++ b/CEP/Pipeline/docs/examples/model_parsets/ndppp.parset
@@ -0,0 +1,27 @@
+msin.startchan = 8
+msin.nchan = 240
+msin.datacolumn = DATA
+msout.datacolumn = DATA
+steps = [flag1,flag2,avg1,flag3]
+# Squashing pass to average all channels into one
+avg1.type = squash
+avg1.freqstep = 240
+avg1.timestep = 1
+# Flagging pass 1 (on unsquashed data with medium time window, XX & YY only)
+flag1.type=madflagger
+flag1.threshold=2
+flag1.freqwindow=101
+flag1.timewindow=1
+flag1.correlations=[0,3]
+# Flagging pass 2 (on unsquashed data with medium freq window, XX & YY only)
+flag2.type=madflagger
+flag2.threshold=2
+flag2.freqwindow=1
+flag2.timewindow=101
+flag2.correlations=[0,3]
+# Flagging pass 3 (on squashed data with wide time window, all corr)
+flag3.type=madflagger
+flag3.threshold=3
+flag3.freqwindow=1
+flag3.timewindow=901
+flag3.correlations=[0,1,2,3]
diff --git a/CEP/Pipeline/docs/notes/2010-11-15-grid.rst b/CEP/Pipeline/docs/notes/2010-11-15-grid.rst
new file mode 100644
index 00000000000..31e9a4b4f3d
--- /dev/null
+++ b/CEP/Pipeline/docs/notes/2010-11-15-grid.rst
@@ -0,0 +1,140 @@
+==================
+LOFAR Grid Meeting
+==================
+----------------
+15 November 2010
+----------------
+
+Introduction to LOFAR Pipeline Software
+---------------------------------------
+
+- What is a pipeline?
+
+  - Shepherds a given dataset through an analysis process.
+  - Pipelines are not necessarily well defined. For instance, we're still
+    working on figuring out exactly what the "standard imaging pipeline"
+    should do -- when should it flag? How much should the data be compressed?,
+    and so on. Therefore, a pipeline is defined by stringing together a series
+    of standard processing blocks (or "recipes"), together with some
+    configuration information.
+
+- The LOFAR framework
+
+  - Written in pure Python.
+  - Structured as a series of "recipes", each one of which performs a given
+    processing step. For example, one recipe might run NDPPP (flagging &
+    compression of the data), another might run BBS (calibration).
+  - Recipes can call other recipes as part of their processing run.
+  - The pipeline itself is just defined as another recipe, and calls other
+    recipes as required.
+
+- Recipes
+
+  - Must be able to handle running arbitrary tasks.
+  - For example, some recipes wrap external code (like NDPPP), which has no
+    programmatic interface: the recipe can simply set up & spawn the process,
+    then wait to collect its return code.
+  - Other recipes can perform processing in pure Python: for example, they
+    might manipulate a dataset using pyrap.
+  - This means there is no "standard" way a recipe must be structured; this is
+    up to the programmer defining it.
+  - The framework provides many helpers and shortcuts for common functionality
+    required in defining a recipe, though.
+
+- Parallelisation
+
+  - In general, the pipeline tasks are embarrassingly parallel: we are
+    processing hundreds of independent subbands at the same time. Therefore,
+    the pipeline simply distributes independent jobs to the compute nodes and
+    collects the results.
+  - Previous versions of the framework used IPython to do this. This is still
+    available, but its use is not now encouraged.
+  - The pipeline provides its own simple job dispatch system:
+
+    - A recipe on the head node is associated with a "node script", again in
+      pure Python, which runs on the compute nodes. The node script may take a
+      number of arguments (usually, e.g., the name of the data file to
+      process).
+    - The recipe can use SSH or mpirun to start the node scripts in parallel
+      across all compute nodes, handing each one the appropriate parameters.
+    - Adding a different dispatch mechanism is easy, providing it supports
+      similar semantics (ie, start a command and wait for it to finish).
+    - There are mechanisms to schedule somewhat intelligently, eg limiting the
+      number of simultaneous jobs which are started per node.
+    - After scheduling a bunch of jobs on the compute nodes, the pipeline will
+      then wait for them to complete. Success is signalled via exit status of
+      the job.
+
+  - Cluster layout
+
+    - The layout of the LOFAR cluster is described by means of a "clusterdesc"
+      file.
+    - The pipeline can read this clusterdesc file to obtain the name of the
+      cluster head node and the compute nodes which are available for it to send
+      jobs to.
+
+  - "Mapping" of data
+
+    - Not all data on the LOFAR storage nodes can be accessed by each compute
+      node. Instead, the compute nodes in a given subcluster can only access
+      the storage nodes in that cluster.
+    - In general, the imaging pipeline is started by simply passing it a list
+      of paths to the data to be processed.
+    - It is possible to work out from the path to a storage node which
+      subcluster it is a part of, and hence which compute nodes can access the
+      data.
+    - Processing is scheduled on compute nodes which can access the data in a
+      round-robin fashion.
+    - This understanding of paths is (obviously) fragile, but the LOFAR system
+      provides little other useful metadata to work from. 
+    - The "mapper" task which decides which compute node should process each
+      input dataset is easily extendable/replaceable to meet other
+      requirements.
+
+- Logging
+
+  - Logging is performed using the standard Python logging module.
+  - Output is configurable as part of the pipeline configuration file. In
+    general, logging is set to a disk file and stdout on the head node.
+  - As might be expected, you can specify the verbosity etc.
+  - The compute nodes log to the head node using TCP connections (ie, using
+    Python's logging.SocketHandler). The head node then adds any data they
+    send to its own logging stream.
+  - There is a built-in mechanism to scan the logging stream for arbitrary
+    regular expressions, and thereby flag potential problems etc.
+  - There is (currently) no other channel by which the compute nodes send
+    feedback (beyond their exit status) to the head node (but I hope to
+    implement a simple mechanism based on passing pickled Python objects over
+    a TCP stream shortly).
+
+- Results
+
+  - Broadly speaking, the imaging pipeline picks data up from the storage
+    nodes and processes it, writing results onto scratch disks on the compute
+    nodes.
+  - Intermediate data products are kept on the compute nodes -- for example,
+    output from NDPPP will be processed by BBS and the imager on the same
+    node, thus minimising data transport.
+  - The resulting images will usually be copied to a shared storage location
+    for collection/archiving.
+  - If required, a copying step for intermediate data products could also be
+    added.
+
+- Dependencies
+
+  - The system makes extensive use of the Python standard library
+  - The only non-core Python component required for basic operation is the
+    lofar.parameterset Python module, available from LOFARSOFT.
+  - Obviously, individual recipes may have more demanding requirements, either
+    because they need to call external programs or they require libraries like
+    pyrap for their own processing.
+
+- Documentation and further information
+
+  - Available from USG Subversion:
+    http://usg.lofar.org/svn/code/trunk/src/pipeline/.
+  - Uses a standard, distutils-based installation.
+  - Documentation (in Sphinx format, and still a work in progress) is also
+    available from that location.
+  - An online snapshot of the documentation is at
+    http://urchin.earth.li/~jds/pipeline/.
diff --git a/CEP/Pipeline/docs/notes/2010-12-08-handover_discussion.rst b/CEP/Pipeline/docs/notes/2010-12-08-handover_discussion.rst
new file mode 100644
index 00000000000..9366750f2bb
--- /dev/null
+++ b/CEP/Pipeline/docs/notes/2010-12-08-handover_discussion.rst
@@ -0,0 +1,100 @@
+==================
+Pipeline Framework
+==================
+Points for Discussion
+=====================
+8 December 2010
+===============
+
+Job management
+--------------
+
+- Each pipeline "job" (ie, a combination of pipeline definition & a
+  particular dataset) is given a job identifier (a free form string, but
+  generally based on the obsid)
+- Each job may be run multiple times; logs, results, etc are filed by a
+  combination of job identifier and pipeline start time
+
+Configuration
+-------------
+
+- pipeline.cfg file (in Python ConfigParser format) is used to configure the
+  pipeline system.
+- Includes things like where to search for recipes, what log format to use,
+  etc.
+
+Recipes & Tasks
+---------------
+
+- Continue to use "recipes" to describe individual components, as in Cuisine
+  (indeed, ultimately derives from Cuisine's WSRTrecipe).
+- We add the concept of a "task", which is a short-cut to running a
+  recipe with a particular set of parameters; access via self.run_task()
+  method in recipe.
+- Tasks are defined through another configuration file (tasks.cfg).
+- Entirely optional: everything can be done the "old-fashioned" way of
+  specifying inputs directly in the recipe. But tasks can make the recipe
+  code cleaner, and help separate configuration details from code.
+
+Inputs, outputs, type checking
+------------------------------
+
+- Input and output dicts for recipes will be checked for completeness and
+  data types.
+- This enables the pipeline to check inputs for correctness before running a
+  recipe, rather than failing partway through.
+- A recipe must declare any inputs it expects as part of its definition.
+- Acceptable types are based on simple classes. Basics (strings, ints,
+  floats...) are already defined as part of the framework; the recipe author
+  is encouraged to specify (eg "this must be a float with value between 0.3
+  and 1.5").
+- Default and/or optional inputs can also be declared.
+- Uniform parser and the same type-checking is applied reading inputs from 
+  command line options or from the tasks definitions.
+
+Distribution
+------------
+
+- The pipeline includes its own distribution system, whereby recipes (running
+  on the head node) can call node scripts (running on other machines).
+- Node scripts are dispatched using either SSH or mpirun (depending on
+  pipeline.cfg; SSH by default).
+- Pickleable Python objects can be sent to and retrieved from the nodes, so
+  complex configuration data or results can be exchanged.
+- The pipeline can detect a failure on one node, and shut down the rest of the
+  system if required.
+- The recipes for rficonsole, NDPPP, BBS and cimager all use this system.
+  None of the current recipes use other systems (eg "startdistproc").
+- Helper functions make this system very easy for independent tasks (eg
+  running multiple instances of rficonsole, NDPPP). For more involved
+  workflows (eg BBS, where KernelControl and GlobalControl must be run
+  simultaneously), a more elaborate recipe is required.
+
+Parsets and tool configuration
+------------------------------
+
+- Most imaging pipeline tasks are configured by means of a parset.
+- Typically, the pipeline will take a template parset missing names of eg
+  input and output files, and fill in the blanks for each file to be
+  processed.
+- There are helper routines to do this in the pipeline framework
+  (utilities.patch_parset() et al).
+
+Logging
+-------
+
+- Pervasive use of Python logging module throughout.
+- Every recipe has a logger (self.logger), which can be used directly and
+  passed to (most) pipeline functions.
+- Logger is also made available on compute nodes when running in a
+  distributed way.
+
+Users
+-----
+
+- There is currently not one "standard" imaging pipeline definition. The
+  system is designed to be modular enough that processing chains can be
+  quickly modified in response to requests from commissioners.
+- As well as the imaging pipeline, the Pulsar and Transients groups have also
+  defined and are actively using pipelines in this framework.
+- The Pulsar pipeline is still dependent on the IPython system.
diff --git a/CEP/Pipeline/docs/pulsar_demo/1-basic_fn.py b/CEP/Pipeline/docs/pulsar_demo/1-basic_fn.py
new file mode 100644
index 00000000000..4208af8abaf
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/1-basic_fn.py
@@ -0,0 +1,26 @@
+import glob
+import subprocess
+import os
+
+def run(file_pattern, input_dir, output_file, clobber):
+    # Returns 0 for success, 1 for failure
+
+    # Sanity checking
+    if not os.path.exists(input_dir):
+        return 1
+    if os.path.exists(output_file):
+        if clobber:
+            os.unlink(output_file)
+        else:
+            return 1
+
+    # Build list of input files
+    input_files = glob.glob(os.path.join(input_dir, file_pattern))
+
+    try:
+        # Run "montage" command
+        subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+    except Exception, e:
+        return 1
+
+    return 0
diff --git a/CEP/Pipeline/docs/pulsar_demo/2-lofarnode.py b/CEP/Pipeline/docs/pulsar_demo/2-lofarnode.py
new file mode 100644
index 00000000000..42a8f248f3f
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/2-lofarnode.py
@@ -0,0 +1,34 @@
+import sys
+import subprocess
+import glob
+import os
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+
+class thumbnail_combine(LOFARnodeTCP):
+    def run(self, file_pattern, input_dir, output_file, clobber):
+        # Returns 0 for success, 1 for failure
+
+        # Sanity checking
+        if not os.path.exists(input_dir):
+            return 1
+        if os.path.exists(output_file):
+            if clobber:
+                os.unlink(output_file)
+            else:
+                return 1
+
+        # Build list of input files
+        input_files = glob.glob(os.path.join(input_dir, file_pattern))
+
+        try:
+            # Run "montage" command
+            subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+        except Exception, e:
+            return 1
+
+        return 0
+
+if __name__ == "__main__":
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/docs/pulsar_demo/3-logging.py b/CEP/Pipeline/docs/pulsar_demo/3-logging.py
new file mode 100644
index 00000000000..5f30d7717f4
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/3-logging.py
@@ -0,0 +1,47 @@
+import sys
+import subprocess
+import glob
+import os
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+
+class thumbnail_combine(LOFARnodeTCP):
+    def run(self, file_pattern, input_dir, output_file, clobber):
+        if not os.path.exists(input_dir):
+            self.logger.error("Input directory (%s) not found" % input_dir)
+            return 1
+
+        self.logger.info("Processing %s" % input_dir)
+
+        if os.path.exists(output_file):
+            if clobber:
+                self.logger.warn(
+                    "Deleting previous version of results: %s" % output_file
+                )
+                os.unlink(output_file)
+            else:
+                self.logger.error(
+                    "Refusing to overwrite existing file %s" % output_file
+                )
+                return 1
+
+        input_files = glob.glob(os.path.join(input_dir, file_pattern))
+
+        try:
+            # Run "montage" command
+            subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+        except Exception, e:
+            self.logger.error(str(e))
+            return 1
+
+        if not os.path.exists(output_file):
+            self.logger.error(
+                "Output file %s not created by montage exectuable" % output_file
+            )
+            return 1
+
+        return 0
+
+if __name__ == "__main__":
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/docs/pulsar_demo/4-helpers.py b/CEP/Pipeline/docs/pulsar_demo/4-helpers.py
new file mode 100644
index 00000000000..73a9684e129
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/4-helpers.py
@@ -0,0 +1,54 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                              "Thumbnail combine" pipeline demo
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+import glob
+import os
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import catch_segfaults
+
+class thumbnail_combine(LOFARnodeTCP):
+    def run(self, executable, file_pattern, input_dir, output_file, clobber):
+        if not os.path.exists(input_dir):
+            self.logger.error("Input directory (%s) not found" % input_dir)
+            return 1
+
+        self.logger.info("Processing %s" % input_dir)
+
+        if os.path.exists(output_file):
+            if clobber:
+                self.logger.warn(
+                    "Deleting previous version of results: %s" % output_file
+                )
+                os.unlink(output_file)
+            else:
+                self.logger.error(
+                    "Refusing to overwrite existing file %s" % output_file
+                )
+                return 1
+
+        input_files = glob.glob(os.path.join(input_dir, file_pattern))
+
+        command_line = [executable] + input_files + [output_file]
+        try:
+            catch_segfaults(command_line, None, None, self.logger)
+        except Exception, e:
+            self.logger.error(str(e))
+            return 1
+
+        if not os.path.exists(output_file):
+            self.logger.error(
+                "Output file %s not created by montage exectuable" % output_file
+            )
+            return 1
+
+        return 0
+
+if __name__ == "__main__":
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/docs/pulsar_demo/5-recipe.py b/CEP/Pipeline/docs/pulsar_demo/5-recipe.py
new file mode 100644
index 00000000000..17099c22a23
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/5-recipe.py
@@ -0,0 +1,12 @@
+import sys
+from lofarpipe.support.baserecipe import BaseRecipe
+
class thumbnail_combine(BaseRecipe):
    """Minimal demo recipe: logs a start message and then does nothing."""

    def go(self):
        """Recipe entry point, invoked by the framework."""
        self.logger.info("Starting thumbnail_combine run")
        super(thumbnail_combine, self).go()
        self.logger.info("This recipe does nothing")


if __name__ == '__main__':
    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/6-remotecommand.py b/CEP/Pipeline/docs/pulsar_demo/6-remotecommand.py
new file mode 100644
index 00000000000..2eec1929ebc
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/6-remotecommand.py
@@ -0,0 +1,52 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                              "Thumbnail combine" pipeline demo
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+
class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
    """
    Master recipe: dispatches the thumbnail_combine node script to a
    hard-coded list of remote hosts using RemoteCommandRecipeMixIn.

    Returns 0 if all compute jobs succeeded, 1 otherwise.
    """
    def go(self):
        self.logger.info("Starting thumbnail_combine run")
        super(thumbnail_combine, self).go()

        # Hosts on which to execute
        hosts = ['lce019']

        # Path to node script: by framework convention the node script has
        # the same path as this recipe with 'master' replaced by 'nodes'.
        command = "python %s" % (self.__file__.replace('master', 'nodes'))

        # Build a list of jobs, one per host. The arguments match the
        # signature of the node script's run() method.
        jobs = []
        for host in hosts:
            jobs.append(
                ComputeJob(
                    host, command,
                    arguments=[
                        "/usr/bin/montage",     # executable
                        "*.th.png",             # file_pattern
                        "/path/to/png/files",   # input_dir
                        "/path/to/output.png",  # output_file
                        True                    # clobber
                    ]
                )
            )

        # And run them
        self._schedule_jobs(jobs)

        # The error flag is set if a job failed
        if self.error.isSet():
            self.logger.warn("Failed compute job process detected")
            return 1
        else:
            return 0

if __name__ == '__main__':
    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/7-ingredients.py b/CEP/Pipeline/docs/pulsar_demo/7-ingredients.py
new file mode 100644
index 00000000000..d0e834b285a
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/7-ingredients.py
@@ -0,0 +1,68 @@
+import sys
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+
class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
    """
    Master recipe which runs the thumbnail_combine node script on a list
    of remote hosts. All tool parameters are declared as ingredients, so
    they are type-checked and may be supplied on the command line.
    """
    inputs = {
        'executable': ingredient.ExecField(
            '--executable',
            default="/usr/bin/montage",
            help="montage executable"
        ),
        'file_pattern': ingredient.StringField(
            '--file-pattern',
            default="*.th.png",
            help="File search pattern (glob)",
        ),
        'input_dir': ingredient.StringField(
            '--input-dir',
            help="Directory containing input files"
        ),
        'output_file': ingredient.StringField(
            '--output-file',
            help="Output filename"
        ),
        'clobber': ingredient.BoolField(
            '--clobber',
            default=False,
            help="Clobber pre-existing output files"
        ),
        'target_hosts': ingredient.ListField(
            '--target-hosts',
            help="Remote hosts on which to execute"
        )
    }

    def go(self):
        """Dispatch one compute job per target host and report the outcome."""
        self.logger.info("Starting thumbnail_combine run")
        super(thumbnail_combine, self).go()

        # Node script path follows the 'master' -> 'nodes' convention.
        node_command = "python %s" % (self.__file__.replace('master', 'nodes'))

        # One job per host; each job gets its own argument list matching
        # the node script's run() signature.
        jobs = [
            ComputeJob(
                target_host, node_command,
                arguments=[
                    self.inputs['executable'],
                    self.inputs['file_pattern'],
                    self.inputs['input_dir'],
                    self.inputs['output_file'],
                    self.inputs['clobber']
                ]
            )
            for target_host in self.inputs['target_hosts']
        ]
        self._schedule_jobs(jobs)

        # The mixin sets the error flag if any job failed.
        if self.error.isSet():
            self.logger.warn("Failed compute job process detected")
            return 1
        else:
            return 0

if __name__ == '__main__':
    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/8-ingredients2.py b/CEP/Pipeline/docs/pulsar_demo/8-ingredients2.py
new file mode 100644
index 00000000000..96ca8f497e8
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/8-ingredients2.py
@@ -0,0 +1,78 @@
+import sys
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+
class HostNameList(ingredient.ListField):
    """
    Custom ingredient field: a list of hostnames, each of which must be
    resolvable via DNS for the field to validate.
    """
    @classmethod
    def is_valid(cls, value):
        # Bug fix: the original classmethod omitted the implicit first
        # argument, so the class object itself was bound to `value` and
        # validation never saw the actual input.
        """Return True if every entry in `value` resolves, False otherwise."""
        import socket
        for hostname in value:
            try:
                socket.gethostbyname(hostname)
            except socket.error:
                # Narrowed from a bare except: only resolution/network
                # errors (socket.gaierror subclasses socket.error) should
                # mark the list invalid.
                return False
        return True
+        
class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
    """
    Master recipe which runs the thumbnail_combine node script on a list
    of remote hosts. The target_hosts input is validated by the custom
    HostNameList field defined in this module.
    """
    inputs = {
        'executable': ingredient.ExecField(
            '--executable',
            default="/usr/bin/montage",
            help="montage executable"
        ),
        'file_pattern': ingredient.StringField(
            '--file-pattern',
            default="*.th.png",
            help="File search pattern (glob)",
        ),
        'input_dir': ingredient.StringField(
            '--input-dir',
            help="Directory containing input files"
        ),
        'output_file': ingredient.StringField(
            '--output-file',
            help="Output filename"
        ),
        'clobber': ingredient.BoolField(
            '--clobber',
            default=False,
            help="Clobber pre-existing output files"
        ),
        # Bug fix: HostNameList is defined in this module, not in the
        # lofaringredient module; `ingredient.HostNameList` would raise
        # AttributeError at import time.
        'target_hosts': HostNameList(
            '--target-hosts',
            help="Remote hosts on which to execute"
        )
    }

    def go(self):
        """Dispatch one compute job per target host and report the outcome."""
        self.logger.info("Starting thumbnail_combine run")
        super(thumbnail_combine, self).go()

        # Node script path follows the 'master' -> 'nodes' convention.
        command = "python %s" % (self.__file__.replace('master', 'nodes'))
        jobs = []
        for host in self.inputs['target_hosts']:
            jobs.append(
                ComputeJob(
                    host, command,
                    arguments=[
                        self.inputs['executable'],
                        self.inputs['file_pattern'],
                        self.inputs['input_dir'],
                        self.inputs['output_file'],
                        self.inputs['clobber']
                    ]
                )
            )
        self._schedule_jobs(jobs)

        # The mixin sets the error flag if any job failed.
        if self.error.isSet():
            self.logger.warn("Failed compute job process detected")
            return 1
        else:
            return 0

if __name__ == '__main__':
    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/9-config.py b/CEP/Pipeline/docs/pulsar_demo/9-config.py
new file mode 100644
index 00000000000..df215fd19ca
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/9-config.py
@@ -0,0 +1,68 @@
+import sys
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.clusterdesc import ClusterDesc, get_compute_nodes
+
class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
    """
    Master recipe which runs the thumbnail_combine node script on every
    compute node listed in the clusterdesc file named in the pipeline
    configuration.
    """
    inputs = {
        'executable': ingredient.ExecField(
            '--executable',
            default="/usr/bin/montage",
            help="montage executable"
        ),
        'file_pattern': ingredient.StringField(
            '--file-pattern',
            default="*.th.png",
            help="File search pattern (glob)",
        ),
        'input_dir': ingredient.StringField(
            '--input-dir',
            help="Directory containing input files"
        ),
        'output_file': ingredient.StringField(
            '--output-file',
            help="Output filename"
        ),
        'clobber': ingredient.BoolField(
            '--clobber',
            default=False,
            help="Clobber pre-existing output files"
        )
    }

    def go(self):
        """Dispatch the node script to all cluster compute nodes."""
        self.logger.info("Starting thumbnail_combine run")
        super(thumbnail_combine, self).go()

        # Target hosts come from the clusterdesc file named in the
        # [cluster] section of the pipeline configuration.
        clusterdesc_file = self.config.get('cluster', "clusterdesc")
        target_hosts = get_compute_nodes(ClusterDesc(clusterdesc_file))

        # Node script path follows the 'master' -> 'nodes' convention.
        node_command = "python %s" % (self.__file__.replace('master', 'nodes'))
        jobs = []
        for target_host in target_hosts:
            jobs.append(
                ComputeJob(
                    target_host, node_command,
                    arguments=[
                        self.inputs['executable'],
                        self.inputs['file_pattern'],
                        self.inputs['input_dir'],
                        self.inputs['output_file'],
                        self.inputs['clobber']
                    ]
                )
            )
        self._schedule_jobs(jobs)

        # The mixin sets the error flag if any job failed.
        if not self.error.isSet():
            return 0
        self.logger.warn("Failed compute job process detected")
        return 1

if __name__ == '__main__':
    sys.exit(thumbnail_combine().main())
diff --git a/CEP/Pipeline/docs/pulsar_demo/intro.rst b/CEP/Pipeline/docs/pulsar_demo/intro.rst
new file mode 100644
index 00000000000..cb55aae30c0
--- /dev/null
+++ b/CEP/Pipeline/docs/pulsar_demo/intro.rst
@@ -0,0 +1,113 @@
+=======================
+Pipeline Framework Demo
+=======================
+17 December 2010
+================
+
+What is a pipeline?
+-------------------
+
+- A standardised way of interacting with the rest of LOFAR; logging, MAC/SAS
+  integration, etc.
+- Should make the developer's job *easier* (after an initial learning curve),
+  by providing a bunch of helper routines.
+- Provides sanity checking of inputs, outputs, etc.
+- Does not dictate exactly how you write your code: designed to enable the
+  developer as much freedom as necessary to get the job done. We have to
+  interface with a wide variety of external executables, scripts, Python
+  modules, ...
+
+Basic concepts
+--------------
+
+- Framework is pure Python, with minimal dependencies on external libraries.
+- Originated in the WSRT "Cuisine" framework, although almost no Cuisine code
+  is now actually used.
+- Also hardly used is IPython (although it's very hard to convince people it's
+  not the "IPython framework"!).
+- An individual "pipeline component" is wrapped in a *recipe*: a Python
+  script which describes how to carry out that task.
+- Recipes take *inputs* and produce *outputs*. We do type & sanity checking
+  on the inputs and outputs.
+- Inputs will include some indication of the data to process (eg, a series
+  of filenames) and any configuration parameters needed (eg, tool
+  configuration parsets).
+- A *task* is a recipe + a set of configuration parameters. Separate
+  configuration from code; provide a shortcut for common processing jobs.
+- Recipes can be nested: for example, in the imaging pipeline, the *bbs*
+  recipe calls the *vdsmaker*, *parmdb* and *sourcedb* recipes as part of
+  its run.
+- A *pipeline* is just a recipe which calls a series of subsidiary recipes
+  in order. (Often with a little extra boilerplate to receive messages from
+  MAC/SAS etc.)
+
+Job management
+--------------
+
+- Each pipeline "job" (ie, a combination of pipeline definition & a
+  particular dataset) is given a job identifier (a free form string, but
+  generally based on the obsid).
+- Each job may be run multiple times; logs, results, etc are filed by a
+  combination of job identifier and pipeline start time
+
+Configuration
+-------------
+
+- pipeline.cfg file (in Python ConfigParser format) is used to configure the
+  pipeline system.
+- Includes things like where to search for recipes, what log format to use,
+  etc.
+
+Inputs, outputs, type checking
+------------------------------
+
+- Input and output dicts for recipes will be checked for completeness and
+  data types.
+- This enables the pipeline to check inputs for correctness before running a
+  recipe, rather than failing partway through.
+- A recipe must declare any inputs it expects as part of its definition.
+- Acceptable types are based on simple classes. Basics (strings, ints,
+  floats...) are already defined as part of the framework; the recipe author
+  is encouraged to specify (eg "this must be a float with value between 0.3
+  and 1.5").
+- Default and/or optional inputs can also be declared.
+- Uniform parser and the same type-checking is applied reading inputs from
+  command line options or from the tasks definitions.
+
+Distribution
+------------
+
+- The pipeline includes its own distribution system, whereby recipes (running
+  on the head node) can call node scripts (running on other machines).
+- Node scripts are dispatched using either SSH or mpirun (depending on
+  pipeline.cfg; SSH by default).
+- Pickleable Python objects can be sent to and retrieved from the nodes, so
+  complex configuration data or results can be exchanged.
+- The pipeline can detect a failure on one node, and shut down the rest of the
+  system if required.
+- The recipes for rficonsole, NDPPP, BBS and cimager all use this system.
+  None of the current recipes use other systems (eg "startdistproc").
+- Helper functions make this system very easy for independent tasks (eg
+  running multiple instances of rficonsole, NDPPP). For more involved
+  workflows (eg BBS, where KernelControl and GlobalControl must be run
+  simultaneously), a more elaborate recipe is required.
+
+Parsets and tool configuration
+------------------------------
+
+- Most imaging pipeline tasks are configured by means of a parset.
+- Typically, the pipeline will take a template parset missing names of eg
+  input and output files, and fill in the blanks for each file to be
+  processed.
+- There are helper routines to do this in the pipeline framework
+  (utilities.patch_parset() et al).
+
+Logging
+-------
+
+- Pervasive use of Python logging module throughout.
+- Every recipe has a logger (self.logger), which can be used directly and
+  passed to (most) pipeline functions.
+- Logger is also made available on compute nodes when running in a
+  distributed way.
+- Basic integration with log4cplus/log4cxx as used by the LofarLogger.
diff --git a/CEP/Pipeline/docs/sphinx/Makefile b/CEP/Pipeline/docs/sphinx/Makefile
new file mode 100644
index 00000000000..39fe377f91c
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/Makefile
@@ -0,0 +1,75 @@
# Makefile for Sphinx documentation
#
# Builds the pipeline framework documentation with sphinx-build.

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
# -d caches parsed doctrees; "source" is the documentation source directory.
ALLSPHINXOPTS   = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source

# None of these targets create a file of the same name.
.PHONY: help clean html web pickle htmlhelp latex changes linkcheck

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html      to make standalone HTML files"
	@echo "  pickle    to make pickle files"
	@echo "  json      to make JSON files"
	@echo "  htmlhelp  to make HTML files and a HTML help project"
	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  changes   to make an overview over all changed/added/deprecated items"
	@echo "  linkcheck to check all external links for integrity"

clean:
	-rm -rf build/*

html:
	mkdir -p build/html build/doctrees
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html
	@echo
	@echo "Build finished. The HTML pages are in build/html."

pickle:
	mkdir -p build/pickle build/doctrees
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

# Historical alias: "web" output was renamed "pickle" in Sphinx.
web: pickle

json:
	mkdir -p build/json build/doctrees
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) build/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	mkdir -p build/htmlhelp build/doctrees
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in build/htmlhelp."

latex:
	mkdir -p build/latex build/doctrees
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex
	@echo
	@echo "Build finished; the LaTeX files are in build/latex."
	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
	      "run these through (pdf)latex."

changes:
	mkdir -p build/changes build/doctrees
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes
	@echo
	@echo "The overview file is in build/changes."

linkcheck:
	mkdir -p build/linkcheck build/doctrees
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in build/linkcheck/output.txt."
diff --git a/CEP/Pipeline/docs/sphinx/source/.static/lofar.ico b/CEP/Pipeline/docs/sphinx/source/.static/lofar.ico
new file mode 100644
index 0000000000000000000000000000000000000000..7e31090573496e9def6a629f019d4c88f3dfcca2
GIT binary patch
literal 4286
zcmbVP3s_Xu7XJ4+^WZfM#zW*HAX=!2!c|d8B?(YO<ux-51ES?PKx&HJYib&(1eTd*
zg;^5$C`1M0rGN?|Dq57`)imv<W@?$2hgyJ)_q6sPWnbEB&-c$h=gdBPz5cb<9)Quu
z%M1J)+Z+HF0R0K{2#*u^^9PQ?LIIhXY^(AMyLjw*B_4-5b*OHO!OmO*-twx&S>ZZ5
z#M8Ja3_>J(;68u%hqvAmcG_PNlTc~WVBZZLYC7~Nef2CP`!)EsuO4M7G5Bzm4rQ}*
zaM5}D!0o@%L4pKGckwrLSb!-k6Q}z|qx7oYVte_nH8?Xwhw_{#6n`Iw>UJ&mU!IK3
zb-~D7^9(jPkxbzpu<V5rv*_?Q<hP(ju0i1|Ty`7|G|$74xf<+l)RNp-RGyDVQQ9JW
z=5qy-Dj(;a{b5Rt!^VVolsvD+2lJoBfqtjlTSZh>{>m?ZR+S~fu|6><|H^=R^EA|-
zBpq6{*!QCrr5j(yardQTnq=oxZSr}j9jQY}UMwnZ@)*UTwj&06eu%*P7viujy0vGW
ztl`fJy=Pq#cI$PtS3HiIxeg@zmI1}-M`WEgrEXV*0<`qdqIj7GmEUn6#pAF!8YOgH
zvDt{DDXBOVGYM}`_eHE<1DgJ_58_d<p7!iC;OHGKYT69=@ZZ-I9m+<>dSMAZc_bDk
z`^nbZ@i@{+ergKF>qYaiZ@@X5A7uWEJ{7K^Rh*5FJs!i0ug9S@D-I>=32Tk0j(rD>
zDhV0Ct5X6F)){cb9FMwA4NCLt(P4Yi<trf#4gZWq>Brn=J^e<0KJAZLuQ;Orq+u|M
zT<#lacG98vg;?x9PF%S~>u&SCcx<9j(h`X+>o1_wx&$Tn>TvHh5^&@!e<v1u&Mm`<
zeyhi{h&1~|qwEW^nc`9N;{<G~bVty9QBWFXqV+>tRqeJ@QD%%q`8kRm`R3>yE}OV<
zU5ASO@!0#49=RzR<jxI7cA5^Ws4CrmB;x2PvhlVSMa!EY$zMgIOEikBxNOdg2;^^e
zMc^z3(WHdJ?@cti#p1(vDJOs6eBpc`Tbo0%q3UsD%`BANu`c#ICN_Fjiig$vMOXL@
zR>M1rq|5H7K?2HmQl8&<kzIZCvtBK7is5T<ICz^Q3>#}4;hVw)!DPkkM$W?Fp)n{v
z-o?Xu(vSP*-2;PR%(Q`XDplIPB*Wrgg8|Ij8;l9?A|%suzx&BgLZLn!@15>sk}4UM
z0Tjp9c+}sCz?M_qh*-!(nMuiPN6toF0P*hxj}^B&1{L4wP&BU>MwsMKq|n|v0iqIV
zX~2vbT!-#B527m*-A}ro{43~CjKgAG7?zHRL0KvHJNM@hm@-8u<JC4QuV-=GKbm6A
zf74U0P~6YVM#UIAIFDx#k|{RHWa+~wDiVfH2KyRc3n$TGvjm@e4Z+h!FN8d24|mNR
zg#Shb-_Z+MIX)M{krf_=((8KidjvkncY&{;9h_`3g-<+VP<m8PHtQ`pSoZ0^uy42$
zR)MtHOo9o5FcHIt3oDT)>=duLB%s0=iER}cRQ#8C-82~m7elcws~K%dTHoy#tiw4*
z{$g|HfX&2VJ@ub9;_%mtQT52cX*-2VS%-}VB_+H*knNQJrKjg&hp$p58%6deQ*e1)
zbt<TB-^Y2?lc-7}-<|EM7hQdHGPa)&!-kKZz#I7?SfP6rpUX#JjwL=BsIkmWPGHk<
zD3xkon--OY4a!!<UOV0s%Z$7a=&3f69hXy39cm5RV8-s+4J4|B9*(aI-}WRux6u1H
zd5kpJ+ZKhw%ArVG=Y{bLT;QMj0;=r2FfParo`HQqVcWDx2w54zri=jFbqvge>Isy0
z&50;pbO9H{bkz0LP|Oo7b#Z_DB)qrC7F2EoiZ}6@iA=9%4>>+7)I3Tx@(A}4uL0CY
zkIg{UL`UWkC3f-B1et6k)~(~V4kNzIpxKE)fm_TnzXUVnf#u-1Dhiu(^vFy52LF>M
zV$V{&rM4XEoPfLr7x+;P5IB#MNry<4wX6CCcBl{K?nhiU?@6TZw&ke%hqYku$KzuW
z0V;9U*7kV%X)Zt1@=Ro&*c8A<(G3k|qFuFV8SEVHSO;OTF$m8`M54%IJJrGKgJH@d
z))2>$L@^)i;jWQo<5X|zLv=hK_<6~I@)xUwc001U9|__K$aOUd5FTzsqoBbG$36(1
z>x!ZCofMuP9t#cZc~r1VxWx6cj5mo7)9{kbc;qB&Q1v6XD->@ZuxDP=MPRs60ks>3
z;ug#b4R+;nf0Nzq@hD$Y3&}=ctT&nSR98;Iq<~GxYdGngfYMp9D65UcuFDbFO8ne0
z{gXaT;sDcPi|p>>lk!ZXkb&(?kHXnJ?mFV55qn1-cHAc1Rgqo9_ks~MTAnNXtVlg;
z>t<ZHXI3`-$mdUSesDN5pV&m~h$G&f)nR`-ofTRqVf&dWDA+`OX;~m!FisVp-o3Bo
z_~e$qn&sjQ`KtUm-j9jLr#sN095QJWRWOr9*IZnEh|YrtsYdb|_~mlcc*+GUUx=d7
zl9S|f7xatMqA*K~z0D-MV<I-62*851P8jq|Uu8(oden>7a7a+tI`gCQFC#aXJ>l{W
zOF~5?&qX@#Y`cLQa^&SP&fkyvRSR@p(NMhUOjvy@4%=s1!8ZIZFDc&4<OW||EOxA=
z{?tZ3Y95Q1Kj?*kd5lqtF^t0M)!Ys%`r-!trY8hMgB8nK9U@V1Sd093zwduS{+{H;
z2E+_t-^kC)G}yU;bB^Np9}kS1&1H7QTS%sQ8EQbue6sl_^^t2&;LR)*+*3uK-VD)p
z0<7*D`zXyOYKQ}WF}~|2Vss9x8(qfIalktq>sLq17CWiXTj2m#y8y*Z;(XaL-Wvn4
ze2Yx5PT_VCH>WC~^qGU*bY`h+Chp!EgY307=t;ek_daGIId%jF7Z+Qa^@Jy>U%Ar!
z^T^;=t5$LU;0w0~Ar%{Y_QbFR>Mwfwk2BRC9;xBj(nvM-rXNx=X_=8Qnqc1mvy~2|
z1)OUk$f;7nYgU(Un86Cxyx#DAIf7`So+X3{kd3AL2aW*DV(#()YbLND8DohyaG3EJ
zGIOZsUk+#MLLir`d=bIIrq(jDeJJ(C?;=rn-V@`W<L4EU6HNqHZRnD~xz6Lw>(Vqr
z0D<?f2adV5U>rU^V2-jLwrZ#y5|HZ{fsM~h!TMkYbFhc4U2m#M!-KJUEyd<m5MC-!
zLQQcIMiIK>&F%IlaDVao8*+C}r1|$ox6k+*hKvm6jSzMjH)Y-k3zkF0_v8IHIS?!I
zsJ^uNW5zU<(w@hazYF@!mhy5OUGaLzZBHU_9dv{qgugagT9^-fSQL^3ieE4K3S=V2
z1Y<?m1mrd}Lb96!q70xQ3}j49u+*e3`CJF?bKVCy5C3pj{$KJ{QU?9U%K%6|zjY@2
z9;ey2VWoiequA1je8I%t?(c!Ii83g(0+_`&JU3|>m&<LQOmHJ~`Gzk3Xh<0uU4JdP
zY{J2VmUn-CU-Q{O0fSsvh{>CMHRv7IM>(>GC<;NeAddFtm`E5#;5pD;Z-0LOq9jR|
zY9#59T9Q`M^|A};QuZ=QQY-5udv%87K30@m7kWy5EkRN-ExuSs>$A<CSJf?^S3M6%
zO+ncNwPYUaDVdi9)tk-fqPgAdY3^uGH@CJ+|8H%Tq}EH4bn`0hUQakqXeM-xp8!Vk
BD|r9_

literal 0
HcmV?d00001

diff --git a/CEP/Pipeline/docs/sphinx/source/author/index.rst b/CEP/Pipeline/docs/sphinx/source/author/index.rst
new file mode 100644
index 00000000000..24efb5d2337
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/author/index.rst
@@ -0,0 +1,731 @@
+**********************
+Recipe design tutorial
+**********************
+
+The pipeline framework provides a simple system for dispatching
+compute-intensive jobs to remote hosts. This includes sending and
+receiving complex data as part of the job. This section provides a tutorial
+introduction to writing a recipe which takes advantage of this capability.
+
+Problem specification
+=====================
+
+This tutorial addresses a simple real-world use case. This example was
+suggested by Anastasia Alexov, and addresses a requirement of the LOFAR Pulsar
+Pipeline.
+
+The pulsar pipeline runs across multiple compute nodes, generating a series of
+thumbnail plots on the local storage of each one -- that is, the plots are
+only accessible by a process running on the compute node, and are not exported
+via NFS or similar. The aim is to combine all the thumbnails on a given host
+into a single image, using the ``montage`` command provided by `ImageMagick
+<http://www.imagemagick.org/>`_. It is assumed that the thumbnails reside in
+the same path on each node.
+
+An initial implementation of the code which runs on each node was provided as
+a ``ksh`` script.
+
+.. code-block:: ksh
+   :linenos:
+
+   #!/bin/ksh
+
+   #find all the th.png files and convert them into a list to paste together using "montage".
+   
+   if [ -f combined.th.png ]
+   then
+      echo "WARNING: deleting previous version of results: combined.th.png"
+      rm combined.th.png
+   fi
+   
+   find ./ -name "*.th.png" -print  > /tmp/$$_combine_col1.txt
+   find ./ -name "*.th.png" -print  | sed -e 's/\// /g' -e 's/^.* //g' -e 's/.*_RSP/RSP/g' -e 's/\..*//g'  -e 's/_PSR//g' > /tmp/$$_combine_col2.txt
+   paste /tmp/$$_combine_col1.txt /tmp/$$_combine_col2.txt | awk '{print "-label "$2" "$1" "}' | tr -d '\n' | awk '{print "montage -background none "$0" combined.th.png"}' > combine_png.sh
+   rm /tmp/$$_combine_col1.txt /tmp/$$_combine_col2.txt
+   wc_convert=`wc -l combine_png.sh | awk '{print $1}'`
+   
+   if [[ $wc_convert > 0 ]]
+   then
+      chmod 777 combine_png.sh
+      echo "Executing the following command: "
+      cat combine_png.sh
+      ./combine_png.sh
+      echo ""
+      echo "Results:  combined.th.png"
+      echo ""
+   else
+      echo ""
+      echo "No thumbnail (\*.th.png) files were found to combine."
+      echo ""
+   fi
+   
+   exit 0
+
+Per-node script
+===============
+
+First, we will consider the processing that must be done on each of the remote
+hosts. We start by converting the ``ksh`` script to a native Python version,
+then refining it to best take advantage of the framework capabilities.
+
+It may be worth emphasising that the conversion to Python is optional: an
+alternative approach would be to run code each node which simply spawned a
+copy of ``ksh`` and executed the script directly. In general, though,
+minimising forking is a wise approach -- and the Python code provides better
+opportunity to demonstrate the framework capabilities.
+
+First Python implementation
+---------------------------
+
+A simple Python implementation of functionality similar to that provided by
+the ``ksh`` script is shown below.
+
+.. code-block:: python
+   :linenos:
+
+   import glob
+   import subprocess
+   import os
+   
+   def run(file_pattern, input_dir, output_file, clobber):
+       # Returns 0 for success, 1 for failure
+   
+       # Sanity checking
+       if not os.path.exists(input_dir):
+           return 1
+       if os.path.exists(output_file):
+           if clobber:
+               os.unlink(output_file)
+           else:
+               return 1
+   
+       # Build list of input files
+       input_files = glob.glob(os.path.join(input_dir, file_pattern))
+   
+       try:
+           # Run "montage" command
+           subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+       except Exception, e:
+           return 1
+   
+       return 0
+   
+Note the following:
+
+- The Python version has been implemented as a function (``run``).
+
+- Success or failure is indicated by the return value of the function: in true
+  Unix fashion, ``0`` represents success.
+
+- We allow the user to specify whether the output should be overwritten using
+  the ``clobber`` argument.
+
+- The user can also specify the pattern of filenames to be searched for (so
+  this code can be more generic than the simple ``*.th.png`` in the ``ksh``
+  version).
+
+- Arguments also enable the user to specify both the directory to search for
+  thumbnail files, and the directory into which the output file should be
+  written.
+
+- For simplicity, we have not implemented the logic used to add titles to the
+  images (but extending the code to do so would be trivial).
+
+- Standard Python code is used to implement all the required functionality,
+  with no added complexity. In particular, Python's `subprocess
+  <http://docs.python.org/library/subprocess.html>`_ module is used to spawn the
+  ``montage`` command.
+
+Using the :class:`~lofarpipe.support.lofarnode.LOFARnodeTCP` class
+------------------------------------------------------------------
+
+To integrate the Python code developed above into the framework, some minimal
+changes are required. First, we take our ``run()`` function, and make it a
+method of a class derived from
+:class:`lofarpipe.support.lofarnode.LOFARnodeTCP`. Secondly, we add some
+boilerplate such that when the script is run from the command line, it takes
+three arguments, then instantiates the class we have defined and executes its
+:meth:`~lofarpipe.support.lofarnode.LOFARnodeTCP.run_with_stored_arguments`
+method. Note that the script then exits with the value returned by that
+method. The result is shown below.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   import subprocess
+   import glob
+   import os
+   
+   from lofarpipe.support.lofarnode import LOFARnodeTCP
+   
+   class thumbnail_combine(LOFARnodeTCP):
+       def run(self, file_pattern, input_dir, output_file, clobber):
+           # Returns 0 for success, 1 for failure
+   
+           # Sanity checking
+           if not os.path.exists(input_dir):
+               return 1
+           if os.path.exists(output_file):
+               if clobber:
+                   os.unlink(output_file)
+               else:
+                   return 1
+   
+           # Build list of input files
+           input_files = glob.glob(os.path.join(input_dir, file_pattern))
+   
+           try:
+               # Run "montage" command
+               subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+           except Exception, e:
+               return 1
+   
+           return 0
+   
+   if __name__ == "__main__":
+       jobid, jobhost, jobport = sys.argv[1:4]
+       sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
+
+Logging
+-------
+
+Within the :class:`lofarpipe.support.lofarnode.LOFARnode` environment, we
+now have access to some other framework-provided services. Chief among these
+is logging. The script is therefore updated to be more robust against failures
+and to report progress to the logger.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   import subprocess
+   import glob
+   import os
+   
+   from lofarpipe.support.lofarnode import LOFARnodeTCP
+   
+   class thumbnail_combine(LOFARnodeTCP):
+       def run(self, file_pattern, input_dir, output_file, clobber):
+           if not os.path.exists(input_dir):
+               self.logger.error("Input directory (%s) not found" % input_dir)
+               return 1
+   
+           self.logger.info("Processing %s" % input_dir)
+   
+           if os.path.exists(output_file):
+               if clobber:
+                   self.logger.warn(
+                       "Deleting previous version of results: %s" % output_file
+                   )
+                   os.unlink(output_file)
+               else:
+                   self.logger.error(
+                       "Refusing to overwrite existing file %s" % output_file
+                   )
+                   return 1
+   
+           input_files = glob.glob(os.path.join(input_dir, file_pattern))
+   
+           try:
+               # Run "montage" command
+               subprocess.check_call(['/usr/bin/montage'] + input_files + [output_file])
+           except Exception, e:
+               self.logger.error(str(e))
+               return 1
+   
+           if not os.path.exists(output_file):
+               self.logger.error(
+                   "Output file %s not created by montage executable" % output_file
+               )
+               return 1
+   
+           return 0
+   
+   if __name__ == "__main__":
+       jobid, jobhost, jobport = sys.argv[1:4]
+       sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
+
+
+Note that ``self.logger`` in the above is an instance of
+:class:`logging.logger` from the `Python standard library
+<http://docs.python.org/library/logging.html>`_, with all the features that
+implies. Any messages sent to the logger will be automatically integrated with
+the overall pipeline logging system.
+
+Helper functions
+----------------
+
+The pipeline framework provides some (entirely optional!) convenience
+functions which can help the recipe author address common use cases.
+
+The :func:`~lofarpipe.support.utilites.catch_segfaults` function, for example,
+can automatically recover and re-run an external command in the event that it
+results in a segmentation fault. This can be integrated into our existing
+script as follows.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   import glob
+   import os
+   
+   from lofarpipe.support.lofarnode import LOFARnodeTCP
+   from lofarpipe.support.utilities import catch_segfaults
+   
+   class thumbnail_combine(LOFARnodeTCP):
+       def run(self, executable, file_pattern, input_dir, output_file, clobber):
+           if not os.path.exists(input_dir):
+               self.logger.error("Input directory (%s) not found" % input_dir)
+               return 1
+   
+           self.logger.info("Processing %s" % input_dir)
+   
+           if os.path.exists(output_file):
+               if clobber:
+                   self.logger.warn(
+                       "Deleting previous version of results: %s" % output_file
+                   )
+                   os.unlink(output_file)
+               else:
+                   self.logger.error(
+                       "Refusing to overwrite existing file %s" % output_file
+                   )
+                   return 1
+   
+           input_files = glob.glob(os.path.join(input_dir, file_pattern))
+   
+           command_line = [executable] + input_files + [output_file]
+           try:
+               catch_segfaults(command_line, None, None, self.logger)
+           except Exception, e:
+               self.logger.error(str(e))
+               return 1
+   
+           if not os.path.exists(output_file):
+               self.logger.error(
+                   "Output file %s not created by montage executable" % output_file
+               )
+               return 1
+   
+           return 0
+   
+   if __name__ == "__main__":
+       jobid, jobhost, jobport = sys.argv[1:4]
+       sys.exit(thumbnail_combine(jobid, jobhost, jobport).run_with_stored_arguments())
+
+Note that we have also added the ``executable`` argument to define which
+external command should actually be run. There is no reason to avoid making
+the code as generic and reusable as possible!
+
+At this point, our node script is complete (at least in this simple form). To
+be useful, though, it needs to be executed across many different nodes as part
+of a pipeline. This is where the *recipe* needs to be defined.
+
+Defining the recipe
+===================
+
+As described in the :ref:`overview <framework-overview>`, a recipe is the
+basic building block of pipelines: they describe how to perform an individual
+unit of pipeline processing. In this case, our recipe will specify the inputs
+for the node script we have written above, dispatch the jobs to a number
+of compute nodes, and finally collect the results.
+
+A basic recipe
+--------------
+
+All pipeline recipes ultimately derive from
+:class:`lofarpipe.support.baserecipe.BaseRecipe`. A trivial example is shown
+below.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   from lofarpipe.support.baserecipe import BaseRecipe
+   
+   class thumbnail_combine(BaseRecipe):
+       def go(self):
+           self.logger.info("Starting thumbnail_combine run")
+           super(thumbnail_combine, self).go()
+           self.logger.info("This recipe does nothing")
+   
+   
+   if __name__ == '__main__':
+       sys.exit(thumbnail_combine().main())
+
+This recipe does nothing except print a couple of lines to the log. However,
+note the following key features:
+
+- The control code for the recipe is all implemented within the ``go()``
+  method of a class derived from
+  :class:`lofarpipe.support.baserecipe.BaseRecipe`.
+
+- Within that environment, we have access to a logger, which works in exactly
+  the same way as it does on the node. (Enthusiasts may wish to note that this
+  is actually an instance of
+  :class:`lofarpipe.support.pipelinelogging.SearchingLogger`, but the practical
+  difference is minimal).
+
+- It is important to call the ``go()`` method of the superclass (as shown at
+  line 7) to ensure all the necessary initialisation is performed.
+
+- If called from the command line, we instantiate the object, call its
+  ``main()`` method, and exit with its return value.
+
+Dispatching remote jobs
+-----------------------
+
+One of the most fundamental aspects of the framework is its ability to
+dispatch jobs to remote hosts, and this is absolutely necessary for the
+problem under discussion. We can add this to the recipe as follows.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   
+   from lofarpipe.support.baserecipe import BaseRecipe
+   from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+   from lofarpipe.support.remotecommand import ComputeJob
+   
+   class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
+       def go(self):
+           self.logger.info("Starting thumbnail_combine run")
+           super(thumbnail_combine, self).go()
+   
+           # Hosts on which to execute
+           hosts = ['lce019']
+   
+           # Path to node script
+           command = "python %s" % (self.__file__.replace('master', 'nodes'))
+   
+           # Build a list of jobs
+           jobs = []
+           for host in hosts:
+               jobs.append(
+                   ComputeJob(
+                       host, command,
+                       arguments=[
+                           "/usr/bin/montage",     # executable
+                           "\*.th.png",            # file_pattern
+                           "/path/to/png/files",   # input_dir
+                           "/path/to/output.png",  # output_file
+                           True                    # clobber
+                       ]
+                   )
+               )
+   
+           # And run them
+           self._schedule_jobs(jobs)
+   
+           # The error flag is set if a job failed
+           if self.error.isSet():
+               self.logger.warn("Failed compute job process detected")
+               return 1
+           else:
+               return 0
+   
+   if __name__ == '__main__':
+       sys.exit(thumbnail_combine().main())
+
+This raises a number of relevant points to note.
+
+- The distribution system is activated for a given recipe by "mixin-in" the
+  :class:`~lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn` class to
+  its definition.
+
+- In this case, we execute on only one remote host (``lce019``, as defined at
+  line 13). However, as many as necessary could be defined.
+
+- Each remote processing job is defined as an instance of
+  :class:`~lofarpipe.support.remotecommand.ComputeJob`. It takes three
+  arguments: the name of the host on which to execute, the name of the command
+  to be run, and any arguments which should be passed to that command. These
+  are provided in lines 23 to 30.
+
+- The command to run can be any Python script. By convention, node scripts are
+  named such that the name can be derived from the recipe name as shown at line
+  16, but this is entirely up to the author.
+
+- The arguments provided to
+  :class:`~lofarpipe.support.remotecommand.ComputeJob` correspond exactly to
+  those defined in the node script, above.
+
+- After all the jobs have been defined, they are passed (as a list) to
+  :meth:`~lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn._schedule_jobs`.
+  This blocks until all jobs have finished.
+
+- If a job fails, the ``error`` attribute (an instance of
+  :class:`threading.Event` from `Python's standard library
+  <http://docs.python.org/library/threading.html>`_ is set.  The recipe should
+  check for this and act appropriately.
+
+Ingredients
+-----------
+
+The recipe shown in the previous section contains many hard-coded elements:
+all the arguments to the compute job, the host on which to run, and so on.
+This is obviously inflexible and undesirable. We can overcome this using the
+*ingredients* system provided by the framework. An example is shown below.
+
+.. code-block:: python
+   :linenos:
+
+   import sys
+   
+   import lofarpipe.support.lofaringredient as ingredient
+   from lofarpipe.support.baserecipe import BaseRecipe
+   from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+   from lofarpipe.support.remotecommand import ComputeJob
+   
+   class thumbnail_combine(BaseRecipe, RemoteCommandRecipeMixIn):
+       inputs = {
+           'executable': ingredient.ExecField(
+               '--executable',
+               default="/usr/bin/montage",
+               help="montage executable"
+           ),
+           'file_pattern': ingredient.StringField(
+               '--file-pattern',
+               default="\*.th.png",
+               help="File search pattern (glob)",
+           ),
+           'input_dir': ingredient.StringField(
+               '--input-dir',
+               help="Directory containing input files"
+           ),
+           'output_file': ingredient.StringField(
+               '--output-file',
+               help="Output filename"
+           ),
+           'clobber': ingredient.BoolField(
+               '--clobber',
+               default=False,
+               help="Clobber pre-existing output files"
+           ),
+           'target_hosts': ingredient.ListField(
+               '--target-hosts',
+               help="Remote hosts on which to execute"
+           )
+       }
+   
+       def go(self):
+           self.logger.info("Starting thumbnail_combine run")
+           super(thumbnail_combine, self).go()
+   
+           hosts = self.inputs['target_hosts']
+           command = "python %s" % (self.__file__.replace('master', 'nodes'))
+           jobs = []
+           for host in hosts:
+               jobs.append(
+                   ComputeJob(
+                       host, command,
+                       arguments=[
+                           self.inputs['executable'],
+                           self.inputs['file_pattern'],
+                           self.inputs['input_dir'],
+                           self.inputs['output_file'],
+                           self.inputs['clobber']
+                       ]
+                   )
+               )
+           self._schedule_jobs(jobs)
+   
+           if self.error.isSet():
+               self.logger.warn("Failed compute job process detected")
+               return 1
+           else:
+               return 0
+   
+   if __name__ == '__main__':
+       sys.exit(thumbnail_combine().main())
+
+Using this system, the recipe author defines a list of inputs to the recipe.
+Each input is an instance of a class descended from
+:class:`lofarpipe.support.lofaringredients.Field`: the various sub-types of
+field enable the user to perform sanity-checking of inputs. For example, in
+the above, we can check that the executable provided really is an executable
+by making the relevant field an instance of
+:class:`~lofarpipe.support.lofaringredients.ExecField`, and that the
+``clobber`` value is really a bool by making its field
+:class:`~lofarpipe.support.lofaringredients.BoolField`.  The
+:ref:`developer's guide <lofarpipe-ingredients>` provides a lot more
+information about the types of field available.
+
+Each of the ingredients is associated with a name in the ``inputs`` dict.
+Within the recipe, the values of the inputs are available as
+``self.inputs[FIELDNAME]``, as seen (for example) at line 43.
+
+The various inputs can take their values from a number of sources. For
+example, as we will see, inputs can be read from the command line, provided in
+a configuration file, or take the default value specified in their definition.
+Whatever the source, though, they are always made available to the recipe in a
+consistent way: a :class:`~lofarpipe.support.lofaringredients.BoolField`
+*always* contains a bool, and so on.
+
+User-defined ingredients
+------------------------
+
+The ingredients system is designed to take care of as much error & sanity
+checking for the developer as is possible. It is therefore extensible: as well
+as checking for basic types as shown above, we can construct specialist fields
+to (for example) check that a given input falls within a particular range.
+
+In this case, we know that ``target_hosts`` should be a list of hostnames of
+machines to which jobs may be dispatched. Above, we used
+:class:`~lofarpipe.support.lofaringredients.ListField` to simply check that it
+is a list. However, with a little imagination, we can define a list that is
+guaranteed to contain only resolvable hostnames. For example:
+
+.. code-block:: python
+   :linenos:
+
+   import lofarpipe.support.lofaringredient as ingredient
+
+   class HostNameList(ingredient.ListField):
+       @classmethod
+       def is_valid(cls, value):
+           import socket
+           for hostname in value:
+               try:
+                   socket.gethostbyname(hostname)
+               except:
+                   return False
+           return True
+  
+This checks that every element within the list is resolvable (using Python's
+standard :func:`socket.gethostbyname` function). We could incorporate it into
+the above recipe by simply changing line 33 to:
+
+.. code-block:: python
+
+   'target_hosts': HostNameList(
+
+Configuration file access
+-------------------------
+
+In the above, we have expected the user to supply a list of hosts to run jobs
+on directly. However, in general the cluster layout is already known: this
+can, therefore, be determined automatically.
+
+As part of the :ref:`pipeline configuration <config-file>`, the user is able
+to specify a ``clusterdesc`` parameter. This contains the full path to a file
+which describes the cluster layout (see :ref:`the note on distproc
+<distproc-blurb>` for details). The recipe can access the pipeline
+configuration and extract the information from this file directly. We can
+simply drop the ``target_hosts`` input from our recipe, and replace line 43
+with:
+
+.. code-block:: python
+
+   from lofarpipe.support.clusterdesc import ClusterDesc, get_compute_nodes
+   hosts = get_compute_nodes(
+       ClusterDesc(
+           self.config.get('cluster', "clusterdesc")
+       )
+   )
+
+There are a number of points to note here.
+
+The pipeline configuration file is available as the ``self.config``
+attribute in the recipe. This is an instance of
+:class:`ConfigParser.SafeConfigParser` from the `standard library
+<http://docs.python.org/library/configparser.html>`_, and can be accessed
+exactly as described in the Python documentation. Here, we simply extract the
+value of ``clusterdesc`` from the ``cluster`` section.
+
+The framework provides some convenience routines for working with clusterdesc
+files. Here, we use :class:`lofarpipe.support.clusterdesc.ClusterDesc` and
+:func:`~lofarpipe.support.clusterdesc.get_compute_nodes` to extract a list of
+all the compute nodes defined in the cluster, and then proceed to use the list
+of hosts in the recipe exactly as before.
+
+Additional notes
+================
+
+Some important aspects of recipe design were not covered in the above
+discussion.
+
+Assigning jobs to specific hosts
+--------------------------------
+
+The example we have considered above is, in one important respect, simpler
+than many pipeline recipes: it runs exactly the same code on each of the
+remote hosts. A more general situation is processing a large number of
+similar, but not identical, datasets (such as independent subbands of an
+observation). Due to limited storage capacities on the remote hosts, it is
+usually the case that each host only stores a subset of the total number of
+datasets locally. Therefore, when dispatching jobs to the host, the recipe
+author must be careful only to send jobs which refer to data it can reasonably
+process.
+
+From the recipe point of view, this procedure is straightforward. The recipe
+we developed earlier contains code like:
+
+.. code-block:: python
+
+   jobs = []
+   for host in hosts:
+       jobs.append(
+           ComputeJob(
+               host, command,
+               arguments=[
+                   ...
+               ]
+           )
+
+When specifying a job which must run on a specific host, the pipeline author
+can use a mapping of the form:
+
+.. code-block:: python
+
+   job_list = [
+       ("hostname1", [arguments for job 1]),
+       ("hostname2", [arguments for job 2]),
+       ...
+   ]
+
+And our earlier code can then simply be modified to:
+
+.. code-block:: python
+
+   jobs = []
+   for host, arguments in job_list:
+       jobs.append(
+           ComputeJob(
+               host, command, arguments=arguments
+           )
+
+In general, the recipe author must define the mapping between hostnames and
+job arguments themselves: this will depend on the details of the problem the
+recipe is addressing. Often, it is convenient to use one recipe to generate
+the mapping, then save it to disk for use by several recipes in the pipeline.
+This is the approach taken in LOFAR's standard imaging pipeline. Here, the
+:ref:`recipe-datamapper` recipe determines which filenames are accessible from
+which hosts, and stores them to disk in a :ref:`parset file <parset-handling>`
+formatted as follows:
+
+.. code-block:: none
+
+   hostname1 = [ /path/to/filename1, /path/to/filename2 ]
+   hostname2 = [ /path/to/filename3, /path/to/filename4 ]
+   ...
+
+The :func:`lofarpipe.support.group_data.load_data_map` function makes it easy
+to read back this parset from disk and iterate over the values to dispatch
+compute jobs: see the imaging pipeline's :ref:`dppp-recipe` recipe for an
+example.
+
+.. todo::
+
+   Recipe outputs
+
+.. todo::
+
+   Combining recipes into a pipeline
+
+.. todo::
+
+   Testing this recipe by running it
diff --git a/CEP/Pipeline/docs/sphinx/source/conf.py b/CEP/Pipeline/docs/sphinx/source/conf.py
new file mode 100644
index 00000000000..d5738750ca6
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/conf.py
@@ -0,0 +1,243 @@
+# -*- coding: utf-8 -*-
+#
+# LOFAR Standard Imaging Pipeline documentation build configuration file, created by
+# sphinx-quickstart on Wed Jun 10 17:09:31 2009.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# The contents of this file are pickled, so don't put values in the namespace
+# that aren't pickleable (module imports are okay, they're removed automatically).
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If your extensions are in another directory, add it here. If the directory
+# is relative to the documentation root, use os.path.abspath to make it
+# absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# General configuration
+# ---------------------
+
+def add_recipe_inputs(app, what_, name, obj, options, lines):
+    """
+    If obj is a recipe with ingredients, add information on its inputs &
+    outputs to its docstring.
+
+    Designed to be called on the ``autodoc-process-docstring`` event.
+    """
+    from lofarpipe.support.lofaringredient import RecipeIngredients
+    def format_ingredient_dict(ingredients):
+        for name, field in sorted(ingredients.iteritems()):
+            if hasattr(field, "default"):
+                extra = "; default: ``%s``" % field.default
+            elif hasattr(field, "optional"):
+                extra = "; optional"
+            else:
+                extra = ""
+            lines.append("``%s`` (:class:`%s`%s)" % (name, type(field).__name__, extra))
+            if field.help:
+                lines.append("    %s" % field.help)
+            lines.append("")
+    if what_ == "class" and issubclass(obj, RecipeIngredients):
+        lines.append("**Recipe inputs**")
+        lines.append("")
+        if obj.inputs:
+            format_ingredient_dict(obj.inputs)
+        else:
+            lines.append("None defined -- defaults apply (see :class:`~lofarpipe.support.lofaringredient.RecipeIngredients`).")
+            lines.append("")
+        lines.append("**Recipe outputs**")
+        lines.append("")
+        if obj.outputs:
+            format_ingredient_dict(obj.outputs)
+        else:
+            lines.append("None.")
+            lines.append("")
+
+todo_include_todos = True
+
+def setup(app):
+    app.connect('autodoc-process-docstring', add_recipe_inputs)
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.graphviz',
+    'sphinx.ext.inheritance_diagram',
+    'sphinx.ext.todo',
+    'sphinx.ext.ifconfig'
+]
+
+inheritance_graph_attrs = dict(
+    size='"0.0"', # Don't scale drawing down, as it screws up fonts
+    ratio="compress",
+    fontsize=14,
+    nodesep='"0.1"',
+    ranksep='"0.1"',
+    rankdir='"TB"'
+)
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['.templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'LOFAR Pipeline System'
+copyright = u'2009—11, John Swinbank'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0'
+# The full version, including alpha/beta/rc tags.
+release = '0'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = []
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+
+# Options for HTML output
+# -----------------------
+
+# The style sheet to use for HTML and HTML Help pages. A file of that name
+# must exist either in Sphinx' static/ path, or in one of the custom paths
+# given in html_static_path.
+html_style = 'default.css'
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+html_logo = "logo.jpg"
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+html_favicon = "lofar.ico"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['.static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, the reST sources are included in the HTML build as _sources/<name>.
+#html_copy_source = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'LOFARStandardImagingPipelinedoc'
+
+
+# Options for LaTeX output
+# ------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, document class [howto/manual]).
+latex_documents = [
+  ('index', 'LOFARStandardImagingPipeline.tex', ur'LOFAR Standard Imaging Pipeline Documentation',
+   ur'John Swinbank', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/index.rst b/CEP/Pipeline/docs/sphinx/source/developer/index.rst
new file mode 100644
index 00000000000..e47cfdaa4f9
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/index.rst
@@ -0,0 +1,88 @@
+.. _developer-guide:
+
+*********************
+Developer information
+*********************
+
+This section describes the internal structure of the pipeline framework. It is
+intended for developers who plan to work on the framework code itself.
+
+.. _code-structure:
+
+Structure of the code
+=====================
+
+The pipeline code can be obtained from `USG Subversion
+<http://usg.lofar.org/svn/code/trunk/src/pipeline/>`_. There are five
+top-level directories:
+
+``deploy``
+    IPython system deployment scripts. See the section on
+    :ref:`parallelisation with IPython <parallelisation-ip>` for more details,
+    but note that the use of IPython within the pipeline is *deprecated*.
+
+``docs``
+    Documentation and examples. See the section on :ref:`available
+    documentation <documentation>` for details.
+
+``mac``
+    MAC/SAS EventPort interface code. See the :ref:`mac-interface` section for
+    details.
+
+``framework``
+    The framework code itself. This is implemented as the Python module
+    :mod:`lofarpipe`; see its documentation for details. A ``distutils`` based
+    setup script, ``setup.py``, is included for easy installation: see the
+    section on :ref:`installing the framework <framework-installation>`.
+
+``recipes``
+    A collection of :ref:`recipes <recipe-docs>`, intended to both demonstrate
+    the operation of the framework and serve as useful pipeline components.
+    Recipes intended for different pipelines may be stored in separate
+    directories: for example, the ``sip`` directory contains recipes
+    useful to the :ref:`standard imaging pipeline <sip>`.
+
+External components
+===================
+
+.. _ipython-deprecated:
+
+IPython
+-------
+
+The IPython system was extensively used by earlier versions of this framework,
+but is now *deprecated*. Both recipe and framework developers are urged to
+avoid using it wherever possible. However, until all existing recipes
+(including those not distributed with the framework) have been converted to
+use another system, the IPython support in the framework should be maintained.
+That includes:
+
+* :class:`lofarpipe.support.clusterhandler.ClusterHandler`
+* :func:`lofarpipe.support.clusterhandler.ipython_cluster`
+* :class:`lofarpipe.support.ipython.LOFARTask`
+* :class:`lofarpipe.support.ipython.IPythonRecipeMixIn`
+* :func:`lofarpipe.support.utilities.build_available_list`
+* :func:`lofarpipe.support.utilities.clear_available_list`
+* :func:`lofarpipe.support.utilities.check_for_path`
+
+Conversely, once all IPython-based recipes in active use have been replaced,
+the IPython support code should be removed from the framework.
+
+.. _documentation:
+
+Available documentation
+=======================
+
+.. todo::
+
+   Describe the available documentation in the docs directory: what the
+   examples are, how to build the Sphinx documentation.
+
+.. _mac-interface:
+
+MAC/SAS interface
+=================
+
+.. todo::
+
+   Describe current status of MAC/SAS interface.
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe.rst
new file mode 100644
index 00000000000..792d6577497
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe.rst
@@ -0,0 +1,19 @@
+.. module:: lofarpipe
+   :synopsis: The framework package.
+
+****************************
+The :mod:`lofarpipe` package
+****************************
+
+The :mod:`lofarpipe` package contains all the Python modules included with the
+core framework code. This includes the :mod:`lofarpipe.cuisine` package
+(inherited from the :ref:`cuisine <notes-on-cuisine>` framework for WSRT), the
+:mod:`lofarpipe.support` package (containing all the pipeline framework code),
+and the :mod:`lofarpipe.tests` package (which contains a limited selection of
+tests).
+
+.. toctree::
+
+   lofarpipe/cuisine.rst
+   lofarpipe/support.rst
+   lofarpipe/tests.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/cuisine.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/cuisine.rst
new file mode 100644
index 00000000000..7b68d046901
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/cuisine.rst
@@ -0,0 +1,62 @@
+.. module:: lofarpipe.cuisine
+   :synopsis: The Cuisine system.
+
+************************************
+The :mod:`lofarpipe.cuisine` package
+************************************
+
+The LOFAR pipeline system developed partly from the `WSRT Cuisine
+<http://www.astron.nl/~renting/pipeline_frame.html>`_, developed by Adriaan
+Renting for use at the Westerbork Synthesis Radio Telescope. Many of the basic
+concepts (the recipe, with associated inputs and outputs, for example)
+originated in the Cuisine system, and the user is encouraged to refer to its
+documentation where necessary.
+
+A slightly modified version of the "original" Cuisine is distributed as part
+of the :mod:`lofarpipe` package. The modifications include:
+
+* Use of `new style
+  <http://www.python.org/download/releases/2.2.3/descrintro/>`_ Python classes
+  throughout.
+
+* Reworked option handling code to use the `optparse
+  <http://docs.python.org/library/optparse.html>`_ module from the Python
+  standard library.
+
+* Reworked logging system using the `logging
+  <http://docs.python.org/library/logging.html>`_ module from the Python
+  standard library. This provides a flexible way of configuring logging formats
+  and destinations, included logging to files or TCP sockets. See the section on
+  the pipeline :ref:`logging system <lofarpipe-logging>`.
+
+* Assorted bug-fixes and tweaks.
+
+It is hoped that these changes will eventually be merged upstream.
+
+Very little of the original Cuisine code is currently used in the LOFAR
+framework, although the :class:`lofarpipe.cuisine.WSRTrecipe.WSRTrecipe` is
+still used as the basis for all LOFAR recipes. The recipe author, however, is
+*never* expected to directly interact with Cuisine code: all LOFAR pipeline
+recipes inherit from :class:`lofarpipe.support.baserecipe.BaseRecipe`, which
+entirely wraps all relevant Cuisine functionality. The
+:mod:`lofarpipe.support` inheritance diagrams show exactly how these packages
+are related. The following API documentation covers only those routines
+directly used by the rest of the pipeline system, not Cuisine as a whole.
+
+.. module:: lofarpipe.cuisine.WSRTrecipe
+   :synopsis: Base module for all Cuisine recipe functionality.
+
+:mod:`lofarpipe.cuisine.WSRTrecipe`
+-----------------------------------
+
+.. autoclass:: lofarpipe.cuisine.WSRTrecipe.WSRTrecipe
+   :members: help, main_init, main, run, go, main_result, cook_recipe
+
+.. module:: lofarpipe.cuisine.cook
+   :synopsis: Cuisine cooks.
+
+:mod:`lofarpipe.cuisine.cook`
+-----------------------------
+
+.. autoclass:: lofarpipe.cuisine.cook.PipelineCook
+   :members: copy_inputs, copy_outputs, spawn, try_running
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support.rst
new file mode 100644
index 00000000000..6c008259263
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support.rst
@@ -0,0 +1,19 @@
+.. module:: lofarpipe.support
+   :synopsis: Core framework code.
+
+************************************
+The :mod:`lofarpipe.support` package
+************************************
+
+This package contains effectively all the core framework code that comprises
+the LOFAR pipeline system. Broadly speaking, it address five distinct problem
+areas:
+
+.. toctree::
+   :maxdepth: 1
+
+   The construction of recipes <support/recipes.rst>
+   Checking of recipe inputs and outputs ("ingredients") <support/ingredients.rst>
+   Logging <support/logging.rst>
+   Distribution of pipeline jobs <support/distribution.rst>
+   Utility and convenience functions <support/utility.rst>
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/distribution.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/distribution.rst
new file mode 100644
index 00000000000..751021e00aa
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/distribution.rst
@@ -0,0 +1,70 @@
+.. _lofarpipe-remotecommand:
+
+************
+Distribution
+************
+
+Before proceeding with this section, the reader should ensure they are
+familiar with the :ref:`recipe author's perspective` on how tasks can be
+distributed within the pipeline framework. This section will describe in
+detail what goes on behind the scenes and how it can be extended.
+
+.. todo::
+
+   Details!
+
+Node scripts
+------------
+
+.. autoclass:: lofarpipe.support.lofarnode.LOFARnode
+   :members:
+
+.. autoclass:: lofarpipe.support.lofarnode.LOFARnodeTCP
+   :members:
+
+Compute jobs
+------------
+
+.. autoclass:: lofarpipe.support.remotecommand.ComputeJob
+   :members:
+
+Scheduling and rate limiting
+----------------------------
+
+See :class:`~lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn`.
+
+.. autoclass:: lofarpipe.support.remotecommand.ProcessLimiter
+
+Dispatch mechanisms
+-------------------
+
+.. todo::
+
+   Extending this system.
+
+Dispatch mechanism can be specified in the ``remote`` section of the
+:ref:`pipeline configuration <config-file>`
+
+.. autofunction:: lofarpipe.support.remotecommand.run_remote_command
+
+.. autofunction:: lofarpipe.support.remotecommand.run_via_mpirun
+
+.. autofunction:: lofarpipe.support.remotecommand.run_via_paramiko
+
+.. autoclass:: lofarpipe.support.remotecommand.ParamikoWrapper
+
+Exchanging data between master and nodes
+----------------------------------------
+
+.. autofunction:: lofarpipe.support.jobserver.job_server
+
+.. autoclass:: lofarpipe.support.jobserver.JobSocketReceiver
+   :show-inheritance:
+
+.. autoclass:: lofarpipe.support.jobserver.JobStreamHandler
+   :show-inheritance:
+
+Clean shut-down and recovery
+----------------------------
+
+.. autofunction:: lofarpipe.support.remotecommand.threadwatcher
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/ingredients.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/ingredients.rst
new file mode 100644
index 00000000000..3b70596eeb8
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/ingredients.rst
@@ -0,0 +1,53 @@
+.. _lofarpipe-ingredients:
+
+***********
+Ingredients
+***********
+
+One of the most fragile parts of pipeline definition is ensuring that the
+inputs to a recipe are correct. Broadly, there are two failure modes here
+which should be avoided. The first is perhaps the more obvious: it is always
+desirable to check that the inputs actually make sense before attempting to
+use them to operate on some piece of data -- particularly when that data may
+be scientifically valuable. By checking inputs when the recipe is started,
+awkward errors during the recipe run may be avoided.
+
+The second failure mode concerns the source of the inputs. As we have seen,
+recipe inputs may be provided on the command line, read from a configuration
+file, calculated by another recipe as part of the pipeline run, etc. It is
+important that these inputs are presented to the recipe code in a consistent
+way: if, for example, a ``float`` is required, a ``float`` should be provided,
+not a string read from the command line.
+
+All LOFAR recipes define a series of ``inputs``, as described in the
+:ref:`recipe design section <recipe-docs>`. These inputs are ultimately
+derived from the :class:`~lofarpipe.support.lofaringredient.Field` class,
+which provides validation and type-conversion (where appropriate) of the
+inputs. A number of pre-defined fields are available, covering many basic use
+cases (see :ref:`below <pre-defined-fields>`); the recipe author is also
+encouraged to define their own as necessary.
+
+All recipes ultimately derive from
+:class:`~lofarpipe.support.lofaringredient.RecipeIngredients` (see :ref:`the
+last section <lofarpipe-recipes>`). This provides a number of standard fields,
+which are present in all recipes, as well as ensuring that additional,
+per-recipe fields are handled appropriately, including type-checking on recipe
+instantiation. Within the recipe environment, the contents of fields are
+available as ``self.inputs``, an instance of
+:class:`~lofarpipe.support.lofaringredient.LOFARingredient`.
+
+.. _pre-defined-fields:
+
+Pre-defined fields
+==================
+.. automodule:: lofarpipe.support.lofaringredient
+   :members: Field, StringField, IntField, FloatField, FileField, ExecField, DirectoryField, BoolField, ListField, DictField, FileList
+
+Infrastructure
+==============
+
+.. autoclass:: lofarpipe.support.lofaringredient.RecipeIngredients
+
+.. autoclass:: lofarpipe.support.lofaringredient.RecipeIngredientsMeta
+
+.. autoclass:: lofarpipe.support.lofaringredient.LOFARingredient
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/logging.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/logging.rst
new file mode 100644
index 00000000000..bbda265f54b
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/logging.rst
@@ -0,0 +1,112 @@
+.. _lofarpipe-logging:
+
+*******
+Logging
+*******
+
+One of the major changes made by the LOFAR pipeline system to the
+:mod:`~lofarpipe.cuisine` system was the introduction of logging using the
+standard `Python logging module
+<http://docs.python.org/library/logging.html>`_. All instances of recipes
+derived from :class:`~lofarpipe.cuisine.WSRTrecipe` (in other words, every
+recipe developed using the framework) have an associated logger (in fact, an
+instance of :class:`~lofarpipe.support.pipelinelogging.SearchingLogger`
+available as the attribute ``self.logger``), which supports the standard
+logging methods: see the Python documentation for details. The logging system
+is also available in much the same way on remote hosts using the pipeline's
+:ref:`distribution system <lofarpipe-remotecommand>`.
+
+Note that by default only messages of level :const:`logging.WARNING` and
+higher are logged. Use of the ``-v`` or ``--verbose`` flag the command line
+will log messages at level :const:`logging.INFO`; ``-d`` or ``--debug`` at
+level :const:`logging.DEBUG`.
+
+Logs are output to standard output and also to a file. By default, the file is
+located in the ``job_directory``, but this, together with the format used for
+logging, may be configured through the :ref:`configuration file <config-file>`
+if required.
+
+The :mod:`lofarpipe.support` module provides a number of helper functions for
+working with logs, which are documented here.
+
+Searching logs
+==============
+
+Sometimes, it is convenient to be able to keep track of messages sent to a
+logger. For example, pipeline tools may send metadata to the log rather than
+output it in any other, more useful, fashion.
+
+As mentioned above, all recipes have an associated instance of
+:class:`~lofarpipe.support.pipelinelogging.SearchingLogger`. This can have any
+number of regular expression patterns defined, and it will then store for
+later reference any log entries which match those patterns. For example, a
+recipe could include the code:
+
+.. code-block:: python
+
+   self.logger.searchpatterns["new_pattern"] = "A log entry"
+
+This would record all log entries matching "A log entry". Later, a list of all
+those entries can be retrieved:
+
+.. code-block:: python
+
+   matches = self.logger.searchpatterns["new_pattern"].results
+   self.logger.searchpatterns.clear()
+
+Note that messages generated by all subsidiary loggers -- including those on
+remote hosts -- are included.  The call to
+:meth:`~lofarpipe.support.pipelinelogging.SearchPatterns.clear` simply
+instructs the logger to stop searching for that pattern, to avoid incurring
+overhead in future.
+
+.. autoclass:: lofarpipe.support.pipelinelogging.SearchingLogger
+   :show-inheritance:
+
+.. autoclass:: lofarpipe.support.pipelinelogging.SearchPattern
+
+.. autoclass:: lofarpipe.support.pipelinelogging.SearchPatterns
+   :show-inheritance:
+
+.. autofunction:: lofarpipe.support.pipelinelogging.getSearchingLogger
+
+Logging process output
+======================
+
+Many pipeline recipes run external executables. These tools may produce useful
+logging output, either by writing to ``stdout`` (or ``stderr``), or by using a
+library such as `log4cplus <http://log4cplus.sourceforge.net/>`_ or `log4cxx
+<http://logging.apache.org/log4cxx/>`_. The framework makes it possible to
+ingest that output and re-route it through the standard pipeline logging
+system.
+
+Standard output/error
+---------------------
+
+.. autofunction:: lofarpipe.support.pipelinelogging.log_process_output
+
+Logging libraries
+-----------------
+
+The output from ``log4cplus`` or ``log4cxx`` is currently intercepted by
+simply redirecting it to a file and then logging the contents of that file as
+it is updated via the
+:func:`~lofarpipe.support.pipelinelogging.log_file` function.
+
+.. autoclass:: lofarpipe.support.pipelinelogging.LogCatcher
+
+.. autoclass:: lofarpipe.support.pipelinelogging.CatchLog4CPlus
+   :show-inheritance:
+
+.. autoclass:: lofarpipe.support.pipelinelogging.CatchLog4CXX
+   :show-inheritance:
+
+.. autofunction:: lofarpipe.support.pipelinelogging.log_file
+
+Logging resource usage
+======================
+
+This is a decorator which makes it easy to log the amount of time (wall and
+CPU) used by parts of a pipeline.
+
+.. autofunction:: lofarpipe.support.pipelinelogging.log_time
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/recipes.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/recipes.rst
new file mode 100644
index 00000000000..2db9a29a17c
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/recipes.rst
@@ -0,0 +1,49 @@
+.. _lofarpipe-recipes:
+
+*******
+Recipes
+*******
+
+All LOFAR pipeline recipes are ultimately derived from
+:class:`lofarpipe.cuisine.WSRTrecipe.WSRTrecipe`. However, all the
+functionality it provides is encapsulated and enhanced by the
+:class:`~lofarpipe.support.baserecipe.BaseRecipe` class, and it is from this
+that all pipeline recipes should be derived. This class also includes the
+:ref:`"ingredients" system <lofarpipe-ingredients>`, which controls recipe
+inputs and outputs.
+
+A number of "mix-in" classes may be added to
+:class:`~lofarpipe.support.baserecipe.BaseRecipe` to provide additional
+functionality, such as the ability to dispatch jobs to remote hosts
+(:class:`~lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn`). Recipe
+authors may mix-in whatever functionality is required to achieve their aims.
+
+The :class:`~lofarpipe.support.control.control` class provides a recipe with a
+little extra functionality to help it act as an overall pipeline. This can
+include interfacing with an external control system, for example, or keeping
+track of the pipeline progress
+(:class:`~lofarpipe.support.stateful.StatefulRecipe`).
+
+The relationship between all these classes is illustrated below.
+
+.. inheritance-diagram:: lofarpipe.support.control.control lofarpipe.support.lofarrecipe.LOFARrecipe
+   :parts: 3
+
+.. autoclass:: lofarpipe.support.baserecipe.BaseRecipe
+   :members:
+
+.. autoclass:: lofarpipe.support.stateful.StatefulRecipe
+
+.. autoclass:: lofarpipe.support.control.control
+   :members: pipeline_logic
+
+.. autoclass:: lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn
+   :members: _schedule_jobs
+
+   See the :ref:`distribution <lofarpipe-remotecommand>` section for details.
+
+.. autoclass:: lofarpipe.support.ipython.IPythonRecipeMixIn
+
+   The use of IPython within the pipeline framework is :ref:`deprecated
+   <ipython-deprecated>`.
+
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/utility.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/utility.rst
new file mode 100644
index 00000000000..145582f759f
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/support/utility.rst
@@ -0,0 +1,98 @@
+.. _lofarpipe-utility:
+
+*********
+Utilities
+*********
+
+The framework provides a number of convenience and utility functions. These
+are not fundamental to the operation of the framework itself, but rather
+provide functionality which is commonly needed in pipeline recipes.
+
+.. _parset-handling:
+
+Parset handling
+---------------
+
+Parsets ("parameter sets") are files containing key-value pairs commonly used
+for the configuration of LOFAR tools. Many pipeline recipes will, at heart,
+run a standard tool over a number of datasets in parallel by substituting
+per-dataset values into a template parset. These routines are designed to
+simplify that process.
+
+.. autoclass:: lofarpipe.support.parset.Parset
+   :show-inheritance:
+
+.. autofunction:: lofarpipe.support.parset.get_parset
+
+.. autofunction:: lofarpipe.support.parset.patch_parset
+
+.. autofunction:: lofarpipe.support.parset.patched_parset
+
+Cluster descriptions (clusterdesc) handling
+-------------------------------------------
+
+Clusterdesc files (see :ref:`distproc-blurb`) describe the layout of the
+compute cluster. They can be used within the pipeline to help choose nodes to
+which jobs may be dispatched.
+
+.. autoclass:: lofarpipe.support.clusterdesc.ClusterDesc
+
+.. autofunction:: lofarpipe.support.clusterdesc.get_compute_nodes
+
+.. autofunction:: lofarpipe.support.clusterdesc.get_head_node
+
+Grouping input data
+-------------------
+
+Often, a long list of input datasets are grouped according to some criteria
+for processing. These routines provide some useful ways of batching-up data.
+
+.. autofunction:: lofarpipe.support.group_data.group_files
+
+.. autofunction:: lofarpipe.support.group_data.gvds_iterator
+
+.. autofunction:: lofarpipe.support.group_data.load_data_map
+
+Process control
+---------------
+
+Many pipeline recipes spawn an external executable and wait for it to
+complete. These routines can assist the recipe author by simplifying this
+process and automatically recovering from transient errors.
+
+.. autofunction:: lofarpipe.support.utilities.spawn_process
+
+.. autofunction:: lofarpipe.support.utilities.catch_segfaults
+
+File and directory manipulation
+-------------------------------
+
+.. autofunction:: lofarpipe.support.utilities.get_mountpoint
+
+.. autofunction:: lofarpipe.support.utilities.create_directory
+
+Iterators and generators
+------------------------
+
+.. autofunction:: lofarpipe.support.utilities.is_iterable
+
+.. autofunction:: lofarpipe.support.utilities.izip_longest
+
+.. autofunction:: lofarpipe.support.utilities.group_iterable
+
+Miscellaneous
+-------------
+
+.. autofunction:: lofarpipe.support.utilities.read_initscript
+
+.. autofunction:: lofarpipe.support.utilities.string_to_list
+
+Exceptions
+----------
+
+The following exceptions may be raised by pipeline components.
+
+.. automodule:: lofarpipe.support.lofarexceptions
+   :members:
+   :undoc-members:
+
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests.rst
new file mode 100644
index 00000000000..34f49b4a99a
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests.rst
@@ -0,0 +1,17 @@
+.. module:: lofarpipe.tests
+   :synopsis: Pipeline framework tests.
+
+**********************************
+The :mod:`lofarpipe.tests` package
+**********************************
+
+Pipeline tests are implemented using Python's `unittest
+<http://docs.python.org/library/unittest.html>`_ module. Test coverage
+throughout the framework is currently sadly lacking. This must be addressed in
+future development. Such tests as are available are installed as modules in
+the :mod:`lofarpipe.tests` package.
+
+.. toctree::
+   :maxdepth: 1
+
+   tests/ingredients.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests/ingredients.rst b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests/ingredients.rst
new file mode 100644
index 00000000000..cf713cf0ee6
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/lofarpipe/tests/ingredients.rst
@@ -0,0 +1,6 @@
+*************************************************
+The :mod:`lofarpipe.tests.lofaringredient` module
+*************************************************
+
+.. automodule:: lofarpipe.tests.lofaringredient
+   :members:
diff --git a/CEP/Pipeline/docs/sphinx/source/developer/todo.rst b/CEP/Pipeline/docs/sphinx/source/developer/todo.rst
new file mode 100644
index 00000000000..ac0663ad568
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/developer/todo.rst
@@ -0,0 +1,61 @@
+********************************
+Potential framework enhancements
+********************************
+
+There are a number of areas in which the current version of the pipeline
+framework could be enhanced. In no particular order, these include:
+
+- An ingredients system for :class:`~lofarpipe.support.lofarnode.LOFARnode`,
+  so that inputs are checked as they are supplied to nodes.
+
+  - A consistent interface with
+    :class:`~lofarpipe.support.baserecipe.BaseRecipe`, ie relying on an
+    ``inputs`` dictionary, should also be supplied.
+
+  - Unfortunately, this can't be used to validate inputs before starting the
+    recipe, for obvious reasons.
+
+- Ingredients probably shouldn't be provided with default values. At least,
+  not default values for things like paths to executables which may sometimes
+  disappear.
+
+- Error handling throughout should be cleaned up; more exceptions should be
+  caught and better feedback provided to the user.
+
+- Configuration of the :ref:`logging system <lofarpipe-logging>` should be
+  made more flexible.
+
+- Rather than checking exit status, the framework should use a more-Pythonic
+  error handling system based on exceptions.
+
+- For consistency with the rest of the LOFAR system, :ref:`parsets
+  <parset-handling>` should be used for all configuration information.
+
+  - That, in turn, means that the parset format should be standardised.
+
+- A global job queue per node should be implemented, so that multiple
+  simultaneous recipes can submit jobs without overloading nodes.
+
+- :meth:`lofarpipe.support.remotecommand.RemoteCommandRecipeMixIn._schedule_jobs`
+  should be non-blocking.
+
+- :meth:`lofarpipe.support.baserecipe.BaseRecipe.go` and
+  :meth:`lofarpipe.support.lofarnode.LOFARnodeTCP.run` should be made
+  consistent, in terms of both name and semantics.
+
+- The logging system should interface more directly with ``log4cplus``, rather
+  than just reading log information from a file.
+
+- More detailed feedback and quality-check information should be sent to the
+  rest of the LOFAR system.
+
+  - This is an issue which requires changes throughout the LOFAR system; it
+    can't be addressed by the framework alone.
+
+- The ``recipe_directories`` option should be removed from the
+  :ref:`configuration file`, and the regular Python import mechanism, with
+  appropriate namespacing, should be used to locate recipes.
+
+- The dependency on :mod:`lofar.parameterset` should be removed, so that the
+  framework is a stand-alone codebase and can more easily be ported to other
+  systems.
diff --git a/CEP/Pipeline/docs/sphinx/source/index.rst b/CEP/Pipeline/docs/sphinx/source/index.rst
new file mode 100644
index 00000000000..77d0addb50a
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/index.rst
@@ -0,0 +1,92 @@
+###################################
+LOFAR Pipeline System Documentation
+###################################
+
+.. toctree::
+   :hidden:
+
+   todo
+
+This document provides an overview of the LOFAR pipeline system. This system
+has largely been developed to support the LOFAR imaging pipeline, but is now
+being deployed for a variety of science pipelines on the LOFAR cluster. This
+document is split into a number of sections: :ref:`the first
+<section-overview>` describes the aims of the framework, the structure of a
+pipeline, and gives an overview of how the system fits together. :ref:`The
+second <section-user-guide>` provides details on how to run pre-defined
+pipelines. :ref:`The third <section-author-guide>` provides a tutorial
+introduction to writing pipelines and pipeline components. :ref:`The fourth
+<section-developer-reference>` describes the framework codebase in more
+detail, and is intended for pipeline authors who wish to dig a little deeper,
+as well as those interested in developing the framework itself. :ref:`The
+final section <section-pipeline-specific>` provides a guide to the imaging
+pipeline itself.
+
+.. ifconfig:: todo_include_todos
+
+   This documentation is still a work in progress. See the :ref:`to-do list
+   <todo>` for upcoming improvements.
+
+.. _section-overview:
+
+The pipeline system was developed by John Swinbank (University of Amsterdam)
+in 2009 & 2010. Since 2011, the primary maintainer is Marcel Loose (ASTRON).
+
+Overview & Getting Started
+==========================
+
+.. toctree::
+   :maxdepth: 2
+
+   overview/overview/index.rst
+   overview/dependencies/index.rst
+
+.. _section-user-guide:
+
+User's Guide
+============
+
+.. toctree::
+   :maxdepth: 2
+
+   user/installation/index.rst
+   user/usage/index.rst
+
+.. _section-author-guide:
+
+Recipe & Pipeline Author's Guide
+================================
+
+.. toctree::
+   :maxdepth: 2
+
+   author/index.rst
+
+.. _section-developer-reference:
+
+Developer's Reference
+=====================
+
+.. toctree::
+   :maxdepth: 2
+
+   developer/index.rst
+   developer/lofarpipe.rst
+   developer/todo.rst
+
+.. _section-pipeline-specific:
+
+Pipeline Specific Documentation
+===============================
+
+.. toctree::
+   :maxdepth: 2
+
+   pipelines/sip/index.rst
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/CEP/Pipeline/docs/sphinx/source/logo.jpg b/CEP/Pipeline/docs/sphinx/source/logo.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..27a5db5145597965afbbd1ed759d9a9cdcb48d28
GIT binary patch
literal 12323
zcmY+p1yCGq&@H@Vk;Nf+a2B`V?g<_wxVtY)fW_Ti7x&=q?j$(D-4h%Z4H^g`f8Ou@
z_0_#+=9y=zPWPFvuBoo>nZK)le*o~5<P_xqNJvNkg?|U&?*`x<0Ql-Z{JR2?f&Uf?
z@E=f6kdgm8(NIwT0R;^W1VRUa&@eHvu>M>Bfb+ljPXy_Id{8jZ&@gbZFtBi6|Hr<@
z{~sIt|Fhxz|J(oi{XaK<2LSjWKq_Dih=dP#g^vWpNBTPipacK_X#Yj~zd-sgLR2&W
z2>l-|fd_c?@5!%_k&%J_Lj7m-j|L#)qoC5h=0+os{zyn??!ptCTnHip6U%5=xRQ|Z
zhBSWJVJIq|yZYx(fdoK82L4~_0Kk9pNT_I_f2=4z0O_AL5cR*5p!{cr^dF6n@|u<#
zRr({rKka|gjbE<N2zg}Yh<1L0H7tlp-noXr|E>eD{>i?=2jT-H0gn;Ci#7BQtXSqJ
zhqY4$6_|o}&ttkNq;)71M8B(?2jnYx-l4z!_UkVI{UWi#iBjWS#7Yt?p`I5-%Op)K
zE_hsG$vQ&Yg1?y$?e(m*D3bZfl}0721J!A$8BQ0{?Ju@C+uRkc=+BraHk7Wpl~k?V
z>F5Jj_)lL*D*<Ku1+rsW`St_zTB_>@Z?ti^HR;T|5_zNNSRl~zjW!3EC4*9txl(VR
zA+i-l4y8o&TF$oWPD6V~^yjp9@^q>0PQ!X;F;3;Uj*Xp(B#`i^E7pm00ep+*KjS-f
z#bz^}MWHa;UvXmI-!1?zSHEGmb>u^)k?vY0<0{_Z?e85vu-la{nk!~+&8LWq%ekMQ
zmzeX@SqCC0@O(qqa#JiVT6c*ep-@PkhI?0ceDQ+jA^SjmR>DNz-sbY&*Jln6zrt(2
zD7lh6UN4+J2K_m8;Um%EYzYaaQ|FlCQd9sX4bA`#w{gBY>1Hi4P}*9=U&)f8qBDLs
ziJDWEdG*I_!<SK?>dL`>Uh-m(X<~^guzp=PtM+|zrQJi|0s{GtPvgD!@wmxe<FNCy
zE_MM{#l%f&PI{6z)SpmyL7BYnyNn2P8!|WIa`UhXBT5=gn~CU5{d}!vF;N1N#u)}X
zsM^O1*sa~G!5aLIC(^mQH-L@({NezywkzW}G`{+c!OTshPHBx|aPL-S-VukTKxQg@
z0Rp$rcQL)1z_r`<p>C+L+uD?H@HFy9jrRCSA(;{S0+;%<-qy!By(uEIo&88q|3yz}
zZ%ruvEMi1hSEY>r_Y{;QD12LE&fW7-j=JyT!&xfQ7}ECF(>#%G&+-xryLN5mmHGgl
z$2nE#4@RNUGI^{(-ru@RBd6H1ihltRH)>6UwzW}KfqBEi;|H;EWI4PuTBeFJHC6dK
zQEeRy^7O+kVdk|hh^cC9VQ`((5PCv+Kyp4BV@`BdE}B+F@mnvu&EsB|A8&(y6Abn0
z{+9ey_oen?jGZ%VLu7mZygF=g&njVief$1p-HX%OuNjo_nOg?AIPUWM9ffmybDW;Q
zAb*|Ya|r0B^P0^y1;-7VHg89h+}MNd8tR+l3TRtd(Y!p~_zOVmL2JKS-c$TeF0#F%
zZp?Y}sB<uDaH+7;GWDfcPto7K<TTxrX9_>7C`6GasX{p@TuPp1QO<?FvA6yl=g(n>
zaY<lj2|zw-)9b9zlCbIHOmRJV{ObAoJ!Py~#E{94W8DzCT%~F{ije`H_73wTNXf$V
znjXc1vpbcnJSPM5e6Cx<oi&I-F6%^r$6*_Unc`wB7QyKB(h8+y)_BLM)mLDn&zrbh
z3aZiGqM+O;mB13qysNB*hS&{J+#axpv6B`|<hTV@UM--K7mtnMNE%pjX;QMuSo620
zG}=V)Y#(S9JZZ5o+bed)mpmeW+n!ob4uAVKF;(+5)VwK}EjHDC6CLZVi{v*f2P+SW
zU^&Gq#OE;!xB9tS<N}&P2^?E%^x{(wpT!6K^=i7WL`gz}Tudk(^f)fVkRHLIq+R#d
zJlv^#uaGwHn0|2F)5z1i0yE(K=g}VSeHp%-2Mh6c;~@$)L`LtwiQ-V?QdD`4*24OS
z14xFdb4TaNg~JF+FSacQZmyboi`-OWSvDOTR9t=S@5oA8jimH$6H8&)9-TEv8?`d6
z=Wb!G@(UAm7mKud9;Ku32heBl1Uk^@Rtb7S;8MXv+cZ%%xA;zR&)H|H3}<S?CpHSn
z2Oq!lJ6%&+PUwDk(`#9~4a%#5M1wOT!mnP&<ZaG0Pt^i?v92#4dlJGy{ozs{?}3C7
z3lUs=uEh7MgejlVB({EjEJ@g5Tn}mX9W&TQ*pdH~2h%<$uthA&S*A^ihRCc~+e600
zHP9N%aK>A0SKnCIUi7VG#}#2qe0xfCo~s$Bm~T&Wx+G7ksb*slH|0bV`=~}`3u*tB
z^sBZYoNGjDF+;h;k@6_8rN5IrG1SCBjSbt)<S5qaJhqoWb(Q>y15IhW-QYu4d1N1}
zUG0^JPkZ`@ev~Sq<J0((mjc7^{0XNd#kdfgwc+6b$nTt$(P1~%Ox6O8r%Y08*(PmG
z>gQ04d)&>Hfa-RgJlTt8>Nh?9Im2(!`Oei07S8(DRXb{TF1lgfq5`e#B3S93SBUxd
z`0|++Fb5mNOVb@HnbHTsL+>0+PF~7H&T84ABTIR6Rh#%kK{t(n^t;C1EyS$PmfWvT
zJ2)$w1%n0A+sww}?o|CvbuDb-bflETOGQgg24sHw%=%H<kIsW=eUMsN+K?&?8>Hlc
z#|OFer>aqj836Q|!`3_yH*l~oeCIOa&G-0~-}!rPzj|R}Z|HrEYj&IG&n<eC>9hBC
zKd#HYrGujtj_~jpXZPdz-L$3Ed;_4YU3!w+G_<Xw;K=OmIVqhN{{;XyaIMed{{lLn
zr6S!gjy=*(de+<SeJMHU;G&r1^nFm7t1Y%2zfLVK&FccceTixIigY`=piwe7*Y9us
zMtd1-(CIzZN|!O^P}%UOiqnE0H@1V%0us-yJ&XMtga(8GG~*=-mI7eMO;a&uXE5Rh
z8K9)5dk~u49vOQ`I%@VV@NGfk)bDPojl0&^?qXR>@<gXMQs4aA{{qtbzV_<<D?r<L
zrd}@PM)H5E8a5vSD4C*PU+7D`QK`osYrlCjuWOf<+Wbo9Zl8AAq4n9;WMUM!MvRd#
z%Gg^;(q>+{3Tv$^cqwFe0(Ym1a&$h{;|<b9o=T><XBn5Kr3&<}sK8Q!wJ%UCyj2!F
zE###Cu+(Y+Ss3VGr_w0wVrAKVg*b`j$`zY1ig)3ZYtaM13x{&@A@JY{rTS0jtw;?w
zBvJ5?<6Doxi;0x32jWJFsDywwY-}sseCphAD-7QG^Hp2QpgNUd3Y91|DNajAS90Q(
zL+7?OmsQi!G!I(sw`QOTV9SDr82TFD)$W!}Usn>IHGYXg*3nb^_@8jgt1jX<omMVO
zHM6iqgg<-r)pEkmYhA--REz4r01WPfLaM=)8)3;jk6>I+<LUBWw;OW`-i=z&xi%|%
z`o)$z1lEU?AubW;PIloVu@ocIcp?6uy-R$%=YpJef4Yncz*XU5!s9T{Ly4+lqfRMf
z8N!#9W<jh4PQ@5Vz~}xQcEvv+)V6%GLbsX8w_Cn%oiYUP8(2p|L^p528yDZC+M{l5
z@9K_6>}o$dMO{*pjj+7tmN3Xs$|S4c;pUMkcB&SL_zT!wc_A9u|1AQ*J>BlaB~s6<
z`mL(AvS=5Wrff8Q@6^v#NZwd$cJ{gQPhMw=Bf?V+hC6ifkfdnuj)c@yKPKb|3=yT;
z<IlG57IfeQHLVM2M#+(>U*8`k$PtTok%u-m&aPeybTEDF%I^B9tSjQ49$$Wli7j$)
z>2}oR*$JPw1(nu5B@jf71{4pVL;-O{d`}vFE+%q1SbLK6$+ado@Yl|jZap;r1<0=o
z7Z)q%Imuh4M`LBEFjjlQI1X<67wkEG;-b);m0x@R1(bez21ks1;`XqGRxI`X2~et-
zqAzbvzVMySa32S_E+6=i41A)ld8S|AXtr>I=9u@kNnHJ6;pbrc*xAu&UGO2fBzzv?
z*oNdJCmKBCcL}@Ns4AeSNgBB~83F|AJP~dt9X6s4aWFyt0!X{43QCWkBz{~~zhB`y
z&#5woP*E{#og)OIGCZjX+lDh351Dgc^?He6-B*74kwp*7oyceyn_Y863;4S77Z43D
z8Y^ZpTJC#cC3g=b`hDZ;<zMX>uLMSU>FuuboUX00f8aIb8@Zk}5Kn}MQYmPYexm{)
zQ{8#Wez|Q~`jAz&lMKq9M`}%QTsb|iv9EkXkL*hPa;o$f;KdZBQfFMco$T;Jym?#>
zWgNc2hg!A?K<pRWWpMfOG0lEXbDwGj-tQ*8XTRBqJUaJI@%vtOe|0*g@RCfRhc5p6
zyI}48n(ldE0_7ZKL|)6C3I>PTprL;nNn}^qVn<J^{&o~$6@n&eCdC;E$chfI6pu<1
z1WISkw?5kNlI73<c|ZLYT<UWmY>}SfMAJB1x!LcTc6t?O)6V0h9hJhYi0(5(?K5V*
zpnpSn$iU8HFNfSWav~RpG**M*R;UJD^7Wng34NSYA=PF(L{F(DQyf(Vh^53)tNAQQ
z;;rACb8@r>lT31gP8u6~YXWEmjm}ywT6<+jGamFQbTwtMW@%sI2GH?>0@267t1&lL
zn&LGux#>-B2l4d$HKXP9Ytd?(;kDoOA0hFY(sEQEUh|F>2Vv~ED$w|?mNh&ZIQWc=
zsxc~>eqO|kA6uzStD}xoeYwGI(eX0xX>X6gyg%)gePv5kf4V_gz2pUEdXDq&ZdP>q
z`SWEQ);c918vneYA_MV8pZA;2`BqVaUoo~<vJdBtDci+ZrSHWweyOPypdvo>HZ`P|
z`{GN9j{4m%-P{GaZDITq*Gakn^Xb`yGi0iH;8zKZiv17{`bl2p+0v5aiLucxa96C)
zaByZX`T;mnLcw}1e1yio*zqp(M|!4~7U0LZ0spMm>C87-5z_VL5ZUpSas8G*blaQV
zhVlo|AWl@5>o|<>&YKwdNgYEQHm!e%Qn_tb^_6PGfuz+i%CuO^Ce#mBZ_Lk?XyvyB
z(?OI|I$EnJ7?#%RTLgyVb`|t;dDh4@v^nJ^N9~_f?<R%1iqOG!kYHzzfz9>x^W!Rq
zMAuQ@s?M{3rNX>42l%GIwIQM@LRMibYv&kX!!v}nM-itT@%F>9gR&6-?W0p}o5rc3
zJx)5>C;I8_3uQ%Oy(O~L2$NPU9*H{)*2woiexQ8m`%Il}A*_r&+^f}_f4x|gJMJ3I
z#}1oI$WMOJ5dH%*<Uk|S9^|yupJKdU-+u4!R3?ZQ0O@!oVWKQOZ`-!`Svf}+7c*|(
z6KxUy7&5Cz=`ZexolfBF|K(V_2ODS6fe@a&G%M<}Jo&{epYiaE!aKV6S&}h1zIC-`
z9F`gc#=PzGjrm_>vt4&{zPS=oXj*N_!p&xL*vsTbARHtgMJ;wqYRzYuE;Y&CG?vFP
z`9tugQSGmmgnIb-J9@;PbX+Smmx78~Jq+VTcF1(3z?@+7Mbh#~B&trj)(hrR0`sPj
zw>F1B-*f~@k7MDRpi64pn9)-C7+XcWkuQm3umQ2)DR0_=#(g2Z+vC=Z5cH1rkR>sp
zpR9iY*3ft*Ubk8M2ShqtCg{+7X(f1?OkRO$b+=@v1C{Kt;<Or_GK-NLsvHrqS!-Ff
z&h@O=FreJh7y6s|kFQ>e`fQy$l;vhA$rPE(q%MPoYV>7k)1}9@ZRcT1N+q~aR0wl5
zc`^_K<$d@-Z_h{p7*S>D`s4crsS`hD<gXw=+>b^N09_@xVc2z>sDg-eNJC2jW~6sv
zBo|!%b)&!?HS6GdYt+O<tfrJ7yTHE+u|nc_g3<z~%6*>J!g2=)9g$rcN`MF4m+r2o
zN?bY*SGG5dkXHN^)V?X^%X~7w@LNh$o9|Dt523T_Fm$&SXUhOHG*lPN>Pihd&zz8Y
zXc#jp=b@J81w9$7#-tB8Nm2D`ZReZkFN<cv4#o;sDu^7ZogEM{MIxExY;SEbV`Hh+
zYd^<#N|O@`YRHrR__54HTXbZlB*oea>vkLJHe}xCnLGneN)chHqUt5Fy|39*BK|;U
z&q=@0-TFL^(RepGlS-+!ecY8c<Z3welfDLtR6!n@g<|hib$3Ayp_|2LZ{H7YpFw0M
zKhRTl)*xiUSBF4}lFXJViIsJPuA3W9O7TOcD#^H(_cfGo&00Cb>h2RKCOI8RAsO6>
z$o9YwS~26L`L+sIPVVi}-{!<wV7ZFaJ4{632a_r;gjO4cNc%sSXB<;hc;NP=Cd*sJ
zurZ$+9<<KV0XyN6#?XcHVV>o(wC=%kDMRDUHBrmykMw5wnDV*7kJRg3^*c5+tJS8!
z_~zI|f!m*rzu1ff91ExJsB)f(b{J;ZPK{44_sg10%vqTn&Ew_VVsz~sQ@sE1K54*=
zc^*?dT#rsv`IyyQ7=&A6*^5_}%ZA?s<coHGD3QezG}nAH+`ye>SOtX%onecBwU@L7
zW|<FdU^9GsmRfKHMd?s|Zs0i0>xzwwbwW59EkG!#nE6@X9eZPA(O<Ei=Y?@j8ryoW
zp*aeiNsmUYYB`j80D;L3PAS-t+kJ<6wDq)Qr7NeUF{=AmIz_0<QzppkN_<am9n>^$
zt$j}Ij2-U)-wUk*_o3S>rJd_gP6|_$T%3pN>!(t;b9U&d@-d@)ce^zY56VHRxsTMD
zEBch1BROUOHqi%7IqSH#gSJ2Kq|1$`o&gG0*wGOq&wl}}DdO}e?c~8sv(R<s=X8Dj
z#i&uo9fiwP=fE4a;B139X#HUE*+yz;rl-87<&1f8L?3x8eysSaFl5Zv{Z6N{{NG?d
z1EU)T(U8ExQ4-L6&eE0|H00M+ua=KF-4>4v%*gXz!Q1V&`IAAca@vtJp<^@)KYQU^
z!e!@lKN8Fh2O;!z^~BuammkZL_!!Hp+JbF223=>^YfIN9z(9hr@^q&BB>B&Gb(EuG
zn)L8&JvHZmBH>@&?ud$N2<4M}{Z*BNhywU04$Nq&XrhD=v8T1jxyQA+DV9v}y5!1b
zy<E$JUz3*|PJ$EpMeHe>f?YMb_8odETH|kZ51DxbL$F`l7HwTRBRHWhvk4XbULt74
z!o1}9oB21-h{v@qOlnCYCyGDr%imG3d*DCpXT3=c>60ESib*XbOwh^$rz?JK=gVo1
zCL!U81ef*n3aTuj-GfW-o>*gUjCNugg`2vvzam7(a2TSyB7CWx10rhd97;;_Qsj#>
zonG%HH4cRP%z5(X*9s1X+j0I(igP8Wd{XfYpxj&cQ`{wKX;PI!;2KgFwUj3(irD!C
zV%SZLt`K#Wo3gw|Fva)>^BHN(mU&C$_1*CZ&=xAK(N$t?h3j$09LCqaNo`%)4W8}6
z^+{bWXFUZFqX*%Nnxn!tmyLcbNwW{7-fzgmP4uW7c&mawHkVsEHG5Dj>+6Y>3JS{V
zZ#@wh>W{N+jJw9KS!xV301upz0G5jd?G6Xuqx3W>G55zzsDzCU{9Bv>i99N|#Jy>$
zu932-AquO0iRD@&fy5VkYs8}Oc6RVKCO^$B^Xd1Dd=0V!R+CD(<ifJAJ*b1D8YQP&
zat9J@wif)`zGu5xV&exxQ&sYxm9t)>Ho;(E){3wY^iRV<-lVtnO6Gq7F(V%uC2~6t
z2dqW!76NJOi|VYEP0o=z#ZnKeYfif89UkH%TtB#s^nMs!0k+rII{mtQZJw$8<^;_P
z@n)9O=g|wU6-_@TI;xgEb7wpZt1_0%om_WuwQLtMNfqw|c2M=K<o}#ztQfL&Ngf(@
zH5h~<Z8%xhRrMJZJs95K{ywZ*$2n-|R|{!Ou+qb0zCp4E36VJ#Y<Ht!xIh^+W!c$b
zuGnuY8W88uSz{wvot5Kn$5o<{ZVrO&1p0E=y<MVm2;Au{CTi}#fQCBLVG46YeYPFS
zkyp|{m?czg)=yhSMKd;5u7DrOKneo`p$^h8&<U=Dh?@cl6c@%%knewMImC?Z{IQv^
z7AH6$LC;&NS%&eEU@p+*;@KZ6P;JDg5WGL$vPcq7TyM%YP{g431fW^oZ~+JsP}S<#
zdZdAGc7LEuA7$yv4wZ(;^N^OR{}h~<7^F#T)$alwm|F!A=Y3I=+i99<)hAX_I)L*<
zzYixC3Ie^NN{;id4%vN+Q=JqM5jO-7AYPDF(>k3v2Q05ci=?m?pco=V-q3yhAFl91
zteSFU{Bym@<4vls?;<`Y$g<RN2$I*bedtM4Z-_uJo??9#7DC{lP2vE^rhGIb8i{p2
z^2l{_x90g@PLc;=?CcAnVJEJsf#VvFmfJj#lZNh@NdYcF;#FS-%Gc0UF(2(m1MY;=
zkt7~@!Zs?Vf8ZJ@A2n#ZP8exN%FjPfClOawRrADaLe0H(x5y!6ONvU%5blU(4aeHb
z3gwnp$Ll>C5Gm6?G6(og>Te%c$jHR3Y}+9E1qsJyBs{ok8L}46D}s0Shed9&Qz@Ng
z*cB$2#yBk6PKQp$y47T$65>%mpZh!hqMtLn9})*Cp15-pH~dOR%W^yOb?$WZ+zB~Q
z_CtEkcNyxE?K|rh<WeRZdD`Klz$ddEFMy9O+m|Y<bg>y?CPS~hC%^Q<xS<i{E$uOt
zuL=xpoK0_O_r}0CM9$XS9l~`Jazo@q+NACm8{fY$@hwbheK<Shp6|7Sqxr!;?x$V0
z*%Cle?i${0?)B4~>;~~E$iTnhPipWGY!<Q=%twsfleZ-AL$8%^z!ouz^v$pJa^c?e
z*>WYD7Y!;amVBjQUc26;bg!npFpBA4WEvEJT5Ly=+h7wfs95tVsI)Jgg=4jgey#*Y
zD+drVGeqkzFBfm!v_-a|o27Q1l|ICYc<%IDTBeK!$;MPbvU41s!ZK=l8ODVuJgc(}
z0w%b&V^FN6(jG8`?j;@k-|PB29V!bbSJ;@-%f=eiPaU${foE}HqesS~)Yb63Nw_eM
z%_ZrfNb;<^M0Ry~H=FdyxB6d*!oPqvgOre@cdtb$P?4g^&Vy8W4W4sh26|H1R01o9
zMKMRFU9-4V+3OM%4hH9&xL(M_nhmJCrs>>^F7VNq!=+z)w2<2BNZS6U#=<Xow3GY&
zQNM8(Y#{Ix0iW>FhtzG;JP4z?pHFdvx~#&^v-w>TI>o5KbJU`DI5lCB6;oHGC!R9;
zaT*4khetFNfm=}<%4@{Onx%IyQ7qi)&yi8}s_ea4PxaeQs>st*V}h#<;^Z`&%-F_(
zoDB<+{QQ8Ji3E8eVz))MKc$V9aaOv1A;X5!s@`GudPB;0qx|B*%DzCus<^Ciu{SQi
zSw$i@Q~T1WbGRf%W5TW-i>jRpyv*BsJKG1^7f9+3rTQ$1wQk<<8^uxJkMCt4iM@*#
zuSsoKYYZsvq{mD`YwKf0MC8Q-x-}PeYpHNAC<9#~TzPVvc{CzHowPA2tEtDDqYE+D
zf>bZGI^TeqqX%NGE6dAhYoBsORgDzK0KaNCyu46~FDR8blnxm}-$!H0Y_a3h`SmMM
z+~kYGjEHe0tGS}KcaPdtP1m3V=E(}8UoSAH@)^$Kn@gMm&n_ZqRfrB>Kh9U7i!{zH
z%8@#YOZuGrLgm)wAUasQbke=~era80!53$FeLiIsz<E9;X%AuiF24j`q8X4Rz+)6d
zkO}E)QSm0oLVE-a89wMHhesBE-+8WOi((<2z7k~lgvoD;yGtVPXV5d3<4sIpqrpUw
zN+)0@tx?#Z03f(v@sq~(ZF<okaCqyR3_0Bk^mY4St`^4IQ<)PnG7pu3<WcGEH*$m{
zBs4h{2LkF-sUH*kSrG}k5lA%6y?^?@R@8Cucxq|dKUATbC54W$mInXEPjZ0RLfbK3
zGdK1!z1lYVgWvyDlBK^$H}<W3t6-EmRs7I8D)5@920d%+`!`cKdpJ#7BBOU#|2P04
z{M2j~kX35qvg-S13pC|9AHk-DHzKc$2Ll6NnD!Ds1N_;<69vhi`~ECD<s7Cr#JI@d
zt6AQcq;gQngid;X9SuF?wWJhJ2uRyxa8!w(b`Ss3DK4gcLvfmr`<(7h@aT$8l2p4W
z(ZN5+vyDhjm^i&PC~c44C^lTz$pNb+R?RQ#WFx;<-_@7W5jS(E2&d|(wS7Wi=U1)t
zUivz6q>Syc2IaE&<matd=`*}tjxrbFVU;Ch?~QY`c+^2H{kXrpBu}Ag#<5XOQ^97;
z2@#jaT>E)2F=B_yL0HmswCPJ-QK=f;)57{XwW)dKb7O};b;Zd8TQqrzd!1;AyoKby
z_d0_*JH9?V@!LA4Xnm$=HLKkber`7rxoyp<0sEFmyT%A&>qB_7jC?W6kHsnqo37}u
z3f9ic_nnP5<kVV2?wT^^6akiN4hA|cPlvIeDijJClDLv}#l`K0Re99>Y<eZX3T-rs
ziHJnT3T=`@ouTE~z3}qZ@cPx{pS{V}&Zy!L+vMGe!i`F7<WQj*ikpwC8zpPJ8{OG=
zVls=j-LYc}(1=WvVb`;U8US-zMlo8=O@eQV31sM}@K(ImR}*AG4>Lt<-~;}rtvlBH
z2-x~+{xQWfhx;)u$j@MHouU6ak7s0C@2Jz#YGLT~Exh=%>!jCF+_i$F!*wFHb7y+t
z!uto%g`>bMY(;zS%25VrhMiIR@HST9jni^N%Q8)z;bwWZrAeAu{HQaJ5LN3CvzLb=
ztI_dz$DAW^bluy&>JjQ8|NGhO7x8xvMp^HeIb9+W<i=S}lf-drJ)J-?wKd;_+E<3{
zbKi+l@B+6@Y*yZucW3Yl`*}>ccX_SPeKuOb&zsq-SS?Do#s6BG`^2FH3nF1{6A&Jm
zQav~CXzfi94vtgsF*fq6Des_pAl%qo;YzOZP7+c8-geCi21foiRajeh>McVw7@2Of
zwcK;xdAAMY09+@K5Eoo(l#jKh*dwgg)qZMm5$(3F?oZyZ-X5Ym6=aTxJ@*V^%P;ha
z_57`!5EQ_8X?U2Ax_}JgD<0J%EpZslzpe$YUfL?^|3&Prw;#hV=jUp_LVw=55wrtc
zqALrH_cocL9FHfTUc7N-1#DCevo_pbwg`1Fe&8qeY6?nMg<Hd}W&#Diu9z(swMXni
zJYUOGE<*TZBPDr+Xc7yF)Z%w!x`6ofz|t>`w_x=;u8I4>F3WCDyv_BV^~`w78@*1w
z0d157FJMEpA7kwGY=p`EocxeO*i47IT#Zt_fzHHFRm#RTcaDCB_}(F(I0zwJ<xS6A
znA7Ju(O2qERz5E4t?asyD%#47xXxpr%p7qukM4Cw6=S%QtyzOBWjPMMso0g-9Xc|l
zqB|9@Da9oPKC_vd(Srsq-L_whwJB!KsT2-drZ`9Z+2ooVyVN+nb~WktNNpaYBu%{Q
zb*}u$E4eZ)ots0Q<fB(iiJbQA*0+Q4b1Ig0yj|2O+?6B>Ig=p_XWV3E+7eojt%6x?
zPka|-J!67=_oRe9@05o>tr|4J7i2*l)C6-jZIaU$pGx|It$$@CXYH#`_^jS>&Gd^`
zkGZGu*t*To3lG6P77?0m78cj?T`SLYtqZM`s)`ScC4A%Na(0KwirDm!>-SZ~aY<+m
z7hZLN!~B}*Tk+>_&7fs@K6b(!-$AsIywepgbY8=Ckmf+n{JYYY_E)5?L*dav+GOHE
z6~6?r2LaRM7VZ-EdUbqzoA%nZDc(hpoCr+g1+Q6qo~}`tfXEshQ#xicTr{S2XfGRt
z`5C$eMboHeKC+7J(?W~NCQkd*Y1_1Z^XwioF*i&@$vHt9q-2#`FmtKnC2mw-j+1Ha
zUKcn~I=p9DiWFn(c($cx^UbnLkShP^eOg#>9X?!`2zT;xuI)yG1BVF%rI{OEk1eGj
zB}RtqyH(HJq8^RSb*h7v!$prjVLs4QI7!Sjv5i9Nz528=g}<u94AS6HSvgkL!39vm
zQ$mE9v{vhrr@Q7se@`|y7Q9(bl%@J3fpiB(?GlfWUg~ywrVQ_PYM6sMbT*QZxR7vO
zJyr&fYV=%@*?{BVpf^rKO*D9(EW`>mAlHi1_kn1=+J)KPf)l=BgyYn%rcr;f+|K_p
zDr?lceE(AS&Sb+#p3b@Ht9m!CWr5P{Pb>&_oioqSVe(XoQ9=ZQJ&o`BrQuxoB#?o?
z9|jc*W-q+vz%$p%FSmSQk|=ozX`SdSO8yHt5oALhboEn2_(w8ecpL25U?N=}YB;zk
z*%N<Uv1xV=-!fSW`4J9XHT<TquB+xLkp5s0i$tL@^!($2)NxNz^{(maP+T#xr}N4-
zX%!e)xcV!clf{7C3?wcuxdU0>I9a(G^|FxdNau&k#wE{BB7qwy{KGxh6%X8??qeOj
z32j%Mjnc5tD6Q__TVOSv^u0Ph%>*k|wKj~H&{!8)PU6XhF<w_Y5|?X|Upcsh_v$EH
zpEYuL1HEjC+Df|N(}<rX?Ou*ScE#7FW+JBb#TCIb$UYDCTrM+syF#^g=}g4Uf+sWt
zDW7R#sok5*e}xOhjFyN0Su(ZtN-2YiFRu5b;~@mb4&d1GRbH7CL1eC|Yz89@i0miF
zgtUP>w2ue9smzT+8!UZ&cG)UlFSqVU6Wkiw^36V{U+=^lKRy9?KO*$(?BYi((6U&Z
zweII1RAZQi@|yo7tX%MAexmEWwz7*KZhlPaR~*$4thle#V=%7{QZO*|ca)8~QU9rP
z@6^GX->h1{SfkxLY@9jDue>2f9+JB?BZ7c`P~<5I%e@&Bd%4~G7)nY}6U-ZWL;1B8
zLdEDr36pBsSZRN)Sgwz1XlEQtZ$52ULWQd4r)5C#Dj$7|*8rH;NuJ}blVWk<=dCRq
zRo$Mil*OpZDjJ^@2Ge)I4`;_LBt?pG0lCEBMfuy8(9MSZ`#7SZL^x8l-q*>xg{g>A
z1w3G7F%R(;9u=<MR5F;9{;Emdgj5<#gDc_S_Eq2Uol3+wCAmCQ<95B7RGXxBH7?i6
z8EKS>ZyGbStF5@H&zMsYz01K8eCF0xs>kWUhl=M|NAUqPlNVA#FtY<B>e9Xf;BS+m
zdp@_dmn~~<MRFya(EKVnzZXRgjOl#*@`(KI0F2OcnVFqu>BlYO5HAUNu5F~EKR?ri
z4z5dT_9lN`o8>}u+0&T~xyhZ<u!>R@6_Y(9w9$4RQ}lE7>jol?+<U|85%4Fyk%wEs
zCx!y+lR7|*esVKhQHS(y?^Vs=vtTLA7v4clPAMK@KPS6O*q2sx@}%tD7WIDLEz1bL
z{~C7|XFH^=h9ta#>n2-Z>%Ps_!N2Vu$+{jAsL-Cs@7B0*Yy3`j@o2Gi=HNjfy^_B+
zQ*#Bz)~~3(CoV*>zt`UDd^vQ%Gh*-Jv9JIK(-^fYn>^i#MZ{W`htLvTAhkZLo?<N>
zrbE_%3Rdx?VdL}w|7L?*C)kK5+{lwj!(XRbi-Yz<qC`v464i00^;3*AGObWccSm!Q
zveN-QRZ*neO4~I0YMGnO&YO!T#FLr;Lgw<qE`hN@ztG11JMATr>uLb;sqgys#b1C*
zbXllcczxYdU?;L3!t!bFrs{(sRYd$0WGXh{<F4u#{ka`E9UE<Nxp_mwwW6FXAbb>|
z#^0c^Hi_(5?Ld3>Jx!45RY{6jreey+T&FOpp8a`1r;HF)%MXyeHe&&H6aJIeur;WB
zIb{meGJ$UpMwkCTbxwg@@Av+Sd#n^dcV{RkwZ@@vT{Vd^2<sQ+`?rn-Mp_-T@%3-9
zkw#&Yr5S`5yvZ@m@#C{ji6<K1B=>jgj&P@Zt%uw4!Bk;`U_Bh%z*4alW=;pyy^jse
zGs8ym#Hc>TL7x_SrK(1bD?hDIEGJJq{dQ_x!|%&?NUl4Dn<97=?atc1yjPHu*QoOz
zUCjQmsS-6BWr34b+}$89b#z)al&0F7?e=Pd2Ck|`|FlGF5u-Ava9@Jg(2W|#`K3l5
z^z;37*XWz{<OD)oO=%j;R5q%#9)9Ca2xDI9v^IlDPGnpXx#ZSfSK{W2Hije5n@F@C
zXzNRW!$`qvIO72Z#^IrIWwv*Mae`vC92b8Z24Pc1*8z>H{acn6qc`>-sZv0gwT6nw
z@*28Xq5Wiqk=J2^%xiXg6c9_(B3-+W(*Mi_h4$voM^r~Ity>m`K!fxF3XIIJxfA$d
zdEh=_<UiqJ&qMlUV^+c4h+;uNXIDe=SqYrwe02=tj@BB(CrF|PHVmOqSaV=8GUtTs
z3W~OK9a+xQMXb|nWe`)8jk-KDtB|<M+S{eKpMr@jqk~T$^^THMAx1mXyij^Nmh4?l
z-$7&C?An;hPAT`x6}OigH+v2JiME@ucC1p(LXfjs#WdQbAu7wrt;Sq$qxh6JG4Y$f
z0BXX%WiE7_Uk=q>vLy!j)f9p})SU8<`R~$&C0a)z*3OWEN(s?2d$1$?A*Sb7#?E$f
z%T9|w^~X?6f(>~Xcn$uQG1({1353jZFJ>to5BY`4&+*YYIXs;nYx8{4i%Mzb_2&cu
z<7(lt5o~C+iL{;!f*mhc)*8&5Zb2J#@6E&ek|ZpdM$-W_ue_2WV~U{XHh3ixL1nwM
z>I;wrJq6r?#+SCg`xXkI5gyEj?cbm4Z-KaDzBrJW&82}Rdgj*`8MG`fm61RY=Jy`A
zglo)-c>eYvV+B`yHJ|@8u_c9jFSS<ktx*?ED_UsYd~i)ZXmG~C@m={a^eaM17jHEh
z=t+rIxMVuTl4w+t(2eVkEpgxN)nyiJks*A_RAae;tc@c7X0bQj@|~LT3(7kW`hPu7
zVwZc_MF&IhNZA$%`VEWr&S(QL?y%N5fZHzmYso{z=ivT`+!v6writh}b-{yj+tIuU
zmBEJ&R^9UC<kKm&jJS%DsP^7%WN2G~FMIUfo9{oW0Jz`gJ9G4%t5)1%L}|iyr^%!1
zUW*z%?mu^2DyB_ARG}%0XTJGBPmQPTtWLh~EO&4D^@8RKG6iQ74eCoP%HfY3yX89X
zL=Cq^7+Kq=dBs2jagiO`iGKm!tZJtkp=)EmAy-Q}4AAqlef9-*X*}ym8;$A*ivif}
z_>=lWkKNl-D`Y{D{DRjlBHF1gAuc}&4(<C75RbZ-aeu1uztApSFb&vArd(PXF`v-G
z<#-f^Hy>>1vomJ_4SMfsvNcHDmf6E-bkrkJn_l6E5GZ5Ih?KGD$PkS3(GtZZ00R~?
z2<gWtZ)REr!gOi~>9XyIK`p7A;_+r@;hswxzTLufty<m<>TC(lqww0~(1%N7YSQv_
zFU7c5PzdFj8o2zbe*5}3R<?fcR;*8{99D?Xk?NUEkHX3VKQ8)@{re-WimR<wy$W4)
z5dT^Uhs!x}it(!eh>!tVR%R$7V9+~NT6d7@ZLRce_wgm3)d(!~wxU%XUC(eyv9zEm
zz}o32aM&Lfz|pWsKWJz;Y7@Z)rux8&j|2h;0asGANF{OKz00B5%z)IDYj!o(4~yEY
zX>jcNW-Zf5aR%5}4*aXi$7uY152&$KTBTE872c~`LyLC^AD9|#o(i6IERY>MB|T)`
zdsZT$%Yv6v!<^E@X2Z8${g<uZ8Ur#E7!!DLI_g2KBr<H=!{+F`1qQ=OLAe^}GAxHV
z44{(3Y#9u^jw{^t1`nBDLU$W*15*rQ4o);2rSy&)JAZV!tno)I;h8IOPFUJ>c&P~x
zEMu7#?O~^YTwtCYT+pheHrR55Ce+T-DZbD@Yb?r+Cz)QZA^&|&^RmXt?IcW434hg{
z^^T1y<#NI3sVdvSc0Wn)5{i=_lVC?l)(qzha>_E+mjUWAOV8V9*r~so_4o@27rCfz
zZspi<$XciwayI$^dd2Ikrtt6>f^X3K3GWM$A<L%kbSJR3e@u6FhpcF_f<VqZt?1KG
zN#cB)P6xibBABNmOx^!vU_Z>vMHP*fR6ruN1dJ|zN>fN)qXVNwN?C8t0+mMB*vMQI
zU+SZPlnL}XWmtqtUWXZFOEXL7+vCb$VaSkp?uOAgjr_|@f1rQgVONc{DY7n-!my&E
zvnnkUX=Ou-RoPkf@42X7(#Jj<`QDV^xO-})*<X_ea$cLyk*jO+L%FUE-hYT()QGCy
zyJ<5oB1qK37dJu(|0EnQpSxu=gdC*LjlAJf#Swq3@;)|q`!d{BUE#2g?^Hc~4koco
zXQE%qB-XlcVLU*wW^f0|QC7}|@uz}umXgAkQaKATE9I?dn6j1dbTIx+*r}Y9&8Ld4
TcyDCz)c<Soba2K@|E~RisF!#8

literal 0
HcmV?d00001

diff --git a/CEP/Pipeline/docs/sphinx/source/overview/dependencies/index.rst b/CEP/Pipeline/docs/sphinx/source/overview/dependencies/index.rst
new file mode 100644
index 00000000000..14336fc7bb7
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/overview/dependencies/index.rst
@@ -0,0 +1,72 @@
+.. _framework-dependencies:
+
+*******************
+External components
+*******************
+
+The LOFAR pipeline system is built using the `Python
+<http://www.python.org/>`_ programming language. Certain features build upon
+the following libraries and tools. The short descriptions given here should
+serve as background material for those who simply wish to use the framework:
+directly interacting with these components should rarely be necessary.
+Developers, of course, will wish to learn in detail about all of these
+libraries.
+
+.. _ipython-blurb:
+
+IPython
+=======
+`IPython <http://ipython.scipy.org>`_, billed as "an enhanced interactive
+Python", also provides a comprehensive and easy-to-use suite of tools for
+parallel processing across a cluster of compute nodes using Python. This
+capability may be used for writing recipes in the pipeline system.
+
+The parallel computing capabilities are only available in recent (post-0.9)
+releases of IPython. The reader may wish to refer to the `IPython
+documentation <http://ipython.scipy.org/doc/>`_ for more information, or, for
+a summary of the capabilities of the system, to the `Notes on IPython
+<http://www.lofar.org/operations/lib/exe/fetch.php?media=software:tkp_notes_on_ipython.pdf>`_
+document on the `LOFAR wiki <http://www.lofar.org/operations/>`_.
+
+A slight enhancement to the standard 0.9 IPython release is included with the
+pipeline system. We subclass :class:`IPython.kernel.task.StringTask` to create
+:class:`pipeline.support.LOFARTask`. This adds the ``dependargs`` named
+argument to the standard :class:`~IPython.kernel.task.StringTask`, which, in
+turn, is fed to the task's :meth:`depend` method. This makes the dependency
+system significantly more useful. See, for example, the :ref:`dppp-recipe`
+recipe for an example of its use.
+
+
+.. _distproc-blurb:
+
+distproc
+========
+An alternative method of starting a distributed process across the cluster is
+to use the ``distproc`` system by Ger van Diepen. This system is used
+internally by various pipeline components, such as the MWImager; the interested
+reader is referred to the `MWImager Manual
+<http://www.lofar.org/operations/lib/exe/fetch.php?media=engineering:software:tools:mwimager_manual_v1.pdf>`_
+for an overview of the operation of this system.
+
+Infrastructure for supporting the ``distproc`` system is well embedded within
+various pipeline components, so the new framework has been designed to make
+use of that where possible. In particular, the reader's attention is drawn to
+two file types:
+
+``clusterdesc``
+    A clusterdesc file describes the cluster configuration. It defines a
+    control node, various processing nodes, and describes what disks and other
+    resources they have access to.
+
+``VDS``
+    A VDS file describes the contents of a particular dataset and where it may
+    be found on the cluster. For the standard imaging pipeline, data is
+    distributed across different nodes by subband; each subband is described
+    by a single VDS file (generated by the ``makevds`` command). The VDS files
+    describing the subbands of a given observation may be combined (using
+    ``combinevds``) to generate a description of all the available data
+    (often known as a GDS file).
+
+The information contained in these files is used by both the task distribution
+systems to schedule jobs on the appropriate compute nodes.
+
diff --git a/CEP/Pipeline/docs/sphinx/source/overview/overview/index.rst b/CEP/Pipeline/docs/sphinx/source/overview/overview/index.rst
new file mode 100644
index 00000000000..07397f8f527
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/overview/overview/index.rst
@@ -0,0 +1,91 @@
+.. _framework-overview:
+
+**********************
+Overview of a pipeline
+**********************
+
+Before plunging into the nitty-gritty of all the various components that make
+up a pipeline, let's first take a bird's-eye overview of the concepts
+involved.
+
+The figure shows a schematic pipeline layout, illustrating most of the relevant
+concepts. We will consider each of these in turn below.
+
+.. image:: ../pipeline-flowchart.png
+
+Recipes
+=======
+
+A *recipe* is a unit of the pipeline. It consists of a task with a given set
+of inputs and outputs. A given recipe may contain calls to subsidiary recipes,
+for which it will provide the inputs and use the outputs in its own
+processing. Note that the whole pipeline control structure is itself a recipe:
+it takes a given set of inputs (visibility data) and produces some outputs
+(images and associated metadata) by running through a series of defined steps.
+In fact, each of those steps is itself a recipe -- one for flagging, one for
+calibration, and so on.
+
+Although some recipes are provided with the pipeline framework, it is
+anticipated that users will wish to define their own. A search path for
+recipes can be specified, enabling each user to maintain their own private (or
+shared) repositories of recipes.
+
+Tasks and Configuration
+=======================
+
+A recipe describes the steps that need to be taken to perform some particular
+action on the data. For instance, a recipe might describe how to set up and
+run an imager process. Often, the recipe will take a series of parameters
+describing how it should be run -- what time steps to image, whether to use
+the W-projection algorithm, the shape of the restoring beam, and so on. These
+are provided as a series of input arguments to the recipe. Some sets of
+arguments will be used repeatedly: a set of default configurations for
+different modes, say. These can be bundled together as a *task*: a recipe
+together with a set of defined parameters, and saved in a configuration file
+for easy access.
+
+As with recipes, it is anticipated that users will build up their own
+libraries of pre-defined tasks for whatever applications they find necessary.
+
+Control recipes and pipeline definition
+=======================================
+
+The *control* recipe is a special type of a normal recipe. The fundamentals
+are the same; however, it contains some additional "housekeeping" logic which
+may be useful for starting a pipeline. For instance, the control recipe can
+configure a logging system for the pipeline, and may be used to interface with
+LOFAR's MAC/SAS control system.
+
+Often, a control recipe is referred to as a "pipeline definition".
+
+.. _cluster-layout:
+
+Cluster layout
+==============
+
+The control recipe runs on the so called "head node". The head node acts as
+the coordination point for all pipeline activity. As and when required, the
+head can dispatch compute-intensive jobs to other nodes on a cluster. Various
+mechanisms are provided for queueing and dispatching jobs and collecting their
+results.
+
+Complex cluster layouts may be described by a "clusterdesc" file, as used by
+:ref:`distproc-blurb`. The framework understands these files natively, whether or
+not the distproc system itself is in use.
+
+.. _pipeline-jobs:
+
+Pipeline jobs
+=============
+
+Once a pipeline has been described as above, it will often be used multiple
+times -- for example, to process multiple independent datasets. Each pipeline
+run is associated with a "job identifier" which can be used to keep track of
+these independent pipeline runs. Their results, logs, configuration and so on
+can therefore be conveniently kept separate. The job identifier is a free-form
+string: the user can choose whatever descriptive name is convenient.
+
+The same job can be run multiple times: this might be convenient when a
+previous attempt failed part way through due to a hardware fault, for example.
+In this case, data is filed by job identifier combined with the pipeline start
+time.
diff --git a/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.odg b/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.odg
new file mode 100644
index 0000000000000000000000000000000000000000..5b969be7b05a71680e03a4177d7954b31738b35d
GIT binary patch
literal 15891
zcmb7r1y~))wl(gq!QI{6-QC@tLvSZpaJK{v7Tnzl8r<F8AwY2ZVdmye?tL@wd;dOP
z_vx<Qd(|%K?$x`h6lFlc(13uTfPgp^wfV9@>3e8^fPmh%*CZf2D?2l14+k?N2M1d#
z6C-CUfIWk&y(vAw$jQoy9^hbRZwfGRu`{!Grnhi3a<H^AaZ>yP5DE(F55QN9e+%sE
zkqN-w`Ays1&h|ua#eS<1z5PrDO8=*Fb)g@40_`Fet)iiM^ZaKJF#txm=+fc$c*24A
z)eI9u?FLzSA=3m`A~MlQqOR$%{TPT;7d!-GT)cV5hynukpc}vxMFhbOu%rX9z#;bA
zcN_Pv=P_p;p|hKNEdoBF>NIP;9Qxa*^_;_^955`m41y<&Z38j@%ofttu=HsKfUrfx
zRN8P`m@#g0oxSD(j%UTAbSfWsZi$Lfz3WGUnyREQ+uU*|f#*@xPh}@$qrSFg#IRqg
zXEY|}2V<-V%ZUYjaCkni9rl{6hX~kXrqM<Kl82sr8BWxwq-SMJm#*WJ)$v?un%q;s
zR0o|>I6Jq^4^X5mwFd3q??#>w#F@3Aoz~xQV!=lxaN(8pLCXt)eUu61DkrL!4>UkL
znmG2<bTV}1kY`S{P;Hb#pTSW;pD{VI&#>*DsHl4c3I4e=w3n(e;CkE^Y70uualSRt
zJ)FOAuY{(iM}~eeAm38*y}Eu_fMxO_aPy&O%%Ll^1+rI<!hxnI*N(uFFeI6fXcJQS
z&Kd=oP+1<_>@%iDS5d#+eU<^gpA(1s(R9~yW#xW|)ou_Q^T8~lxDBu=SDuToA1ivO
zt;Hc}5S(t$m07?R<p4E!SsxE@V}!C%?La;7T2S1ivU7&Jeg&nogkAs_s0lIut^3PV
z&PE0T>XbclLh5}swQ6CiGe5#0mi_<+(t7_p3Vy6MR3wy$B{VdIp6E@SudY)Zq)Ywp
zN06B%R*P4RZH!y2ip_l*eO~OO21PfqzujWG84H7qqkun=%cL`sonVMJS;DfWNvm!V
zA5hz|0>B@kWiwd6Cyxt|ovWjGYc3~Eh}!c_nz?*iFhjOlMla%#JmcTC_iu-}^8)#H
z(iJh0(^%>F;aS0f%B|A)xb)n3prToxTWu~372w^_ipv8X_I@9dsX~;zV2^Y)$F`x6
zxbFj}JTpN7NFzd{9?Dba=ITx8Kr)$@O9QOGGkmH01t`ThwfjeH!HJ*sV8cnz7fA#_
zms6BK_;z829|=JW9;U*d(aA*97~v$AMZ;bsL<@<z{W~D)z}=JT^6|gFTh=Cz*c1-h
z7uBVh&|fz6YXgQUOD6<htRMy&B=OIIn#`+^B+)Y~Uz5-hFLh8rJ^;hZ%5IR3f221<
zwsagQyol9La{Dqz>8dLbST*3+&MLRpCdm;Esb|>l0R}$JlHUnT_Ux5n{(NX98KXlW
z%bFQR9W(xk-79K^{y82f5t6kidMYnM1baAIW&2Sze@tI(thS=uX6ekA7@Hw?Ww}u0
zN;q;iFAdyFid4fMv*VWNSuf7-1uTn^@&HQu=jbSg*7Q*;f1bxBIxB%V5r3Y>*Sz!;
z=BKoInC+)er0%DqC$tML6uuLmTj74zU3C`+=<dhr((w4CpO(th4V28ZGz@gOVibRo
z3$ZSl)N=`*!Cy`jREo|lJ_ucMomHY(Wv>v9NS&k^&(~btP668j$bVAjsDSNdLwLX0
zGQPX2kcLu~)zn!INpWt)ZT`r}V-vgwUsL(Kb+#1duF!T?vZ+&-cJJ3|8H;ZL@8PKB
zth@&85lpIrwcelO&TF6X<FeENM_^Q*&{ciQ_ELnaCuX%Pm0(cmaX*MjZ<!u1@C+|T
zNc?fO+m8+Pylty#m+^6+PJyEeRDiCl83HV8A^;ug2Pi`L{+TZ(7$@q5up{20!8XAq
zaon-?D*f}#+KXx|Q`B^46g$-#A;|4@ZCgx$V`NJrSR&ED52XR()sg@c2oAY17QYY(
zryj*53wXAQguZCls7<v*LzI3j(#~8mQU}CzuR;~ECUN}D3<BXpQz#;-Cdr^$Xyj^o
z6e1tyAT+5K$)LyqBxME^0w->7`e}Kc1fo2>!i;(f(S7C<)_96QNdX3t5j4z=pYdMz
zd6bPYtp$f7?}xWfK}X2qyU9J&nGib$EL2Y%c-6QxJoWP>BfnPC78Iz4S1Y*h9D`KL
zW`ms42m6K|^o6}|AoBFgmv&{pXeI?n$bjsH;kF0)P0$ai90hY5b{(13x5@evt1*{E
zRPHJkd-E>%q(~|Wl{P90c#T;%0p~%r>{Jq#siG*%!r_r2aBO#`L(0@xM$~MFU4Kr0
zyte`Cjs@3Jj3vUXAhT3pVofpRaO!eNLM~4YKad(?04aKGDqOV`FMJv5>IkZ+)xoP9
z5leu%{7gO7YJHhrH$aW5$L7(Os)}7aETCWK@paL*@WNDX^(j@9US0)?If#;WS}3nz
z;s>4-{V6kTP+Z8=^DcwvC5*=9;+Nexwmi3{16|1DD$l&tV0c&F;@k-JxIwiz{lweV
zldhXcp<pUIAFbhKA5xX3Z2G#$V9S~#!;oIii|*3)A~iL$qp;MxMeFvj8|rlrCDid|
z+MsF!U8NQDMrmBCm{+rSYH9~%+{OLYfEgxEqOxLYS3&iU)JN%qlGX4$6_3XUg|w&q
zU4zVF0VME0LO=R-mlG)Pm)V3WcM+F-`f}Iun3U1;Q@WeDG{91UK!*g@**J998Y|NX
z7HJKMvS7h(d~Z$oM?r$u<X3+5rNX+;uowiT$?`)t^|Q0g!13wP0oSZz`6Hi;7xr+i
zsbdR@+<7Uni;6l~HQ<YoJU!UAeL=Jm;z6kbWp<Il?jGn{m`&;7m=s7t7j&D8RDNt(
z3+{@<Y$SaX@+4&UC`P1%s95Htv4`7(?Q`mGX5I?;^xk-S!jXC|bSvk(|2QL$=S8G6
zi=UBzfFDtPHPTnEM1;zRBW5)P10dM_HU@uCfn&1dKC<Ldi%Ak<V90J{)pVqsVCfRY
zgP?LX38{*&nZOr$*AtBpD=B6~CsGIRJuO{avRH`wW!x3Enil-js!OwjI4K@tNV^#`
zQ=SOa>ICj8q{Ep%Nu4P?$28L<Jf@f~SQ`gBKYiu_G6_ICh3Kp`3TLjvn!lGqAsg;4
zodGSheSZgKQ}bh4IDH_pGqmoe5?#=93rg;D7S|e*cHF8D5+7FgG^XJvm+QyW<@+`b
z->5Hh``g)k?uN(M+nE}%UWP_~tUTme_y!A1;5<2uJ@y!638o&!$C00&JH77lNL>OT
zkvSKZW5}pdM|S;Usb@f7kPX|NV|?RaPq<sQ2N~wLhcGF@?e*y@7%{c1NIGyBy(7>*
z7oy+SGO6APO-zV$?ATuy1suB&IyZpO4x5_Ba#%lq6iN(+W7CL3n`Hk|KN6_ABW(SX
zD4a7ixr7&{2<8(^ebErkglJ!2t3M_((FgVE9ghC`AQi&fL+_ka6y`_~QMODWl{!RK
zJ6btlITKJM6xNLl$#20C0C=k5ThFi+2}O3)S+YW^1LjTtA_Wn}mJtONqQ_~w;8Bl>
z*fI_wCO2TPA#A?fBzlU1He}c=DyyhO4-ec{M1jDFteKE3u3<4yb9Q%ZJm{~q5ti(g
zY>GlV%*`MpmM1LxA(|F={oE#>T)sjS9x!@{B?Q6Lh>dVZrjl8CTNo<@Vjsy;+NuLD
zMGnl?JJ>A`J0je0!+8*|Z(}(0d>@Gf)RWrm5^W!c#nwE;ZuK#EMy12!<$d?}QQ3>s
zwJEOm_u5tEL*OV2#`#<f)_^59gJ8c1v{fq-$r1w=9g;o+BA-UGF`O(y2c-tuPh6r(
z-{AK3B6m}Nofg49Qie<hPAG&w?6M2qD&C6Y-JGsOWs~M1wW%**4xSXpYJ5sI&g2)j
z0HPtMEgjHL%Ph6Z;BtmZiyl9j<+%`ed_KLGEZBu_X7d`XOBYFVp3UCBkzB_vSq#jh
zY3ojpwTVNBTtzyNZkd<i_7%>@<iU5wn_SpM&P%gCJLpl_D+6V`_8yavYq;?_Z`vwq
zblKc;q_fXP&2sW>DhsBm)6dW*aV?AHPc_SW`DX6YKFuHabj_h%O6*}+Ck=iJ9dCk(
z)JoadrZ0B!h*=5+U#^MFU1_>=#`n{NgBV_Rt`uqnFtNDmOjv6T(RHF168_m|{=5MX
zTV|SGA%q?OgIg+H<cO90j*>WU2G6k)+LiF*z})v1&jd`qwtPgC%T5h2CYGk~^o59R
z!{`-GMECm?Iq%*Derey~Fw}~NVBwI`+H+hxR|V1!-|A8!`QAn3RPIQ4vee8GU)BfA
zTvvg8lKa{&B#H0TXp+?V{Xvkqi4Y#gK&=|RX2e5K#2#Nf!8w8Lo231`q371aeid$4
zXr(8PnPz9q{7Rew{wc*v^~`Pc#|^s%+M)4~1~V-Qg*N9+uiLF_S}g7fKAE81hNUIn
z4^^2dJ_?M<Yom^)hSPqT!3~IAwwi~m^u#LXz^fVf=vslh#kZ^e$T14ncf>^-VVnlK
z%*3>te0|R01P||mY5dw`aG!%R0YM}s(tx0DQlG<UA{DkqtgP=Vacl1d`Ya}|gJM9G
z&4eXX{M!lDW+XdgRWO`$57UjYw&{rLXft58;RiZ56HF-hebUxeLd1DjJ!pNdRQA2N
zy*hZPD8P8~XI|f{A<r(t!>+=u(WukdJc}nRVcJ8`*pM~Kk6RRN1t?A5K)1lzP{Ogd
z8TU9TZXv$7AUM_wk$}LsX(ap29&nD=Vp!g3$@@^|#v2d{ASu4E12>8Kxfm|o$^{fR
zeqtwPP5AcG@&c+T0|~WAbpR9#4+JzH0R;5VA966SA97C49=2vqZ{Kk%ItDRI>?nR6
z3Py#NYnB7m9C{DcRErRNxDL<=a;N&R+Bvl3iIlO^o*4`i{k|Q(@dz)HUu_3yC{~YM
zalV9dvTP2=T!yfy>pweaR1VM~$6s8Kf0D~J6dnvJs9jzLd+~aKZAS2QzlurW9kLWw
zr&2f!NQ9B9GZB>+Y)qt)X@#~{D(6gAEtl3sYEm8u*T}OCQs=)Jv^CQh9_kCnF*neT
z<6%?RymU1^_k0}vxR4?bz8Z}_+xnr1A@jVd%Pl@TGB*elUHzwE1dsSo42Mv;Y8_1X
zi7U^_Xo0S6*Megc9DJP`r`27tHe+`ZIgyblfV_Lb8H2qQT*<xEH+4><s5F8NIXC+o
z3_*r9Stu9=qsGHdBjz15-b?dJ<aPPd2+*)pbg!iV%dnot=YlOgv&{QyaP_;+JN*#A
z2g~Q1T>-BSl-~I^RsdwN=(u(5=j5f7z4V4+YhavX@fFT+jBKCl#s-Roh5%|}-s^2r
zuK=w{;?W5H@ecqD5J=*N#T}g%*g<g}_*PK)02yv<&k?WfV&Omm%MCS3$=+aV;DJ60
zy?HWw74dQy_H<^}I-`|P))O;umtCSfL@0e{nStxHFDmfzJY5%^$g<XL3O$T<8<ab`
zR|i+mbBa^3@>j9Lx^?H&xL*t-wW<7Nia<Nbux{uD<>ldXByEiJt=B;Am=K6COJWPO
zXeVu95Wkgt<y=g?J<ONCLaN;)K}NlF^I_!p79;$_j?o{kX43ts=gdC95=Cy)k~!v>
zJ1^82!p8FJ+y&=7TF|MFGW2uY7qp>ezXOI^(vIE<K!s68o<CW>syf`+c&6E8c8e#C
zWDYiT`8HhVIRP@sj<`jpr6;-`MX*FfOX)chO%hDXz4>@)ARGHs0ckgz(@kiY-Vh_$
z>?!#?l<p2IH1;YCtnI8K*}7;@j>Baosn2()x$q&FBOgoYMzh}1!T0O)#m%y+cF|aN
z{@$pP+Nz76Y4NN{g=~Zvr<H(Gt#xU|C<rN+3fHMxGokZl;i<M95Od6=oof$x;yfbM
zdzLephe@sMfY!iuJjj6k9p+1u^_E;r?dYHbWJCuWzT0Lx^kolHD+Q8aHDI$*`i9WZ
zzuaXC%YSMmTN<cn?abFZ)-=4E_?QR=2U#)hjTaqW3^sa=0c%ZCxgb5M03_X7n5bpa
zxIFC+DwD?w3tb^YCYa*)G~GZJ^o|}!%pd{r_T_3DFKD|>nmyi;0-Y+_#ctm#iVva8
zx!X|9jeorsV3~FCadX%bN}%IJMQ#pM{FHwvUEOv3OR~v)UUL3iVRGt}NR9Sn#~Os!
z*)f>qR<dvH;&u)wZ!a<YX_(Q?@k4FO>^fii#5B23%0WFV8<v;_Dq!L6kt6PIj0`)?
zR!F1Kr7mE~BQ2EsCz!FCfxO)Z*LQ@m3ZXXESq|b@HguL6+HxsFqyx;TNa*q|Cc%Wr
z`*`N(Nvqu?rA^BBtCJEYq$;V7qDamOUT6_E(JD?2IabBF6FQ=b<)ljc<u;}%jYnkc
z<a!eKh`8kk1@H1aoV(b(*&*iW%|A~xn@1LgksZ3Is14_m5&1;5Lq9)m-OR?crX1^0
z;Yz24aGm3EcDCqM?b32m<S+6zG~Unl^i!FPGO|6>s+2_*xmwDtUl@lU!TKnER31lR
zYK8bb8)I<ma&s0m{%8#Kt?fjMxVDi{cy0_OyCi}LO#$=u025Qh@|pJh?%DB{cZ%1?
zhbahSj8%bwA$0t5c&cXK7)kG`4W&-PwN0IKnmGeEv~zX5z_#{Vy^@1ofR1fy&}p%q
zXtRwS;%4%QCJBYQFNWA!s#+i}TOi^Pcjy7sGOv(6O`Fk@kq2pR=pc9n2bUvDX?zM_
zO$<S{lzB>q<8b*(Vs|-PON20BdVRuKPn|~L3i|_!EB|!C&208k>zD_x(zp$=C7%q7
z(nXc0wi6m`v;r&rX`x;TZ=eDrZ&T6WhcG&y`R~9OTbOx;j<$HiH&`U}P`RW>@5z0a
z_GEnE=)YeB@4>sDVjcguf{ZVuQ;Q8fS#EY~pMuk@vh+h2mpFMziT?1lro^d0@TBpn
z$9w-_>XHb@o}ynQ%m1q!G9q-rkWhoF^426G^Zgy%5K+xh(>!RZJ>+|GT=Am<&|MnN
zhNuaKNym|?6%%XE`@ZesZP@Se7CKki$?Mpr4xx`%r8v!63u~X>tq9glE>7CIwST^@
z|LGjXyLCU`_Qd9P80Vn4qqazK_&uUmE4oxJmG7fmU<bD-yF!*0>ZS_YMyR_kYM(@0
zS+z+F3I7Y{W(Zew`;k=b%SyfzgjO=?(9RThAd`^28`m*PL5)9u71_`9GmkRkoAEsh
z^n~<vRRd1~ftqE|HW4Re#BW$*eJ0NOhOF=Sq>DXyW1HK&rfe&?WX3{H{U-+#g6$_*
zuD`$p@ZKql!UmYpwTrjji(M>}Gx6J^WJ4b(9kGVIFCcW5SKW=RJBp{CGm7R5ZDbhl
z4pbOh&V449_M1um@^Sn_6H({}UafIP?(JC`<^BfOjzNLNM+;rD5nqJH+eHIgi|Abo
z)PjsJK+u@ZFLsQefLQ4AN7q3X<9Xn<ycs28=PWCdrvMBTzw_?{vz&L0$gLl@8Yt>^
ziD#0|R?Qi3dFdo5D^>eQ+#b$gzEQU2QS?7pyhI4vxIC<^Bl}dGy~AH>A2ON+(^YkF
z4e0J|lF$4|!&}=Ouo*e?&Q1QH?g_~%3-S82$xZ*PMSL62z7g^cXsGgqfVK4=_KjAw
zD8TO$yvCD}n10cUUjgSKT%jThFd(3}?T>)7nX}Owso)iO-4VK)IXb<DocV~E=oyI#
z&FoD8rdIYAe8g(1;&hzE0{k$%0CRII6Ep5#QE0l?aJ3QPtEs&c_n(*eh+Q1*x&Me)
zbK4u)nK^Mgn{fXgxaR&{oBNN9{!DSVwX(P2BeryQcHm}UaC38`cVnRkI9f0;adB}m
z{Bf1|Us;)&{9Dh##nJW;EmIQ)Gh4H_(6|!=6Fn2dFWol}|F2HH>HhALgQJ<#E8rh-
z`2Q=$?>4_<0RVu1JN;H4e^%EYt}`<-vNHU6{>y78J1g7&YmRTabS3~hhgUz1ZOwkq
z)X3h-?*9t&ru#=pe+RH|G&Oy7<bO8GtIsS977V6F&PH^uR%UJ_zdZl59RGW1x&B?+
zKef4k>v+t>gnwkp?d)h|?_>^e{L`s_`|#TJUPnUS#zW}ds_~Bs{1fH1*_j#Koot;M
z0H)^5j;7}HuP4O(zxwQ3<MQ+V(ZnW>W`C5F?$v!Wer84{Mmk0oI%YOiCJt^kW^NWH
zUWUKX<25zm{>$LihM8TJiJ6;`iJO&^m*LmNKXWiMwQ_#Fb?8i89RJ9PUs08jQIdsS
zj)hs}U*P{k<v%S=JWO5*KR^3_Y5yzvPhAT$d$U)3fFr-cYgH)xac}-nAuROFq-yq7
z?u7rDZs6buur@PsW?*4vv}58T6?U<*HKpTXV`TeRE`I_3nZdvBbEmhPz{%Olgz%4w
z0T{o*ymmSlduKjkHdbN=e%^nzDfgfC{Oj!B2!AC1^Eb}B*KZtEOBXw1dm}4bCkE$V
zhx88i7G|1es?!@a&nL<<95PTck_|zuGH!uEK`4`igm^8^Brs5tFw!B;XYuhcDALk_
zAd`|cDKC6CzD+kn={LL<yB=fdEN8<HvaRjq*i68Dz(7Df*oT83fq<F?KmbJRUPJu2
zcj!Qc&}@n5k)Vz*yBi^jqhBEKj=;-SlN-kr+F7<$sGOZ%M9l!C2+Rf2H)FSl+R?JH
zC3j6HdSBeUgl5gKax_H1rzlG34*R6mY^w`{on6{-EafX!(|~li<dQy(3GjNHf7fdH
z=BE5m)`zjnPFe(@<*>W`xvO(31q=}g^(D#(xH$_fC>IFmA{I+<ln{6w8si-hD72W{
zyY5LU8s-mh->YFk{6A3Z+OD(kVU3@FKra7)(HC%3okeFw$PED+>$$En;D#eE)ScQ(
z3z0qJFMuFR%W3*4-0LJ>o$dCKIRvX4zq!VIH9Cy|^rxhd6mmSPXMS1YkA%g@p%tgC
zSm#`m>Cu@%!!QV|cU{zbYvRHtPiyn3WPI=my|a!{kMy4f?o0IB#V<BEFsv!0GGg*&
z7u_VGyW`!0l`{uRPkm@$iqF$DgNoswzaT{U<JVrB$Cj7%Os137f5fNfO@*yJ^U+yF
zla04&wic$XMEa?e0z75lAHu|0i^?S^=~Sw|r~c$qAcN)Oz3t(UQO^~J@y*;oU*Cde
zN2sGKjcn>{1Uk<LDxG@bKvsb~-?;kuX8ZOKW$1nuK)2aSOs)ZoeB8*U<)ZI+R2OHU
zLxQtttk|ix#HQp9<aGAJyVgLL&F>-@fYL7OUBM>kwmwRDT1RICJx}Odq+cTFH}8T=
zT?Yc(c0jGo-bM!p2C}z{web~;HJ#L|))7zbCuX)zpaRb5*{FNhe(22R-j#t;pKUq)
zUdXUJkKI{23v0E}VxeAVSk7AisXiY6>pM%DUeF2t`_?&m=8GK-?vVVY<5QUSz3Rvb
z3P6{TVinig8i3=l>;9EUQuV>+ZVL!>35sbibOovBjuO`nH04Vvwx&iqo^trZQF_$U
zv^wxZ@L&rxv=$WW2B-dD12tKLz%^}@K7nMr<@?%pGbr5ZcT9Z!bUm9t`g_0wk4sXL
zc;mMbwa!S#zDl~i6ZNt6Qkh9E;d67Wm>S}rUcrCDa6G!<+A>JTDd3xQk|eO^<@6(j
zOp<_nH=L64xNGHzr9$k$H|bo^X0cmw<XHTy`rgn^@CF%>c`LW+SXR(T%jIR_E2}c!
zwXjAYeFnGFjJ7U+1Ss61#5(3(ZUXCFbDCK62y!S=Bdxm6N1s1)M9^8KCXY<a?`u$b
z2-F)yeFt`53-zA<ae0!4ui**nXzE*SbAa0N6!-G1jVrM($yX7~LLHFpiPGS>uO$|K
z(z+{NB5J;{(l3}x8x*|l@XL#03xH8Ii4<sMYNt=Wke^OQ3nT;$);>x?Dx+IM-ZjR6
z$N9F)_HQk_HZCRzWAA*I9*T(cQ*%E|{o;LW`ZQES+9pIT{Zs6F@A(QGt=7D&D;1Np
zh0{@B#QE{!cfO=3jfyf<ehw(ZunG*E+hlu6pM2)eSK`9q2Amp&`>K(`yn#v~wJdHX
z7pq!!i3Vk`I5>^pS6+mY)JI0>?c>mLBxz8t`5()ulW<;~ayyZdcT7($>6_{&LfOQL
zVFXXhPi*fFJjpw%YFt6eYP229T2QctKF;m%21Y@*JnGW9T%@`CQcidZZFE*R>r`PD
z6{{w3gMuN5r)G{R`==~ooPy%PW(`m1>V$Txc+~Dfx#i=EpUW83M&~vRac-r`Zq-bU
zB5qL5oX(4R&~R4`sI|3Oq!P3z^g9G@Kt$0X2E)#I9L>RPjq-7ZyVx`+U{G5}5|p4I
zp{rdzPPfPmb^#|NF|?J>c=0!87L@teW>bWNaAmSakoMeaX|N7)rNNIz&DM=g>OCI_
zGS)*_`BiO^Rkd$F??9N1)aWLjvN>sVLmuHzlR5={&LU3s<M)WM>gs}O{#g)+ow*L+
zxW}{tXS~Rxc&3@pfc6(E%&b@3#NcGjbC-^I{fbTd5zU_Y#cJyDDd}wYs?V=_Xo=M`
zOKrNJ$<)&yJElC0Iflvp2*;-5dq<<>Svngu4T!*X%9`3nY<qtv8g~&@$d;tSI6=8o
zKK>Q?^8?L#J5x=^dTu;jK!2MY6-aDpwZ<oAdNvo<`ID*@+x*_lDZSmkAJXU=1!NE<
z6Y9?{%t~n6J#x)}j!Tor;p}Lok;bM+U~mO`X;d-EzMAUtDJ`mDfw^}-J(A5Byq9=;
zZFzTtG7F&iv=|G^b_Bp!)(}j&v2mw(>baFGwqZou=^^Z1kb~k7`?&^x#_usY)6<t%
z$L+&FG4d<CP!Pr|-~(B^WEy)YMx|Jr`)m3;gCxT^Iy=zjtzKfUk0T}@k_NLPGlPFX
z=hgNl1OoYD+`fj$lvV^`Xlo5(>rbpakodft7}8}G`oTj*KzBjvD<4hdJgb2x(@<X5
z*BEnQ1UHq{nD7_b&xJQ1bV!95T8aU*HD5bPD00xiaXVbzF)L(&l-Cq%3_d@e?^!Vs
z9FyNSH&n66Eqp{%y4XiDJ0I6D!1tVgU)<XRDH<!f`_TE-b(Fkfnc$|cF!8W#c-Kq4
zfc+U>v%LhxLa(q;x%2(@U03_(XoGMlCw50c2K6<A+_X?N^m7c!ff*=1X^|pwo)~5a
zY`xE2w=bRBLtMBJ-p<;Z2dI%Vwfk$;h~yd%g@=+l4K+vM<b>BZqhC!#JG542&|O#o
z44*8~`wP^fB+Z;eQQWtkDpB@L0Lkaf;wX(x8h7v?p1-kus3)P6GG91~G+OTB#nej6
zX~S(20YTGdC=M0RW*b_RKPT~vnBD)WJ?kuAWYg^QV3BI93x1q`j<FPcMaW(n{fIl^
zQX?6Vlgj%k)<#^HSgJ6F&Elcd9Sf-2mUk>w8<ZNBfVkf`S-llAV{;mbS-KdBX=i_I
zfd%ec;#IpesM_fLECv@qmS}&7BVn~|nnSq+2J`OxoXI!QjyvCkQF?3$-b^eS6w8Wu
zM+hdN3*CnbKe2$B#%->f>pB(oK5^O}EFc#oi~2O7f8&eXGLqv5#V%32+XwKeQTY%H
z6_kwLE|P|_Hd<`Fni#XJ)JW^%mB=t=>b$BL1C-pzPnzl5ObxWsFZQAHD|Vsu7v$gl
zO|`4ORK;Xs^;fiTqk-9Iw8F9qe?k}0k}l5#$39FQ8>oQv<;eDSjHRNO7*{9b2PT6H
zCGQo&h(bU;BgVSNSfB_;3^1#a&ASZMFBT1V7=$EAB;Bb4XT6{)E^w7kqLakO5<Qwy
z>4+O3oQ5}s#_aLkZS~k|&B|H3$l=RY;F(5#k!K#)SWa%2j1=sxPNlNzrC6eD6c}7y
z07XpdOYtd3tSQV6RLnQ&ez7~$E~`rrO~DeanNZYkzr8&NELbL8@Yfr=^N+#|!d4JN
zB36E2wZfJ5@=PQv;mOXf_I*G}csDxZj-i*82PMQz>WaGFw{V`@!#rNqz*HGS#MI&a
zOteJTCDs|#W+LehY1Hk#Dsspnvp0usDX1%<2wDZ6um0j5fFmf?Kdjfibudd^e&vJ8
zQumEp(|j*Da#rC38i5`UUgPu5M16DDP)V$*SE}%hDyYg2grOIIg-IPW+?m0iWo`Fx
zJ<7))CoNxeb;Of7XGa=MBUcd}T)(`Sya=0`RNh44)j|-O_I)ji@-NZ#Ygwo@n8)j-
zi85)6)|r3uZPfokQ{c}AEG`Vv&wiS(zbyh%0%o+*N_b+6yfo+-m7DLc&39xPs7Wxd
z!QnBLi`i|~4zD@fDHL*D4QP>Ia4846z}kY8@{NavdcP=rPT{EzMBj>%lfBiJjuu^{
z?4CVKRA^Z&yR;|va)woFzJm_sWqS(IlEO{e3hs@ZxJGlBfJQG&Vr=7FDopRhl3AkH
zuQ5Qu3LosZ+6IbFd9CC+1xn+meIB~n*%CASL1D}2>trk0COwVzQA#^~3D&bw5qISh
zx;A_XZd5^5q^#J|R;~_ZG>MV5nWu0>1!J@3bYD6fQ^n1@`Rp;1@)KAu3$QdXwwsd<
z2pjM~O{;)r1<p8#_)z@(=*MeMNVc^C6Te)?`*$djENM|qoW88uyDe&5XV6oOPZ%wG
z%`G%1)|wa}mpRhz<g->Ymf{2w9~v;HkxNZoP;F&{g5eupuKA8!p$jOxltakhFZ4FL
zeNW7a12Y-VJ<JdR-fH*G2wiJu{yr>elsRV{;5yyZ$UC;_Y?IxKH1ZThe8t84Njxk%
zVL8E3yK6Ac@4FBq9d_JVrI&M0AlWSxxj2O=v9~?h&Mje|M$@~<=T|z@gac$~e$Y&B
zCEX1@iCyK;&zh~j=G04n=5!)3RHSTFe-c+W8y(xKbaTBq{30qbN_e?H!Qd&(2VKTL
zG0e!p=)%`0^umi6#_?XTc0__n%f>WAQQo&?Zz<5r38cn@V`-PTTFwyRa~LZ3S_sF{
zU@!+>^FUE(7`oA{5EIUR=v*kIx}3)>E#|rmuR{ImHQ?4^sI&;^LqI`#I&VEaPCfW1
z+d-My7D=57{Di*YI@CFZ#$&$dM#gz`GZ6mPT#A>3u~>b!3)JAH#f*nF`wg+rG>-&G
zH(9ZGKd-F9?r#dWA0=Qe#ll3FeoE*Q!InV3PW@u?9cVly{)s~ew$+n8T6Ve|(-Q9V
zb<5d@IB>J<IF;H-A@@n0W*K>6<80^fyTdtbg>A=G#Am1NvQgi2+8l4Y0hUZ&*P8>^
zW3;6Gkq4Uvod^+`m=_6D&)giflX|h(=M1lJ1}wFUn3p2yCJ@9hjYB0HO<VC3Sl)75
zlX}#TRaX%VR*f8{?2Z`AW>3!=(GHE3niIp^lcW9VW`pOQ#RERq)`l#LSsxkf`nU#P
z*5aGxX1~kIPlMLaPcDAGg&U%h{XW?{f(9(jTCR|JO@xwf#;0b4Y$8By8dT_uU$MNx
z;w~!RPf#i2XC`zR;M;5^t^BhMK_krCWE}J<(-6n}E_gpeYU5}ZWNNK+BxBB%i({m}
zSm&H{EQZ;e{)mGXO_$AXtR3h5%e27IhH8l!Avory7a!WkLTTk5x4Hi6lkpmc<BV`6
ztg1G%E|qH{{O`L`KSmQPsw`PkJW5B3dqsC%RE)2a6vv&sF5HN1@^RbK*-JemON$4v
zj72bI+b*<3;pcy#X(2L2Z>K-r&UGzDT6{KI6R?AC+F%;s|GezNSUNi=@49#u&|x$y
z>%F}d-T55I@7BOOw{C>$9O2Q=)AgxK*8S(QcAW=lQVMfjTjF!M1WyFskJZcOcNVwX
zN4`Qj%Z=45@97#J>Dn`y)_<7WOHGJ1`JIu1m_!`{VpzBK>n3Vy4z@t^7u(dkZa;f4
zF0_$Wlq(tEQht6)Vx)4VlqNpB<1s9>clfq(5L<cp3{_AJH2w2SHx-Lrw5ARR)~2l>
ztW~G)wNY)nw~7y80CdVgOc1XWi-&7NT!~svNVA6iccf3g9^p>3)P0L=_ewCqXBGkO
zC$^PPy-uF*LTai*b>|$E5V)n{ptwKiDB3Aq2MS13Cno5OAaUWCT0MRQ5{&HvKsh$1
z>(&$B$dJq__`#m9rSuR?p9yV#xM2mg%{=xs{6PfxWS?WS<Utoj;l+W#@<EdTu$_Yo
zw^(wD83M|8OQ6QTo<PL35_n;m7A>j+OP&3RhL<^S#9^LhT##grAA_I}F9(P_$@mbT
ztt>e|rK{Fc2TvA5j44}@W$G4&kZ4fq@SsSX-joi?S!iU-))23|-K|9bBO+lce1E80
zJ?MvSIWGioenUo;DgJ8G2%}~Mx2A#4fE=gjl|5YIo|!(+ECvZ!nLgu<4A<&=OQusy
zbTlVigzA~F$$6fOaJ9%JTId#Vhs#9~2D^3fl-RH0`9Ok1KUcYfC8=Qqp80-o+?V7(
za3FsR^8;1GF+rz3Z=q8UVE!zJXrubYdHJJ^T?l<!u&0_oDv=PlwDpI5pIFq?sq>JQ
ze3F;l2z-=vg?C+1^W7H;ZE~=xLvB6ML{*<IT<(?GGYoY?0kB?9?zAOq9`$uO{69)m
zuS_sRwzLt%)>F~VWDl1iwMqu+d<n+#y_@=ADzs{OrwbEF-_M-QC2FK4dindL^;P?t
zdGB5A{P;w5s!iKVK5xmxDjH@&A}^_4=1^&tk@2Eyy~Q&Jm7kF5wIKP<ZwP%ky91HN
zj)YjOm78%j&FiC)8eNL3(o4C^k4}>fe^P@}q;DL^dn(arRpyFTy)Kqg<XGqzx_*(G
zgAd<Fa~MVth1yWV9ZWd~?E<f92^jiVEy!v|W%voQOEs1h@<c~63O%ii=SQcdoyY={
zu%ZrKX91Y)_7qIItvwR8Zi^yH1`TFn%hL~AVnGoy%Ppt1&VG&cP3@deKkAOSqs?1K
zojGROjojXS_pX!Gd6=AI`udRxr7Bq7SN&o+Cq`JaLNkgyj#eS^=hJi@dm@&l)suAk
z`pQy~(KTBFB|#f=$`*Hy@Tna?;8A=O!56(*OOr}$0Qkn`U{kpgkZ(RfJF`Ih5$sTX
z)81SMu4(MHc`q-rCHeBag<NQWrisv0Otr)LL84f`Ti~hrQbfTFh}ndQE>!fU+oTC$
z)av*&>VUHYp4uPZWBBt*s_8iil+Gi?<ULV-4?*<|PcmBJD4c$H-Mq;85?_z`V!bPa
zRrKK^NR#Z<qsU{yC0Uv`D^DM0{5Olr-M#wcN8$_67N_kMaMr<jz0S<x<Wlns01H-j
z$`c?tfQ%bl%tp+GM@K-QFa@u4k7BnKJD{?eT~sRX{A={`EkX@t9I~rd-3yb05<NQT
z^8BgI$~UpQ8YWG>tjaV~x2U=KSa`T)fqi@Se$0+DLA2sn;g`oJ^QNcA#In|sZ|hen
zkm$`T%O-X#8;Yi*4W%^r`AI2^GTA2-?z2FG5M!I*tx;9A?E%|4OPyI^BUQkfF=SAJ
zU72hkG_;-IPq2@tXIT45Avz?M^9fSvqnejq;N&sj5W}G)RU0x`l|M+7ImwX0t7~^5
zG~;H-AyLBX<r${v7zA{;o}wG)zGcVnl=Zu7q;3=;Z?AV}!)fw>6iE+(03j7SP&#NO
zXrWONo$sJ1IM)i<iiQGoN1R8{X`wjfki`awCtIL_f#CcfV?rOY)UvV8UqDb4fzr@R
z1-(DESU?|x#|Px%%d<cs2fq+BykOpRbZWrOj&r1PRNeJ}zi7o<7w%KfQSwLJGQ151
zF<Z4;aH0bNiN0?C9DSvDjlPNi?9Ht#{uQrwVrDQhF?k(O`6Il{VB+HF_^)LIpsz5H
zZ$n2wAaA=@`4jfn2-BZwf9`Q#|Na~HH3V((I;`SkY~=Xr;O~neez$xJ2miOFxh>!|
zT>rmWVgHSl1Hi$>;dNNZ<^MCcH<SN=ADsaJ+yC8);BS0<9eA-Yb7By-a<(&aaQa^y
z{ksCbh2~v;tIoe$3Ig|<sgs%W+mOVcbBMH;95*?UeAX%?l^!Z}bile~mHlC4vAXPp
z-*wR?v_hAFrtWXXo6=ui(O`W?VuY?bpPjw{3M0#yI<$9dSZu#%w)klZqm2A5qNrxd
z_7JR*hH+!d*m}GopFzby+D|vEfMQb^7<SI0(?#2V);ye>X{d>sHY9?|;37aUybweJ
zPF1J{d;{2kfyQ=}9?W!z#!33tiHU7uk;as{X5rZR`at#j`(4_sF^hnSD^-O6W<^Oz
z5CZYWCi8?@^vAGBL@}+=Rb7T4cjQHSO?z4--josFQQ8-SG4;rCk*4G2TAEK5ruY*Y
z&BE!GB1Xr{8F~!)o;uf$5yvzpc8!wsYT6Ioi}2DKG&{6@lcJzC{EpSlwq~?n;mnMr
z4X-!(iYi?y=HSArVH`-mwEs{(5^DP~Ot`_VCcVeMQ^DNANzi5gEdc?%W7sDe-dCYA
zGbnrSA_X(cxSQicBt`aQmR|ba7(%ZC%XU6=KO<!82zZTy4!BKZ%Rny?N_S~kLyxL9
zwuEF-sSYmffz5I=X=9Q4{d@ocv=H#Qs+0`DX6Ibq`VzG-u%EE1m?)qaUqatNGkI&M
zUHt3|(=ss^xh|Q6afeP&w|+QD;la{AWMeb=NN<l8ch|DWfbCZipBX`16-9)M>|U=`
zoCw<S**ej!%%-V*-%HV(57cCl4td!m`^1VmL%QS9`LOUZdkePHE60vxMmb4*+LlI>
ztL*&B4(Cm_L9r34R8Bwks^7^aM~B5?KhaOt20g&t`Y17<iPQz>%H0x>8FkhdLQ%Vl
zMV=k&Wd+DGuFY%um`wg~%}Ix{vd;*;kwYvDv;07JtK^?X?GUWvkE$r9k~H9sPLUwk
zfzi%n6pV7ygy^K#YYGUD5kW;#B@d0srb!No1><d(fQFa>UO=yLK$W01Ez>Ub@mkab
z8L;bzBlZ+M(-LV2Uak7U<WP17CD6(mt>^k85JPglvBn?0oujdV?w=RXHYq_*?Q3*~
z$%o-SqT2hA<cp8$(J@~2#f}OW>Z$F7B~}vlGhsiQ1M;-X1}#(cM+RKPmv8*|@e2|v
z;B~e6MUFZ!r+hShQiKqE{_#&rP}fl*X!wxZv#}9rcp*OBCFJ6_VBq&Z>}M_l7y|R2
z;_8#OJe-*n2`l2n`x&SQMbo0L%V^-5%lP<`1YRzpfcz?;n5~mK6q4SFnXs`%ihhku
zME_<hGs6z{@bRJ$-Au7TV)Zjl)KBapDN}jeIMmEog}UsMgCEM9$Dl%1emCXh+o6VV
zkG<aKULJHR!}mlMVM*m5B<k7#cQ;>VhmdVhNud`HR?8F-ISZ1FWE)Rx#l#C8A9wE?
z-=99=1~W1l#_4(E%3Z?et?b{-kXl!_0uSJs*{+^9l|1Fi>&l_mnhy%|sz`q2YL!wH
zKXpRspOTB_SX!=<lBJjZ+KcsNF4VVVFjQe2DyMKqG7ksYLG!1p^2*7FYz0cvk^+7y
zQC}P(Il?Gh`J73Zpq3An>&`q(cqSSR9}*diI^8@KN|>oPFTZuhd9#C9T?mL|?{$Y-
zJVjHH^r3hpWR7Vne`EWR$3vi$(=dZyz=GBpV4FH@m6e>9J}(@xf-$j}R`@AjE}Nzh
z(0~=eyj|`~W&0tJ^mM@5oHiBT(y!%{_fw?dKI`*<DHC4R2FS`<G-Oo8YN%TgkE`#{
z7-58skM=42T9)S-X9L1RT}<(otmidmS_S@4f_+=E&py{B3`1I5mIXlv$K}fMxZmUR
zjF)EOnXb5xE&)9?Eub%C_e>jOW$$UUlGnw<?c71327NZO*+qIKYr!KpLr(W&8}gAn
z62WuN6$|&K+uhQj?QFB_blKL^yPaK?6s#xq=A0>jIsDC-lFuiv)~wgr-%Pmz_zw$O
z!O3UMGuP9LV8tuY<r)BpD{Md9dOpLzh?eJ!1v#e8%QXDdE{8k%eh&R?VKU@mFZ!HV
zSsKz0zBLJ`$nNV#Dw&@^La+Qienu(pm^_7lS&LopFd(lviz=S%cza)`4fzC}fCB*~
zko|gJ|J`fil_LPjiKz<FNy&>d*u72&FgJ7hW9?vFlAL`PBU129I`IYn`=R1+s3h$o
ziZ77w(xoc51w8to?AO51C3P^sdwG_y{m!CJuqyNsXVJVO^V)2BoUTY=oCg~gucB_3
zy%QNWUD`CY9GXQo?|CUd`b5)DiRbNGBjy#k8pc_x*CFm}SHz3;Uya>*s%kTY(=<*&
z3=uKnN0hv)$Cl=YumuY;tOddM9&EEINl<4?D!hj>DnJjTF^XSstX?KF83byz2|~M|
zCy0bh7GZ9%8sxz*iv)IZ42yxVaxE462>GSKC{3?g2XZlX`?3<IJPU^(_H3m^{)Q?O
zAB~QTG8iJHC`LK|9YIUaLtQmg(EdZHsLprNaV*5hrsap&F2!*wL|;-1o_E))rhzuF
zeDU5>wnV1Ifq*DlTjTYg-*X>V9;lkCI7{+q*k*3CqC9X|m=XPU^4Ti3vnm{)z~9<J
z(;*>S>U9v>4DzilfI-lJ{&_*@YlHkIzm|vovijAAzfU86TlM*uD7_y3wg&XS4S$~~
z^ft@*FTqCmea`VOyI&;lzo!xXW%nNAbrtLDOpCWkMZc_mx%oR6eIs~($@43*`KQtU
z|3-PU`b9n8c<W!{&G<h_<3F(2U!cDV^cM&a&>L(0OQ!w?^glW5f41OXmOw!NSxETT
z*;xO~Y5y7Jjr9E`B7cMOJIDQJod5L3>u+%W;=KP?l&Zf$`HKVpGs+ta{!8}$2IYTn
z;(rGF&k7=C{-4$SJ4gN#=U1Qn{lWOglK&FjSDfED^RF)R%kuZz`0c^@mjrVD!(&uY
U1{~r|4e|AN3i|rb{dV^M0FH+Y;Q#;t

literal 0
HcmV?d00001

diff --git a/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.png b/CEP/Pipeline/docs/sphinx/source/overview/pipeline-flowchart.png
new file mode 100644
index 0000000000000000000000000000000000000000..289d75eb0f0471cd321eec281f5011c0c1f3639e
GIT binary patch
literal 85797
zcmX_n1C(P;7i~}5nzn7*wrxz?wlQtnwrx(^wr$(pFW>v$dRZ%3xyij%N!2~4_St(^
zxPqKG95fa*5D*ZYq=bkP5D+L45D>5~Brp)rFMLFYF2EO1_7a*-KtS*VfalP!cvggc
zpkHz;k|KgC?(3JjP&_K~IDTIB*Tmu?XUqA>Taq1;FM`3g;*yK{U&>z#x+tUt&dMFq
z5V{G@;?DX~L_*h@%^$g~@*6E^kuE<y_}!)_Q<*Q@pU2$2J9ehqu*pM)2L56oR4Bhj
zL4fXoJb(o7rsVKiT3WETc(vw6j}pafHLDwK$x$#N_V@SS-riiO5$Ne__e|v$AS#gg
z$BdaGqtjzLX|8bwmalL;Ul0BU>q?R!b4}DIYrpV!zq}Xqo!u;TK8y0tX--qh!B5Fx
zh(IBZocI6xw|l+S2|V_<bT>_g42k379U&y2Kg#uqv89e$z9Dx;=I#gOBV{HiQJ8t)
z<z{DTY01i!Hd^d=#y4a)4}unz?Iw#tr`q_Y8-Kh^$?4;Y#}iLMS(0RN<rW)?N6%h+
zV~ru--!;EH+r^2;guvRT(itQt1d&^dBpR0tJ*0{8anq2tTmDibO}=Sn9t->sN(ISO
zI$hh97VL<412O}R&Z~u(-1S!EuP$%Sy66b4?sawV;5lRlHkJVQha^LAu9Otxd?PIf
zN-on+xx)31Xn!n~wC9@6fHa$3upN=D8LR0|eXV(sF~X#hg5l>#`$Go?WPYw7w7dgk
z3s@tV!XpA_sZi&g)&-P7y{eV#PR@wmKYY1!@ptaxH&8!Arc@TWub3u^8DHsSTkX?F
zca%(3-iNl8dnvSq539RM=>=p#eT3rd4k}%q5*Y4QXeI&LKgrO-=J%^6<ug#+TwEO;
z9Sy|!<h@9bsh8``k#P`tilZO|&K@`@ms-+zKGTfe?S77#8tVh4tCu}0@v%zr5DP`R
z>0(m|J^gKN%M(gwlnjL}_q*3pl|_0^2~QU*cJVKbPZu|(pclW-x<7$aGvuM|>qY$t
z!p9F4C;LsC^is4Zp~r5G(ahpSjSsnZK3ut_wTzItHe9x?_}e2l=@8sNhCt+)h^CIG
z2C4!l<OmJy$;&kLEh6JWA)#Pow#9?u8D<&39_**rVn>=7WTYv0sh>UfQ~L3QfW8RR
zo=-}nCt!@u{%{qxg^3Wgq7`%SQ-FJ@Y*IQRHa0a?O-;3c`zoqjXtbAg;h_TaQ?VHT
zL^M}jO}#=7SOkWc?6aGfpO<%Y`F_53c|0&Y=cSz-G(4Y(IZkhlInu>qa;M15&7Esu
z%ht@6_`Hqk^BYogYV&Cpri3;(HxsD8zrI2ySkqziuxYSUzPF9Mn)mg7&^usNVo87<
zwO>h@m}tGNmGmLt=k{#!Pff<We$XRc>u!Xn!V&1}a8KV4M`yJ21?9`YcFYz<<CI+D
z)3kaF$CFU&CM@lgxHekWQXzp;p##E;R2mfPYS&KR0ZRDazA(G0s_NI>0mNMIKBAUF
z0&WltIKH!{&|#zD5Hve`Yf2e^RaMpbPkuZ@+ZToid=99r3Wn;0aJFrtjN)~7v-tM9
z!lryu$*J55cat62G9(tLU<Z0N#3-ijI~q?Rg09PL9=zfKI=6(g)@IPr9q-Oxk>QQ^
zCQYNcyP=iOxxZQ~ucpZ}g5`ogwlXr4TUBz&MUNg4Gn5Rzh|**R`Q;wS5y-Nc_(2Sa
zT9nd-2Wu_?3nSg*<Kq=R^5F{q)G>|(O|fmkqxCRQ-~T3(%lEiYbMX7&cdlzWtdr{N
zOY84s%myyB*>E=#O;2SbMoq=FY$Nm*cIePbr<TS$H*rjjO3uXUOC6qd+iq8C`>I_^
z#!rp(ba=lpt$eI3)02+}-|}afc51e_|Gvva_;4Uc6rtKIAWa<On@O)Bzb&s&>o6gv
z_yUiOg60GN=A=TJk1^VF<PA+hWi04tm}`ECq_Sv6VR@QUUCQ_yNYaOdjV$)Y;}SiY
z-y0NbD_}BA4Ey~p&S}8Y>+;@y(SPGH<jiC-@CA#EY4><VPQT{l!7uMc8s?)qL8wPL
zV5As;#<frrHYQ1mb17F`$x{4X86<7ARI*t`5)jms<z+5*RJ?2#MfrXp`40&kflMyT
zylYR26jO~!fh?@aG4LecXnAQ&HWuIggV`MeNdTVteol?{+4Zh@PYy7eL&0wtWONeW
zVqzw~`U(n&NW0Xq=C$Jj>UtAb48Qx`Z+8Q7a`2#|aJgO2mm9W`I_s5B*VJ`abt6_<
zWwgmO(9sCNk+Ctn_I1gBqZj-@C$ea<+VC5gtX+g_qwX@R^ZUF98DwkM@VpwH&gO4E
zqXtlZf8*E@aB!q;xK`{tl&i!*$15s0usRUF*P0*lf2nWo#085P62w;YA&I$MQG@^O
zL(06ED!Ttt$E1Y9C;`7xg_Fg#HBedcsT9Djywlh#1Dp7;{QZ0BAgAE26ZL9trnXgw
zD@wE3`ch#+AHg5LKM3&n`0A1p5!Kj3#_jTQ`!}!rkkaT!WRQ2zlZl|k!ha6z9js_V
z40WZ7IXE~3vzCIe1CI(Ftn@b0c4Ri4e{RR!A4kR<u;8aUYTPboW0f5DZ$-MATg4)Z
z<==~?s-c~`iSkYy11Kfmbfj$4dRR=3+u!z&+BkK5eEb<n@4B8#Y6GYoEq<4g&CSoj
z@ZS`lbcF=Gmxr@skqWO*s&3U#@QI5Fl10k~1_n!0%d3vUZ~M1jZY4)ojeqo2!50w;
z4XnFAMHRablCkxT+~~;;_u3tX0-)dlaWN2zfX`-~W)Iua8$5T&r&xyIK~VY&7m)ex
zb~JhGG->3DzgHxtm^cYpSWrM%pnr%t^9vps<?TdyJaiJi2E4xnL&NE=)zu9AV*OZ<
z?ftg8)|tw#_`gcZ$7hM)ZboFX8|*1h#;^;0g7LaX{tVpT+2MgZ{Ln}uP`+ax^g3!<
zG749?XrnZKCh>}=KQtRpPMUsv<W}7O5(D>zNiQPYOnt9(jH5W+tv7s|J+l#lrxCv2
z<?%2|3iTI*l=UTfck;b8Ba06yk~j(>Mjl?K*Ho`14Vr!<C}Dsbs?xf;Ja0fErIUa|
zRL)}UXC!*RXTH)Eqi1_IF;01O^V3DAZ+rNs(V}`14KHRScgy2J+)(R2es_6(jr77V
zi^u)s<NLl4$1U`RY-S^#-aKK#Pz7<fU{cEbK9G$!q<QMTqKYo;2brkX<op#tvbVPv
zpcBx4$nJcU(`Nf~(mo-#sa>)KMS^^KdfMeaS0-JddFI5;%}s(Jgt4iM9=alPryKoo
z7#|;h+nq6c*k{A-uxy4n+OvO7U@>ns3N<ko@Pw&V@$<_Z!^8Xg6EmWD?`TeXX4Q3u
z)wOP6OJa|`iXiqpzvwUK-(bZbw)kuP+xo@&5TzPB51lUeJLx>`rRhb3ao_1##A`M?
z5CfJV9^dB7b4(`GgLiY-*|>Z6y@^nWl9Cb(wk}FEDJiLl=4{*@Wj-!hK|EmJk4+T<
zBBFT(JJ4^4*S|n<5dQ;aO&U!$8!f{tmR44Od4i(2u&RG~2zY^&iQv`w(Z+!Q^#d8O
zc+qGz0b*8|ASxs@C1v?yg*D4zys;Cdx!^Y~9INjamUMxo>FWBrr>7^h@0ZyQTsFF0
z)#>Ha56*~PV7fXh>hNC(1Y4{!T8ksMx1gL93}*IP4K1yhF`B+r>=0QWlgv94FgH~x
z$b0PFf=}LYAtj{*^2#9Ykc-Wm^V)TVZ<U8X3A0=7dIlom;)MsPJN%@iqL7k*F1Pxb
z#*e~?7O2~b*UNUxvP`ujFA5|f!1Smct*<CLpQTN)CD&s&kx6o13zXm(vBXj!7m;_C
zrNo~;3LkQrG@~93BR)ZWuC!kY&m}RudvRSg8CZ^w{+6gu#-m=wtJ->Lm8QXx!+K#k
z8%Inu>MPb%?#OmK|D(*P32oQp>G&<eVsh7KC0}7(p@b~-dw8|pXe8}S(nWvd_YMiE
z;#Z2L|6HI5v3LBnvOD`*O8DuYoJq-9*T9#Ick7Z8o6dAJHspcvpn5}3e~t03>Q6K=
z>vY8AdCKwdDslsYrg?<Dpsned{yI%tCl*?MZ}VZ%&8>IECTYt0o6FJzPu=4X{HyhG
z$?Y3)jho9=*tHs7&I(Zx<Q=bFGrGcNl4jdrLef(6<_GV$SA;1oWXKzD@IvDHGnxcY
zuIZmNwp<^n7OTwrYR#hr#25xgCZ&8r%czp8nOuHV6_r0xMT(GxE*HxLP<zmYp4@Za
zUYC*()d8qv^X2{M_4B03`W+49RsNXGPwH=UjX*LLG8kuewiJ&&<1|kd4ff&x&I@C~
zBtu97B<f7n<fD^osilDWcY~crdYs!^Dd&)+e4N8Ez+o2nUmx<5nW#EqHKW=`lyYd)
zj~2}rWuW#;3e?KH9ST!M?r(4{1u^`@!zqt`wBFQVp}fphTgqkCF{u(nT<k!=w!)l4
z{5#mE8|5?Rx_fYqp@yy<2AE~%rIO{q3~pJz)S6eClC_l~udE9<9I}yBH7eHk(+$RI
z&uXuh*_N71wDS0BAJF_6Qdn46HtE0DuwpcE<k1vQ<npSkZLYMhP!p)u6;f8ETJEn*
z>xWwpth4RdC-%)~97q(Uts-u08`knWsW>+V)B3tss_NJ>P-|F&M-NX<+^8PA^<v2b
zQs<?MS<vm^HMnF#RWOd;qTXisfAT=VUJI5UO_yW{C5^qGQ!lL9PK385Wy9}-iSpFY
zuS;YGWgj~C_Q2jX7ORz!`g~iae<WO(`Hp#J?$7JV&X?4ynd3-i>$^y>(0;x@>_RcQ
z!|I;X*0Ns3)ni$O!RV-(n>UymTfN|&$xM1?=&$IddA*E{J1LdH7=zA#al2k3s#yQE
z;EbA=tI_A@dY)Ct|8zPZW|wJA43bbr7E^@p(fn<6(NdLksWv`eH7|r*+#<`MKUHs;
zWoc?jq*kc65ka>n*Zw8Q78$P@Mrjo5)Z61NntZxd9?qt$qO{i15JVDp;r8)-rC2Do
zv@18-;nHzi?af07MQG?-iws-%rD$!;m=K0h->Egz)18^K5X$0Yf7{@2;CX&-`|-+1
zOHN)oT$39R(AVwtqVN}RBW2m3EYw#|Rz#-fJP)T!v0Y>ho0*u5jgRklW>8|PQJc+#
zz7aWh#l_<;$?d_!mtN&q9SlczdcD{_tf{3ukRc&8yL7du){$qOmxb=p<MCnqv0k%V
ze9hb_(ahqZ+}%Zfeu)^7K)%rvIU(;Ab5+SN!-_BM?ve4eMO$VeltI|+;P0*k9V0~9
zNiHF9Xihtqn=Y^68c@gUQaAmCojVxRZ?n(WYFz~s0Ua){mo%iL#k75tTOZGTb$pvp
z-s<eCl8~Ahd^6CT2DXv}-ceoT67UW=K61sh-)`NSceS0aV#)|23=C2I+5T~$!+Ok8
zVKFfJNp%cZP=)Y1nxDq4t93st^-ZZBDY9Z1tc@jAAMu=-9L<f<WTrw9RI&y?^5yA9
z@?rTYf6rN-AxRF#Y&ToFddtHZLF}fa7bKF@C=he?%IJPlDP{5%x^k0KdW#CKn#Nz&
zk+sK$2M6n3NUEuYSQ1SmY<{&80pmy_QlV>daL2}+kaTj$dz{xF7U=U8Kra`H$M!fu
z>{$AH={xiYpryvf%4=z1#giwfX{}03H_f9ifLw}QFV%T8n9mgeV2P1z-S3|1Ov;|_
z771i`?V>-tB}?oR8?DrKPHhH8Mt_-fI$dewdp<$+4a*{4pC1V>2`133zKGXd7LG2U
zmSviZnbPSKGad(XJ&}jin05EC_>=86NBidc9@kdpHzFgmJTm?)aQfGDfCnko9%HuZ
zbmJv!Zot2X1zP!IKEc(MfHpQ$z3QZzpMq_=H-Ey47>n08v)|~RPi1g<Jy(T?hZ9uP
z)I$=pv!~jf-CB$5f;>cNBX9op1Dw#|a*?6{sxSYMo)66Rq==lHbk+hfc5{<Pw{yFD
zpWl~=9?uwc+FV_Ox=2&1xUyhsnq~jNzI-Cz`-O8YT|g~1BGtvoIzmER2u?~dN?NCE
zZGxI!Hlat|Ha_8p23}@n0IBhE!1H0v{p^?(O|B~?C`A&wYLou=Vx<-v8=Jj_*AP&s
z!NL8&MjrXJXJ@8VZ2GwxwU&6oGg9HFKR)Sd&9hI-Glio#{s91oiCDytDMYZF&|ZZ<
zpRaeL&=k2K=|ozi6v*Y42B=OO_Wbg^dp&cXZx69JoPg=cm1<luRZ_FPOR*Yfd5g~?
zw{z~dcXVucfUM{;u`|QIvYE*A|L`Fpd7Z={iXQ@5fEFXcQJ&RH`0}Byj{cpk<4X0f
zYJ%pcfo^*@E+|{Skxp0Ey`d|#P7hi#@P*ax=)vb+j4*1YR{!OE2}ack^{at08xX#B
zc){;H6B7nW5{eeo>NU<zPL;9;%3A$;SK@skLk)YJL-C;fwJ&YHTG2NI8%i!ADJf`*
zvjxy>Lo&;mBM1-}+H|Wv&0%8&i55RGDf062JKbLF<~Q$FC8zrdczf+c#!5Sc!tDp~
zBm&_5D8n%xAeA!8X}KFwA4x$C&ZW2MioOr_0)L@E!hfL%5YJ<das?XN{4r2@yCccV
z3F<o&KEl)jl4S8`h$Ff86^ST6|1;TDa2Q%JF05c|ZdpT01|_-)G?gTRy~*U#`C6Jv
z_6J9zU^`fGZ>63~)&*idf-le(n7fy)(MBKpHu}%diRqOYkN}1(GEWIVs(0Y<!}~v2
z^O6j{b0g;80w8Qw1b`zT@8Dm!l|Vo&DnLN?AhV!^RlJb_*#5^La;4z)|3rPZTNCKl
z03U>i&*bn1w-vg<acIKXHr>*0<fGP_ov}kQT3RV14LN%`Z^StLXu7^`K6Sd0Wze)c
zKHrMg$_8&unPNLL%r3ydA;^0;Jnb08i}PfuZ=}WQUv1rO!HIp(!2Mc@)rB-oDdM{7
z>^2fz8wq{)1UJ~r-*9yY)91&=!wac@@#7yncf7xcL&0PBc#fVZ#qiLFHLL0H`3fw3
za4b~q<PJMP7;7gPbvrS;3z#@L#g4_z{~~mB(m}g}RVd9w92^>YczDSCOV9WI{E`W7
zxgz|#tYAEf$Q!kmFy+Gc8nbKrT=}%r*&%q0YJj#k%T4T#yZg%)qb2fPlOmtc;c{NR
zJ;pZpYz3O>8u@KfApuf)Eb*d=>|p}_It2<zxaZ;flEX}lv4_HV#6thTcGt>V&dqVA
z(>x+QM|7{*$~s*7(3Fd2vPVM;!|Uo;SBKm+o;-)}XT_w<DH{Lo?(Y2j{7D`bD_TuS
ziAcuQ-tu;J7Sx#}EFe#Yn3|jK>?$nOgX*YlmqU)@U;-NUqfwy0J&A{h$&@?9=0pTb
z=RG3kEI0MfDl!q-^{_e7tX7Z=;@=NR@UgHzOeH+=0#Rh=evy>V6Y?Hpud2VcP)xgj
zRQcssF?{a);6tEt+*6DB78^bNtE#N?CJHQd3+&pZ<5JMmNy?6JcfXIzh2?vbyq!oL
zZ0S9MO1v#n?_PlA^U{3G@XTi^M<Ln&WRidfprLzF*<=6oe~mWHIUM{USr!$aJ^$V`
zV+7wUqd&cHMl!VJA%h|U=RzLb)$W}zG$nJAL1s#S{~V6@HKO4#3*HExZEMTt_6#|S
zak4NVmgnG4IIFhkvAAGV?;XknnWYS+{D@qDN4QDCcqUtWz}zN>+i`BOXGo7qF;sTo
zE4C@d-~YFbSxmw$p>78ZEdu({uCI0fU8B>A)Cf+xx3L3?zXd*MJ|Ab<$Z--%KTI_&
zL3DN6WE65fs-%Syzkh3Ph`{aL_<)LQ(p;AGhQFW!4)*2El!N#pq*^`@?hck_PxyS!
z0vDw!r1RY!7JwWkbylZAI@Dh4D5YuB*=90GBW#w*!!DP_q3D|}m(djejM2i!$RaK-
z0_&`z94|B?og=5voxy_0D5np@%(~or>c&x1R|RnvSA$#<`J4Diw{0U;28F}^(|i8l
zjFLW3k>hmIArJ#ny|ljdku=<%Zs)|QjRHcSrT=9xcnEF5w>#a3_j2lW&j-*g4Gj$y
z*AbvhJU@p{E`R%l1cF<dK~Li+RqnMWm8j-adcgk?BP>pw*nXt!NT+YAizYBAARdu6
z{<=psHI7biXP$phquco}5O|Rpi<><xrX2(V-oFg!ADkWuDQUCq7M2K@dDbQe#bW0D
zTHC{pkvx~VYLm5<)us55hiV;*VzVpv-dIrq9FJudA<&~(Gj90jr#B-bWBwr<fTOfp
zsd4x{Af=v1)#Kyom3V5dsksIq(57>EvCl#CJU0pAt2C-Wvi?bMZp6XL@2m9V$~P;T
zU^9)n^^`K?O_Bdh^Q0=5O>kwbzfotM_nLvY0hW`Ksi~>pdos71lqvHhRTY=^c!?l4
z&@NV4rY+y|=#eLgLB~{3x$Q4|KUMZY977;RYM31x97UM6UuA6UkcRfaIEDlSh8mNc
z_WMPsA@Z0w(=Yc*!+g&_*lpR?dmWb|{$6gYJ5mQv0jOPPCsDImzx$+q-m=>i#Kgeg
z`+!^wm5~O`VIkq*h8)4NE6VtLBHA?o*lK-E>|xbZ#*{K$W>3;%WWhP<%A!pEADkc|
zaU&0Rr-GoM!S!bQ^z?K>f=iEKQ;)Bvi+-bNbZKenF8cA|p^78hpNP%J0XH3T1id<w
z;@E|nqJCsNVz{t5!*&BqeEgjrA1|`Rc7wl+?+00b7^F6uG_w;D8a<!R0YurYPD--L
zngac@@B8!CLMu#KTY)WU5lfpj^W>Q?5*HW$D)<w4+Z_ZT<G_OFTEwbQrDRzz?AG4h
z?hWJ%hdScK{zZa~dR(dGZS$7caKCO{z5IL$_#&fwb$tjLH-B)VgE#%THW0=6=UgQt
zBLkSg$UZtk|M&Xxb2#A;SWb`o!z@lG1^4^;YB88)trueDB9`v!o(&#do3FPZuaj=W
z1R;H$a2FP<6NZP3kG073AyK;<+(xe6RDywYX0!YI`x^aTJ-Zp5YQ>}{WiE2RpGjQh
ziK}x1P%(|ovWNQKbt5+HEa;K5YN0`a5tnbpQ;yi`y-f#jg9f4ktZwnrEzXg#0A?N~
z^xKygc4p>-&&Bz9p_%jt+enocmq4AEFhvCgf~0x^0pauLEW6qw7EjjE50wd`hC?E;
zXl5i4{yf;JH35DA0P_2Mt-r+RIhS#;EbHu33!M#2xFpDEn)^nelx9f=u-@c~hnv$g
zTHG?u)EfhY>+TMB4R-t1Ha0I;8xSt7l8%LrhH+zu_fK|Db0brodF?x?M(_?j`+y$~
zND4aDx>VZAfds>2sP8ET2D^}~aJFd~?-A0{c$^w76UChe;dW!BUh=z*C#l*vV!!q^
z=8nA8H9ucg6Va6REp<JA&~KIg4JP}&TwEAS^-DyyfZ2<KcT2OL*Ni6K4`svnh|B&x
z*E`v4bLK6kFuRMYo5URAPc}c4MORhz)x#+>I=!2Ec@93zo58p39Vh+}XGq^x=sAkN
zJuBV~r=?y(jVEOH$uaF3r=#)xk;ss`E?~wH^_7{TmPE;(JujV#tD!~+LXN-hhRZ*^
z_=_#dX*GA3)kEWG&geZ9N{7J~;bS_$?e@uED0XYObUqya?Y+LZn6x2W7V6PfV2J#I
zcL<f1FY9M%L1~B21y~uL6H1Ig#O#T1A6diP_I<(NL><a5;4bZ|G@UF?rZbbPWOYk1
zxSUS`%#OmJn8es`7As>%nr*cwd)5YRCT8YZopvUN{lRx?H!7Rbo#L$t)0$1Umhr(b
zR2)NCY_`kquMY(WYy@`DE0?XFEys^%aCf+CuzJpF-@$5T3$2TAk2W69yw{ZvR5BPI
zVZ570W}@RgxeOMl6?ZW)A^d#;KHWoCYz6lp<<Ezo<d3NAR$fO}ESX2yBcIHvhb;P<
zOJ)34<0}HHOgRacB%x%R3eUplHj4s==gC^sfuhap6Hq+<Sw%$>DlRQ~adDF-kG>0_
zj3UFBhHnGn^5#pvtov?(<KyG3bJZ~(9i(8}FpSssP(cmc$$_l~lZp4I3r_xp`T6hr
zeEm-YCL-|XQaejDc|IR<x{Ha3e+;^?xj6&=@9PEa0bvGP`jgk^wf(P7eK~Bpq@uV&
zFbdwLlI7MnYa7F1Czg}RWv};6==8ar6r{V|&<~DO@`xvE<RtAUog8Pn<j+n-EAS;Y
zK7*a-buRqt4HxOWN#l4nxY*tgDEX0zlqiC_ou8W#5?1Uo6>LY=noFGz!OPR^wdyZb
zJaB~!;Zo|qY!{)tb5QGka`zs*!O4|ULRQF2wFlofwpnYgmxcnTzDEh`=by;<f`r7e
z(nUa}n1Ib4O=!DkoCH@+>F7|Vk@toClm-367@c3|Azr!^+>=LC(L13?bSoEuc!Q^3
z<M>@>D}KdL_ea%b6_G1ZfdpXpkIeTuM=cLr^#dyg7vJQ{UoGDd4eK4mk=dYRWf_&x
zkdBVjES;BXZKe*n!Qkh3w2AzC6J)$g6pqTsf2j+X-l~O6Yx@)s7oI953JdTL3(#&v
z@=NhI;Bh4cR-(#dKL^)~1=P`j7&rum_UdZ3JRbMPvw4CdVE4Ym_F23gEYIKDW~$t^
z(h&19zF!HcskPklZWqhdql1pl&dv@Fd)EvSFxYES{6*gH#B_TwYG)3<+uSFj9FKN8
zJy+Y^-0r}%so36vlNDu<zP`R$$w=|6HxE^dS27gx8CT?b^h?j{h909wjMt0DK73Sj
z9^Vaw`=q3#nwpw2WWQDUL=X(6q-o~>X?%56<GKcT5KdivC6!)(8$b?hwmac?c4CJ)
z94qt6%-|Ez%ROr9a<`dWK5nykblLDV8&Sr$iO1sg^z=lc(FR7qY$Bp$rF+lMGx4FG
zMzEr3jndnDTnc}R4F1d3ZF!=9^sT7X?{(}yVh<iYRX7&En=BO?+&ypHXZ8KVhiYG_
zHaat-%_K0JT2ZF1BB-c%EAy)Tfcny^RK7Iou~;^j!X8M?WyLEnePS_uW`|jV<>=>(
zVrVpkxgLX4OnE=Z#(`ep8+j0-0RBr!Sqbayx1+FHlqJ83YiZZN*XbW)zOABm?&rjX
z(;DRPab%(egq{4c3zTo?825q_UV~qW!B5%z#gRpniytYY(PTl@sl5~xZYT_lJ5cR|
z-W3kxN(Xe=@3^f<{5+o*FoK>|C;5pJ{V<a$LSrE8Sgdis3oMX!|E$FK^_N*=n-4t#
zwLW}u2WOMDkcZ_L6(wb4K*vA;{v-!tpWkR;FDjozQCnMD=RULpp6y%3X1AAHQt#F1
zFOLN->83fslIijC4@nZFA{_UWXF*8WsjsKPstIWol_t!UA<;;*r&dmqDnkg{TZmw(
z2VbgFfv0)F$AbR;cW)lAUboXz%=@HQNiv7`l2_+x@TiZ*GIein9+s0l_6nPVq$Or1
zO_d}$L=e^iENZ=(ch1UBp3UN~G3*;u%CH56wxTu$YHDdUwG<9k<I%VdU#)x=yE+9(
zdtrjxFRV;v1h<4qw46u1nY2yTC^K(idm~t&O>?GQ?uk)z!XL_d!<$bA+=mX|A^<fI
zi_ec&fn{G9pvWs-NM(p<B}$LW<{$qf(<~f&NHYI@yD}{-ngA_*yHw04>!AJOQ8m*T
zaCv9VJ-~_AM;;FIaDU&1{u7*2OclKcKyMF|%bGSW%qPJHv58~S9TaF}HE7CP>BubB
zUw(TMh}hFTg!A^PoPc45E+WEdIQzL=YrMZ3<k6}9kZ&i>#z%p`9gb!xr%PqC_n951
zC4qtpKfezUO6EE9n_YBn876tLfZ9MP!8j(%k_gvXma^haS8K7AfPzU>&rE$dSS2A1
zpO6eEiI6#Oq>cDm$*T+pB{K*&dsIE*z9CmW^1Q)7aeHqu4g0G8jOpwwrSW=hjK?0}
zc^GuX!-zC6DWM0fSyY%?{I@Sv9Ua7CP6xvPHH?LoRWVPNoVI$KSCo4SgWt(AS=|Al
za&2wR!`)rQLsC>s%SK_GB6H-9cJ|i*j_X0+0gr__2`mx_7_jm<nt{UQ&q^YJ;h$GJ
zie58gS*<98NI~vq9b0!VFM7>}papnB)~l~w+VpAeRBi@FFQLlZANU)&$pKN0Zi|p<
zP;O_*TD@+)USHj16sUi2K=y?e|3cu1J^^cAM939cxs*{FODG0p$xk-%XQFzoo}ela
zi<2KQPjBXKIJyO~QwnS{&}><Iy^&sUF8cqB7tm?)tm1>Cs$60Ip8r8af9xdzfOt;F
zme&L9kL{PQTq-vk2jY)y9qTb)boT@1I6|<~k<bU7rUq(Gxw%UiqR<TklsD%1w-Dc2
z9K&-PuEsohRfsH7j?5;r#Nh1;I}CC5vz>6EqWPk_!2>P~WAz4=y~^3_p{S@)9SxKm
zbe5=1kSvk2Q~f^gQ$r8H1S5jXDzzz2(Q>N@IzB~BPRi_p1MEh&K!9Qe$RArt>9yd%
zXgI1IioGqB%hqCdEf97#OF-o^E33kdes2b@43idfO8fn4qZJ|(U|{kCnRU*qso5rY
zp{1n!1!yIY9S8`};w;dnKOH3yP!gc=K_DQ13?Lvx5Fj8!ARw1;*Q@oRp`l-lKtQt?
z{w4k)>%S`#>Y)k1^Y-Q-Ar}63>g*^@8TcG(L_|b+dAR^AkiaU|!1A)1jMTQvM@ffQ
zpfk8~j@BMH`ZKle^Q9h`Bk+n4h5P;C*uMafU;$VFz2wZ|k?UHbO^w2k$@KkNe1QVH
zfXxP&%B@s&pTE8pRhrg3;{rEw<{VOa{EpTdqq6(!l%~>jZ#TKd9FD<r9A73e6lDLO
z6Y*<1eo*o)82jV>RYn7Dwa3F#JatKF`;9&{T68vS<d)U5ESX@+VES1&!VhI=PdB&1
z3<-l^g5s^+bJeh7>NU+QzzedNJAArOiXbY&bH7%Y7uCNP75SZ7^YNtC;GP`OruXlU
z3rkvaRJL{Z2cvUV5d~gCg1k=Hd|^k<-5eDIN01O%YD2g|lRK+}+QO=5MNpWe9wW3L
zwP9XTR3JhP#1|gzB96!7!J8F(_&2vRx*{d+wY1EGbePY3NbVPM#~R*{Q_ip1*ZACA
zdP2fEk|t%;!LXPZGT^O2^L=OU^0i<)->0~|e1BEr@&0KhVN#{W-p;d^x7f0A6=YY|
zZu>x<?rw^lMi~h2k3YvZW}aE9RmnbzN?S=EXjqp?DvYv{u#Mc<AN^M$Wt$u*#PSD5
z`m*YS>?bK#uQx^$V5?DH6gZkPqBH&m2?|$O{PXkk8RKT~fm}ORXC~ZA=&32REgpvc
zOdozOXDvSt%*ge?<Ov)(%;`i!*<%ElNHQf+s5h(Uf!t<6t-ZvY?WjkeYf^umB*i9O
z<%;-xxn0Vf{6h%E-~>yI>UaR=0gh$IND^=$h4Q%@j7-*(_kF$=v$OlF4Oqq>Z-=v8
z!h;;1$o*Nd%ALF);G5H+TGln!tG5XZ6wG?JWr9#<Qkjw}u~@Ju_tqe5+l)DL$KpvL
zQwu>S;*hkWNu^e5U^3T6wz6ZcbAK$N%U%8@DSp3$yIOvwN}1zzz2k`j|Ekrc;IsO@
zV_gcOOb_SL%rNo=719PTIM1gm8YJSL)mmeZe=oap={wskzYE*HY`jvFjGa>vEkdx2
z>@-q#!=<IA^T(GF9$h+AsJnyG@|qK)44AImaynn;4-Z#X@WjUW4=kW96V8dv%6d#0
z=X=RWBq?RhQ{9-a54a7*-3)sSv<Eg`^oEU@$1_CN<bF3d+ywnxquAIEmKjH;-&0VH
zlE&vTSB(8V^+{*!dVwYT`#b&NOKxqAO+#;7_4-ky7k(O!v|UGQ4LU2G(XR?tj$HPE
zFyuwK%RC_xW4u6>e-#>}-DFAM5`NZtR<KxA(pToettT=fLc&-PWz>X0m;AhJim&2*
z`q9=ri=`L<y$<3ZWU5_Czp~L+tvR1JGC<XU>W<}M9UP!;%uL3eSEeMm2TZOI_}6;R
zPI?DA3)A|h+iqC%dgUw~$_pTeT*KN-4ERP7W<SIG3`E6UYXik^duA8Age=X9=pxc+
z6q(9{!WqjO!cb2ySJLDOx=LV7C7IGoWQgKLOdg?`Zwk|F^c%%4v3Tr%HmdJ9FV>|R
zHL60_hJ=em8B7TovB-$lDmh+M<tU{igrKwM`*#1MF0a8XQo|-3*bfRLt<}A?wuDqE
zciL)=GYVejBr@N)WHT|8l;%5cOddqAC{m^>1*-(au`m}{naB+n7Z>w9S#=LfU!_|#
zElf;MUzPB`J{5Jiikn4xVELB9hWaUpUW=f6@!N%qjv^cI!^UHT_)WMEzJ`!du4Dgb
zI&N8qO>BfI7;~*LMEb>vu3y6YMAe6~rqErQG3Tx|h1d7L<nM-8J4>c8WNaq|XgHjY
z3gUypa{H2**Muu#8<Sd(kRMlDQ6H^))}I6v@~J)C=yI}p6Y&YpOuUFlB?aGv!$i`S
z?9m<<*Pf;URQ>0V`$6^>RWEpA#iGDz9CoJkNd5giy-dcMVWFt5%SZWm17NZcPO>SW
zMsyA2Un<oea=4q!TgFm$C<Gb-tzVI}v+;kx5W*&qsKatWsc)q(aWekK)=wzmpF{ft
z^+DVB{PGtI9hQq~nBgf?!Dt2oVs!si1$g}q?CJ5<(7+gl0vddUdAZ&yoHHjN2P_(w
z6avZwA>rX;>CD8zW>{D7Fdw780YnadmQvzP3wwt7SD43Cs<gPis_=})H?Q69ALHt6
zMZ)X-#d76*+kjznXT;(KpB$)nxWp|5sEI(HGH8FmJ=dohaQm*xvIrtzL<k-(!o<o7
z2MLKNrFNQ@lS4;N9tu3q#MF7M_Udz)`9{SXOXe`*e7%WzBv85l0}Y)g1XLj&b*H_f
zr@!%+T&d=uT0d7zk>XQ52M+6y<GFA8-i)&|D9aQC%yc@7({U+%y$)DVPcH|uiu10?
zn~7RiDfTbF7(RB87XjukmUhi7cWT$Xy1KYgDIJ~7FFp-50ocBOQ_#>Znv_PlDwzu^
z9f2uLSJBQhRz5##$jWgVhq5H26sGAddM#a*7+fEGy-7n{|FTgT!4PvJ>8T<r4(v`k
zL+AynR>OhKrn3Y=4=%XGhdv7`C*^rOr-|=J=4&E`Fy%Q1cX?ok^#qh;WU8L0+4I1V
zgbSW(&`+K^;jdm8Bj?5@6FwH>#stH2?>{0G^>VpOS&hIW?tGKIxD)HMd|V3>B&3{!
zl<>FiQMEK~0R30&LlC(;EngSq`8+{#w`FmE_m=C@9`G45=%Ona86JMoXseY~!af}^
z(xQyM3DUH1MhH2@EiL+ddm@q7C)OnWUVs3LI{)$+Z!AFOpwR?k1yWF-cC!fgBKAo4
z{d%W;_MmfrU#e6Wh67qeWyC^oz1f}`M|Cztb!<RN4l<fREw$amJ?Jt;TqGwDGa9Tj
z7zoPT1E`Mk35JGn9Mrm-ULMa>&3^Qv`=vwGU3}@(q+ITaA__9FDr5v90vhf2S(zKS
zOzBnC)mO*IVvFl_3={7jjFgm=eZNl*4{xJiJXIt>_ZMJHnqJIO8G08azx>>svbEbN
zsOV>(Q`l9A3B*t_xE7zz7VHL_dbc`h-EM5tTGu<>ZmzvJg(ekrbFTLz=_MGuBY~gQ
z+0`-s#aKEFayWtGdoC<3MMp(lZfc$_lum7$v0Jw_csL<u%gxM;r1E>+{%c_DT2fPw
zr4>$vNyAvi`L(EHBqAbm8@<yu=XfwI_M)Z@+^aI+0<H#>QTb3^Qu2H-61&!DmdoRj
zAVMVBq<`gV3!wz&%KotK-Qj2z`t;~jJ9D|tF>!_{^?5k`q1^dQqgSzJ6euip=_ON&
zZMGmn0F{@H2%_mADk>`SI$taefPDOjeI3aOk-pRB#MsEGoLqlg+&t>%`&YtH_3RQ;
zpB4BHP^FS_t{Xk3hvQqgPtI-A?#$pedwf0$3Y%{t-zhwi6g5RvfBh~c^*tb?l?|dR
zEK@qrE+rr83*ny1!Wgusj^d?bG#m^AuvB`|{bFH04cJVx2L^Y+zK4P^UX+KsjkCDX
zD!PA>Q)6$$&@@wid<Y}QxO{fK_4L-f`=X*?Jv`pf>FuQ!XLQQHVk1H2Cm$$PGv&18
zd}pk@tv33c%ie_uLI5USM7uG)RK-_<E<Ys2j*~*63Kji9=*NYR7dDp3eT+e9lvS@U
zUT#CEce91uh_}=!vZTbsN_p;xo2;6MTWUtM1q+1aW(OqU)7+Njvk&)gJdfR9nnl0)
z=~!1E9RpE;ZQ~n<ib4mUuO?FIL*Lg~WXj82N=vTb@p-Cqdw+myT4p26ay+^PMMMT4
z>hm^a>eFNt73bbnkWM8go8%=W`xr=Nc4-~sm{{1@T3TDh_c)!7Uq3#Chin!HXSbQW
z>TZ4yLQjd>t?NFvCoA4N<JR1LO!lJtIJ<xK$~ZVQv*{DVn9r(XbD`D*DX6HR;NkHZ
z9#Yiqz>6hoAJSk7G#V$_Pj&%Bgd7eBU@F@M`yRLJODy;7%S#@~2{E{h{inM@iz+I^
zJhUXCd<KjtG^JU9`F>?#fw&(>4YEO0&eav|BW=o<i8GfP7>k?nGm&@JL5X_3h$NQj
zcQF-rgbbZk1pvZ8NoHndBIs^UY74?s$FVlwAldn;h-x1ac*XL<U<iAHQ&}x6U@QU8
z{f(h8u(0sh1)45c_g-H=tmEZSyd7i%DN|$EomB}luspvw>McWUmZ|QYFmpvGS7%Q|
z;~u7=T&U=XmJq#lVX|5ATTUBGtQar$7epo=R84YLS1|!y{NN%SK3j;?lre@4)0*Tq
zq*AIlj&YNu)pC8F<@2TmHZq#ATCraG@Oss2)bu-d3>wG|IWMxzDyue|2_D*zJMnE0
zPq|byYkZsj9$A*pZGH2Wbe;X5toF^wn$~f(zYFhZ=O}4&AXC(QJC@hu6lHVMZ`<l(
zDur4kc2wG>!!VOKu{&Ks<gP7J2mGrZxHTqcEnY$m<k{NY@&{h39bQmsDmtCdIPj|T
z;Op3i4jV?iYW0V3Xx;%N<qmlu<Rdn5{SI>ZryAWbj2c12@6J~~H1hU25j4w-(@N~-
zcDHON!ch2cmqv4m3(M$wb4kpHSQ_1L26<{-Wc>r>h6hP!83Zm)`|AgC_Gx&WP<DC6
zRykTsX71;;#OE*ag51q2AQwga>DKl)|NHnbRCn^Id{<$%!liV3q!%{eg}LgVP?RiG
z)ed8j=;K{+Hfr;Ev2~+ci9l3;a00QTxcK^+F-jah@7JwPH_rH{2Meoaw7EMSHpfa)
zHKUT6)&rG^PW1c+HUmQiisgk8S$;1x{LZQIij~?ofVfp}xm59=8{F>)Z=eIp*8o7a
z0OFWy`DB4e?XqcB&-W56Tjwb)8@k&XSOb8x0G>VlZci}O*1d2+?{zH|6%uM{WgQ)+
zKmNmo;wBS}xcuLv+uu5-rr*a?nJd-Wp?gNIPhS>Rj*gB1HZlD6a_zNc*600s<AoqK
zjC0S>uHJH{!C}2HyVmD7VKg7x>HWnaYraRQdbIATBLr#PwR}Ad?bP%%SqRqA;o<zk
z!o_<!I*o??<Ef&MQ5BZR(PXS6)AFA0wA|HE|2ByipAjCvZx-`$b#XCRa|@CMEFi7}
zfw#jeoldl-v%RVe^aV6EJqAMHu-A50kgNGZQ6T|CJ{e76sA#4J<rEl#qhyVo;3yKf
zU(gGl-|PLRgfN4H_mn=ahBCRYK@^gPKlG5r8%rXJ@SBuR^5yZdsku4$eI3U;%JVNX
zHiiAsI4V_g#}DPrXJtW>dYQ-c*R#evch1nDI>H&pDl9JMkf=%pMIVKQg~cA<(sH#n
z6s^{_ohwh!%#Ay)tDdyqyZ%F=?;T(1RJwj<)SZieN`qh*@#1=l1guIpWFg_GQ+xc?
z_NM#_NhiiebfZ?2bzZ;319y!y?Xo(D4hKL(=zei}E8{NKJEbTOsI?a+I(Vwuq3~{6
zm6JDw@<J<|)$HTlpa-NipO5FGE!)9}hLI0merW2KEXmp$!{J7cZ6r$@X%g^KAwo68
z$e%S#x<V=V-aRJ2{G#5W_qf)zVBVrPzjVNsBa4EiSM$c}<7dYv(1Ks=)OD86X<#0l
zhH{Ph8NiGTJ78EywfhQJ^>_r04-RWF25&p)hK3r{UvaF%6&tmx98j@B`C$>#(C&r%
z{lF9RRACOr-I{v$l_n6Cr(jfa-SwNJVGo$J`XTmBp<8u&fvI8!ct0iw`}{q1*kq~)
zLZ(S5E4x(Ug-11lUXmSZRggLsy|*0OE4H?8#uvyit!4887(&DQ#5-t>Abj!@@AABX
zFND2Gkm~)=Rq$kFmS>pgMXn+NN@&M_SZ0V{({3UMhkR6-BcxZN@rVkI3bGq~Li{4a
z_Bm=pfjzDyyi`<HDuV%aGk83_i9ZOJ99H+`y=-c(q8&0Zf;SJkU7eBi3Ze9Nux&|L
z(p0Q5%NJC^l#`Q`LH-WLLIbp5|05i$0s%P>(nTc|xsbsjFK)+vi7($a%QYLKHs(6f
z^9{Anc?`>Na+D!5=8mV7>PW>;K-p5`&zp+MxB*zddGCz?Ea8Bsf7Z-G)WQyv4C~>z
z*I{5Gn^W<tVMaDHSB*x^M{A`&y}35Cnma#jz5>`*l=m1ARjYW>T(>@L>I@|vRn@iX
zXiUEelH&54f%76N?r(TYqcpacL$uA92J=c6+%Q6&)@Bx`vp*dxh5eytZ$3=wrsdbt
z-Pca)eQ7fNRa{^+xu+yy4_ehw=rxfcnqphECo43R<>1ukO}Rj`EMbBLqn^CuZSh%7
z1M5^&^b9?XO2-Sb8J*GmRhU=A@f<q3EhM}NWcs2o`qeqP`956GFQ_fnYmTS0gx6_#
zc?3Bu1<oD2neiKEl3K?nLMAqIg95H7TrtWiSZ-&FHyqw7+>eedCj;g^v$M0OYmFz(
zRx43aQCw~bk@QzpoJP?hD15#zTSmUxorQib?CnAH8miAo&vS+1@gZxicDZ#SDS)h`
z?z_&}0p_*?)E7bTF*N82nS5ZPQdD2>U~PT5(Q21J9{BKuPGuO}(cvj11dKF2GsDip
zB12cyn(OVJ1^YEbQ%oH*R~QRNHAP7`245hFoy>6Ozyff2NTRe~=r7pOo~x&QJ<Yc{
zoUWT#=lKH`;Xwun1KyUPAQ3%1JWr}9t3`P+v0+k@L`d?()V67j5kqgRn)}nS5th^u
zZy7Q#uMfAIZJ(d-?9qI{8w|0(9WaAsumHEs>P>MrK<e<hKa@Ol-GE)b^?khBP5rcm
zJ>E!zl|5oo;+>fMPaz3{;~v!?`yTI}VJ8X+4TVs<Ku9hqEG(7H9LD{PgCn!K5Ac{(
zYc)G^eJ43d$z}~39#>ATu}E!PgdgdHukP8M!LS~N%O^cPJ|Z%jvtZS`qB-{DUjs8%
zfSXyE^<Rlq{mG=Z?&P7sc5`iXpw9gh{83EW%bnqx;_)ej3j~1C2;;W^;fC34`g5WD
zp9@TMjtRcILoGFF_36R=Yptpe5;X^U|2?NblvX}bEB2b=5wSiPe*be#MIm`QqY=1%
z&)pSc<*34^otFB99sYkQ#S+X|+#tgVRQ?Ch{IIZYkA^KP!Z`(EVM|?I9YOptbXuBF
zu@>4r{-G9Ap4w2qUSm3cTxgxKf7`C6DyMh6y+ze}Qf6{#Juat|@?p3QgY#>PiXo&G
zxZ+lHNqAToB;!Nz-!qKr(9E3Fvi|v(nNrsaxA@zVxGjwTtHFuN$UnojHv;y@>?izh
zuMF5z&(IqNEi^z|gGp>9Qp>9|)z`mP4nrN>KBVv-+?g0O-HtVCsUD^zIj*Ek?a#;+
zh!Tskbb;=in)|%n6Ml2=xL;?=&FSyFCpWXZ2Q!uCj29*s?gL@OV&viF6$Mu{%FCvw
zehswy!3GMOW7+NUd1;{zME<LR@CzDXs`#vTB;`Xe$>iIjjPjdV{a>w&WMIfcLPBD^
zSf;3@rBzphf>?N5wtOti@As{urdG4oe02By<s~aSMdieexEHM<SVcWljpK>)$@soX
zK;%sj_{4k-BKb1&?f_89#4uYfN;djMhetwCkoGiTp~Cti0Yti*6;5;{ib!&G^49(J
zmZ+^^68?{vt(%3f3SnY%7k_(p+h{B|^+r@bdt-FOo2YtvuKQz__+f0ZE~=tTzphtn
zbZ8h39*=uoX{oq*lqsirMaodjI?^nLs-R$_7MNBpnzs@%Q=AIL{(6$5s$geg;PVpk
zB|(o9<6W^%%>f?#hUTt?KP4##G&J<!;9#2(%`!gn=e=i*%@xbKdK0FH$LeFLT6+={
zP*@-S;Bz#FS4?$T8N~}tw?J!gqMEx8<a(in@ednq?^G#utE<Kg?BwH)f!#CxHZp`~
zZ7WYxdYvu6)sOu9_r<#fm>HOm9^T{ukl`YhGuPI_LgEJBe>~G*1XC&7e?^#1D~We3
zF|xOxC*}Z~Qxwi1;xh!ErOQj)(xzq953uacHgD|`vx*-nD1lgFP;6{$axyA3Akox*
zYIgo=G7)UoJ17RHy)2GXj;P1b)E5p{28{f1U0|anR|+gDNZ8zC-xZ1V`dJTaoLebT
zZ0W0>_9iu$ZEyV8mFr(+81v~8l>X&^wb9F0;6RL6;7eL^RNu-hB?}Oi0w6>@JUa67
z2w{q~Y^qCT@YL5hI;`XKN9TAOP|rE1<a1N1Sy=HxSLI~l<RN7qbn%Ia6O)sm-6~K3
zh$IGjz0<3oLpU)q<JfzPpT2!HXi6RV1o7!s!=l}ti3s}ar3Hjo*!OSrTPpeb_Ev7T
z{cjDx2EC;th3lyx<;SW28or-CBx#I<I<2};Do?KQ3o(xHa!LMM>$myM6OifNM^jxq
z+J!K%dK0tL05>vcAcg+10BpdllLV`&K0hpUTzveVzQ8>0EaTaRdTAX|W?#g1tWy2X
zmQE{r`LJ}<;EbMr{g?+rtCQdNrw6<+s5dw>bECg<sZ{!BFOg@xr|(X-cxG-IBEESO
z^vWo-KChgg9Zvnrz<GTGx1A|+;I8(uKvV@G6;;PgeBQ%YTsbkZ@(>GluGwqP9Vm4j
zpd;SeBpa1HrtK-OF6<o`PS|qf;o)KYzx01JC_@y-lTcwgZ!7;)XZHfcwuG4Z#3T|r
za?%6}jsR(mw6688&)m}3p`xl*ejZwbkM>r~8^dYWGY;bVGV7u&%>=0uN;ZoH^m^cr
z?iUb4Fmc4>R8GV~A?wZ@0(ZmAZxm|N0lzB?OH0eu9}mHEO~xTjlLkVA_TtGQZL^r=
z$8QhfGbu~HK4?%Zs7A(xw(eY$GP1WVn*bO1Em^PdqIHyO*azQtsd8>qCPpsIsOA(|
zSaET2b#-qWPfE@qIOb;doM_}js$)B~JfQT=xMYcv0o$Ck;OeytSsV8Nn}6<F>Z#e&
z81Dmh&A3R~)*3XQ)M_yu|6l~Spl}0FX;U!(HHVp<XKXY;T^-=I8S4;7Mxy2s3p42K
zLG&{H`R#s}R=Kl3^@}24An9w!A~e0Y+-BIzY+JkTbwwT?KMw+G);p|ElRer#4`AeP
zeHAh6)*3-iDl6>JYiVT<(Ww6s9!HiX-GownHw%@|1(;i=arr4;PZog)#FT-}ipiXu
z9sqh)bvH)qwARVllv7PCW)^QFX8Yd%tFeeyXTIhWUfe(${EFQ}cyfv!#1#SgLLeB3
zMHqizW@g5FOOS}?&n0Y>)r|H3^*!?Ft<2)!c1Hih)ya`h;&M3$MFpOP4G+j;A5yv<
z+*Jh90%fdVRUe-+4LWM<Q~&{rPQ?Hi(o)V@+h>`MU7}e9NUO?@TSboYm{F<JWQ#OA
z4biVdKN!@Zfy@I333`Ub#L0fA5CH=98@vwqKeiPB#C`7e!Tv|R0=C1!ar*&SSRih|
z1yzH$zFI>0&>fv03BMLQHqc;!2F+<G{=-{!9h)==fFqIr@v9wuMcFw;CVW+Ypv->_
zuAWECI3wG0Czp+$5bz5;kcR;Lv4GOff1^B4pw)Iu!1q5A*6L-~HTx9UwzR=xNpCd;
zewHX~E+4%H5p+{7cxWA@AB4rvP{nHaHRLQKrT&(eDh~{?P-}vAosJ1&?EkR!mQi&C
zTbL*$1oz<X?(PQ?TtjdO?(XjH?gV#t*I>cj-Q6L$O>^&?d9!Bb{5UMSPj^>a)!zG)
z0tYUG)`L1+=HkEr6q(8fLH0#>m?HnigicUI8dx9lr|e2jyLvY8?qJFPg*^g5Vs=nS
zc=%@khs<UM^MNHICl8F`=H>=gY$DO*2pjf=L`DYIn%9^BJVATUhQ2;w#U?4KvbX)<
z<>@fU$+(=-(y(JEH#eRK0MX>y023_0;<6$7fHot+O<-f<<4G|{E4>w+8PNbd^Nom_
zdP$Mv1ROXVa0IXraG=l)21fK>Occilt9pSQz+S=rq<s4La|rwdfJ(<dzW@7=iKMu=
z_#u$-hP&+n@HlXn{XW+EnDs6#wG<Z@i*(qvIh{RJs>AUBEdT>!hx*Sx{?ymk11Ppk
z01`gW?CC~RZU>u?@yGgX;urY*WOxLGoIx<3q@c~s&Hei;Mmhh&%1SE(gV~5cEL2pI
z1fjI!M{FS>p+AIR*#$W{%EXoC^~I1f^71vEuOCe`X&oFLO=ma9aF(vS;1WX2oxCYe
z92cAy1DKt#`QF|$HBDN~@gilc_PcZImK{mFb0Xt83Optd9#(Qiv!{0Q)ET+ygE+sM
zJ2OW2o9T2|z<z*!C@GCiOjyfTz3dKs?RLg2qXd0dR>lAg!wWx=W^_jo%s7qPoBg|Q
zGP_2lqWImNpi?L>jHX66e@4#FKijD3n8qb3<S}t?5t{v|KXL!!;C6fE{jZ6v>gAME
zUbt4p_Pnf0h*s1k^3lmNJu2a_Qx4l-sW0^E^<v<TB*S<4ekiA+mL<!(qg8Kko>)Sz
zFsDu$gZAQ?zr+c0RkJc$`DoFI-plrn+w;GuoZiay+7sErkhLfo{3YeumjGX@;qS0@
znI6y%SX+8A4>lx{F~5G5ic=FQ3^xisdP-1-22>q43#m}sZNHhn%VZTZ1nO!w*KM~*
zJq`$7iMO!CWm!FDc3}7FP5iz|_-um<XbRM!ORU3$4Ovo!FCVKI%^W1}oQ*T+r+%UV
zS}*8~^(|>L-;#R`3^;4#i<cfHe$$}$0;yyM?M^z@*>3dSD*EwAr;Xia(-E!PRQ{=n
z!SvkwXe()sjX!1FUkUT{gJG-!@2p^&25>-fqMeZjD_g@P-rTU?e`!~%&Qn$IzhF|L
z!qn;4qy!dI53mPtMN&@O4<Sv-+sC<Yfu7-hP81_<+#NolqxolEY6|4FacN09oizO(
zOP9#sKuaACONsiVvTYxe)t!Mpq3J@f6aVkW&AsU$Ze9a{PJtig$&X4r;5q(*Oh_1A
z1Jx9oMc+M?J!JyWv1Ejd6Z{=C4$kTy$)=gnv~0=jnsQTM$`WLWF`3o2E1nO?`9(OI
zTe0YKVuW%-n6aDbR-`7o_I9k@9Z1*lD1(|E2pe!FN_Q^9&T~1Q3Ud;{OE&n!ip?~x
zwO{q9yt#46<GY26O4;|mOHYxxY#pyF6lPzRBVfwP{0$dRl3Z`<AG$cTMBq`<T!~go
z6N`XFRI9JLUL6LBHgSKJD9X(JqZuD1{3+88coTf~kBk$n!v}&WY<luk@C_??hSw|=
zRG*UZ@wG3L1tP|1_uoo*?9js-!fs}`Xt41S7(Dm-3_R3)qZqp=a`a8*x5KK*GJ}Vx
zsP~@hFWx#)oHGF}Lbg6*T?SoUMxw#(LfiAQvKMBi`9iu!?LL{Z1UGP4!JAp^NN^}C
zZ!R!B2rw`|Uw1D1sM@~G!-N7F)E{*8;vX-&V8djQ9AW!sqAdjv$eaAIKAF$#_agUJ
zqelTXVMlg(A8=?Lz~OK1iJQ-z%_o6`>Fv}c*eO1jBi!ibC)V(l{L4h4l<#P}zM`B)
z$<kRajOj+`Zpo@0oELL<x}<My?Jc9R#6`pHAPL1;7gIK~tdAvCxsy4|DH_%xXW+3M
zmZ<S3ua4_XM79PkP*MG#rsNzCh(07<Fj7{A%D`vb<kRHr>@VBv@A!sRRg?fN0V)vQ
zgd-*~aSYzJWJbdz)xrb+CXa&~&=BABx$GeDD5u{jR{lmnsrw<aEoN(KXG_g2U8+|t
z`Yd$ych&>+OP45yG;2dt=IuFoWGXr?y}1DHd1=%b2$oQj#m?2=Ew)f)-xz}WxcG)6
z5|bU8&t_9S2<Zxad?#>wn8Hj}dPaFyi}I-rE?LvS;^N}i#8T$u_mx}879;S%ij5Un
zU};GzlK)|~0n!68;k~X!X%gGJreadx==~K2TO2t%8pEVrk9%&9sXu+CK+a6%OF}+0
zvQsb0=&83|92E5TY@|qlVh=xR0)<%j%K|A=XI?dLb+5p4kZe<aoQv@}8ih9>mW}!J
zCvziHX5}j?y2A+3FejWdenRwlk#V3Dp3Ma2BN|e&5BJV;$zFe!Y;Ny0oxjXb*L3ib
zA5ucsV81B~a=SjA>gv@iJw{&>ktlI|8Mr5xUO&2Z*sFB9)5NuZ^cgQkCPO_E6jrOt
z#Nc32;B3wd2=Q5{Od;5G@=_Ktj(#!DO#Tf=4i8qZDxJcU7b?Jg2>*tRg0e*gkGP7^
zhX;^h|BTx#owEi#=e02s7RskI^C#%x@~DmPF$;X#V>0Y|E~xdArq!HYH<sp^2qu*-
zbm>Oo$v_N*2V<iIFpS@+Fcq|If5TCI$4$T%S5_|Cuj+;k2<IdI5_sLEHub;Fr%~Yq
z4)w%+Q`W<B$JI4j8szwi1><}MGPh*@kwB(r0r~Q1L;wah%S!i5oApL5C-KBKf4gMr
zo?+Ol=(!O>lkkF!|8|jF_WN6r!vh^Xdd&3xdZf+)%-IE+sX#OOs7?(PRw2tE=iX!3
zU4>-4`0EY8BwcQpdx<u+sKqRWZ#f}p{Y!E>2Pg>v?8<4<Ec>j)g?;K@dsN|&+`C6!
z5KA(Sz@Kxo`I|P*#!hj~xgzw5wQ!c6qwC(MyQ8bEB3N6A$8urnT?S96_HYk39%ADl
zv(KZbkQv=j$yPyC?8ygFrUiu4u#d<Vl%KmvU9<s6VRYo-6z*B0+0CXkno)YgXx8dp
z-sMwMc=%-OrQ)pUsr+*WQiS_wFoci<%&$9*Vn)$)`Uc9U$Hod)=<5TRw6L5P+sW6A
zJUIvWrqr^Lzh(|rh6ixb`ULP7R^IrHH8o1w1CR<L@Df;d^7=1*813K3mtNMT;80<x
zBVN$F%lsd{A=R{CB{_{gK5HE`PNy4ehXb6V0S!$+y`}6N=X!3H!xu3lKm_u)R{q{L
zxEAS>3!3ssGwbzG{>0;tX~42{;5z38CbK-4oyN@@5d#$n4#q~5@TAouLb4Y6FzDuO
zv|Y>l1zUGQIcD5ctZ6KI7W9~SL`k52)zAXEtK?-{t%p)GuO<w4+`jVM%e-+CyU41V
zEiE(2+7ZUVs1yD*6d_s5<?$NY_)D@%=xZ}N<ljt-5nt)&3_fQS+rt^R!IzMklV_(>
zztB5|EcEbLFUFuNcH$$)n4*N~fcAv4Y*rks)t&X8{*h;uDk!Z0R7fx!3`3{i<4>RJ
zZoR$qI#Nf!U@f$(W}Hp+ZRY`M{VIK^{MiKyH8x-SA)*Us>&2_1FD2f45xVJ1KlnKG
z9wnOioC)_ZME->qp|L&~iR2C^CvD<V)oZttTif=c5go#N-Ik=;0Q_Y~#|o0zaR@3c
zu<RoeS}RtxPyaL8E5_X2-NnM%^v8jLfyv1+#Lq!NLHWS~^xzJVKA1lNYlXNtz+8kX
zJw1YP&TA8_yao>F+>OC#{s1y-@h+8!oS!90Nl8B_{<B5)H$ZSyQoKpHIeVKF1}p;O
z$s77`1$cUTsxzJl$_18WvqJa*3flD5iNB9QDpj2?SELFt2|#J@EB$HyUSgSNG|=H&
zcXGMTw6-pYtaB&-PE@Xt5+|FkE>U)r|B;o~3lasz#>JH$Ji~_U+5eT!;6cZN17rIN
z>K!na-SGfyy2^4!|Bh>}37PA3@N0~Y1kXt_($J`jW#QGTmghD4)BMDO*ynm=%O~2|
z3>OE>#y)}<)YQK{o?G$%dO~Fn<ejSAC?BElX8&j^YcxJD1p~wSB7wzrUnr$)Ixs|3
zP<59`an4VyN(6XjNMl;_(wA#|rKJP}KoF3Su(r$}10c_LABF&^Sk`3iWYZ$AlkJyb
zg;w57thT9BpWCSpN{snatr2=emHTCT3!8z_O#Wv;#Dmb8(+|)K0HCK?|8FGj!NtW!
zsS+(3MeGn%v$AYNvm@kDgk_O(F*y@q=9)W^U*NJQN}#~;c1>E@x-8z}-yM-$P3^gR
zr?XbqK1<GQsapcWHp)pOarZT1%{T-6dG5#qgWCxcL%n{qUE`FxZf<f43a8yc6a@u^
zO&Bn+;;$LRqBRUux2`xPH)4UmTDcqmlIp_*uU2b|lcNh@p-|TQRKhv2C3Sfm4-)06
zv(gUflcmd#t2juxqltZV9d?f|Etb)X<!n{%yy^Hi(Hoo&Exq_gbN=eavgCHY3QqZ=
zmB4a`5TjAbmA|qu>3pbut`VV-d%I#^d5<}-JAsTLmCblMo&^FKji$8NA8>Fit#)~X
z*w`GN{vDxLq>EF~*Q?IyW3dN3v|NayZLvweWTYthm8HVq*zw>0`tIA|%gK2DeqlU_
zaoRNgfQ90A+_x1*ke!{qdA&C>2yPZ&g$-m7h&*|9SgBtL=&jv(*XmyDSEMN_!NkeR
zP-^QF@*Ak==JxBSqWe*ao!umUOimOdXkbZuc(A8;OdGTmnhUNzOB9HhqMG3wsUOm=
zb~{%rW>Z@^j5W4FbdnO>)5m0H#{}Um7nw<TPTa%9f@}dV6mEN*wGe(cCWG0DwEi|y
zfOG8T8YtBF!dnK#V^;1WBaBhd?sm@|&eb3&aNgfr<=L?#-#oA-H*xXnh|^5(t|}qQ
ze@y!Qci+&#VVY^%%nUbR>^17)>8Xn(1P1m)j%P>d+{*!;@BLS2JmgQTQv#$9T=tt9
zx>XtI0BU_L_Ug+<7S+p1l(OWu$dajVPM7dbm^{24%Xqp6%en)Y9n0iIuyVyzEXgv>
zOJB3J+7f=&JDg{-^@EQ6pO)mq<?AMw+F$7S&u!MlgHBfIahuONfbIdG**lLY`D^Jn
zjgr%%{J+_qIEK2YYOvJsXg?-rRVH8TVJ0Adu&}TIf(Sw2`TF6_<NqiB=(XAV7~4!G
zt~rsVlQOi}p)56CT{nLCuZzt7W$NX!u?r)K&Llo)JZ(zjj|Q9O;XlGdTTl^$UE{e}
z5JEQ4`{YexgXLtObNDP<vZr_J%(BF5Hz{kkVg`RA*{+aI=Q}d^DMCH*!Z~0tBLQCL
zdBad)e6LL8`O&3iyA|_-{;+Ioov{u*JJ??}nvhr&gqJn7V{IUu$Wo&TBicuwpzXB3
zfBSW{lKp;;+0o^WxZ)6JY_!njZ>l+v!q@gX`K(Ll+<5RAbbBfQosR_c6U9hOSOrs4
zFjd-=28QTf4?f=DZ;$pYAuV3jJ+pRu;CBKf*o+yR>x=J~q;j9eM=Mc^ie#L-sAb5J
zQmd)z0;r`(zaN1EJ!3%V>1oe;2dC$A5(P1_fZ_0Ln5cP)qO=O;`_7+%A~l!@X^LmQ
zs(7SEeE8k#T2=_Tjd6~&uM|&{lgxKZiF{^lnV#;XfmZ^j5yh=S!HAMkD%+t3tE_Df
zJZO9!wl!T_c$ot92ke^$U|?gK#(>~7ZfL&4y@}h)L#Rs2(kyjlF!ucZ!dsM}_u*=%
zO0!K=SW-&ArpAf>m%1Zo&(*cpX5@*AS(e=M+gI)_t0xL!LBEbu(uq+9R5gq2DLfq_
zut`+MAW~fP41NHL1*{bTA1_5!)ev=ljBt^hAG>>dZ4O6z*4Dschs=M(#MSmFgj6&%
z^RYdGTwT4xw2zc%z}Qoalp)Vex9C%gfd|+sQBY7oLPAo?CCPBYA|SA9x}Tn1aR>XO
z4n)i$-7TFW$5j!N4|~ozM?YGgMJZ?Apd?>@Df{O_;wmsJI5;x$o7ed~5DrsEfC1RC
zbq07bo?IqhD!09nhldBbX#}O*O0%_zMOB@Z{Z(}R0KJVDvy=oWQJ}kK$!Rg}p&}3)
z4P$o_a58G97x8V0UoR6YCJ#K+s{>%y8Y8{y4PDM*@<hgI>JIN9BjJ?o#Tw)M>mcAz
zS!XoH<N3rk2-H@9WaAX5(`*71me<!;Oh&C=s;Yyb6T`zKpVD>Vv$}ECqdQSzp}KaT
zb+E#Hql3F5hYtTH<_xFH9CDfMB^akZ?pY5EBD0^;MSluh(r(iDN0}su1Py(8eGN$G
z;1Ch5?!K%8(W1=4Nhzt-7*=3?y*7Hr+~NAf(1q*wj^;BRH(PmPCPZK`=Kd3baGr87
zarHM)n!-ManG5#w0^x++l&z~byF)rbLSz&`HcX@@?nOMLs@P+N=ndjycy>bK#y&iB
zdYf=lKF`5sK3K5z8Xg&e`trpVBxq!0bVGYkB$p-oHc%GD!niR4^)YaM{`xhZ)mW5c
zoO5<z10UnAhI+VAyUR;|AnMjjy#uMybXpYBuizKEXi0N(a~wpX#r~9oE|b}5rn_S<
zoGv#8P=FHro(murk<{Nw7KZMv#^y*UVlOB7g+)YwvXY|LJJtEnFrYPOb@b<z41rF$
z2|EH`)xtsgg^55*PcH(lufGGRUh;fON{(Vl#ekA$HpOQ_W9Y9&!%lBQ1i}w%XU1)E
z-svqK!;4yRbVi~6XQ@%&9h1lA$uPJlBE1P^F4z=f=f4?zFPa%L2$S*N8VTvwn-|ob
z?T?3!(rB_$&ZUG_>aMVIy^H>)4#BOoM@b7Pw73JdX<=8e8CE?1+ITzM@Ab05er*c=
zY4M$M#8!@Q^KiLiB84y99369iKsZ=|3N6CCKY7N&hH}RXNGKeFkS}+Yu2(R%3-4bk
zgSYGf<D-R|izuA4P=)Wtmh6ucx~XoK=#Yt$x0jcywFVM9$ufD@pK)3gdX`4h^Y|2%
zl;q<pic~miTzUdvL{Au$QzwGi^J;>#JohbYx*90LM0IPc^J5aR=3_mh+gUVaDacnF
z-pd>M1?8{3@5Uh*g6^Yt>e|knHZTizF}QQ>Fuje5cc~IsLjle6x4rq2UV5HgXmT<d
zYKLa@z@G_{_iI*d@2MH(4fxYl1v}4Pw`rNK`>6-1o<{qRqxPPJs{=;4RJ7icXoUQ3
z0d4g3^cfi$p%&~S1wzL^4uQDKQi*O3+qz|?3fJ@kLcaG^``HjZOyVb^MaN;iE1uh>
z{2wQthWeGoYHM4bck<ZQRvuOgI>LW9)p^CxlRTDrKTFgKAW$b53Gw38@5!gO1!}#P
zP@7iHXQ!)07MT61=C@M#7WvDCnywY1c=S)&i3TdMNkK++_YYZ8(S%nv_1FD<eSb8;
z1X-J*%XDFhV-DGn8cFZkyfGja^(2LB2ffiEMAbUnZkHR(x8x7|F0Zd#2NqsAR7Zl5
zs;jXm!Cu$FOhk<VE1bTWnXlGvkgcCqS*IQES99(!;)sL6C*c`94q<rb6Le{PqtQ9t
z6&=#`$zoixE_0)2j*kz?rWwU-vZ|SXI}i|Bxzsy8JUsl-y!e@&IA}~M(}Gera(4#6
zdlkG34hhZo3Zr3{$(u*fik}e@`hLTP4&&#1K}nO#+_nYveAgI!apE3fJ(=Sln44)a
zfW`(Jd%*2O$G<3id)gWElj^G}GoTI}@>-G%M}he2PfW5ugr50|F!Ph~F1%?yZ>tvf
zzFV$u?E#F&A-PXqUd-MY3H~0{1nt7f6d_Aq#exHa^|OBr6k#+5K$_x<ZMI7f5bQ5<
zO#RZ5``vH%!9`2!7K>wJm7J&d7kjFw;KFSxiOU#__AzWUGJ1sER`B{va9vR@lS643
z6CvMc!GRfgH_g4<6Gln98|BI_iK<>6dW+`_^+cogQ*XI%U9Wn`8s9Wyma->Z_*z*K
z?#bD%{$Y9|$avv4E7(0=@6l^C+E8E1WeKo5nyj@t0@aS!U{HIix%n4B#$Hlfy!qEL
zPCOqdLvKKm#`HxE=-x&g{~F+JF=RCuV*sR$42$pOB~?pjqZLWdhy)uuEgL<?o!kVO
zo`NbheyYm4j-0y{#b&RVO1GTu@6bnY-%y`Uloe;E!WmV=uv%em{zxhd2G7Bp6_B2*
zb~@plQ_1#<<^*`zi#3T5*7#DpByNW$;<e7%tdCt!xq77Rb;gDViUpN*fP!rx&S5{?
zi%QxD=0~!cQMjL`mR}Zl?&)fpE}Kq{jv6EsS2dbSbMr56#yaRrU->Gu*TdwTp5Y67
z$rlON(pZBna{PQ>u<{J^3WVupj0h(ixUfLZyE*ZsC!|M4U164Qxtn2G`Vymrir<)_
z9QejI-S~sXp%FayE(qW6ES*eE9d&iTWJFcmgc04YjCT*SpCAR>_cv=TFek8iy|`4l
zSn;E37&(})<4K*E^!qS?9UiAK+vWG(9B4yB_{GLf0bx0g2RBJ;>!LPgko%7J9S-~%
ziJ!cdR+Kp6W(pf(D;$`&1}h>yk1!F{)#atQ6dVC><K&<0+jz}`@mqKp*TAG0&!>M9
zva+H(eXsFxJn!3{00sgK@koRMea^{rYCY=qp=+)5*X9QY)~<E6yd%4o--pAll@Imx
zG=Bc<VUYwP2Dhv2eziKIhOAMwW}7W<#uJhLK72%M-nK8xsyNwgvg#hc^GaR4A{$)W
ze#-0>En#UT>Fn<B=iW2Z(7<lsvltAPsZ?c`gA1~HO#_+fephvASs6Y)J|IN@`QuVk
zP|#+vS{Go?TGzWiJ>6C|RG*Az@Z#g);qarOp*h@8G>SSK8XD$}QS&+rtTxH9?i?Z)
zAXKT*`0*|-==FS72>&D-0Gon6wiRNqqo_tqOst}!a>pBTJOBtG)!JB%MjsBa9f3!A
z|C#lNh>*AVO(Hy-amwRP;Pax0%I|kMjqGEsJg0)R2|1@;h3<i~yTuENSaG_q6sP(5
z`QlPiJ61xb<Wvq|lMx%e0|QObFuPn(2z7=R>m6;*`W;UI*o=gLKu=5iyTQ&rFKf}y
zpDZ=d@b&x#X#)zT>)@ml)zLVsBO;>OPU_|Omej^_N`cuG=p<p??P1F@eM<wYsu+Wb
z9T*x8Uc@GmV)1#a$ptU9efO0Vh^H9QUsT)$Jd6ZfXJg&vj_op+#^t~BXRx{AHqg#_
zc#&8H71hRDEoHcx8XFH}<~B7owY0cuYG%~dvXo_l-xUIF9*-{JvHgoPu-eDkU`9zV
zO~LWCc51HiM2b^p{Y?5<q{|TV>wIN%K)}IQ8ZoT)hm}?rCS~}g&cFOh&yvI;3Fgp7
znI}GN=zbMb0iFUo2-JQZnn`zWt_;IZQ)qXsfyKopyw*|1)WY8Z#^i7AskJrFHgC@o
z8Pp%Q&F^UTL(zS8x$<p4yT;^_wy2>T_44N66eupAfFVgUJWG3zV&!<`pKStvO+&=;
zghoWwh@qvXOIdv|*pk^k#)1aaMxVFPXT0g{-?n!r$RbbUnhW^p^DffpY+Vb(C1CUy
z_ir=jSAR&8Tw6AH<~tV0Aso55xK3AF;$MFN3ePl@_712Fd~4Mgd3E|xxx;%%K~^8_
z#e5J~L9wJ-dR75p3%lYc^)v;yq#}Ib2V_O2>PD0&#=OnoyQK1Q$Fx87nPqJ@U=2#~
z;1CcH&79=9Z1l3<zUBI_C*Vsj|8jm$s95F~WCELn-^6^fa_hZ1d$e~vA6P2?Zq-yX
z6!hTodu(bBnnPv8isvRKcZl^Ax5W@a;Z#xC&XyxdLT}qUVykZ8GIO7i%S<1Ik|R0R
zZCoQV)G#ERJ9-u3Oyr3$Gw2?l{cP%1;b+1%rYSk3vZ}R=rPnt3#n<@JHMfuj+$JWa
zsw}g6vAiBHxce~2_;yBbH%U>u9*{?hj9KaX`an{YQU<hX*1NnAiEnmeQfx@-i>-hy
zdo)>#_pR^=_dE?PCY5}Mg2UhWxqWT--Zq<n!IG2ch=VBSf6a>It82O~6gY2M)VE0k
z1*DDfXK?-ayAPLOKd5|-!0>U^fubHbLVJ1gNu*=V>J<y+Rhi4*S?Bs+t4t$r@@HRy
z*^5!(4Ip{9LHCnUZ2z{NP0HDJc8jJ^KbbHoGL2*1=Ofg<HGLiTZQNGftl5qj477$^
zP<fc0M?8<jw&L1Ew=oY1L9F|C(RSu38c>(RWjIexN+5%f8-&gO#zlaYn7ZcNIk1F1
zU1}dSTIZDe^$<oYrYsv9-zVN_QPu0yw;!`iDICtSti-)>H;#}J_V+<omzyWD_Wq}*
zr$EZ|enlSouTVudYl2`41%uV-hG@=diajoCq7s*OEE}U*<ER=>q*8xYEg%tY1X@xt
ze4k{y!B5||TTu0{l-6S9ZPQ+(g^eSYDl7YMZ-JLnM$W=eYMIvZ0OZru+kG3{ZfpV%
zXnN)+uCW>Uxyh}^etavX19OtqgU~pMJj;{J;uB3x5gfz7N%KyDzjRWlUZWXHSf_sT
ztQ4{=(P965P+{2eI^rGP8LaS&`R1OC?u0eDrz-S^J{KMhY=qy2$#BjPmC7x+KSnJ)
z)^0q%gq0M)bVuPzQT4{1um6jxDt@rByGQgcDJ7FRn9z3*e;=U5=5wh&P{2v{D!hg8
z1);(#_o9ViDqszw#&_&^*B2qEpP;LB#i6SY2{zf!pUtSf)_v{SH-Zrh@UC#{?jVTr
z@i*d>BQ#-{Ntiojc&Xc`zNzt^_<Od+w2<|!-y&r@8B09+U4Gko`8EAfd!QfYzS!Lk
zel3hDqTHP)&QY&ii?cjt`{dkbP7WCvelj8ol2}_Jr0J#C@5{Keh)c+h5iVtx3W#80
zse8=))*v!{@EE@&^Ws_yC(YtAw9g}4i)66xJ<i3-%Icy9uKC}{Th9ZbQ};tKkfGU0
zFipajzob*q(ZMsqf~8Fr=%x~(A0Kd{J!7=8)H=`2@Gh{`C!>l{Oq$zo6ZS=9)<Dy`
z4p5QI4Urg3-`Ko^$75CziVLD*A6;?My111_VBHlR85tVZFdpS)PtNQxt2{9+M3dz0
z;YW(<tC<~zaPjbLliL3723(4c<}n#G|L`DK5OC3))z;Qt&I4T>n1-kSAV|H)YYB;@
zfR_P_>Ju8tH{CR%jK%=;=48=uY0AI-{n%Jo**x9XA9=Eg$>GF)0z~OqPsIQOq0Suq
z<i%domqdT^FJXjGVUf(xX`~cQ*Mq7VM@oHGoJbY8t(TjdUU4v-6U8T$-+Doz;z!EK
zHly5kHYv`>^vTKAAjYNRZ^AM5ceIjHHAcB%VX${C8^R=0fY5$=y4HU!O`8{<!*baH
z;Ka_KKW4|Q-JIZlhs~wLEL*D#yHhx(#rN3DjLo3!;5cbg6B8<BB&>}iuwpX(`;Jpl
zT%pzR1klRqywTAUf$<v&j|+Jn2aN5r3GU0wOGIR3SsC{Ua&B&Jv;Vj5GzA(~86!N<
zU!K+<1hg9$-VCDbq?9^vrP%&t4@&zu3Q_=+)rYZ?@lEQ_j<spl{cm7Sg@T>f@$M5>
z**Mowhzn0#wAp<$<)7q_u7d?fpFVv;sDgbpXnX>vAvmLK1N7s7x0mWXuG-nRNM#m8
z)>ID5w63nMgoJuI665dN9la-(BxMn&Mrvjncpdq#E9K$*`Iy*oY?@YV?qs+{!yI$9
z_xoeGK%{{hx_fkFs;Bo!ju$Kq{6!^+MfaZD`;D*6V+n8w@#S3<Mh5Ty`cCU^R9Y#T
zb2)o;$NvUq&Mc+2qOUF2f~}B`Jo=K5;Gc7hAva*XAk7&lJkOM{YBgY(%2=OPB8{df
ziag39YPf&At<H~=A%9KHGI=nJ`(A+O#5iB2eQ|ysrOdLdD{5$DB%3Ah3ecX$pnB0i
zoR{<7rsAw3HVp6`<>k+f!R5Po&&>}QGy=2Yp6He@UZ|Df0azVCX9fn0_l6TMEk0*s
z|Jl&t{<?~9v$HPy`ZSLY^m2G`qpme)cgz3WpwvOduS}+oE$AnM%|$)aU@9E^Jp=4D
zbd64Q?h3#K1=Mh-0w#gIy}kZ<JyVx`nf&e?Xz&sh&&8$LiSQl^XN~SswN~>}28f@p
zj2lOJf2s~HM}~&_LScc8fxRI>kW3vQsSuO@e7ehj5lA_EC*$>9gL9@BlXDX^6rC|~
z$2G#|)v<;Je!Lv1V`GWa{d=+ccXx7|I7exeqH{R)f#_)Aai21r=!zU#feODnGKa(U
zi*{0s+3Z&!kK`f$_2PqHG84s6Sg=sO(0Wr!JJRAHLgC<6HetCEnp+>%?CeM?DxIqR
zqz<aO5A5_yDC7fxs?T6N4vw`56k*rUQ5{@K7H)aMri0`t^KEM>lwdwaV1Eb~QVr0b
zWquj_jRfjUb(Rtt<ztj#1R;4G(gpHzKs67{2jnA6;z~IQB#vY6Wa?`n1CgNs-jW6m
z_6~C%N$2v&wCgnJIH>!dgTVluL9tFkL695I=`PBd@xEJV^r8`ISS22lK<4j}E!T^6
zeoD%s!9npskN<3d3fRHHHKzYLbFjRs$VzQ8RU;xEyPVV??P(w5n+WX9eIfARyCl-x
zF+mkEYxWa-`ZH$jC^pi@KhlUtoUFH`uyl6wCgIW3TR50R;hFsVjbn9zwA0H{2+|(~
z9o<kzC+4}#4j!8Yf`#xu_o@BG?>w5O8RdotA+xoW0`p)~D_#~Z_Q^v=uIg&5k^b~s
z3bVojcWp>;g`QBA3?^Q&YU&;9sOobzt@Wn>SEd>2bLY$*z3k_mVAd44wh3ia729qL
zF@d(Dw>@1!FUz%YTZ0!IVD^qsVZWmKJbep`@|yEmP`l3^p9&gV9%*-n--_|f%kXem
zWsKtk)BM7eig=GFR3w&_^#WW?9{GAFlR;~a{iWN#$+TN&1)P|)c9kJh@&~mtG67Kd
zD_aj-9S`4wgW0z}nOo{s2Z9>M<jyR0)}RKv+`__rfP20);|Zvb^72UeS^+58r^~C%
zW$+g&e7zfSug1{G$ckDW;_KbPU@NhDSGiU}p=F;P?YlMebuU?RsdPtC%ybHhBxpC7
zDgqYLDJe=`$u%@8$&K}|)ZYl~UKSH1pqm%e@Pf`;A(u)@7w}Arj<!b_S1MC(5l*gF
z(bT9UOtqp#r=Q^X`o{yF@u}b!;9UunS!ro;v9LZgG_vtE%gf7|a?R{Ax-*JL%ggF6
zZa_6-ld-#oEfDn8DQj;e`T60Yk53MFY@lVh{|V+hzrL0T!wJXGJ#Hxp)V7Hp-`svY
z1!1&T&$_Y3(+eU_^M0GN@KJ7R2YLD1!oY2}nA2k@@H$6DRTlqQF0`|^r<6(S%M5F8
ze*uU@JSKNPOJ<{6&`l>=^vv%LmqWdm4}U4BC8eRgR@r?XrTzqjQv`tB?W(T1`N%a{
ztoZr)IiRoVbq*EQ%d?+ETY$=5(ds=xSPWGVhb9p(a|ugCWTSgoK@u4TeVPe#X?T{%
zjyj%}E5|CQe5?14nrB@kU6`GPhlU17bSINbQWWtDS^T>{cg3XNnxV&sT<($VcS0l(
z+HHY?4`oh?^&ipa4%@7XaHu>D@|6VyBsJ52f=zHel2aAKOLz{ni`L}sU<m=2Qu6l~
zHL@1(Cm)bt8aP{UM1(ZlGrjs0zt_mx{%)hiAW(@1x{c3MQ&Vtfvi8(=s&vqRGf9N(
zmwQM?6lhB#tStVZjR_?oeDszo0hMUb_Q{gs;F<nV6g^Y5`csc<cnN=ovfC!JO!~`n
zsx826WMN~YV_*=?cE&WB)9|qZz{F3VKT}GlT#9XIxHM#HWo#!>8_svdqSpXJs+8H9
zb^HMyFg#M9b>ZQ$fY<$zPuwVa`OFd9ZC|>%d<jdE*JoCkC>Xlo%Vj2`Reg~xZ?Hdm
zqAljszr57SjbGPdK~;CDM?mm<(B^j@COTuwiuaQidu<!Cn7V|71dwF^Mb9Oko{ns9
z0y2EyF3D(m<P~j-OG<XSFBlj%l$X<W9L*(N0~W75Y(QDScp|f5yAKe;u+~w~Hvukw
z^1{OT=%AbE>tN4&&PP^9(vBlBPSCbV{G<N))2XHrt*ix&_hm~GHa7OyuAGxoV__jh
z8&WArX=&*g-<=-mMSfP=k3wgaK<>C$p^=T2lUDy39Hf7bU2aE%+)o@*YyKK}Nc?Ym
zkv46fmE;rLCXkb<@>xbcyf3rRgI0CPB8g;*_|UoP%aU82DcGo5sji0Lq2-t5v6{2F
z*;kEaKL|54-Brb8=7!c`W)mxKWag+zzj9IXM*x1w#l>D@9}hY3wg95R2J$afw8Mw{
zLzX%r)q#M~RFfkmgWhzjy~Zo+lAJJTNP|G`yo0o2ln|+`aV)Ex&SKq*)vsF*7&Zk2
z1PoZPH`qBitOPt5cd&SFPcRgm3+Ua=U}a@>Xp+u*z(uUvZ&kFvuh}OshZ=g5sz0Rr
z8-ZEEBuXm%=Gz#3q~IiZpCO0>vN~0(_R{g=;4Ev?=(jB^4O;fT<7CXwNQM3t5@FDH
z+5^{O+WCZ08|~tLets&IzqWo-#1C#eDoUr9c0!GX%`{h)DW+zaG@JzVdo|@udu%-_
zZ9QAmS0J+L$pw<Bv=CH(|Blhw(zLWU{5S9Xvk3LPXP;stz7QP#0srqCasK;@hrQ?I
zJ+-GRWlcGJTY{bvg`&wnrxMxSPS$J(C+xbvQ@#Eyv4*SYZM$vR4TGuo>>|`~tF2Ri
zVLxW^I1SBJL9g@9J1r*Pf7;n(-*V!A%bsv>kRlr)6o_mZSe7vcnE%~0vCT(5w#If`
z>mkZ~2SCK(9he+F!P^;y%d8=byzQj0*u`$U2<tRf5A7cJ``vE6ncNiZ6DqHe=}E6#
zCis!uYWo#pj_pQO=9@C}V@c(9`g4)2486pLU2vyy8hlq&4Kt3;rH%#JKi`b~@`%05
z`~@%c7>=q@q<!(8=KJF1>!+(3Txl7bvFh4zI%$oGwq!bC5)03lGq&0^{;4%gkO03E
z-?|ZW?mLdwS73lGX0&ZqJQdZx^~y{D;~w>$@ptz$YBdV_LaP{BNW7icqn|tk&31aI
zl4h^1sZ10t7cprrD~h(E;7C0eTbj5*0<mX&Lp&E+04kcQare4pbz@zELWd`b{Z_P4
zOPTZ4P<B7LvWz-==BVk85lvokA4OR-hFUd{!t!=uSp?y0gLRvdAVB-fBfYvi)A024
zbO>(+tQ&?{tSM<If?)ruv-=r;@)s5?W%=Hyre)jWmHE2LN!zwFbD!4jB=o0-Hk_MJ
z{}qXfH?@}Ll!Jq#va<5M=55`!8@KSspdT=IT6II~h4&Np_qWhF9p%PLsWI)bf6{{$
z24ArD?w+`3go>2x`bHUfABzsA;XJQ&{;5Zi<<HLlESul9Qd;qu&bk9X-f0Jkg_6p@
zl(MDg#DV)5IXFQg;3UMwO&{g}fJ_a1p%w3aK9}v^!b2R?8GmitI~>#U1mh;!z~KC(
zAlFVTgMt#pj1Ewo0Rgv4dP6p{)yZ#lG#e7dKcgmy1O#)$y93R=qO_bg(Lh%{X2M8D
zNm+LRh-p`+r%?%qTMUL!K_F0h9Pyl{gdv*WDmS;1pyhLi#MgnqGyG)sE1oCIRL?sW
zmY!kyt1?UtvWrF;5xtpq6)x3aYj;_SDQxlaR|M$k;Ku@Wd5wRf((4HY7<lZV04A7+
zo&gyB+0C*F^8M$?&MC}W57Wku`WCG!8+bW&v>48KHUmMFZvDFKFv`8NxhIPu6el*L
zvK4FJ%lS3EhUJaR`SG}&5O9T?>pAz&3)zS$Gwc&yeZW6!wYz|G0th6K(n7_Gzsz`9
z*M^MeV?fZ3Pc*95!P@~Wa$u<GIh@MtXlAX&FuZRU0K8LUh5*dcs2=spmoH~pSuxij
zE!2UPm6h4qdUWcitUHtaLqGwAh5k$JSx8WDQ|`+SP;E?2oqE4KH#s`$CWU>$mjyUE
z_^*svcK}Ji*T?}LD8;MWDw~*?oXA=bAhL43CuvD<!hmN}`M5z&N=iye*|xC(m9eLn
z7O!G+U^b~;fanb*s+1%oB!nQZKKjHDefAmww;e?77of@T*|q>JN%bq{j{!iXLj;7s
zK+Kp%Mo(V>V2sd45Ppa)0M{Mp$?*Vf0Y2d95JxUWMEjp#A(O<CO=;FVGCfDTW_x?%
z*b9vtL(a;6tUF}0nfT<oDRg5_iANL+>|pGW>?47<F37%)Z~@}q|2b9zfU-ktqEZsh
z@lkcWSbbLN33+Fu#?oKK3~0~&k4xu3F#Z5#%*&O18$sV$XOkr}@(IQ?sC<tK12O#C
z{qpLMe?VlO1X{@!?5Cs8oLzJH^b?(h{r^{ZOfVA)(sTvb*uZ<K>F=zcVv4LL@Ne0n
z+czxH6x18hfYf?TA(#RMx8rrd%>da7xP4+NN)s5-hL$z!n4j_guUI!n&5j7T)j-2I
zx&`qEQs?~spW!TV=l<INJD8<~b3_uzNJF+&@C;+f5VKe-xahau@t8>W!2{F;Y<cn7
z`T2cQ7yQEJ;Lnw5jVL1(FCf56m#_W*j#Uwz<}paudPPM3`;<9A0vxly^h?79gE_c>
ziU4>V`!7+<{uBGxmGGcI{CAm%jMA56Jr3X420_ehz^-vcqWw+h(<3~P-F%!E@b&S$
z03Q>eJz$gMIu)3~EY|CMlOKwKYzcMFxUWEt1$@RnzLx9b!2h$QjTkEsdCttt5RZUm
zm-%)+$H3sno}OAL=VZjhTsE<P2Q&rez##<1gk)ta0nfen^30DT)*AzoP)0^Z^Ls#8
zYnci0aa2J{-{9b)!or|72|<RBV|_Hp!4!ZCT9@Stf0q2WeY30|4fs#9KJqHy^Pd*|
zcS&|6d*0kU!p9Mc|D%}r@9BKxaQ~09{O{ub?YsrcViOYR7#S}BLuNrDMHLkw0sz_?
z)YH?$U`XU29gTGSag795KL4LPv<wdk5m!^gZbR}FJX{5FgFxDnlCU4`+l($LEj|93
zu?~<XguGe)+x+uS|Jx4UC0X(C@%8ofiMznE;aGr~-+(DRoC~!b0J<Ci!Phwz$!3)1
z<;}EMZv!h<4D6l>XIX(JZSH~lFbo#T{N~w+J~sQ3R$YCmUGaaO^`D@|MvjpTKre`g
zx7y-;k;1GG=$+=G*zy`rYtA@80o=NFgDKb)Qbu;S_khgU-NmK8DaHD1na$ps_8GE*
z6VHyj6?@$P?12}U6N>~$X-{6X>J6;4(}XI-5u1yiu8uq)O6;}XK|(7t=o^)`7x&?z
zp|;fffPYPsI%{S+Xv%EtlE)SXxE4N9{oZE3Nszx>=6XcGk90`i&^;qPnK+KX9k(nS
z=Kfk7@`v)gI80sEpr8AO#C(YUWPdQz|E$HWtu0uk!NS-P2YN^&1flRckMQJCmbgC4
zYR@U6esjLb45VAK5w{)OWwp9HJO?}krz&QAcid?5*`Yp<B0(NTCw})F$6NIvzScBZ
zM39Sy`b|gq-p(Z&nUkqS-J)SLX^QIS(YZZk@B0|`xJ>@i;m7D^_Y#v&glXQSbJGj{
zq+iXY9h6=#u*++?lw@gcv<2Rd&-u9Kl`F8exhTu(kgS}@?0jCDiPMVO^>;>`rb`Pl
zEVyHCt{JJQYC|WZKNbmiY%&yBRfRK|cD7C;d7MrcYo7x^G+4I3#m=eJZs;iN-u(n-
zV88XyL!n4~bA%VQ)cO=krzJ-J?L?-f!ErjDh9q=Qhe;InrnLwI|CGV!X(veA&BUsT
z-usq%RM~VtBSM1}(6IyJAZLpWE*gFi#a}dp!U>&U(--#E<<7eq-*@^SwP8~0%prXT
zRV4G^rs2d#p+S%MxMm)m`CMBHA(r=3D)yP9#$9i<%;tI+YCE=t+RW-MBIN88`!Avu
zUZVf%;aVr0qnk<3P;U+|Bk_;z3CG(isl_l3m86U(?XqCG63+F*i*#x5l4~?%C?pnG
zSyq))gs_C3c5cP)JbWlo!F*)Pu*tDkf{;KVJZ^~CViivzJcsphnzS_~%sYr6eI{x7
zfks-wGw6V6ZeAs45Ge_CR)m{`JVwQFM!B1hpp4-cKjHF2Z|bg6D8iw=l%l^mk>z&r
z%BiTY+j&XjHSmok9Pn<p1|sDcxpab}O^`xaSjmOCQHRzg+n#Hx-846^?i1cb(j-J=
z)Eha|RkWtX#)59}t!MntstvcyQG_h=Lda`(+r4&q6C_=LR|8<l{&?4<3=Kz$iB;;J
zscB4+A+M?tqxDvx?5;N{#sW_4jLZHjqz47vod$tX-}<KjsVz9prjass)#YH&V>aU$
z5)wIBJq*B=VrOUXVvBTeGH>wSYP$iM1{F`gVKQPgkr~KbnU^q|+cd?*{7`z6iqmlu
z7KUg99c&^CT}QqQCp7cI6-Kq%f(YK19NZ#r*6Hopqp0b49*?(6_jf3-XsR!!OMmt&
zLG4%tzV!^DD`G36y11&R9Nl6D5j_ls-HZgKfzYnGn)^+DPbET$?<E5UnYS6Vc>&z)
z{``qS$=xU^>f@<MmzkdhHGE-3lS|u>uvzkZ>t)yteCz-0#4R)DCR~>b)(B`6C^mv-
z(}NQFwthyh53Z`Fw#2#~p6dt=uY{?u-njKe@GkRzhb?OuMk-k@^=!9WG+NL-oymq7
z1B?k21oNb>k>l^8MPp#-yir7+G2IPg&svJzmmB^qkG#avcV6Sa_Vu$Hn@C*F%^ARq
zXef?nv~Bm8zYMu=v4Ep2oat}k&T39HQNi9`4N>1)@stK7*JMh+C`(<45_`e_3UIrY
zI<epV+<pKI8_SI)M$PFe#Ih9VGlyha-0HU~vj+*GY+B6l9@-IBSs7}f7Yys3=xbh)
zTp<@>|58;+x`V|}b2kc+p%;T)FN5O>e-0Iwlf=19e)4?JphG1Od7V7-<Z%gTG>&^a
z9KJ8Wpt+3iQk;P%^Wv#My?u0Wgd<3_{&k&K^I3cZyUnrGHVf(rs2fG@uym><qe!S~
zKXB<gLp&}r5dwVyPmvL{lg2B5L2M9yQd@}nfY;#VCBpYc`I7GlNG$M3wN85!)<#TT
z*e+>rCt{ogth8?i`%_nWe<zO|W@9m}-!`-9-5*MRPq1)g8J@|)zWuxD5m@GM8_Oy^
zUWuwL^)eMcE^zkg=JIKaOz~}c<kyvz*~<TTGlhs?Uq*A|_YrP@F)^EVp7jk;ULNx*
z+QBPTH?f7lh8I{Y$ePya<_pdwfDYz^@$>4e{)e;vyw&7DTwsStb26U!{(pv5!C+y#
z?Du=LeG@szNI^3D2I-3g?fU-=+7i&W1%x6t$PFwaw3pv^J#0zTetOOh1{6umIY2Uk
zGf%}Jyif=YOJsO)2~4`}hBoAlz1F#1VkrM(9`tm(n;}2(l9G$YqI-VvWUqLF!$D^Z
zUG(lN-pw!a)gHzehS|P_XYsZUP-ZiY_$hqK>!yib@$@;hYoj=pCe=e>7<7|h^m@AA
zFqyy$FyzCY5}mi_qlw)~4{k9sxrSNt$+o)j2aF$o2TS>V4Dc^XUZcW=fK|^j3lqYv
z(3j6u$p5|)B~KsrkbTpXDXdPAeQc@&*LED~%oI+kkoKL2x9=KWwzEJxG~?c?ZDsX@
z|8@0M^VhFQ`iwNjY=;NO9$;Fm6UTG*T062#k_w)V^~Nvu^VQ8iJj0(Bpug`Wt}U67
zH&ANdRxo&l=+r?ELf2wCo*r$XVNSvEQ>+gd-iYB&Mj(O_5@xlmXflX+T{Hs)5Juv7
zUs?ZZFzw=SZk6~Hz5Q<m)9RjXtgNvh#VMvTghf2v{?eY64ecuI6AkO%i1d53Hw-^%
zwd#!GXg%m@B@u<4sY%wNGutDckcXy8sbnE~MCmp2pg28n|1y3f$z8eT;pwCW?UDFd
z=zXor=yh74bhg#4f^$veN8Zim5YWJusqbL3_ceM6T2?oEk5yj{S(22>FKtJ&!#b3X
z5f`qAeQik}+^BvRB8vNB&`>LDKoUQdi_6T;e(SJ8&Hv)Deq(rOb@j|i5=N4Wop*`j
zFOQ^)aQ2D)?PpiOs?|5X*=aTc5|k{+$zMx9aRxd^7`bU#583td{8~u~_bM=UWL|ke
z=xliiHBC(*muI+SlByEAU_3~lyWWG}g?gTj@r15NNM{ER|H@+89m0@^^~T&qhYN0!
zf3|n*g=<w_cJTjD;<G+XSt?dPgg>|5#MfgfkyF*$bJd0Q6-~fQeu=qyb6j-wDxp~T
z8s<FvGu;T=d)`Q}>kC8)I-PgeV1Tt8baEafwKBz$H^YVqaY5tAuTNtpQT*<A4`j>s
zr27Lo2iZ!nU~epA6T-^rXSXstuc1EXn4pAzXv90gFB9oR!zRk;XTr`U4sMGDH*(j3
zmkM#TW*3_GU8r%X3JSpuJXhkgNu4(W*x77~RU{MOx8ibgVmPD1%1z;6S@@2mFB0-c
zwEaUccq(>hJx9(7=MFsc5A&na29ps^l1*B}zrr5VUxB$ob&X)a>KPvlpj6e7bnew^
zhXXuS#Jstm&u3et3Tw~e9@>7Zy`7c-Q!bgM1RT7(wWsfSnDiS|oQFksCfMT5v8u0)
z*Fm%8N%ZyiBZ;EfQro$@x&qM0p<lW8CHaNtbj9T%ZwtB~zD+9oWCR5@1A|eE+%k5#
zEv!XZ3g<)&kRJyDccwCQ)F)O7GU7BF6De(qi_>a#(S;_h;Ww67$zIx(k)d;9-59-+
z_BB7$e>3UvUcm9i6P5cJH~q!Z-6LlJvL!coz7&P(XD>R$G?bNB8L#^KnOfr>bPL)@
z=CD=Pcb2f+a^uvtRaMT)<mKi!Zaog(Q!m6^aqq8$SXj{9K=xuHdLX3wtuV(}X?WG}
znT=iI{z~(ncVdd3hM={pbi{((7CrfgnY>R}$+KA=H(o5BDl_aU%_l)hAVQX~7`v4C
z>0ybwGM<_HERYN&fLLz#=bmJIO*vhgM1>v6h3*wtvsra6gWKT?CPfhEnh#V@;RCM&
zbl&pv@*fw>Kyf#n@8>R;L@DkpYb(h^@$ts>OYgl<&&6YFmGIh<F(WBy`KtE5OHF2u
z-24Kobn<yCUdf$;!QCN}?u9xO*rxTjqWw6kHPI5JXgLp&q6KD?Ks-^Hp_dMEj|gzT
z4SV!aSnX$o3+1=W9F6NK=NO^_Z)abm9?$hom%IFYqXg){V3jCk|44&w1Qf()?H}N;
z>%nqIo{!?i(aTh~eMK;-b8KI<rY9~2wIk6vq&@v2{;=laZMi>Lgg!mNiYX#gkoXzB
z0EDFno$95G-me!civXLbRjJl9(j5_XNLDtlEht@RjhDlOFWhq%J$S4S+k=Olc)Rck
zE%~Tn3I^q3=~<)})aeU65Z;05F?<=_f}m}dN;!>`gnK<dzBc)uVLv8yN63aS-)ef#
zZZ@Vs>}VWk+MOpb=2;=K>rMOfMb}52Slw7s8Z6PZfa?;+pM1iKIWAOdCJnCTOn84`
zpTh@hi?lAiZgH8z!)E<ma>1denP?EpLb9>X`~drRcgjf()HoHR;^Sy7B;o>Q(uCtR
zh}`NYG|gFJ_8(Gan-iA>Us`~&M8HQIMXW6GwN6yhaJ{fSmAu&{7hhU_r}+8m)HR%>
z!8kEDZIh(VdMGy6d_#;6IRY1X(!E{KA!S@OQN&Y55n^5bVxaYw1Yyoqdfa3&pOm*S
zCxrO<52&b+?D{az>r?=Oq~%dT@&t<{e?zW24;h9XM7PkmDuA5UdP=s}_h(`dQ9AY0
z4Q(_#h7$8TqcRG60ChI@dy4JO-<E!x5eK3`>5n(dd(fN{za_MA;&T&{eEL&moY_N}
ze9!(w;%2hU>x0EaPb`i`O{#`t12<upN`CNp@_;)XbU^L__dofyP6`4v<@C7gS?2}5
zIkRxCcU>9LL0we&pCUY_&|CAn3*wT<GSIZCxT5{K=qSh7@0kF{d=H{>bl<sdwz9nZ
zKNl$^tiQs!E-S0WfwdxxMv_FF%_spGX=zfZY>fA0r{-zbevH`ekRe0)c*{#~xi&*v
zS!W-*AK<9Eov4be*-+2P1X|yA$U!YZ8W%gf7fku}s_N!ZpiE?PRc2eItN742&oa@-
ze%!(LBp?X4wQn{gI<;KICy9klg$H}bKp|H$r&XG_`kogsR&94Sv6l}`rVDqXjWNzz
zSINmQg`r$u-Do8dfS{mls`s~Z<^Ove-al|y^NHsYi7870d;G{&o$gN`uW2?Ok0&Av
zIGg`hoQgy^(b}Lr_JJqrm|n0+p;W_a2w=_sUuzKjZwVqIDl8}nGD8crBPt_<uqK<r
z)V;KXZ3k2`CgFYJ4M&pHR8=?6^Yh6Z+@J3(2Qfb4Dj!iMPEKP}$f){yHUmHi2%wBR
z$f3f@H?s-UOliPRfYO}lFHKD~B?ND8??L4MY(t@Io#__@xbXwqZ}@P^3H73VZ29M+
z`*%^cF6_rfoA;Yz3X@DgZ4W5gkpMLT$W$P(F%gv_MunlENISd1%;ZAp#X(%*gZLz*
zU~OqWjeK0dzEO{U(7(=gEB4pnVzs5Ne@H=EJqj25_A*UQVIClk)?$=5?CfB^sBqS#
zntuk%CY@)6USF45Ibh^-WhvbAO<s0=A2SbcbA5{LF*i4F(!;+_ig`V%h;3A71^+ji
z(PxPMmSgm%4Hhgz0zhbeV^YwP_15!10aLZhKsgOKd8lllz$r$0-BKXx=LcNv|J42j
zyj}(iQt%^Zfl1c?XB{!WsEDyzpr>GvRUz)b%S-}*c`WHvtF!U776Ew*ko%_bECZ7H
zi-4!Mpjw0AVF3+8*Z)D;TZPrJb=#sO5Q1Bf-~@Mqy99T44ek)!AwhzBa0~7h+=D~#
z;O_43a0hGscb|RsJ?G)_0P`#6tg2ZxYK-1nZ>{zDttFKo=$aZDnMgW1i=kKyAwfaE
zwLpk#CdvzsMK4?jYBZqhi@Uq~;dVznW2QVMs1(ttmaK3{=ABe7)Z9jI^0E({)v}n(
zzj@VLMr*UT(^JUg<TT^D92JEEruYev_iM8__y+{6UV^jVPu6exbMGwJ8a_@PJ9{<+
zWjIRhxPANGzr;bDc0P0J&syQ+wyLu7U<N;-(@!!IO}?yLBkl%5U};N}6xDBJYkLfe
z%$e!$C}bT0RRU(0C-|Ahc$CQ4y>QR1%1zG~pFLjwJK18(F1}u+8^TCtZz96X;JG#j
zZ4vpgA-2qOMDT-4!>(<o7w?)6HlYYHDyKoK_am3?yZ|f45Z<R90-!FDN^Tco$sKEn
z{Lp6{>Xo@(I~+sAj=^LRtt1{qvR7fLlF5*pdcrHxLbm;P7vc4{(TOp@c5pl2%8c;&
zOD>c>mfXk&JSvl6k&%&ctYD*OJ`P4Vsq_2i*1lnq<2E%(jLVA|YkUNjO1nb@mV}O*
zGG9L;Gp<dtPb-<;;tf|4I%%+D%M=t^*7ra;{k$t`g_|mQ1MyRa)lXL|Yqb^Eg~4$-
zm>bGf8jDr|zA;)!=a<sr=2=B#*f_y8HUeQ&Z*EK#k05ANv_$!3sHujdBI_f4uCS4-
z+Y$BQ&f2`D0qWd*-og^U?8Jfb8{Xzoot=qb6_;SaS#$SkJb!$KA71?<*+q0nG@u?5
z!yDYcwpD)HC`Z_T!=~TeLdT1;g?HK5c*n3G6Z0}zL=2<@^IkB*Kq!<FjcK#Vhm5Rg
zw0`uxkO8!52W(_vV!JiZ*k#e&3ANXYilp25`D&~<zbT?m5AyCONpMR^sB@`E7nsRb
zUs-xPb;1i+zTX~B)2jIV3Ha$eTe@n1!KgG;(C5J=>hZcsPQl`Lx3&wf#2Ni_b^wd0
z#M+4x)_qcx;;4{pwg~x~l=b%S$$Yuf_+t2izf5sNwPeo)>u#kG-&u?7OfX>#q;5En
zr=*kjMZqG*3V5B_*BXY)o_*`X^fLhl0td4N$}4m?hj16!PoRZh3FZf;B+m!f%R19V
z4Gw!*tK@krmHiVq3Z~#;0sA9cfhay{#&Q96%So65t3DVnPbEcG2~C*G!s2XmOH1}J
zN#OV10?~X2W}V-3Lnud#WUk(4W5v`rWfxKPregLr_XZN;;W2X5<+1Xn@qUP{Tm8Og
z;gg!)j4_$qGd4ZWk8z^eoET(}38da0(GJ_gBJf?#99%GT*<<jQGw0H_pG_qc_ghJQ
zXN0GwDHd#+0G$j|$LEx1VR?gX>QMi>g8cL<;KO(%hg|!adw2s;T*M?ZS|`1u(qbSy
zbjYUb2McRf8ch~Cp!3`wOoFrpC+Z`_jN(s;wd-}X6px{_*(;?<?&FZKg-ymdQ}@xW
zp-9rvJ+A$VTy|gXIY!Rd2wF!lafrnBisFN2tdoJ%OzAZ*)la_KBoqhoDBbLcBc>hJ
z`nC=h_$$)X<QBeaRaAho@*P7hIsE<x8lOlRDZm@b16tsw>i83<mD;V(rC;8Jqc`?|
zRQY}G9RTiMAA@DZM~OK~EP!Bn$@E6Y9aU+iy+k$&PM5BE<({Pl&Qj0aAOPEF@=MPh
z5BVGJwS4WQ51*d4@VrRue82HMf6FfzaY)crv-SC@Vwm3*kJszR_+HoB8&*mUo8u%)
z{|@SCyEM;mJBz6^z3Y(}na6Wh%}@4=ho}71!;%%ThOp^h!XD2~2M<|Uq~@1gSZYXM
zmrcNY7lLex{+rluv)KZZj?i<ohZ3EUZAmL8-i)&${L5&Z8IPymqSTC12?nfTZ=@<`
z^7iy?_;dxOVs?H4yJ=nOg*zR6(^Ml_?hdfZ{pOzvQ>~b=JXfs8Ktw@9v-ehS&+P;D
zcrP>gP86VZNUEq{BpB3Ap_S4GF1-lknYrMXmX@y66zkF5oQ!eRD|yWWFW;l0_-1`?
zuWlF$uZ5Eup2nWYy8LTtPjKS=Ot=8#seqq`0CaPSiSKhBaM_9T>$=Y*C9aR%`MY}@
zy<pP`5;M(zk}fHWAI8IqP8EAZ6z5k-GlJ?~@~die;^>0h+{q90DpnQ0Al@)r|8k;r
z`_1xr<58(wxod(We7Wn+uzbp;cjXW~{83HDq%KJ1bamqh|4C6rB6&EY_o@sFHy~3<
z#s79?3i4$YhSi_INC`K_Jl$yT%@(n2CCtFrWRey9Jod|p3f6s|aO%(LX>;>lJKTs3
z>VD%l*w%_$P1X+eqmnO3U`FUUmXLc+Ptv$~a5IsWl~BPbt&Q*biWba`B5u&TUQt_k
zR-IceS?qx&c7{orfA$jt$xHN%oYU!G42Nww6(hZqSk*!iy>W5n9V{m-SZNnF$MYXJ
z9hwyt?Z2%10W=cK=%AXQS8X;1cwjZv?v?C&t{yH$xIHb#bVf!-nsuJEk$pc8sP{%&
z)#z<B*U%1*8D&NfoVPHm7T-o5w})%5w$IQRyrB#_y~bHYbZhdGddQ4@x5^6zN>FS)
zfA(Sm#YoeJL1k1-T)c08dcJ98QBg&O7pJ>4poy;cQA6+b(yIvb+Jh@>>A>^Jak-9%
zspV24l&HtyJh>yNNp^v71>vIDSvmtcBznonw%hc4(57sEW}U@H9`=|N1PawL`Er0#
z&7-IzgU@C)14Jm=I>9;*HZ88FzEX@$$#Uf-59OPz-~OTMI+I8L^|}ZXsn_}{n8RfK
z;g?p|UT!9QIU$6j!qDfB?|pqSMMbllTM(ZOmFsL;G#W3IB_!g2>>boGii=WL$X7uf
zN}6@ycWz&7hX1=@?H?Pe>Rqpa<k%AWXr{@fysFB<&W`b$z{>hMHg98HN=j>pWNc_N
zIhnDhCNwFz$zL-kvzKbB7cic-tHfPM{NC5s=h{7OM`B_j!`dp%1s;>-Q3TAU#EN?(
zQ&Z6Iijkh4o`wcKLsFDHc1#Y$FteaVC>IAuR}a^3Ycp6V;yN6((rRI|2~7~dLOj))
zun5b_4lgegGTrfK<>zld6{KoaGJoC!9%Y9@%wPH6l!k+5d7u;5$C<o%sJCxNW@L5q
zWmD@tDd_Ia*SPt%EvGqUc^oKBCbwXRx>9#*701qK15JN_s`B-HjWhy005E7&f2#D9
zYSne%gAj@(n^!WRw6wIi*s@cI1;_eC-FjQ%AsyDyQ(t?Lom-awk%@&}TgL^X@t%}8
zS4*cO^lc?8ZZE|G51j~=PqeDR=YUe{%i5pz6*&T@>jIUG&~is_O{pVr^P4WtKG5v0
zLdTkXo`T4x_XvTWWUz7qG#&thJzu{9?Ycq5Qu|e@wAD-nh`Ae*J_-s7+Eh-0g{mZO
zB0vRCSC-jb5A&;K&wt|@`@<tyB6W>jm@223hw~9UZai%pC^Or?uKk(JDDJCgT*k;Z
zLf>i0Qp9Suhl7JNU8W<Ho7LQmXY=+YQa!{0@gr~y7DWa&7w|3<Q*3`f$*e?MQ_I@H
z!G3}=$B@J2ZL}H-b6PjbE9d>`Po$dML104<Y&*<U^7O@BT&@8nYvNO`A|mp4r}V(|
zKrw06Z&Dc$>l1;C^>*1~exzs;%_(dA539aB7b6+`ol<WB*zg&AuTg7r^FkmS8%klH
zMb6F7&(`wr@W`~wD=@r$AK((80!enG;NrW!qVh_@n_1giFKDdd>9CG4<cH4<dT{u8
zNQ(>|0_RK7LdSd%>XP4KE3B}PiW<4ZfsU5;<S7jB$DXItD92%i^HX*gf@Z7`j>B=W
zhoDvz8|k_f$|?RK?3oIHio`PqkV9#;db)l2@^OC@(1tzH4i$RUucdu(RT9K(u@Q=f
z%kmwBZ%GHSG>4SQ;j-*MzCn~vYG_Ohk!h*@kK#h#TIP5V{rd7pO6EN7{U9Fc=H~(~
z?OGGR;oXo2hS0jk3^`d7vg#1nJr19z=HOsh%?9&WW41Y5sd!quaQaQfjSDUuo<T6d
zo?N3~VzxRT;_Lw@Jw4cF4~ET*&9w7#GOKSG@zC|Z=yHzm^_E=@uP#h=+C!~jvvpH+
zk3VynFxG)#;w>Al{-^_VMLBtS&aHSzFrfM6HyXb-Ki=A{&CPaZgs&zK1aIF4GU&Bd
zkLa11n6QEVfk-ZfP;cIhmDbnav<GGJbJ-0ydeI>i=HyIxegz<d%dcMiQcgtW^{Kf^
zmtp)LJRT5#&>>jXuMg&GLS=39*?J{%^y~XCBE;?NPBYT8s@%n-e63T-wKSvVCkb4X
zgLMd1?5NHFHAL{+YcRl{!*JPctoSfGbNdG1xb<m55)!6C9}f{>;bb^@C8bXLjA~}}
zb@Ulk2`%$r)wl$rK~8#jOxmycST7498;GYaSBD$(%|{G5dd!@V2zcGAy&v8EIel+C
z0^sr1Dt(^4d!rDhYz%I4<1Kqq;uckgw^-i&%=_i7^KM}-J1Q!wy!;q&<5w7gzG@CZ
zsFQi;%0hk26aRDx`uu7b&6eeTDN`?>+`^~e^5R8YR9I~KZcn3d&^ESZi>@dLNCfcK
z^i55-W9!37a$?)8BN;Ach}4wRr~m)rc_zPz-Ef>iC?3r_2k0ccyuALL1+QAZg0Ngb
z0}|XRHG17HQRgKkFQ-!jFQJnN#8)bw24eU%3fH9$sO)R-Ej(D29T{AgeKW3}SPhI|
zrJ!e^SI=*c!v$_Tmtw0_=pl<%x6WF)hLcI}5&ffdc#P2RS$SHhjgNkLB`4?H-;~{_
zZNoSSCmofuHqT0~QPXEmWI<pkT~CUpzW!p8FgCmBFUn9}Qi9MTIPpyn`ek*(Rw5fb
z#&9Qc+#5y2j$}9C8-MR$5b(30>&lm5YiCTK*hGS?w`n-MefzHFosqHe{@nhcADCNp
zK?CSAyWPLJh~bo4>goV`0cyo#WCk<W#uXxo1H#Py8|;Y;NXq{&;>gL_0BnEPY+iq3
zE(0>Cw@^@K=H}7jq?C3qOEL+q;r5wGuaMBvpRyUs8J+RUz}-@b&UDiwk*H>p#}dnZ
z7#{!UWMK&j*t@a7*55!E?B}uE(4OmCbMqNE7)cGgKds&qtzLUBs%0G=R3${Csva>o
zG<)Nf-WYlDb~^5s4Mbb;XLJwEoprnE;fhXXiy>b~;PQL(EC6>QKN@*q7smH|1%{)I
zaslOQ)O*bmGivji76FevED3b}Sg}Z^qBeKsTkc@sj2`n%FxssNqeRLwU~m9Kr77KO
zPQkTx_*!~`Fq=55eVJ^8{~(W_d9D8oc~q%@&XAP}sxRd0Hg>mm6zuZ2s)QxGojn+%
zrlbVULJ|1fm`86QlG}BxD1wnyodg;$Wx_q_R~rab;Jc3=n61N4vpaajm{SL%WeBX}
z@Pfp)fyHaN!ZV;))_#G)DIb)dBqTxh|IrgBU*z&GsAEsbQ6X?bfUfWxf2WVE#F{t@
zGxIweC~yQ3`;2LquY_J-R?J#5Uin^{-c@vPeAHVk$eMzIP8(ji*|SpSd8*cWzasz>
zB3VnSP9w_3ja|t6IZg;HtjwQC(bUg>OG!!sW&BKJmf|;6rab9(s|VJD7AUS)k9U{c
z&d=Nl#8{}P01O1dGXH_%D@Q<Wz4@J+2y$ehHbD?t11b=FuyCWxF@d<q^am0W!`97D
z^de82oird|V97p|u!Em_rzS{*A}UKzIpzx0s{map=#p+L-p3W#^$XvkZQ^3q3x~k^
z4OoSU-s^-cwRm_?e93LgEaC8(ueC1azV^6@U!k;W;R*Sg)0*~{u(DrtOsXk<@A%SE
zB2|OoDDIt6T%g^YglDYwPbP7+bf}*s=AuvQ)8D=&emnXVNwo$^kJ-X*#E(<cM@8C&
z>NLX|1+^e1$+_wg(wK(Bf(PjnR$RPa6`Q9WvM}EiSDQ+scwA=Z{;lbBb2bI8VRWgE
z3{^LUfpL(ziv2M+cUZBm!aZDnVr!XS|FmFG(s=Z}E85qH9Bf+udrFcFZPc!c6Q{x7
zxu-2OOw->F_)qJV?c>GF3T`KG-}Hqn{%;_FJYnZbv9aFSd4WY}C*1K!H2MmoPK7Yt
zj;O3t|1f|I4!*7GzDBI9sMtZ7*7)z4z`b|Hb@N@rmGw+nAnRL}+e5-h`}ZAlT=6`g
zlf55W)XQh?;tYyK>Gw!z{E5~r&U#41TTD<W|0)ea5+3>>M7|nTV1*RX@(r^g3)Jlj
z{EG%-NjMmnjBYgy7nM+juY`3gU<A*$=Df)nD<k*Ls84;fg_3o}psXpK)Ggt8Ob{nq
z|1-o6Wny@@mCclb<utUurNt~x;z|voG64)!mWv?%UW8BF2;2yhKkH*e@4;w{9op;A
z_|k#0y#7=D@)Jew>YmOo$>c`(y0C8Wj?_aieZ}2}uS7)j_rHsZ+I+50w_R=6+-;ci
zs)UcnA<IzKeanu#Q;!!}+=E|ITR4$J>Dbgx@weNa&Rg8Dz){`rU14{0^P4|~Ji<_8
z2?_-nuF9vw+dE0fO&v5uhTmkfu&2x01ls%_kf7!Q3_|tFj%kS43Z@}Qn?`&Q+LkXz
z7<VrBHz=j6qTU~9B{plTka4yA`CTZrQ~}1L1J%|mNa&XqRmb`BXcLo@3_6Vys+DmG
z2@bY0Er4$ZOxYfHHQr%#-FSo_eQ#cDC2M_4mHk7q+_QOIfn7@#BK`wyW~=G{%5WY6
z#b<DmaSl0dckZYrc)iQ)=IJTkyeRTnAotV?(z(@Yo=9khZq2&}C_{{{gy^8gI(W+m
zqfnk1$D0m{Umf|`Kwh7#jwji?&Rm)7fNzx>FaB6;ckP3?fRPBAX2(kr!VJk;+Im^`
zo7pP`f;4Q(T{v`r`k|OG!aJ-k(*y<{-oYF+E|4MsqJHqCTk@&Ponz6mdy|Fwh5lW)
zZQb5=+u7iP(nvt_3RY`(|71&bn4;nO(OF1+SWS)}_FT)S&X)uY^T1cX4Uy~jHzf5D
zv%UhVP7cp5H`wqy=}9HAI!|f#e{*|!kGdBV{So|jjyexFkRD+5i*|o2(hlJ~sSx>{
zsamAKfT$C%r6EP=0wdYCh6D}*Mf_}*mMic2N=FJL6}gR#nqFLW7Rr&fw`Zqm$8fWh
zw6yjtML(pXulFWHamVZnRiPks)R+~O93CIm3=R1`?|$g1swyc72|eH3_N%B&Y)y^t
zst2S6^_}+CECeNe%C`g_3(*v<Sy`!4jJAo?N~(!(jbDVHt**~}E_aU8gM)(5exLp^
zLqbGAkd~3TJ^)F8Ek;X()|C<+In1Q0OE6<HfOWYa(jBPI&ayly<-rQ*{K;&m`1ah(
z!qOaAY60vP8@L_Jc>xC??yH|)rJ~0zih)QCSn#Gw9r7H%BkAnu_$dJV4W6*$;%dC^
z4sNbWg+aR_vP~VCaIxtI8X#5$L~SQ0{_UTx?(TObMS4grw6mGNMpQMHY8zZ!Mvg}M
z`UDd5RubrS-0v>Tb#<49lPlMl&UUVn$JY`Vmdt5qI31#OwYohAFMPzXQ!O@DSM`mJ
zyS)j4pv?~(9ywS*T`KcrFq!qDBOvbe>mO6y@9@)VKYzQvKHlWv;Q?(thWe?F6xkse
z40bL==YIZxF8nXdy0-pvoes*EgFkC|5xU$qE|b|yQ|W~7CMvCFe}R5Yij+T`FLKUU
zZcF}}RmXP`F);WL9qj+ZQWuK+s%-pSeMINT{xKo_8L-rgQ-{n&yhB1mtKge+bIHWP
z5;{g&Q!|laJX&KpH8llD!y*HG#L+WV`CTO%LU?#vK|v=~8KAL4UA9=KkdWT?R{SdO
zJ3nZ|Mu%-#@2Ax=XJl`!E*Mx?{Lrm~2B-9{XN)1{|2xL5&S=3r;;>p#klY+;%<{-h
zezv@s*M@j~8T2d?dj}ZcA4T-Y3k<tMFo3<F8FIsfpuX96paoAp*0nIT7avqWeF&-Y
zNT3&tI=315MJLvmtA3^Ie2*8SfNsoddAM}hHZbxgrRJ_6d_RN4q^i?qi@xPmKtKSf
z1ps>{A`Ae>WMuaGcQW(w0Z@uyk$CX72jBwi?Ckt*_G(xoUdps_adFEn9@9Gt`m63r
zt#NVTF7kv8DgHKeHFQyKbDYebB`k4aQ-^J7_X8}Ul~<qCmY`tHQAl*X9C)4LM^^Ff
z6^ermc*OB(GQX#CyC9Xi0TD8>-~44)KS)7-u89#xC&PX9ZZDbh$fRChBi)>&9qIQX
z<?tPP(dEJB#(!tc$nZ2)Q<h#-!yRhpbzoKkbTdN(0~{GmP0eJdrpzudk~RH7!Ri8q
z&0l~Bm|%RCA+}xG+25xCJbl7$irF+iFD@z93)^hPy-*Av^#0r3U6pX?6A2@prd{vL
zKGFRkNpnfBSxp_?4(dLR_*xN5@0~zq%@Kjb^WTqpbuO1)CoAncV=^Y_nw$8!hT`P}
zt9D11NaN9$Avfd+D#ga~yL2ORzi!r8=cvAUVDW`K8Tc@+0Jf%9t<|T;Hm`e=&!187
z@rMXF_}jdBRG4F+Uy59WKeMvVfTUvLn<NZ1LAl;DZDx3QxE9mI_HakEgTeTMNwH?2
z8b`E*BXo>|52WvfzPF*FDESvHS0+Ki<sZs2F+B*xEg`U=JL5)BU_!!mSMa-mcsk4H
zr1<3IMzgUjARX`qXn37v!#Uh%{-i$kpz9)mjo%)z<Pq@m+-2pQCKNpP2gXMJarXmw
z<(S*1-4y%#*m+VeIuXhw2cw5Wd%k->FAbXXsh;-@>HIQ+r8fLWr3-<jpsMP938%$)
z@pyk-FDFDJ_u{gI+C~2B=^n^zxocRg#xAi&Iv-_h3>OKUkuA||_1-f*2A!jqkm?xH
z2YCx+`$oTa&P>_k)BY$QF+X_q*^;io^>poTF8*)gH&WD^-hYBnNwl-~x4mr})il!X
zw{{l?l$?38K4SIFSAJU8I3OwKDCspg*`4qYO6g>E1#T!IF#{KKJN`k}W@VakC}xRS
zB_#*o^b~r*W3*ZbV#!7nDvGT;ikP0F>$W3@pSQ=4(sgkm@un$U?i?&m<0VgYaxTBW
zzS>chl|A}hdVrap#bpgUP?~3bp%nO}#@{C<(;LLw44A0*Hr!ZYCOO0a!_sohGMCaG
z$#`apjFeL~o^ZsND*H!)d1>=nK^;)yi`s#eNZ~h4MaXCpa?Q=nAsO0)m>Ay#7@#2#
zbk#|4hUX6E%45*7yHGrXe*A<%c&u~T#x&FedWeuU6i^=?h@!@k#pZ>3*z<EJ*)7~O
z^ibtamh;kL&8ESeS7gB<Cs%n|7b}hQ_nS+6e35YQhq*P`96u~9AUTq}mj;zX;KrI~
z#fyQ7DXVL7E@ocC!#|$ROKRV1@d7nE@&KJ!u^VB>qVUoy+zV7_I-loVgtrugk%hin
z*VH_YVs1UnA=*=jNmtfUP;Gy-4Owz}OICXcTjyCLeo@rME8M${Gfj84s+NlLf;`rv
zn8<TP_E`P;b*bxWT-ZMv7Nq7k$YQ{@ANceazRNW>YEB33pCoD~UqtwU(@-F%-}?~L
zlz=r&#2=Dft!9oUl6Rk4;k-AwJ+vIJN>kFQihsbWo%~2Ea{k#dMXVcV0QN_EZvPa_
zUz{O%wdoUyjgaudg+XQH$Ox|TR_s$V^w;0CE+H)G6mY`f%3%Qr{$Q2uYEA6N(;gP5
z>N!VvWj!{hAKTd$FP(rjgxoT{eDgWG(+P$rk;2mLld`L30#`=V#{}=`qgzPaH0z~L
z-PzcfwCXEYQCj=HC@?1>FHdiI{Q`n|vFJ)}lxv(L(!H$^N>h&cYXx*h-vvC1SUo{F
zq!{~;%8PcDtxc1x9DR;IC<Pg~QKpl>M^h=~ecW-Bk}2zh=5PD$6WOKgr)GF32$k8p
z40{ojf6k)wl}pTY#*FHbts}dUn3MH^rCpzXx?_GXLAS-7_2Np$W<wvprnaD92buNz
z?_AtDLG+Er$C_#w3rp-^z5+khwCn^b$Es%r{)HX+xqzX^Cl+du6+!-{VNNov|Dkg;
zqA{z5g)CYpSv%tx@(+0AZZm}&G>L|e>CUgy2X`yxmnRPdHgg_RC{U`0O+hWsxLb|A
zMl)vAlUo$JXMBwP3#EY^dghWlmn-w`DT@KFLFeBcv!_i|P@5Kcs%>^U{QU0qrGAU0
z&S;D7aoW~k6JWR8(e>YLMn7LtyD>iNXbcIg)>g*Ol;!C@yPQ+t!`z}`5;M!iEiswg
zDsTDJo<&rCoGy=$YkXP}y6tJrl#@j02}x^w>T^AEwV0eHZM1r}HBWhrWt)4CfW8LX
zfWiLjt=!mGcPY!w1}X+huintuO!?PC5M1O47J{xsnsdwC6>~?QFzz<3+xUBSXMzX5
zGo6ogH7$}HRB+qmec~aIL-u|g(q?s=7nG`NYM)l^yW)@auy6d2&{feRVE)hnhHc)a
z;_|sdo#8m-V{1Qr!~E(8l%pXUII5D#vhwl2Zx7`LI85r>zpRgiin+?g_y_ruO*uoK
zrn8ya?CnQ18E$bscMU(~xA}ZwN@blSzDU3SCgeRoOpx3i=p+_r+?M?1*GadHai1Ds
zORQtU8C{wuq83T><KjRTN@@w*#nhUPnTz5_@32}1PU2pS+Oc^AaY<(fI>?c_=Weu8
z7dejhnut$qw24GHxPDDOxK};L<mf>coN`7I#kS2VNKx%M5M8`2<om(kMjdLf>*TK=
za5*4j6GJ{;<WP3YDZ_Qmx~n4Leo&}PVWnKXd@JE1S=s)k${l{~Lk+K>YI)^1+Be{$
zf);<@8KjE7OETa%NQ>nMnu!!);RpL@X)>$(v-gR+jRm0d%$zr2Gsh!R`5bd-Kh4lU
z&Eoq9|50JTzf<3tjL2_w=cn_1IFMHE#*bo09G_v*64$i8WsE!Iy@6NECL^P)9n5YT
z9^r2s@EZ7#k4+JAnQleL`EmVZx`^)Vflw%x0%!lwFmyY5+}RsZDb&@rPjNKtYWVtu
zu3aHR;jM^(;_|Rx<cyKpajuTk<@KH2rNNFPm)hY%P1$7zapl8V{5z7xk|hQ#ub`l5
zxCjo+FNZPY3^MA<iVplV>enHVDUQUJ?<H;O-mr9#NV7t@M(xQ&UrIq?b`1`;+<rck
zv1;s$2&{p7t!-6%G@d^KY%c53Z~w&6JdZ4S{qDZ$wS#C=n69~=P;|z|PVzw&yrP^<
zAy$04N>7pyH6EZ8_D73>QJB*6mxnuC)KOD&dERbJ9mf>gJok3nY%qOD-@fG?S-igG
z+jrUMi-vpK-`5AKkNow_05}6s?c*?~VmHQAUo#9TVzlysqvbC@3W;Q@(v&UhcvPPU
z<|ZMMxO6GoG)+NbI(cf{+QNVmbtygg`$rb4U#j!<8pfdp3(I+obVR#BWZpkenD<zE
z=6@!oWSlnLmfsdzNf+1XX6e2Moj6fhWh7_VOAnF58Tw!gVytWHYd&z{JyyLDnqH<t
zapX?kt#Z)a4d?sVU3Q`DSFcDIDYo3QqBBc7!+geWtwj7er~oGcajcy7^9fSp`MF5l
zt%bq3c*`<dxm>#F15(rCurnEc`%_aJ{v!{y7f+Cn(8Tc9zn@U2+j!X=85}cCIM5k6
zp%A*l?yyCc@U^SXswJP+)0_C%-0Ga4ZzBU3AXS&CX8tPKTdRLSRWqYLbj&f(qV?vi
zmdegu>`KuJb8}ozzi-hyS#bLFDKkj*PrEc9KZBNkZ_0%Uw1HFBWLpZmE!RQQhpY}}
zH2yXC*Fo4mO%01XIWhOArWu>~8r-vkYlL)YtKUGk0O6B4V~-2HqR$V!)9Gh{S8cT}
zeQ^gzl8RHY2dxz$)jRwu^C`fd1}Bjy%Zc{$R!B7xRW-T0Q+DPI=n)>g^O>KDZLSlN
zV&N6zdU@Tk_b#VPdqZJer0qK&EcfsA!wg=}Kh1F`&txPks{~2^YPDiyM~NGPvicPw
zCZGgeglT^W!SF<PrDkF}W0+0;d-jKz+O4!k0n)XR0D^HHYd<%eXtq8SdUrE~EV8=L
zj?)V1y#3EdM2dD8x#irN7%!peRO<1KzJ8H)?MK<;nyA{grz$M|$;;$Wvv8#H<S%B%
zI=0Ree}5%A%zpd!ZBn<>cB&*vs-gg=Iw~fnrw$T@`N`S+Y6NQWfz+@>&*XsQnccvB
z9%(LG+<7e1l++P3j7=VUN6&q&AZ{T+jWKmG!EIX5C5<1C;ERRq=m<10e95I_7rtEa
zjgQAH%+XOqL=5L@`cPzzn<!0A0UD25s9rjkgDN*F-O^TY8i2%4y<T-86bKlOXZjw+
zn;72U|CQW(K!UAdBiGYnCRQSCQhyTA677mU%MuB7K3b5tdm2`IDh@y3QgD~iRk_&I
z->|$ZgU1?>_Ar`XG!K0)%sPS73eaS#P%!ypswnXey=xiNd)z%d&a=f^&X3!k)A_tK
z_4J+r`z4LbiMr|T^ps@F8r8Gpj=nxwIjtgcEZTQe<Ut2`_yXt8Q<;~u6Fn>|M_X<0
z@9&?StobEmMG>jh<Sum-xpG2?jEz%scG4+l9aHXt!xet>-kVu7n`3RZ)Z@+bRnQ54
ziQs?-W~}x4Z;z)dW3F*=4eI@Gk%DTt-o^5Zff4+5L!WBWWR71oCpa19dQT&*q;b38
zuv!Q^UhItd!4Lo=j!T<&fW4EHoQ#HV?ECg~j3n#e5_(m&s)L@OsBzC7A>(xItZbWs
zKSFDd+S4s0DTNhCA6E^g<A}I`c~6_gB(*041S6z3;@h`x+1cyh-Xh|0L@nDL&ao1s
zH*EZZ8hQ5z$JWz^@l>Xao!=yN7VFo5ZN$tcJ6O2euAU^VBdl=j-^rF!#(HM1C%v?5
zfed3Fu&159wiqEkp7`5K0YR*`1j{z#dEuW$fv~T`(a}{V+GT%gsJlBrxX2+%Oi<7_
zEo~7v+Xvt6F1PWIQIL}6gsglJEnZ<&?I<--(Q<lYPH-w3OM#$Z|B-#cYro|u3WdYw
z(<2>Fcz2aE>K99);Dc&X*Ah|?_DNMxMoc6jziy*r;lt10zkmOQ$t=+Hrqqyb`Nf1`
zu8bzp7<5|Ekr`X^cE(9<pWh>tFL=Yia`@4`ST4Z&8;V=c)#1Ebe$n53$?!Bzhn}S+
zKDxvCTr2OCdEqiP&=MM`nG+VfySr3N)GLkpV|ZO?>Wh@~CQFKnWd*z0e#e|;tyie1
z>H6@)7kxH#b5}i?IsyRRa58HsArAx!^Uo_#4PnsT&`IhW9tQo**Fe>GAZ#>8A~ucP
z4cSNfsOX%Fq%9zZbPT_;-z@ohT?taVm@Y%B3m%qX*%)pKCb@8Q@lltEd;M&+9)WW<
zB*lTq7wU9;0n{VfQ6CA*3jmc(WvZ5l-{c1}25{71Hu=#S>ajJL#D>ATAXC=7nscVo
z8^qbTjT(6NSc5V`rrOYeq4x-ygc2Df0u#n72Z6z_deQ*TSzgZA=>s;@5eaYVU|W3j
z+>vtT({IKj$jkH@8nkOS_X0aNb|tS=k^D3jbAp$vTf1NeZ+11TEVB%~O-~JMPMg2-
zWZhqCFCw~YLVm}IYORHvlx_K(xv4L@moNIgG)RD~837cd{{<qPD!c)OYrH@q8<0!m
z2y=e^l-DR&F|1V2gY20!qE}H{{PPY~xLRH@+Lrf8i|w3rMon-n^~(pyw@3hZ%mH+^
zD+_@^h-eb;S3oOH4f8%5UkA#s=g^({VIZG*$)j$RSHqW`qk;EfO1!3NR7LMovAmzu
z6B@tIGQO(A{v8R9loVh=0Amitn<*{gg)?s1xB|+g*m*47s~VaI;ZOCGCCSP<Ngj9{
zscQh1ohY|}5*ReANM^Ivd{WIQLadsXj~%nGQVd>RzEh__LqmH=yt_Fygoh6e3)>^q
zTK&S=R<Y+lwEtm<{<FT=8!&0S>oUI}OzX10`;#12+b8G;I)#_h<O>BN?e6XZXLFTE
z6Hs8piMfR@K58;k?&{Hn3&NJCxByp)PFi`0E-3VoE@cT=T{(PjqLggiXx%&C9+^>i
z#R$3Q-{f*ETkuepo>4iiFaL5c<pZ_p)t38|g#+H=Efv_AK{_jg`^+ENZ{&>6qahs7
zR*ywQPyfWOoLy2<B2`e3l^q?ea7d0WvK|RLVzUuFsa0#OWGx5L6$VWSmL7!_iM0$$
zI2V`aN(WZZ`i%xh>al$;>fYT{neHr%Gfm`I>^m~7=_}t+2HS_z4$9ox`*lQ)k!`IA
z7kCa8k(d2^F9dEbA|fJAM*3zfnu{8Vg!fKQ*@w2zzwMv2*CTK7OAsfG#dD6{Q1OcA
zD5+e4s|``$<88ejxt`#Gk~CrhpY&$Bt>%}mKG++=*Sqa$Hjo+|Hk|i0O)l5%tNgp!
z{g?vR$1(Xcx7}uYMQC8q5kaRZp0*9MLzP@odV$f)J`v#pnC9&ADJ4x99kggLtmdi<
z_r|>4)+3F}vfjP&Q=ZS$1`jSoE6E3{ahKF=O37N*+`H*36Y4S6Cz!8^ujHlhxg2M_
zD8z&V#)Z!(;F+MoUT>JwdEAJzViva$KQq|{q7j)uu%H7Mnk_aYV(N!YKO#wyA+A?o
z>x5L1g~buF$k`VI1$}@pWh-=oeU5&K>Tl**>Da0q$!KB2Z?TBF?9)>@dr1E!a`0=2
z3j+gVD2aI+z)J;gFJHkF+_Z(Gjo~BlCBk-m3G&TeJ>8F<zxKP=JJy9|CRzBB)txwH
zvs-jLd*csjCQ6h*ksgRqK(YCj5S1BB{y(2iFE0-{JDb;|g)RCmjmVq=-*2eLiuW$2
zdnPUG(zv(kO4UUstTWj%yyx+F#A}mzHW{DeLpr;5?NVP*v6_cm!lvs*y)T_xq8E3|
z&|-0~Ow#W?8bZUsQ==!`sG8)XqYH<@INut&AG_v%O=R*FCR$`X3%bq9hRpgWXpPc0
zeKqu}&7zbuH6$VYCMhB;tVH5NX`}i^ug>10edA9#LZMuyb%ZfB7U!0ZVOViI^p#O1
zpR6>8&-P^}FpE>^m*Ho5+jltgM@f8k@7qlK8oWZbuO5SCposyP{$$m3<kX<z24YBI
zRaIHXx;^el8|~Sj07j)#TgRAmdYT9sIa1#pu@M#)mXQQTC`cZD93^TVkuB=x^WT=w
z)s@ej+v|Qj>PY{!?SQ9X*;J^kyxo*~r-sMv@VR=#y|`jj&clz;ac^Q5aKeO@9lm@4
zyeV7bWgqIe5U+Y&BDPsVPx7rYy)?e0;6#OtkMFZA_;!}P_c*cp5$pHh;N1{ily*u<
zhou6l>&KeZuD@O`rp)jYxqEZEM@%f~kDLpy#!60?a3M5dt+RC*wLA5g*ii>cNh0Wq
z6Jwu!w|gIk3{T%P8=>b#pF`sI#Lq-)Lh!ueC<jaI%E8HbxYUB{LCeAugnjuP5G{(v
zM?{oeZA>H~%Nh}J*{l#Bx{h@n{rV*%+}}Hv9sl{LDFNw5gO$wJfVJhF;nLl)b21;4
z@^mQOUBbZU;r@70BB1+G-8-L%=|p({iQD;dGn{{1H#zTkuI(es+7|Ab*+lKT+=nE@
zX|%4@JwV8)+<$EV{qPDv^Oy>)4_6X9VAaF@G6~!m=a}u|R!DRf>m7Uy+r_XS%MLH4
zn)=aaWxWD_Ik+AZu~)vQc%Q%iI~c@%Tg@qX%9@FV)kKS&$EToW%bzXa#09j8760x&
z#3Ec+`D|FD#rX^F8E!bsh1SF9&6<6DizV9@m1$FK$|(~inDn^v6tYFNUh{=g<4{q@
zNTySpMPZp^#QmsU>O)aj1#I~lPB4c)K`<rJ($MU0ZnApboSX>(1tsy0Eb?LOB;%-j
zvTA2cq*W8Q^(lWGq7HA7n<(@#g^F}o9W!VN)H^g(`r%Dv$)Gf9TyviwFE=lC6Frg1
z?~Le_6mEkqm@FXxE^hS40ypy2Yag)aRbNv43B$wqqTCi-lyMv$JmBI0_wI}e=V|_6
zPTiW-q6lx*Cue-ryo+O%J)1RzBz>__?Zg%tC)mj*#m|B}WeIXHU_j#OM)hX>2~9Y#
zrR5P2B=+y6uFJF=ew_#e1;LH<i?_?dgXrk~OT&Urle5Z+;g48mN6PXy1i;3{`{X2u
z@?Lm8{LrDKY)e|3Z$^JK27k$9pk%A*&8<#i*3yCa8RxdfL!zCDULSoMr@`B9GRlov
z{9GQA2D>AjAbDa+(!=vWz~KA-{d>IL_ZG6Ch}-jRZcff<@k=IG6qUNs^Q-yZ%(mtY
zf0q#k-R7HYrC<XIFsnXc0`teQ%Ify|nB~o1bG6p<tzI6mR7r3Y)A4IHC!L!ob5<1>
zXT(oP{3yEwI;XQS5>t=|jm4K4Eh2ga+kaMNs6ASfBm0;U5D@lkYd`$iHXrhK^j(`{
zTfHtmNW4VV;{uEH*TJD7;0Iq?Hf%W^8-(2?RBWquWG=t(RigBK$;x87FSmp6yL7<O
z(e>+D>;&XqW9exIFVpX!AH8vA$K<5bQnOob?*uID3?;<ZM1{JG!}$al3}7L){KhX|
z&XTE=+;D(+AnJ8ltJcpSWi4@?KjYDN<AtNUh)f<XXlq$_hjs(v1F;J;c!*~nB%Ftj
zx<7yPhX<{(K#|R1Yp}f*0xXAIr5#n2V1e-J;f~)ew8AWrCg%M9qxEe+-4|qT3IW@T
zZ&c2IANaV|TU;Wa*;;!^mimj^x%CjAT2t#Hg#zTb>>6A_|5cF3SK-vsh~=^~qhvqI
z_aD5T0yP1JPH^I5Ury%ZiZKJGgpYd#q5CuQtrSpkSrH(Duo1gJ*ag;)5L&!U-&YWf
z*{>SDtO`DFqGeIeT6Q9g@!}(bcbzoZaips1+=mvNcqAYV^x;{&$4~*iN(?GNldy^r
zfg!J(lTUhjZLMuvU`1&?y}0naBq|8G+V%dv(jJ<jZ%M5`w;U2=6Iph9;MIj%>?z?A
z(8HO$tEROQ;$vndfz9|hmV&IjEQ^gwZ8DVX^l)RBbq^|0;K)K*+1ZGQh_}mc)YPVv
z-$tIt+bfvxS;ni!NG3;?l(_kF@@gI;dt9F1a~eQT{Ll&JY6&tbG{==idzRDY@Elp>
z=uN*1D2z7cLAt1Q8CT8O7S?On!Q!w#CBSC<6EuZmyc8rN?R5ToO}l6*XxBl?P@t#(
zDqm4_ml_ILjWil*f>asefyaA1gdd$plQ&KIH=xgYa!bO^?Ej9pX0<xA%DV4};M~)(
zl5$bYwCw^y-5~5y6uVi}X|Euj^`7|M0o%=3@ovksV;g7AFE+k&1tgIRHc@Ky%k0xX
zejTIs9*e}Z^wX@e8{i)>@ZNLzbquBRe+kiAVa)oN{MTPWGAE^X`qtR;y3ou<DNnkP
zRyi>-QTawiPDZAbc>L`2w9v{7;~iK>Pa@@L91mtHKlwqcuvHI^CwI)%*%G3oD~G}c
zW5~&tT`V@rq_7SB`BT(w5no6MO3fvjGUOX{*2|J9<UF;`e|{2_S~BVb_u)?-_!bKM
zAhuJPkTY)2-eWBF{A<Nc<IY3dLR{a-ZN1ncXtUZY66?&Y$BoAA`AKq=YAMQg8KyWk
zx4f#vnO|)1>-SLFrT&iZH~ddO3{ew)5Hcs<QJJJ&>f0~|tns<#3ZXLw-8{jFVtJEH
z`6La78@keMV&t@vdP_j9U!*mYlYEgg=I;doekzxf1?Uo$^a(t~larIBi({LSF|o15
zcUV<$Z9W~~Qe^PlZD|3zH%Q(r8>`t}rtDcQdG*OIg~($q-+#P8$H0(F;}SVAnLNU!
zH)V6xr7E3AXJc3=8o#jDrTa;B+KP&1(M%KFVB6+J;>eV!^CQRN7pRsBbecA}b(|kO
zuoX?|=+M&DY+~Cka^{FJIKYK%kPgdQnR^j8UL@%3gsVSCi(3w#`TlXMt9i&wiiED5
zzbzgsXd$ouL$lTwq!Ps*M)k=tmD3@wW95}Rysx~%xkS#zVO9Q*belDMI7tUSI~nIG
zw{{N4Ok4kp>8}d0k9Z&_PXf@?uzH2GsICfnKwC4Rded~^=nwJ?E&HE-DV-_(XD;F)
z0e=s5ni*1esT4?Q$`{5pJ|>^wR!WyISF^^UkyK&^R7kef<*DIwE0O(B2}O55c&|N#
zRVaOO6A*Xes?nqa+YCiXvf9@7KtL6F^=t<F?(jH$=01{=Z8VllY2KFCf{5zr10k~E
zQ<D4#t5a*tXUw=oZ&PdTfmn_Pko?<`q*^yoMp1R~Q`vP_3NFJmX&!*Ve^PaKxvvHo
zR{T^{Y?{_<qlC%AU-%NJXH7!2aI-qc__aPdKBm3?RF|E^$Gsf~&bTY??qu1Q%xb!m
z&|9UR$n`;xLM|OWb(SdFmC<mzv71VncRS7jQV!2LlWx_X)np_><=o0tPJ7#=h+ZPB
z!aPZ+Rj@c*%7p*ys?l{-cj<`W$3`zwG4h~;vt_m<){nslG^!o9o0I6>@q7#{Qh$@e
z#9}EplTTss4>x~8Airr{9VwvZYfBj*jYi#}ZDda__Y6!QON<}sKB?X2!kS3Zs^m`<
z4e;3DUCe|Ziz6c9ummmhY)0Z>oE(ykKQNH!^!{1TR535Nq2cLJ^PE2qXhr3(dKpA?
z5(6}$1sgR}qAKh;r?D2Nq9yzRK8Ni0Qq{ZYy0J=~6FFY7*aoRqv!Y7UNv8BUjCI)e
z=9ddko5P=o3s90Y9H=x|s-m<`6gnMU{*t^6zFcEcj%Zz6te1L==`25gt3Dd9XGfEn
z?xzJ@cG)Z=6pD$R%T8#2@s;4aet+n4AWW0g$8d0N>nAqop_wKV9;}d$txjdkj$=8*
z$d-4_6J?ZgA=|*acu!m2M9S?=(@G9f5T2oARtH$wcM7XO%0sMkquGqQFIQQ)s?$R|
z`0!EBmUeeX`&l_p!g6W0sF{I50?yHmIw{9$wtIb^d&3Y;@@O_@R4PJUfcW(2P{NBZ
z`HH@0=H_F5&|kIyP6?SJ-}J@l2FdZnfPgNdRgGqQk4vL@lB^V%_bRkGC(_~QJJv?7
zrM7k<NjJ$52GE|5Nd<?zA1Ce8jgnJRLUAxfl~^bTy}FJgLe_N3^qP$h*C8?Ry5Z@+
zDVirgAYs$(_dmtcAZ(6jG|&`FsLWIaVdpk+3MzEQMB@?hz=wMi`Z2-Z(MWPQTQ$%y
zB`bA>;!S5U-K(-HHA6$AFt2!kj*~B-pTeqLIkDvMV(0a8cnp_5`C?9x#F#`$Q&I_>
zp{VnqMo~RaAqV@nZVyW@(ZLIZ+Xbz(I39@*>Depu5h(_5$)bpsNe$gmCBdf*1Lv)T
zDbX?pCV$gRYAJ`PSW5Y*Y21z`FQWPkoB1uxfjk_P#Xo7v(p-r@GIoWX%&lkz25;uA
zL8p<)A{F0Az_uqqO<kx{yRy|LBVTp|(2nDu^4qwPk%4q>7n|EBKw|}Z(=-4+>!}>#
zr)I!5&XQrdl1|=#36UJRg))s_Sd_}tGwW{pHPkjFT^6juI4}7T!)Id!8AJO4{YU}|
zVXZv=_UcWu+_K3h*R=D)t8c`aq0(uKx-iLB=;`LWiqYZ?8P_YJkN_Z00Bt)<b(-Rr
z>F}7l%+Pi-opncs;$2U@+1XOYu$E2Y+`41O&1|ujD^<`5SXIVv%nXv!Ww-uxcP}S9
zHTpb10k1ATuRCE!Zf<VFPzVSE(X=Y}OAF%40;__ChTHTe1TCc^(;V)$ku&+JED64i
zMA&(P6YnE@v4c7bk0zo`FPpJMpC)<p{|5Jy)KTbd)lG+_mrwsaT;Hr8oQH=l-sX`V
z-+Ml5oL-6vtS(eu>Yg|``C@6g@9z&~1rH%GSdd+>ZG(Y@=Kpr~hew9uQ~r8K9hZ}q
z#}xY?t=f&szr53~4Sfd@8=1@egN(v@V>6d<V?NiJ*%#rxL40o$Gy$JWr50g8@}#Lw
zJG}SmH3;SSgMIvbBqc+_!)3(9#czWIAB-#2TSSLftf@2M-=}t{$F&wGCfH7}a8$OX
zVP1!A@SQF@c=xu!+c1&Xrmv3}zYyc5N~oCQs;bTbDLxp9_?hGJbqf4TbO*u8S_2F%
zQSpI)u3=9Y=(iJbBCny*;I-P^bhBu~Uwb_6)*F7Z)<q%P$%V%Mm{P95sjX;mB8XPF
zAyb_CHxN3#Rmzg={(D)!M`2Gw_O|#T{uPg$<Fh8wbMCVR)3Kfr6R@Sf;Z;i@;sRr-
z+I~~g3jO6m?X$432nq?g@n7h*!srIKug2%WC{x9@v?J^J-zgfWoakv*Jji}BS_z(d
zbUD}?kosgYCv`p^tZJ{P+x^8^k5ACCT-)q!LpSl>YsnZnyVI7$=I-6ND0J%R$JsC5
z{?tuhRZOr|f2>eo&(6;N8*5L4`qN^IM}qR-vlz=h7dfjMHe9-g%Kzkpx$H6$1vQKc
zG>25#%QY?D=Ilwt^Mwq@xr+F7Zc26`MedVnE0RF7ZXo)&0uVLM&c{IV!c{4RM_yH*
zrw8nRgI}nGfk}2I424xSWb09mdV;urLSp|H9Ly{|2#%eJNe~ed?4q;XC}fW{7v7!f
zqeGSaKM}P69+#t;wZwjY)^CH2pWi{~yq@26>OW=B0m}4?tzJ!-?@UZo?=bhx$Ma5Y
z(*D3>bSFQVolA0Yap~~+$DHv!6v_Zh1ot50WS$53^)oYH$PKl)wQYe!-A^}oPR7#2
z<jNB3yI|(S8IzA_n&x|mXp8YYL~4+rzQp=K)5w^8R8?C`KQ5C~?W*w(g2+VRjkL6M
zJcHiwF`vs3-l+>9GZ$cj`}Y-&R;yMTh56><@=}2g<O8oJ_ZJ$Nsb7Kp{8R*S24>C+
z6kCAu@?J^UjP#CRs+X5I?f?GWJC&1L?LvP!!sShV{ucwdmk)~m`}eDuh)SGrY@v6x
zXt<>UM?zd|dU^!7@&GO*=A|2AW>P^XV<7zd{uLzn5KhF(%F3-rSuiyYSx@H6Rm)!8
znXfB1B~?Qwqv#fGYHG^<cWYF<8k$1CWofu~btSMr?Fwrw2Jkk0dsgET5gAzk&dUM5
zuOH6WbuGiE^z!*!%{g8aeVQWiQRyF!m3DA&_&4zp@B|FSn}wk&?wD(_FJdtgx2FkM
z2pI?Nr}ZZs3QDZuk>sj65&uqC#_l~WAw;qL1xDb@n-PKL$S!Y>?k7G(Yo5*17jkzI
z^(G5*9bIh<JX~pxonLA;dNt!ExOud%%<gaBSl_XyOkLSUwe^zptd$we6QHBUazxD7
zPF^{G`g6aKH5NLA!0l`sU$9bdfBW;H`2eUq{uwseuO{JTi208ckK@PRc-_5^Zz16O
zce3H8AhMJHDkS$9>^%Ife>u%=ulB0M_6Ixf^z@AL$VG6cBr~$Z=Gq$Z#7A&zSWtip
zZ_y#4ozAo6dL}fk7XOAfQiRO*nd^1J<075hos8?Me;yAt8(W*F`C5pfDN@*#hAjr>
zow@A^Bz2tczgLumgJULH@WZ&4L}~(@xm~tP`O&NWCiY4n0famI3%tUX&h+izjuBPu
zs=bc~x?%Guv2UiaQ@RWB=&2Z8KLi`~;r8f%<fE;BZ_4d`*$@@6;U`t<U~m7QXQo2E
zME248W--*}qej01vfuEf4z_6aNtsp2f6Wg4WdF_%b<7x5tv3g|m*Z8h&x6tjj?smo
z)6EurddpgyPfh2f#xHd4vA&Bgc&=mjr!7WH=VyqJy*#RrA1Em5u0Lh|Uy-HPK{oF~
z?R@RHkWwjkY|Mw7%q4RJk|g2L5;_6vUEts6T|xW7Fw!)1K;x=xHt~u#XXXv<e=hRX
z)fIi6@Z|w}eo*+om&|av)hk`;IwWgjAl;JR=PB&p%|dM|VyJPRn=4%5ke2@$j6fP$
z@4ppX@P&kg<lp__^+w<~xs9p{2Z)J?WPZ<V$K^LKI&7yPPU{XtCX^ou!glJ|K`VIs
zRdXIBbN@d4AjSTFXMq3Xi>KF(jg3LT(NmrT#4=#Qd3jAF`0^V(`Vb(Whk!WAevj}S
z%yTP-Kp<@Z=1FB~O-(L#b}TpW`hoACPxvOH;^Kzhy@n`Y5I)V;8BeS<=!OG$d)qk3
zwG?14b)22Aj)$3X*lohx-~Rii@csXO5cv;)rrrg-iVC8E)bgJfH2=<A|2c1qi+yr(
zAOrPz_6$z4Z_oyB`FiI+WRi{czrF2$ec}Jh=9dfdUvCm%0VqaW6&Bb8<fGEb->Cn2
z1(5?z4g7rl*YN}*%H-e8MF0A%{?C6yn1c%ee*XRU*G41IKmpv5sp)Ak6&7V+K0Glt
z_Th>6pAUas22T}c+^GE6uA>5a=>Ist-xsS{h=Kv&Ox)bkLW2h~Qjv$n8cQsTLS1H7
zRshg)|NE93FVK-)v)T-FunRr&1Kz0${FgeeuC73`xxT(0j2Ql(9q-ZnkFWAn%R=(u
zLnfFXX))pBfXw`a9pKo22@ag~3ql%csi+FUDue)?-I1wHpD+`I4)+E_rJR$S8>n$}
z+Vik=b#*~w+OFXcI>eXHf+-MG55ag7Q<*S+dU^sDSsa3vzd;+(7o)4F@qfM##Ld8m
z|KpYOhLn*R0`4$ON!qLOS^NZNWykf}M}X|7Rbvsrl5G%{)5(;~OpQlcIJfJ1BSQn(
z;NE!HqE!^;k*3?0UJi$?|Jj@IO2}Kmkn56t#62$kp*D>CO+zH^gESbs-k=wj72pkY
zbqOnr2#IWj05QR(fx`2~B*G?U{&-fecg9bQ@ad>Jwjay)WVU5_j$R8_hS}piYcF=m
zz>}bk%Xu7KV^BO^R8qgEG9*auQ%g-CPmD+P<c!f0=UR5sNYlI_LW2nQ`B3=vlFSeJ
z3EOe$+kt^+zU@jx=yLbXHr=cZxDSD8R`V@G?Lp`oX_%!fJGSXh2|CihoZU96enR??
zzcq15@><EP4^!337TrD8?aHf~kj@ug;>;mw=T3ND9rTS~UkK^@TPhv48&D{x>q#_T
z-OS%y`IlP8qljQNB=|1Ml<J`v`vVCAZCNyd)$YROvSr#};~#YL@BX|D<Wg2{59W5<
z#s>bxQ`{+R_#?eW1N$`=nS!^+vr+-{NNje2$kC?Tq{<JPzb(|-=8{ym{)F(sFFfrV
zOo`BLKc)rM8HH?2<->-~QBLRlb=~jq9Fr<PZ>jbac~l)@H6G`04ZDq@$Thj5=2I^>
zO7$IV@=CczmKsV>WdDCCd&{sa+pb&GL=g}rrIGIL6r{U5q??=WQ0eY2>F#c&yBlth
z?(SYEe&62Td+fb_tmU8Qcn&V{y3QPP&N0U@PZ#w(B}gnZ<*BO63luDH&@3npNgSw}
zQ+(Ql-*2J~^}DIfrY(1;rIQDw4_C%ddr<<edFbb{`cH2w5=O;Q@9P&TwZkgll6vz=
zM;bOz++(w27adhJK0xpxd3i{^5{URV@YD%H@G<$=2Yi1l0<6)gF~hc0?xauZ-<u*c
z>kqAAWK{$ycD%YIvB@#(WsPl4Oh}^|6lJWME&f(UczVzO2V2weP#VrfGI1`Q$WOG`
z#hW2erF^%_Yt|gXm-tH(-4S=a`l?w^_5n+9R!J8|ynaFQ=P~b6@-HZ@vQlFtMWN++
zVTy@j_gQl&`r!n!M1~L{340@1SLl_DefDwOS}XONr1!YUR$KlH)35s_X9#i|%`=Am
zIHQs_a#P~uaEeFl=vf8JIz?3>HRLsQqEY##r1_?r1%*#TMN;_hY&j<%44-!NxG1{e
z48904rHW1{a*5N{Z!l>+I>uAvzsyoxO(lIPoke%+nV6uVV!m80baf=-d1e1li8K%$
zMH+=cGzO=5d_r1q>fkY?Sy-5O-Dfn>!>)hQDnOf<Y&fmuE*zG|J>S6O^UGhEk>6SU
z&CXkQf__<YcqE8P%R<dC<wkJb5j5R1vxU@t+lYZua-cx<KEPB?ak%Oo=PfKLt3Cz=
zd!c?LrUK?6U*g5CCh7H_BPJ)Humsf}@|Lq{ByyGV)@FsP9O85=(Le9^CRH+SnJFLb
za-W^tdlpkHUR^!9EWylJyj%eG?gS3F4#3p}61#B``F{nVfcoIyT2lkKtTzCU+JdcY
z&Vps)+XLJBfltnvE})9~VGk}cv;2tdvktj_U|s!BZ<P69frex&n$(HSc|Kb{2mkg+
z#)hv-H2c#@LZAC<*<(-aB9GQoAsIXsf$F<`)`4RcM4V}XB2(ey#TX7!164}<oAY#S
zm8v-vRi?sZt#>ggh;a~0egdeQCqrnJpx{U)-KGaup-4@nXo2RvsgW9-osTI+NJpdZ
z2U2nq{B&kwo`b-Qndw4|w3@zcs&&W0OG{A#AK@D}qGE*)(Z8vO&P5F(jVGy|)z?(>
zm0IR7s{)~d?;Jg|v#P?vZ_Y>Xfk#$p3)h<F;W)ewQ@tn8&HN4mPVPv@?ca0+sCpg|
z7wnB=;C*vdn`9Ug)@WXq5S(@$)ReomacXx~i!%jCmfsz2lpl{zt&2H6Hr0L<48CKx
zDKzvWuV~P^<zh{$9Ur>8(x3jhEsU2F%zb@er;ZdSP|jp}kt1U>7*UBlI1k))abM#i
zis*<H!0)I;@`o2<rEjbu)Ns$8_1FoLLP<}XYK9@{{0Nszyw1zyuj$|UQEBy0q6;Pp
zn<;?h-}O<MF&yPK9K(Zq1?oTK`#QfoE)<f~eD{`~?ACxGU_xT{st2Jv_g<-b{B5$0
zQ+Hd<oZK)3JIS;$frwIw0%dkc1}b4zVfu6;%Rt89O+vOc4$e%4ckxd0??QMQzAn^j
zI4Yeaec#n-8IC_m^d(&~*zI-aq+v~UjNuH6M!r1jE1qA}F-K@mrN1`6F1?)0i>K1H
zMJu;mg}m~<N~xDt9<x3P;>;lL#S=r5cj+Xf8j@~8MM1J!OroS2uUqfw_!&}FYBW)Z
zlRiAzGt;K);RtC(T#rkO*BF7CTw5I2$c+WfD<8gYiICYgsHV5wK!Qz7bXoPJD2)1d
zqQ)vO6XBh-f*sv3<}}G4;W9`zAnjsJj_>5zA<0cBzblxEjOJ>{g~)|^GC?XcZTL$|
zJ1$oPeJn1Ilkf8Ded@TnLu!OvUCR#6uQ~ht8kT|-lj{FI7f1%LobApx2uWE=bziZ&
zM_G?cP+(Ee6q>Oc+8BJzk4(Jx4ck?AURDgo&(F@(Uag*ilvsFjnrys3B-ZOD7e{Fg
z;nHJSj1^#g+h}<lRY?0zQ5h2gWmcP)Fr!`&&fov-Z1LvZ<tW1e`()iFWD(<k{%ude
z#8J9|`g4=_0nsNL$L()wZQw*rIV?(u8MDZp8b5*wyNhmHcq7#>1$+59s@D#qGpCp{
ztO7HSN-J#sg=dp>?<i3h=wK|5i7@=aJdx=5X{_nx4%q$AJWyFyCjlWUIuXa^(Q0Da
zqZ8wJw7Ygd5rbDn%w0ff<@NE6v#z{`lGYF6P|4u`0y_`z41!{O%k#Tvs3<N)ocB$_
z1tp!5t+1uz!&M3oR%81EMga!mhD*FHI^*TQ=swwcKf4nvM*V|T&$-?26mO=SrX0RB
zov6L;B-mC{-+rB|`olBWhO~Iayv4f79&)Kw7p%9#H`nW4xQowAk;jOl?FV2&)al&A
zvqtVQ$)#xyxtuPJciC73`$qD1QF1+9cRVjW7Ab^eEa!@R7=o7WM;#CI9oN>yH(Od(
z%GbBDFUciPxe1flDZA~)kx)8ya&c{EJ+?1au}kcX;F1*Kx`YnqkNxL8M(=oHlr&-r
zwWAVV{KJ3WTVZO+)_#3`{41?%_p+8GP#`S6eO}3TeShIEm*=p@@Tt_C*qFfZZ4js3
z(nE)StZUvOswzc7yVN{KP34R?;=My_6HhBDeOm~77uo^s8%Tp)uP3vQfNr(`yrlsb
zz50r?LqU18O3|Uf%fcwhZgW^pYc0uSej*~RAnNSAuZa?MD7Ly}^v-Wo1JD;XYW3P`
zO;6@-Z$A9hpHWd7W}rLRdG?^I#N9TIYD>x^o<OJb0y07#7xNIHUW?s$q}@|~+BC(_
zqv61?w`j~~wA5j>XpqdP2X8UtQD)ujL2rc>vA)*1bWloVPa8yBcn>i!5vnfK-j{f8
z0&|CH24HTh2X|b5v7`86NlGtZ+m*e3{%q%$XcS2bn{~$XA`#Hvq}3ewAa6Sysy<Z$
zRr(`LAt}~a-#dGoCgk{$V^%wQKPFN?Uu4r$KPQ#seFcIL)MaHhX~~bEq+;N**+IQp
z^t}+RVcEIdG%~wyZz5@2TwIHpU&++Ap{WvXX1G`;+IlNX6;JkJ?`?1TpR5;#TF)Wg
zvSy{GxsF?|<}(=R8=n+KEjZ3iT&fEDJua?3Lgz6tRT`}7Y1)D`_unpMOJT=4?m>C9
z<Cebpi;L$s2jt@g91O5l24H92rcW=9i)*72X3kSGZBt9il~#;_8)iyQS8*66-0*kl
z6RiQHRwXs;^SaBn&yCAVT`f2dYdVGxU)10~`vP4%Jfx*1Ot7D?)+!a;rQg-+Zo2h?
z!!JF&+nb%)vHjc&3FAOnZzX=9NF<BF5YA1Y+HRdthOZHtf?LTHQ;s==i2@CxO)woN
zjA^#@8U=QHKzgMWOJ_!@RVT}EtO-L?%m8Ut^6Y+h`i|S--j(!BN*^t0+~schQA%nx
zOnv{Y0aI+=)%Vzsc-`u8HGys1`A8fED=3D<in9AVr{lpChVMie|Dxl22&)0Rv6(vC
z`#$<7Rvfk-P<;S7>=}P>3bd`+O@}H$4{8@zez@~1UytSZog%qW(R5IJqQS|e=vCy<
zAu6`(c9F1Ai@%v>RpMS+%!aP`A-6|0afGSPP7GZy(sZ648;WI<-Uh)T&5>briK#J+
z`)+oW#!aj1(e3QGYWK%NVc24Wtt@^m{+1u7Z4AN65GZx<PAm;lDvffnD^9vMACrer
z1jW$o=xx&U`gB%QYr5}!1~2az9ut#2njMD=SE?P<+{2#<;wE61I`b2}`Fe`D&cY1K
zq?{l_p?~|~*O(C-Hk5i>Q`d|niI{*sy%Y^5{xBM)-{j2(YGUmZo03!Uh9?4-zO@)i
zq;c@3M7IHo9e83nVsk}j&h-xZz1DX-!>rp-zxSk3t<0~ejAr@|p>Q7WWcS}P3x8A&
z(c3tz-Om+6UR{Dp7WH(ABVvtX8I3WS)!u*zR$ypiBDz=KMZjujT9-3{GAJmZ+5Hwx
z9SCb;(mma#mgkq4XNcDd%at+rXn2DfZdHLVuVCkCC1<p@|FR(0kVwp^&B_$>1nFS}
z6{-{z2!Fa`QY{;3zUPV7eozi>8>4w&C>-b6QN3Zq_W5TzdDvcDdEgDukL`S+(0nsF
z@U&uwj{tkb-(O|(w!nk>Ld%aYI2oQLDTMmTr|w90s}_=$%xV59flF~jTCR9MbwuDc
z6*CVv3qxiT7_QIF&Yo8!W~MJy;8HtGG=E3GeDTxEtFxnQ9)uBhIKmBbgWQm&3jsm=
z!S61hc}!OHtK(jzyTMJa--T++(bUSwi?1egv}q<y^Akx$CgW(2@{PSgK{Kw{FbH@&
z3kwU_PlNFcgz*#9fN4s^(0MJ!4Dk(p$0oLInwf-&?M6)JN1Ov4ZKxG=%;3$XTlV-8
z-ut%~cKVXr9ZfKH%EZD1GNF*btLXjsG5>%?s88-LOcl{hi*e3jd!B)&XIHjtsq%%I
zN+oNGuT<Qx@cQJtyh7Yj+Dtf6xsW*oW~^^g`8LiQ_k_h687rHceow>llziBU1(LVK
zq@;(<aoYRwtTn>{<a5<rTgP;{e>%FuMKRwuk{-4+RL7sM0E)E{w6vsgk8>)rZ=LQg
z2V*bHVh<CEM(T5lZEAKi<U9VL$eET6i4JUp;nrnZ^kyXxa8?dmZi@3n&vG8X`^I`^
z-f;e5(X4oNp^+^y_n_sxrNT#*e&V~jMsi|e;J;U@wz5CZz=BttP6?7l1zTX+_39|S
zw)S#wzhwo9*(D`86l!9TKepk4Su4N3cr}F-njIHM5F%z5PCnL-C;($W&ix^UYg(k)
zSDYYSm$Ua~%mgcja)rXhqOo?1!E7=nAST4kVY;n~wj7XK|Lr7u)s6XWvB5AEUsA1o
zjm^~I_q-mzxiewM;IlA}b^+CdMzf)6%XqQnG7nRVpCpPB>x_xa@^{wM-9Zy;l7H(y
zllXk^GMc;V*Mok?1fqa<Bt{+Bg9=+Mo3i+iLgWrz2Dhn9@=IsiHwN>25|U2nZhf7D
zgOWf21`7a~2Z|<jm<7k1S5WBz3I)i5=^9F|bu)rD4Eif<8*Usc($68}r6warGeoz?
zh7+YPd2bH5&qJHs@}cKn)e$w|MGa&_8WB}%$CLz%dOTZCj|+9k;Y0Zv7lT--SRyIi
z@yBfoZTT|kFQ(330@)jc+c3UJ%W;eEtGepky<?e!n+Kewh5^*t8~9bhomi|K{l;OM
zbe%`X4=L!nsUMpcaqOW^U%MQ(ZDL9hcDP&JM%p?5+VdxRSA@>~vWCk+jLcGS2|l*(
z{oetOg5`!NCyO85&gM}2{XcLp$`y*69&mB9acQ#E;xQ*{CqrIFc&(v&@>%d94N}%O
zwYe?uWANWAvOxR0!5R+tPft;k#9-=E&a@aAC4(Gd*_g%wxE@BO6(||=@S^FdhUS~<
z4T{Km43u%}uLuXP-8H`}Q4H9Qvr5evdNRqS_X<P`V>N~r>Q9`RBo7SO3GE)|_HTaj
zRwPWShAepiHr3MtI8v^s@1AM9U;ir^M{`|D5;~<y{?SVvLwX?ADK*8EirB%^+V5aK
zEq=fT@w}NpoY;28M7`SqE$JwI=^$=;vzCD5(W=j&vvVdTH`R}K;gC}g!!LzHNL}0h
zUPCn{B7ImYEsc_IjV_eBDg>(JoJ(gRHU~hd$Dr9%AZkC%8{yP)O=8g%v{ZFg32oC<
z!(mNre)JE7T-MMg4ylz!Ca-p_YSS2@xC?>AQJo=vui8?UMjjIe`#*i9%8a`1q?jrx
z<{XCNGXbZjD=4}-{|TRreJG`<uEicZiq%6kuV~k1oYl3``lWoKSkzm4NuP!|71j0|
zO1JZB9;|nM*>B#J?4yxJokZb=nv^(Q%j52wg~~)_@8DA&|D3;bkhoHkT>LoX_l&f?
ztHLGrhqdw>kdM5|0WblO_i%M4xgyie(e^M@=k`CHKznYaXcv-O+$XTyzXd|`Pl&gI
zFCFj>sTXpiEKGz~1!=j^+;@4MHVgkuQrCQyRuvWP?GD4!gM0Dpw>I(SswqJ?o$Be@
zi4C#`UV~b0QogElFh4~|p9Hrgwh>lv!+A#~t0$SDhC#xi>wb-C6n3!Iz_;313T~dJ
zdn?cJNXx*woiP(h_gXIAv$d=Mq(iBf41z90?^dDmZHw#Zup5*#29ZgBwQqh%RH|WJ
z)mRT3%41uk249+#-ej7|MnBGt+(f&O9+LXOM7@i5e%+LsIi)ozbvSZ4Q^rU$Ku^3a
z>8$a-pYy^JuXD_`3Bk9nDtay{zR%TFvU<0~$@6W5-WkX3L!jbz44!Dn&;x8eNd;1=
zhkkz5_C)UXXbH{j?5eOG#LW2HcH4TxoyG0vG5EZ$SZZ}3%*3L6X_}9jF_r|l{NM0@
zvM$D!+nY}RbQ1#FhX8y%yR&##q4mL2v7x|iI!+=gV!3lE`L4D@TQI7^@%a{06wrFT
z2|T7AB=d|HDYj{0nw2uRA+w(#dLZ%aAy$kww5nHRj_)6G9a^`^A1h~tEZ$lEE-UtE
z7a7dzER3%y_f{d!7iZqJJRuLeEU)HI_{h;ernUa?TMl5dNRi~N+v(D4*S(udsjZYA
zDwf>ytM<o~WM8J2-${vy4Q4A0UglECPmSCYzm_wWq{=NtIoDlkw7Y5#vLfItSh)rl
z;dfuL&MWVTsxh2h`d$;AofYjgtC{~nR;4(_4NypP=gV8~l9O!Ghm-#Yk`?bE@ky=`
zyM*#ok2t!?KXkAB^!^N`ku@>x_S-ss&(TXHC3~~UFGZ-fO~4H`krgzFEYMS8skUvY
z6+d=sN<h+-F~lNnR}mFAoI6sZN>8?A-qkW+&1Nd>oh>1(C0-i4YDhkGFXBcPha?vC
z%B}zZVVv!M!njBAi?0Adph`+gGDCCP_gSZN<=F-_N)Z@=tsKxocnTT*(hB7C39Oe(
z(O{+vqf`F1y5`Pil5k(t4>2K;woCheQQYosl5!rB+Iv3&M=6U(#?zQ_)w$bHYJ1(c
ze*H=02@(e^CT~{rq6B(Hd6T+prK<Pysrl|Xx`lS#4iV%`$?NJ><qW)0&Q-pkOX{8}
z!#jwRmm@$Ji==2UCPXff3%7O)ME@J>6{A<|*yoO6<(AvjN-#}FUpcJj+y-;kc5z`?
zRiEiqixcNI-keIhT&B18u0i6=*5F;Qn_Z7D*FK3b{-OYK;V1<&zHpG;X<gc_;=Ihv
zRbadRba{Ky_j&Kfk(ig-S+#ZwGos@an;U~P#=_yPos~zI{X>a#Wv^p>RR0>Qt}VCM
zri+qBPMzJn*<nThh2-N!i)+K!HzC>C#*}%9nM67rvq9y?yT~XfQE{EY=@f|wH3tee
z)mbBveYmGhbVC(taJLU>6-<1Nip8KWA|%xHaMX5FJri_eCessrb>(Ev_XN0}@ZBq|
zMxncuaUZ2tG=MZ>lajm!f!gFzQcS(>69l4A>M(p9nxzJ{4p9Emph0JT@(}|+c>4QK
zknP`p{-ZE6xeI#4fJ<aTs)5@0>DgH_vl-eB5Qu+z_Djgs)fJQJgn#?80t-K=#YN~n
z{ov`Gd=7vgXopE<g=lr&X2;I}`j@Qu(+@=$pLql~H#e7-Lax<w-`-yA%PT4hqCRCQ
zp8lQ=>LQ2FeD>3mph`5M1)i~L1lsh^o`JhEO>tw2e1QgNE<e5TU-S<P|MBb{=AC*j
zsN7ip_v69ZdmtBLXviHD#6Cm8TcFv<0*H){KCdfOs1y|^o0^)Ing~oyO<Qdp3;}K6
zJ?zJAszesE3mTG~hkF|T0ASJ#9FZTkEHkr;B8T<ws+R;XL(uT>N=r&Y@rc12a6}u{
z)z@3IY0yj~tFL#5#tb_6`ZQlv8|38VAhCBvW+K9?Ihy31*H}vIy6Wf<r1-?fD%pq?
z7!lQaOb#qq+F^hU*&zK@BSM6pc|B%nUdgGW2ZoU&(Guuk0KLaonm|YRx1{_t0(yhK
zXrS80t#iIN@sK!5%_g`vAc6LDI6jS8o;q+ub-TMd-o}y|+mZv(E3IpZWT5L;ec^wQ
z4-C~W%VnE-&{k<WSK_z*`xrb@Dc7pOUT3-56#^#eUv&8Iv545%*zm~f%MmD8UE<W$
zU|WNhyC471Dww$)G-PCCw4n8mvcp>O8#Cg3{QLwI6z~4Q{cylg=+S4$U%R{c#phL2
zTue<(ZFUTk-dfh>uXh02a6^DxB%oEjd-skx*#Tr6hEuucosKoH`L@&DV`-bJ*{Xia
zBwqB#u`gQeNKREf^^8K-XdE^Mn)X&!bRW7w_342EIfm}r?`M>hg`dCr_Vo7ZEP*iq
z>;4D_#>=|K`&Sqc39;T2!RABiju7GMqa-7%lj9%p6Usc_9{0;DUvBrKq@Yj=7s<*6
zG<j8t8)e1$vYocf7ol<ub6YtuZ?s_tbOKBqs}3wiQ?`c5CF<}#=rcIkw4J+4rwK-6
z+pDS1VKFOiNTV)$3KTSkMA&RIuK(XgCT=>o+Ps$}1Vmh1$^7ZjrQYJiuiX7L1;d>_
zG=8{12H%rcYSJ}me@i`eZ!}YxVh`aqVb~QtiaK?agDKJcRF<P!-0!yCP`y7EOk6l<
zM7x(u$wwbvWwuvI?-YLmQAr^c4?Wbi<1DWwAHB!g02Ykuv%i{}xDJti^4Qqe{OowC
z9enZ*4(_G6L=sELlCXvbh}qmgXDgvlu#D1^ke0lKKAY*og=tVjxx19aXvsQXm>)X=
z36HPDnAHro^W|i5XAwFn%MjPDhdtC_sXSJJ#&4N*@SCT(-gLN?^}FrlQ>uOUm#<8O
zGA<(-Va5HKzP3JQaitM2*EhSs<MsC=jB#xsmSMTDKwbQij_HS*gEF0W4%81RHIN*h
z@BX&gg+(T@J6NbEi3v|jJ5=UpJ1zr$wKX-kOvWD^ke-GJ&!nqnN(zfXGLQ{-X{e$E
zS2jAWsH7!7ZmeSdl<E4OAtM7SeUF~hH)UMF`KLx5&UsPnb!we7&Pe|K<`z_oHYz^4
zrkQgtY&k_Qln32ofKrgZL{N{}<T!F(ujy`6w2Hd<Gtu%Qvf~n~DpBvl>dl$#<Cbe2
zivbsH+A)udwDq^#=a|{Gjc%}kZ-+)xjXTx9_iJ5ldvRh2@<JU}_9f!1h2)xi-0fFK
zx6KZbD+5uO6sWrH##<2%n+^#NkH%Z1X*c8%tHo;)<}{sJVu}S(Ddl{Ni!FSFyO>M&
zlarHy+^PW(i_+JBYH)(g+_Gbb1_i-A(&VdstDFVAL$H&8kvWhaC6VuB(Q;94H?iN7
zG7}FtZNz$t7r(uOQFU=9AIlSXlR&_I_eEcZ75Z|t*EYm-uB^&CaGz*bJl8^@#J_N>
zdVevazerJ;M2V%sD}gFPs|ExNV2A!prJ6!yO5`KL1^<&NLngosT1C)!^TGJbOpn70
zjWrX?2Ae>HV(O#5O2~0@cNd@0MWoEjd59jDl|!1lKV+h!XCaPT2T$Oop5+aPUgsWG
zK!iw&Xa0`)ezFbQCb$;VtXvhYsoK$XzBx(X>tZxK`-{$CcBM5qj`i~f)tJJZPJ@Bh
z4gE0MYUox3!RsrLfxJgz`N)O-{I1YXrq!6;*M5%o*Ct)D&bb7eTAYto0fm0#p2oby
zx84yorHCQxw7Q<C-GcIu_mJF{?HL*q%S~xR<}~`usBbNQ+M^3q;B>3{l(Ho3frfQ-
zX*=jl%M=b%a^X@tx%v?kGdYs}wV%ra$kraQ?%j9%DUXVhPe@(AeJcXu*`Wnw=+6g2
z;ZO-_=_R~}m!g`e^@HQT>Yhiboi>677lMbH@#uBWH4rOT+^siCL<$IMC&y<dyn|kH
zy<79Np23LQcO8>(AJ5&TjKQ$o#>YX569;}hj!B?ROS#4g2+DcAK?G-}-Swv1LnQR+
zX5Mj3<jd|w_8cDz6j`?nyE!}3q!K<zD=|HO?x*{%M{x1+C=8Tw`*LwFl<3%8sVhm3
zNA=r-Sr`-^h*BE+t%j8BQLdJ92<zn}+gPjCJfj)+oZNQ|;2trlP5r1HywpYEcwjn;
zIjoOH^>;^D>GSDJ<=@R>;}|L);uPpo!;l3&ZUrj%O63AEjH9S`Ttn)}9rS0QA@X|o
zGmw;3W>FIn@lQ)zYJxsjpg-1T(3b3qy-pbxBP(PWkId`S+<3i)L3u2f=zO7+w7J8P
z$E+MNF!(Dz6wB0#HqQ=e`~M`M&1IQ2B=PL)?ag=5@z?Ybp}49Y`v?xsCrpxeX#%6!
zdv(ACdfcX#uJ#tB&*z_OboGBpp+To^Vui6MCa;D3S#Mf=`0N?Lx2-yPsa5KKmlfA^
z<*>+2%1g&t%(vq+6!x_;XrN`W*p8emNMH;(_&52=yM|IhdQpC14gGS6Q4XV?6a{mm
zE4{_xu}^kCm!z$SQ%q%vF11*H?nK3V1ATq}Rp8vKy0^uA`IQ-1v*?4CqXmm!)|WpN
z!$6bKe+7+^v|J)f+dfGns3^^&Z*pc*D71eLIYBOu7u(!9+?!=fjx*44gIdn<nocMG
zQlM}2-=aI-afRV>t1TxYajR0r0xqq?)1ol`ZUU3;H=npiT$+ohu6K-|!jBq<LTQL%
zH8EcU8|YX$vW)YxcL#nTRCagVe)I&speD8>*Bd(}^|50!H1qMwx?ZR@K~zVw!FW*m
zuNaFQ=nG|a+@_SS*K*MtP~3c<azh<8t^}bP6#K=r$g8;S@7TFc#)7ds!D;H<pw~Zz
zDVU$HTiiHg{CV4c!uIFET~t3NxnpGSjORku*O8bvOO|5SK4!8oF=B$D$C(~^UH$@6
zYEssz>c43dm}3^r<DoDwKQ<jJeTwVXG#ak~)rhBN8>?6Ee12j-;ZjVSM80s#iMGuL
z!?Zr(aI>+`1odC9AO0k``tT+ms72kLV@xl4Br*8FH0$n>K2(gQ6*^v8H&qU<<1G%V
z2@VkHO`fMf2d`J-*#^mM5<hGF8oU_`b)CI!Nup0^IGu`3Svs$<zMGqzwLs|JZ@gA&
zMpG<(d#4IOG8vVABeefY@qdE5;YYuo5dDLt*06t|$P<!yJU<dMZ&ROIj;MWL3p=bM
zUzN1WGXy1-{G4g{iyu&m0Xb;!VmIL2tejP9>o-jgZO`U65cva08@!ZjZIoG`5+Z{r
zQw>!4ih%6lOmc{g7$$&dkn1|)CJ&%^$oUW5e@T2UvuO-GkytEE*ewLP)eMD{ZW3_b
z1J5Bq5$-^H1%4qQC)xLdX2^OCOI`D`z0mW$)P@qx#)Fyi)iBzw?&kgE<_yVOb@%;0
zIz+<FZkWX5V-Eet_qx>u6R6M*#biC~!86bO3}2~ncoP?${fd&AeE`e>5ci+@gOul&
z#vhwlvz%px=rE;v+oE@|pt(0w$)rdo5z@5k$U@I9E>vid8!=vuD!%kN1=*dLX-kbH
zs&FoAlb5hqzrzWlpVY8hpd!DBV4mFOefyk`#EX<vW9jlt6-8Aow>rC7R^H%GFa@QO
z%8=)j84lzl&g*)N0?G1f@MYi~zBK<f5Y|9%`VtzIc0;mv#B%@HwRPiD=k0B;yJ@p^
z`;dhKLqLD;MM{<{D|GC9Y;L_dv2R90qyGElP4!O{-ds!FeJ-9s^HO`8<!{%L5S+oY
zAXQo^^JM3#`<;gV7Cq{GjrZ-4#6J%+T_vBFSE2)h?BTeS47b<jlI>wP?Dv@C#M-;S
zyn&f`cK%YeDat40ue|J#-^4C%?a$^wrD0UUD4V5FkJMg;TRW%K`1Wj7QTlBB^SU%w
z{rA<KvDaQ#+{6<?d#(z{l$BJ?O(-S`i*989k;xTZsA<z}XS{Hfjcj891OFv))Jorw
z0bItm_bz9ZMzCo-2B6!lQbn+{6Z^fN_%E3rfj5UT0TLuUu}~Y`iHr<TOCQgXlu3{Y
z^YmSVr(%_^;~7|YcJO%fTC|~nIl!*Gu%BDp!ujECaP7f#39&l-mD7Dkc^vy|A)D;t
zH>oKzC-U9MQtxs4QLg?!X<viwwZI_3Q1DjYnKDy*2dQUjsJF(ljUbCe=xAsWPJ=e`
z{zbrlNJv_`)$wo<nMC3fLx)GlTXP>@au$}U-@m`p5K{^jN{5(M!l}HzcGhlj8=aWA
zvtZ07m&^%wOi_0Sy;WXyD>Kd0lNVB>=hC{A#(C?+o#XI+oY*H3F**BZI~5jX8m1A+
z83gQv#KqfNUU2>EpI-fMu9#DsfWV;6P;euFcMV;!ssrp?2m9vhZFw}_l<??i^Tnom
zFR%6n`v)u*85u!O(yw2??&nx2IrEkZ_iOwfNLi8xl_x1XcHh){M#S3+?)*rL1;Q5S
zbK;VT?or;xyDEla!_rp9Mz_am9iOE^-KRFu+`cd+)$*9~*3J$~K-2rqwCV`NWr2J=
zu=T1{RlNrEKM9h;yu4rxM#IyMm!})O86IE00RCUFP^mUMj1C{IAn=0#x{1@{rn&!}
z4U=rg1*rz`5fh-x7pRg#3?1%3I4(ir)jL4yB1fi=6hK8SIX*5AT92=bR3ATKnFZL<
z15gMAC_VmunTaAj#=r<((WL>O9T5SV<hwzkb2v@=$30BJRH1Td@`6T~pCCAx&{d6P
z@}q%*%hXb0?nt7Fly(63Qw_(Y%P}V=PuCm#8BCJaXmr>Iv~NI4lpu*pdl`L4Y=ab-
zoJ^ELlarr6(p2wKsx_F%B<p%gLe9ZKPEWrJZJd~x;9_YcqoizITQ*_vYi|_wVk=p(
z5d}&epq00)s|&;%nL-wL^e^H_eqF%Y8XIphl=|t5xi@GZ{Iy9+4qm&vbNhGx85@I=
zHDGDR#^qfTZEVc+9o?3xP+?&u4aFj#t0zolc0L@f@U5LQf}TY?4xFcQ!?TA5kR}*j
zv0?*g8FX#{U0Kvnd`e2n(hPQsX7tgN+lxMgC3*T%ZAMih_VXO-hw^Pkmscr{c;BFH
z>sB>V`%|@$B@i@D>)(NV0Z@V>qoBArT*7O7I&7cP70X#Gz@`D1Z=If>-=C}p{d%mk
zUZs#=1O#?=je{C9?Xj=>v9xE#^u4YR43M4aDgxez3UNp;N+&(%U1*SEf*BBs;ELQ`
zS>e1eLTlOZ8QA@t{p*(%h|AykF(eFtIRYw4Ecp1}x6sZU9UO$f(ep2dQt)Tp28fUZ
zteOzyU!84_he=r)8O4Q%|8*eE#UD)Rq36IAO`a9{>hww!`nmtBs^vyxKMCT(fk9<m
zVf;nQ)%Cu(w#f%>?ZYo^k(wU12c^~TL;9)67Rw#~<o9ykZ4a1TY<L0wzVqev<6a6Z
zmjV>4+lXE-38R0emZ%RGRW&0|0jZ+ec(Oo%f_m<8$3j@v*qL&FOEL083=#!|D7%VK
z{KDRFApTmrE6DtFO#bHWcBbnv_h$WY^Je{J>e7Pk@bc7zERCo2t~)ODPNXI~l}WC^
zNT@9{t`F)QGR{QbIlY+;Z6wrj*9$T_)tH;#SHysKab~Ypg_9-oPyQ0E2t0)>ET{#Z
z(m`N07@z%`ZqNl510qozq@=mZufcs9EE3_6vt@U)P@G$L?T<=ZN=Q!f)ti%=)5G8D
zcVi{l#6U?f*52FC%q^glx-x%7@N+R*g^sxolE%gPlG!sXDI5O4a1zB@S2}p4pSIHg
z#k4_Feam>Zq+e<6_z~5kr=q5YBtisq$2N1of@Y4z3Ip-RsAqi4-}rtyjAjVJ!+5RI
zjDehlfx-GBnw`Bm&ZiIGg&xx<L;idNBH47N6J-8w;G8#n_M7%uh!CDR$cL|Vg<$zF
z`iF#Mj~Xvgv4PYNpsv?Yf2Km?RB=E1RM=%bCbF}r@#(yOGZmuBQ=!Rcc<;xUAV@G?
z<1m(-!Pq#P*`!-&{3psnmTVYt9Yx_y!f5`-<dLFZ6RPLLstFMfv)h+x-GT}$LguY8
z{)JQx)^3e)ap>yQ(Xp{wjSdLxu#N`{d_?1{(rw^adEy<j5VNu>1G4&x=`$P9C8b(v
z=$zbCnrG0#XZgmJ+QaESIyhJYuMhY9G75*K?Q|iEx3WtU-U+0I*<Gvv)9mT{4!;BE
zqXiN*Ik4?NEPq5pL&L$5%$94UmBRmU#C3-?oK>bw&s89NaKCF|u!U5h(>STVH1qw9
zSIPYyq>X%f_oFD`0v@gHgvkQ=`y@|}==^<OzD%C(;&WG}--L@w5pI(@<Z~Wb3$ywb
zf*;jJ+rQTvaz(-^lLeiRi0t1^nqj}kIvlpC(qL}0nT)3|Y@Wg!D$3HJj|25>6K6s`
zk|=cMQMm@-&hrtsN7O$ksIauuk8N4z!#em3ZkwMtMkc`vdv*ePz~+c>8eM?^thpZe
zXIGN=5h9_IRapwk%1ceoj7&^tK)Kh#c4uo#hAjFwsdlr_?-LW(1<O0NM78MajX#qd
z6hUMKOgiEj*ELf-*r}H$%y?00LTT;cFog-Y@bOl822peQd&HLnX&r*b32GJc*d^J8
z6^q%OT^Uyfaf=1Zw}*`<sTjuyV&~M(G2xi!-ae$F1<s!f4@mf(xE?_82u*$kDCs^=
zFQuR$ARvHGx(vDofSmLPQtaozyaP6f8j97&$5W|Nq!~&G$o1n&cTYGQY@xrG=7odi
z85X+VzI`(_-5DyQ!P@0o*}+`d(X=4$*$s=FK8z3T`k*M0B<ydx-P}Q)lW&nT%S!l`
zpifWmVRz}EQkP%TUJ%@;5b2*_!kb9&aoIq<%*PjI<Hroakj?z0f0_tBm($TR)2I7I
zUx>G?680*v3i0UVWMcBq+8}RIU!NFIV6wiTBwADSSkST}I{h{A&_p(NaCf?6?ItIO
z<eN!C{^sWQ&SW94+tptj?LJk@!^C3ATv-zok_r5$c?o{_0rm9ieD_mr8&$6xS4_g~
z<>vAxXiN7YUNf{{1K&<qT>JaHhS6wfVtU#S^RC$$B%;AVZ&Y%*ivD2(9}p;bqmxoX
z*$|nZxC0C?pFIc=5fRB452T8`$Li^{pn^TV15JW5UN7;4g}Z?ZsEw_ybRttP5c3`z
zixeO@Z@L1>gY{_Pc(%GKaazewhxHdjH?QVCc(}FRCfn-kU{Cg%xZ|}lmll&DoNCsB
z{3yt1mdS%AK+DCZ-r?aeDmFGYi3A28ur+m`W=Fs4_xg6%=`F9rV!v^Z`8+hX`QbQB
zRh!;bYs;?nHKTy_2gO!yAdg2uL2=^Qqk}Waq6*Jn9uG!e#tZvZh)|Gm25~lNo0kaD
za-B14+GDNjYJzMo{SD)K;OZ(NL{YY0Ux+<E9)krsViT32TSdLG*@Y`9J}hwBZyMj2
zM;<YoohooQXv8KTGSID6EYfmU;83^tDkhBnPuObvxXD#VdZ|4Xy0zX26$$Yv@YxC~
zazRO}id~=S&4D^jsGE`G<mJ_YX4#9A%$f@^WQ+IVJ!rT<on~Ws`3Kw6y_P^P^9lGI
ztv|+Y`Zq4ds%b>$Tyw)Dzyo#9{6N>up95F>qEb>VZr3L#FO{tqk|*lw>$>ZU=yD4R
z-kBcNqT>P~l<I2c=V7h)0gwAHDHrM7xbz;X)%1Q8EvAqzJ2*WoC*5M^4G-_DAt71W
zZibYpZm6JDvMh)$&-ndmag?|Eq_CBm%^*x#+Y|g#9IIr_@{7LjU6wO*9BY<B7?vKC
z?c!N6(TIITrGZF(dsP>nB@fJ2eG(d8%V~*t2))*;?#}ZK$*_j+O3KRGrWwSq*{#-&
zBBH)0zgzSEk_^7^`@pAP`c8P`YH{?s8L_@7mwpoiZ&c5DRBtt7oIdiEcUgI9V@~Nd
zFyK{l-OcUlh%4p1Hg!9{yN#z8Vti|1mUeT=M#GfczZ3!%YZxZ;&S<A*5AS;$bpe5_
z^z>yvj|*U^wY$CTY!EbwhlNF{R8&@8zHN~Yq^K?Ds!aN#$@j<7xLv0xmm?ArpM=r~
zBI2W?tM=t!8jKjf^>b)PFPlAB>zBon6`J51vTXeM!|y~@U_HU$vAdd24`b6^8K_hc
za5+M08L@tfU80>#jue25!L%Tw3fZ3Op%KpI-mz4bR}Cb4Wovs{v)#jYZsq9j&Exst
zrlFAxjNMXFa2B4@hhGfgWr%P#qZ>Z>9EA*<kTkmlUgdl_d@=p(wb*ehkEkNr8>Lr`
zW=Q4xX)B6&X$aHs9?}!ErCb~zUcq?%AtNU@xeI)@#S6+$K=A9uX1crs>+{WYWngdh
z{u&Dvb&@9K-a<$@8%WZJUsQo=Q+m2ikV{d1KA*mC`Xqq~aR}}~GZu3s+Nt$PlOZQ_
zDORDPc!l$dGg_wor#y5>zidh~SDw3VgT~0n;W2fM?O6pDO_vk2Y)tTE-!<EiJFwr$
zY84!+Aj~nY1ro7&z*9+?^l1%)MQmmAD@LurVt6e|eY2#rkhnPKb761Tpv7DVWkI{{
z_5>=5!0V;PNBmnu6(Jri8pdJ054ZVlagVCmOBFh1KKkT>>_OLbttzGyQ&VDMVij@a
zDpV)Gg;KkLHv~S{v+tE#I<9ImG9n@pE{ju9_Z9NJbIm20U+a1^?1_C;Di#a!FzuJS
zLgflI_8!&9H%V9|ylWfaGdbtwmUSd-KYA!eG>ix~ZtOX-r%>dvp`+$)bD2`|N#G7s
z(b5)AS~bnc>M_x6wNR1*oqvakPx~NrC^zHR*2en>wR+$~vug0^v!h!$=VonDKKkf#
zU)}bl4_BpEb=AZ5Y59gvuEc3WG0H|dEYo{e@;|rluVnUZ4gCyx-A$t)kWSAx%i~bk
zEW(9L@8jA9hZrr*3`$T3%;liv`2+4Vx}#Z;(FP6HE{mz47$syW=bCxaFf4?Ng?FYc
zOmx|(ji86$x^W%BTwLh27fNf@GBWzJXf^v;cPR|zxawjP-OQ@<FJZHW&h$96rKkG_
zpMc;AwMf+h(_JB;MeB$2`)QuSF<u6p3JT9<hp!c@6q#&aGSF)L5-mi)*kp?0an-$=
ztaa2DOU&aaQ+a=wW~j~n7|}aeSYQ@V#DW@W&bitxuv84cl;-%B3avn|+fqS3H@^iI
z5oo);1)!iBl%~$PPJss3>FMb`lG&}nv`T8a(va3W-ftcA4(2sV`Ydz}+h~&v71U4f
z!mwKVJd;^UzVJL<yvCNO)?EG*y@F21vlG-1A%JKL^;`@LKv?h%F$9qQW@cvaG21&j
zUYV4B0dYv~5rF^TB4fKh#7^}q^|bVK5<C@D_o$lJ@tclWYo;V8|AF}wuOvLb<EcVL
zRiQ$Ydi%_Zj0(+cB2P9@O;0Fkcp1Mo=yVav<G6LS+^)}nLGctk>BBtBc=7D}kJr4s
zysUVCR@Yt**|_f75rev(va)g>A9y4j<JsnCeEi?K<$t*V#0#vcVTln#aBEwms)YuN
za&a|*MsgW1@E|@T+xOebnnaD_l=WYWRb5oHWG!h&u*BJRr`pP^kimibHpiu-5Cg25
z1R8Zt*-)&^-@9B_CnZjW2U_d3gQs@ws%dIowhv<aV_=hcV1p18Fxq<BDrB))J|M$*
z8PIL<I_IX{RbRX^+Lo$#r2V0)9(_$f9rdfi?QmAlJ!#vx^0}1$FxF-H$^D9syLHtP
zfgkH@l%Hi*7OM2Md)~x9mQ8i<peUkNkxhkX!=aKlR_x%t8CKum!Lwf9Ip@}pzec1H
z38**ky*QRLePF1dd6XXsn24)K`RJUW+7HhxXswz~ZuGO&Dk}O-(D7=buOxh^N+)-+
zH`M}1oOenQ<BILn@?Ufx`}k#&ObtUOMAxy%&ExFoARUuwvY99~K}K}UnygCl{(*OO
z;m7ImH0eaFM)f}`zMR4yb}@h>QyT2UAJZ|-F9$c*>{~2eG!t|fw86K&*@cFzKbr04
zE+Qh5lNI2LAWWC55)pQ^OO^HoO85uRl<I+&Z>-rF<T}hZm|9!5QR4a5Ojd^OmU0Tg
zHJx`0Gnppa3C13(2)5|V#5P#4qxG|B@e}AJ8ykJMi?0*k8|EtH%$Kl|)-7dIAkRyC
zs>Cdpwco$5_)<uNtoYLfgkSnyrC-K+Mgx=YE;A6dE+@n1>tf>P8Qlw6r_HU6Q3`)l
zNI8xl^Plew!wt|bvZ=BI!wLF5Iq~9qVp^9@^$5fUEw?XW;?xU!>%{?Cx7#jnowk%W
zW~v4=ImJuYdL~a%+tFyJOp=DOlndXQ1NF}scN3T#lRjQb)k_8E-L~%42y>{)Q!d=&
zE!9obOg}Y(tJv{why3k&?)76XJ!YsR(Yndr2}c!P5Eo&5jLzvAZgk?U*3vS&Q~=%S
zT<qjS2=y)PvKf3`)r-^E;Bb!CDgJMcHMc6)FgvHVjj`UZ78aZd&12bPJBtt;<=8HC
z$_}9@*g3Y9!j>r}yO_Sh#TJ{aAPyU`ux?@)!eNGaA<Edn*<$@?DH;)G2jmR(yJ%OL
zeMDu9n<UYB;R4A@lwEFR)K_&*-IITm^h&>fU{J5wU76DgVul@Dk_clVqWikzTDO#(
z7X>jk?jk0<SWr{3Jn<1G`T6ei=z3R_kJXaiQ&a60oN;pK>_9BD(B$pP%x}))KfxB1
z^&@#Z=8Z<{_PWlw4ufoyMPitARV$Ut&+`?5wO5J!Y`}u<co@&Yo!+O@zu!K)t!m<g
zEZ6d+V(Q^LaupimS~}+%%J+qC!`9+`kB`Ok)JYm&-BctrG_ORUR1fz1$O-=81bR9h
z`X&jkrSy&yDW(fy0+A5@^s!Chq9rcxxaMXiZk?s<0%zSTU`r4~=f1%1?9C8#z4P0(
z55{K$+6?Cr&<Pie{M$X(0Q|0Bc&VO5uTNVyI@Fwb3$ubz*497Te@QRdcI9P?`}oZ$
zZ}7c;C=El>9r?vV0=gm-v)pNrPk23rbLLR7HP+<NH?@K8-czQTe&$j~LeFZV0Isc}
z!J6q$5<BU~{4peakDS-NusdLVfHObjCJ~#-1;aJLucwqnZFbO1zB%Fpr;oBiZb`Ut
zdaJdWlvglzK2dJ4v?L{o_3`ClT+vrDP6s7deF^;vBXI&*2eEam^5YxaR09<Z@`I)Y
z3RS(#<egcrUo;P&09^=BAkg}Kx~$}iD^&(yU<i46wdLe0lwZTWdk4@+nT8Y74Pvo1
zO!ET>69M+c{oVo^#bxuEsL;{7Q^o-29hh<6;omwfUw^8S-l|z8469rAHoWc*yrvBg
zBFlmk^Pfc7`0J<An9?M%){XaWY97KzVHoc1Jz}6O7QH7`wEv6#1CiNfL+JZulAx&=
z6y08z!VJ?1tZIQx!}%C$cRJ?A#K6G#Rw`YO`7`RvU4v^Y)e<kaC>sY-dV#<9V3hC~
zJ$Sm}npEOiBK(v!*dOkfL$^tm8~f*rT8g;4wESr=AxV<cMy}P;w_I;C!_19vw~11+
z*iJ-8d!)FxyBLn<@j+{OY~&;5%F8#`3%`Grj<l<*t6wVVG1a`qfL}X2*oF9X7(5PO
zYO}lp68x;saK<Ml);g>q%3;WBZMrw&dhj-)h;}Av{_B(Q#<pr!ZL3g13eH&KJeu33
zdjp(tM<S2OS^IG4@#`UU4#V9(bM+p*RWG^5g#w#3c^RW^8pj<Ou&$m0+Aw|%4QwzU
zv0Y!Rm<ho3>FwwD72$zpZ>jYWs3m#|daO{@HpeB&nHA2=+5h!g$1q*r%l;ka=kyeG
zB|Lw|`{BP#Td^fGY+~n=ihb#@K6Q3>%6PRRh*($xLQCIINqt!M_cpgnGb7Xaj(FTI
z-pk?d{XR8iTxU>!fc$$84|)X(ZjPJI&LHVj-eGF0qLRYG+;ObA5YI57%ws3FuP<ME
z{Xu%`eRJc2i;GM2F{qOOE-xo_iW<sTq5=D(lEX?wLINHELDPB*6FoJ_YvKK|s`|;b
zyC^Jt*tPm+@P{N<fByvi&9y|XY?vztr~wHpE2IBcj16;yDnYW_=7WNlc2!gajLv$$
zG2sWWVOx&;;sZ)^J*&2T(LPgCZdMr{-33ON7#R6G2?%&*oqU$)n=K)&f8E!5J~CjC
zls&qCF-7)TKe-`Is$6iu5|56K<^p0T{~dDkCL-dAX=s)%G+s93`1)Fcad!hUoAWg|
zshn}IHa9N`X=x8ZdK=_?Q!Lk4H#Z62HC%UFP4QV&V-yq@Hv>g<{RE**L1AGKM+e+5
z!vB@YkxT;VVFxOoc9OMOHbt~f-F5JL=y)R;$;o?%hTfZEgTd7oas=8I?S8A9gX8E(
zm{zT4(^FHoH;X*ZCpubM>|Ud646wLi(-RX509S@!rG5}?r4%9D0NDdDY9jK!z2>+P
zFcA?ceei%4=A!2QjUan9XUzLJ)$FiH`WM3WbG(2JY}HS~_BLveiP{y*E&-tI3|uEF
zl=Bq|s;lk$m5>5}y0`%Z0x>k~8XQFU{O2Q3h8GkPYQSj@S#N7*+ub_!3{ckDGbYq~
z&SSSdAKh(TOV2RAFr*bhYV$ZUvQr!Z?wDxwbwx!*`<V2yvT%295)zUcjW7U=!5V%F
zm+ZP@NWw|GK^!*(<m?_F?m?!|7X~I}k!|cPW=Pv|oq!<h+?>tE#tsV$VMk}c{q7eB
zhtbZUutRA7P|ufC+X-LaE~8IqL4rU<)fWK>0!;cMD6>L*FL_%T_k<)s-W{+F)G+C}
z^)C;gpm=?o$?M5zWH@0G{2)=ka&^_CuBbqhAp4%g&{u?`l82R?LTFys!*Xx5>P&*E
zwaB+VC{fOqHiz`1=PAaJ0wF8qQQ%sCsj1(tq|Egv<U2MVt`uKs>>IUin~$t8gSv7I
z5E#Seo&w=xQ8U%h6!W=3<*M5hc;o0f5W|-+;9IoVg8bFnckc+=+W-8~Y4e8lpYbg-
z3G(yn1_ezQ^_|kzo6~t%jual?=_;9NQqZvKCgEf&xDd*%Q<Ku)aZIaVzY%Ls!qp&b
ziyjiYtz*3W$$1Gc%?YKt@b7@<CD-ws=-P%f5zEp%(3ov&!HwwVWBvTZA^1M9_3;yY
zS5<)J30PNPqH#IwUYo#qTpj%o%LZ|MC6<pd6XxvPSw54mpI^|Iza{zUquU7nsuVGc
zRj{28Q;FQ)VF^Bi!yYjEb3q12uPYgcvxZ;dPWIKSS7+%Ty_no}_4MX*@Rya9lnl%5
z8o&Ebum&yhl&v7NcQbBi7c!&u>_DB7Ed4g|TIx%G+}tgc5PyEo>bUqe$sp8#-mE<Z
zCDi@Xo-6BXKR%xWRSH`>8*#=Gz^S3E#|k%0{*QbGH=Jm0%&P_|hZ=Zt`*@PzA`kcx
zTUKfww@uC5)DF!f=eP(ekk%q0nYy^yS$uqA6M<33N6P_BNy`N`|2!ZA3BrxDdN!Mr
z{r!DpB&3#Zzp*hnkLM?1Y5oQ1v84t1lqLIHY}y2}?$Zp}t41}J{GF-0I>w`|oIDYK
zH5LV}2;Q{VZXAK+7XH7Kcyj};c9YX+hQyp!<cE-dhocKZ27wUE^Yb@uq(hQVf=pCY
z@S|8@4RVt+gPDIyf0L_FDd~=snTR`{S+!OSp&MLT?XCY&%P5*Jwlz<9^9GJt@+_$9
zNF7f)gg%@zRk2D=vdJ&DUNCmpw$8&kM}$a9f5hsmL36e`;if}kl#(*ePNgAejpO6f
zmb_C`RV60_5Zry)ifv5+e^3`xh;($87Zx=9^A3q<X%Rn`1B(eDQS4s%D=SMuO=$yX
ziB0zty6lT<b%9;Coq!4sjzsaM+KX0Fo<>+HWx^Lf(vE<?oWi)>Nss6knqJ%%1qFHe
z{+=G=IDd`E4^scW`q^Lf%+Zda6i#(HtroX{y!Tn@Q3(mxgDWC%Sp7cp&gY7ySvJZ^
zW4b(2skwi&N}QXqt_z80@5HHtM(5UCj%%<dRa<Z7SabVdBi!RI>0Sz^$3d;=yLNqJ
zXd=-q<7}$~!i$Q`IYUM@u_BTh1r^R@{GKfl0p+)<0Du3t?DdB!bGo3;BqDzQcb|4o
zk*9w3^gMG|wXk2%kR0x`X7F0SR!!wO^!lPC)o<#u+erD3YMH@{6rpt4#2{cJ+`u#{
zS5{Vbl;-)sS0cQ{{Zxhlc~UkkaE$JQRn+8i9(doC2|di&*Z>CT>)p7cw`S9k)vlVt
zepXR}yJGLk^_vxCnRnVkaAcM0XCABNh9u47-$Lv$gdA(K+u9#}D_4|J^WikB%!WMT
z#`Av$+RFY&9LlUg>R3A-I5`<pL+B2|VfO9T7lp@6iHaHz$%|()K_Z|4*9Hkb{<F`c
ztr;sInQ(7;2V@UGemAMblvY-xig@%lt#Iv)Ne|1*B?0{~4JWQZp-kHdg#o2B*o6fy
zN<qZ0L65hqr-vvLX8R6**xA<aR+UZmE<b643!^+EAT}CQmH>(%G&%tU;}6RJX7l?}
z?AX}|=f}83!`AFEdVZUBVJUSZ{i0)WuKF6i_fsc-t3D##TY^RHERJk}3<2#pK+2E#
zhhy0cL_=(?{FpC)u>ZiZ6*ExGWAK#FOilUCERObHxAe@{s@>wxL*wRhwx}RlpZ1Qq
zMlzY9D<k4TmMQGjo#h2t;r!`#4X7Vcm+rV;Uae<j5CUG{SRiK9CdAk?jx;oWKZ>Ps
z_a`?zHAgnyN~NaNlu~p_0)B<V{v6npXFK0IJ33;SpX)q@PYf@wxbxj>9ibA^iks?%
zHTUC@SBDSF{nO1_AC=QZcUC*#65xm*OD-<nCr!@(q&@In5w^Jf$dE~B#{q3?<9AIy
zFOwy?*Zt7XIMFvN{^PMb6ne&UI=Z?Q1S+s})LmhJdm;#d?1VcRoAC(##S6@k*y!jz
z3u^&vo8|gi&7mu6zYD?@+v0>z1a$9lx!y)RfiSSD)O1GSSO!j_FP+N!__8f-dd`Q1
zN+6ol&5b&@zAs-HRO+1YJBJax%MYY%lb`T*c9(D>7f<dVA~6nS)m<ay5|dC?JvTph
z8_dFa4~y4tg7D^ESv%g7?q`qBEN|?a>P14xrj`f$WT#QuTb{+a{6A@KS9f=JulOR?
z-?ZQM<HM91w;1<_6*G|7F1_HEmmhCo_W1!>+1Z&J3)wv1o!+a(z~PlS=mFQo&7B2|
zT)N`q<d3jVyk{df+&t4#A3CkA?Uyv4u9v4YOu$Ir@OT2%>-Ga*tKxpu?QLswXSbA^
zFO~3DNZgk5`Vn7ynR^(B`)|GlJKD-`hx=ZxBj;WJuioA=tje}q8$}TiRJx?4Lt46|
zLAo2HySqiY1?iM-=`Il|>24+=-QDnA=;K=Nx7NGX-aq!S=Z|^7!DQa|eZ{!OILA58
z;RVYjAQo=TJW+p9)6$<Wo#^tb9BcNAI~MF^`9`|a4p27Nee>p`3&UPHwuEUJ3mG{m
zGBR-E7XCRdYq6GSqc(gb!AI3b_w&8n%E%$?=WwYh`sO8O)bF;ow_iQjB#@?i>+bSn
zLhRT|n(t=FtJSqpswKCnKC=r^OHJ(h1~$C25KZHH9eIpJwa;&^p>qwUmAbxKvKGfD
zf1bNskPS9An9np~3zSGT1FcEf(S!`VAJTA+B0MD@-}wyTfB2qd4GWwmK)u+Yvi~*i
zz_G#I<c50x`qeUkQrYnk0o?Q(c5-;qNVeC}MwL|5=V0uLKQTVOEWn1I6PTt|$pBYU
zyZ+sSH1N}cl+>%XQ)Wy{ZhT7VZz{$*)0ITBbHu1oPih-#`-^wq)Z_Z{ttWsMs?Hlj
zuL4<kpFCoM1HFMEjmMDIi<`@1`txj3C&7zm8Ot#;oGyyj1y)8Draiu!E7$onvv|SG
zp8ZO~*ZmUYWNakmb=e^*isIr5pSJ^>DYhxO-C&}`h@;3-ng(oTDOnjdcn8pc)nV2B
z{-iGOar5%`mP)oF#)a8+u|@S#aq?!pv$LV7qBS?MyQKa8DMZP0RR+PQ2F$?MfVx6@
zUORHd72X=5;^}s?JV0g!Ev>5EwSt&)3jd+AB&fuYMX^aL_(ql=<rDs;iv00Z>HPD$
zY$s~>p5|@}TNL%&IQHjeo{urcp?#kw?~i(&yb1;9cu>$B{6hH6%9kWmbo9R7-p|p|
zSF34bMqM|!_f!XRuCqUy4#ce*CdhRSnOFU<-j8~TGT@9_kCxx+(|F|R`CWIIlT<ye
z`WWc5=4Vkg9+m$Lx=g*gD!?1)gC85n4K!G5Ho4eN6sz$$fR6Df&oEyv)-jLj1O<lx
z)xXViuCFJ4c`Dm3^tfNSF`aDcHIHMC`(S^@PK`CdFgM1(lzYarro<KFw|UaqXJYqu
z*pLq7QXjSkvY4+!AktRFEG!O9zJW>Bn7+BmbGeSHMrASU7ia5H(P9s=*UD}2$1z3O
zd{thL<4KrmYZ;e*jkZs?S-G#ZVlWR2{J78O6rf=;=*z9RYA=G#G=7HK8bZRqh|TQR
z+oi$z-la2^e%rGESl~$^CDv%cN>>rLOJ_+zmge2^#5H%rWt4>k&!$yh&8eSUve3)T
zvbhdtu~Bx@x#)o7GnrFWmqW{4(+)!Jr>cZ5N59~o$mB_;a$<ZPg`f`W+E<=4Wak#w
zS6S$P{Or;7+|ZivdAf5Hc^<o(8YS26bSv3S*PtaoOghqys>(dFdHK~a1Wsi^r`%Te
ztJ7&oqS)Q{Ftljtf{zuft4p6hjYq<_AP`t<^B$~7HRM*jiYqQQBOZ)D+?b{*oT@$u
z|Ll+J@3%A+n}2FF=p^!aHJdJVK6`eJB4X<%jFH)QXk}NSHhe`u(DBjlK*EczI3Oez
zwxpGdb!fZXW2RT&r>Gdq0JInlM}tvrZ_|{z;L@57jX8v?)D{^NA;HV#_|y_<E5{|t
zT66I0m9atGhFQ&hN5gxw7)Z<78QfovL@X6+okHbN@Cl}G__>f5w5SYpP4d1zk|u9m
zB~(c>XN?t<8NkJvm(U2b(>z=P`~$l!SLow+W~>Tn>qXOCHx-N1KieF`Xq??Wo^HE!
z)%$Rf_u{y)pV$)@Vx)RqKgc@6$Y;K#hOs>3_|gbYrNjN=zV!VLtGHEiiM4|!TPT(m
z%2}N=%K}8?%Oy<5?1o@Fc`21NFY(-)2{GFcvwr=eH3)f<cC88qH!d&p1y8G&>vuV1
z?B9pW2OVe}8kaYlMbZi^)_9YgPfMBvhe;2IX!eMy=B(u_>eU0Jlge35U`a-yeN`-I
z-~(nG9L#jI%N-}|xu-Gxk8Wh8GwdC$;*e&&KIG1*@M}xcdFU$-9#3t)Y$Vuv1_e9E
z^Q0Xg)$v!S4p-faw1r5ERDv}5L!3(qm*YF_ZNsnjY+`W3upE}jymP1pG`~)iQ~6wC
z&dr+f98=sf{ioinyGIPrYwN)(tc$IZV`U5{Q2m%_3=e8}^Eok9d6J=pN=`9(Y9I&5
zvKYgA^;%3KNadgl)2+3C82gRUACUC)_HG{&3ABC!N4~1M`rQ~8ia5q)vP=dElkXR7
zR%zvpn}yHjGPkreeS-AYY$mz317CcvA7>d#i(9JJdsgl{p&}}{K<uxRc+luEXJ25L
z8q@w;vPFh$YVy3dp5|=D*mr#MwH>_X8E;A-uKlHsru&iVJdQ(sMm+p*&{I3VpkV2B
z`k~8Bg<)@9o!f0b_3PJ74x1x{?(0A^*4D}CZWqZuX+0vP=Uyz)wL0Wcharu@eEgnN
zle1Y%97BZsS&!FRTb}REk<4=`>lF6<(-oe6nB=iLr>K-shCA666{oA@IZAz2q}h+^
zcICDeVrtDwqkhT~gHNS-ciF?fosw}mo_zGnz@%FV)(TjjG7Sm3g1_qO+WUmn^Nobu
zY0w4Lqwo>1&>!|!KDSP!Exw)?Kf5PmFb`$Y$u>k)jN9jNFSg!~V8dM$I%~2eq!}DG
z?(FDT1<iz7&Z%ACALIQ33YIgj)@h81eS`+ypCsJg2gW0FFDLuTv(*+4Ake4xWP3=4
zOHmb&i{+OF6&T(#ZMdd)7Sp?=qF+US#Kdjjq&%f}@`?=7e3MbV9n)Uzpfu4~HKmH)
zx-+7|;&CsPKN34#${jU@64S84)+n~UwRLfM`GPie&5{d%H?2}K++|Z@>gu3dT|=#5
zlzc_Ih)deFV5^ob<Ki?Z&f7o7xbVquhij3J!qaVkS8bYX_hzmuUEDk6>PQn_Nl4&I
zJ&f?0@y%+?<f{81eFsmn6=zD`Ar|z>=W9`oce;lLTPxp`X-sxbRr!<!we)@-cVV2O
zR`~Lg!c<v!B-or31)xWg(y-(kIxf$YgjEl{d035ay?xf~k2oQ$KH4bDOu7LO-_yO$
ze))_R_ERqxF@8B1ezUrWe&UxIYhN#w)S}=ShgTh6%L(g~;JOQpChz(4ven0s;*d%F
z+x$h&M3g}z({-=w=@v#Nqmebj;_Gk}=V1yqE>9xKzOc>%#EELWxmWnU@rp0Gvb2a>
z%Ul}Ktr(>&nfud{Z<}95J$WLW!!Ds;mLMslpOBb%%C8UQeT4?^i(A6w;czXK7M$CR
zYlINK%FHyz`VkEQK#U)BW{AJ=1vlS75I)P_a2}3%U7FtAn}!AGqlvrXG&Ujbc4bCQ
z2pUdt*TdzkwM;s@L?{sVHPska^F;@rk$HqaidTNW5RD?gKB-;?hiLYNm#@}bT<lwa
zD%*FXM18fs85;_JeL~pl1*tY&PB!;n6Nx@H?|>rYs*-4UwblUtxT=O6&Q^10d%J%3
zr&oA5!R*nlveeJMcGrNC3=(4lNXcNz>RBP9$H$K!85tS>$;W-*!+s;(Wnt8dleOh(
zYq6Mm@xHSrR?|e$v1Y#Vh_k#&q0#8geZo((Z$lx20dU#My0PaC(uNdvp;CNTb;l8$
z58Qx@&R5XG*#F55SX+v7He=1%Nm%d}gukT@5+aUBc2-~rbevNC!~XET_X^s8zyf|A
z@UBUa3ORarcb%x8!!|diApB+8%m`tKx80O-lq)X`Qd5z@xVj4A%jTP3Eg<z9CN(0B
z8^H;QVkJu5p}X((g;FQs6u3U#|M$rNT)xTCXwsuETx{39ECDSoUY3@Y-&SA|N3qd_
zY&A7O5at7QM*xa`S|DKL1g~<4M@B~h^wDf%%)mzP<2$evzQXm4uSH-C#qXgm5BMl1
zNZKyEu)AmtyBFOgR_qyScjbC_&wddjY<@)5WAW#>cr{@~L63HM82It@Oyg6?OVU@b
z9H!$@$r{njxlEov`R}RvQxRH_ano^M^uE7!e)sO@8BL<e8;Wovm$CVZ+|2Bj76O5D
zF8F89Dnainf@fgc0+M0xx%+p$a*;4>MuR$^g|T`(hsQJ_$0U*afJGG@6XR#|;79{+
zYS@9(202cJc~RUPi8AHD<vi$Tgdu!k@13)^TGt7Fuuiq}KVhbR&DXxWr=LV-K~4h5
zjYm0UCnc!^f@K#sAUB3$(lXVkQL_k@*s^dPIZgSqm1>X6$&uQU8D=G}?B*UnZ)zK>
zB<L{YuCu*9v&8#qd(i<{Tuh1FMt#`s?(P8m2l9hYk90m}W$DYyM|+|HW!z@k0U08j
zW7LUsw<+FtI(rQsyrXv4uC{wVF82@<x~$C15#lO9hgxX$j!bz32M717!8ac~)&nMg
z3ms>iA6ps!T_*j*OB4($Dk@zm&<>|+c1Fow^eMmQ#4vOIo3?3CzJAms@im$5mMwug
zB-*}x*6%!YD9m@sxRG$3%|7E7WPD*kx3Xm&+07)|hyPrQV|Zl54q%NX!7pCGOV!oA
zp%Hryt8ZcwRCQ)yVFB>GS4`fBK!BEeNJt{2smVjeuM5Pp1?cY<KTGpE71FmP<G6P@
z*q5<8-*erhW^7v#`>n5eT^_BfFtO8YdrMU{Lrs5&Mj=He1<wH-vM==S4?@;LYi>50
z84~+`{W7S@?*=DzG75MiOoZlhBg7I?sA24^J37_O%W-!o?RwKR_%V{)rsOpl4rFfo
zX5RR|VZ2Ec`F=~PN&YT-whO;fv6-b$2jwXfHbdU0{+DnhE?b#el6et?rALb|YjY#)
zE<-i;k2lstD`-z~X0LddQ3;wA_Xf;Ua)YT_c<FZ+@Zq;{HMo;oo19sQKlS|Do-Fk~
zM&~gB9hs_yT{gD2XIWm6kU*Euuz}P8;D<T`2nnz?aImpYLJtm(kKKX$|MkE+^4*U9
zcL}v+O(#}<`z90;z9l)mhM$GTMeF7xMVT#JTf{+{#j+HMUmusE87oWX7kzz8y2m!}
zHT)w2pG#dHRJ7nFVRJfAKEVJZ^}w_20AMUAA{P}ERVZl29a>xKPZlF-Or`o&dM}7@
z6n|7+-ebiiR5x0&68IFZi7Ql~f9GdiTLiNFip=+IO%Gge)Lb?)Ov&)0S&G4dxBZY6
zBP!oUH5TENDKwIkBMIA={z+Don4Ic0+ZENr0ulVB^fWZ)&g?v6Mo7sv&d#FBC^?`E
z8~tga)mxk)P=yr|7ACYzK|+!>uMF<#>@hJmV2FRh(S=hg^@GF0ijRsK27}WzXonTM
zJDmR`=JmEO<HC;Ns36Wlyv&!i>kKdkID)m?-?qinZ}>_Hy0V5h;^14|T`nxr*vcKZ
zl?`5k5(dC$S_Z<v4_u5JrcbO=K$5uE45SGFs}t1ZgOpIX$ad}`=$T8eX>zMf%EF=~
zDr$VV7s|fZ+4D&JUunXgqfK}1iG9RTrW#zs+$VcR^KmK`7_=@Gv<R!087UvPh8DNS
zo=Tf2%Wf54MHlyp-S3=S53UeX+mh5k-gzLG6*Mu_W@r0JlGpa@`~ggF3qHri)qoU;
zjm^;Hq;(W!eSJOf>^{`#9UrYq2uTxV5>rtHx4rjZZvF7~_=73vv1z!iE#v|s$MVKm
zHmvx90xDt&<mx&3U{yLgl+89o^Je!AidM7a8PRi%F-J6)OetT5T7T&*SgSHiXEm1I
zqr;W`x%$ytaRx?4-Sj6YC{b|^LKbh4fw_Bd)E+$w7D^fwl6WU945Q)lqqEbA1p)1I
z&)E355)PxXvAn&v!}G|kDEE6ejh796W>Z$D6Kt%DZC}j!?vrhPp1KK*JU#E~#lz*Q
zeY#wBnJ1VyV}<=@%YIJ1)fk4`(zwECqGn#A>Q=12vS%iQxGFR7k$t}lDb`n6%<YV*
zne(Dac0AUF^i`93Gr!eBMtx=X626q9EyKQgXg~|yeIt?w8dFS7PrstAXVmXJS!(Vg
z>;MTd=*N#g#WA8{H&b8qhioV+RtbWPWq9XK%Uz33#HxGd*P?H|#n}l#HH2D|fOi^P
z9)Sa$G4B}IzJ$EwBa{WkUq%26tdR%92#vze-{uO?*O>TtP)pVfqzK6T-vTp>GKt?h
zSwj>jGrQu1U-6dqr^oT48GfuVx>8aZ*|3Mk_F%{Vhg9TVM+U$h+qsj*Pp$-&G>C|Z
zA|fIOpKYueSy|6cPc7A?q@{U56Si0y6>6GRMpE@zo#p8;0-l*4KeBP<h7pB)0LOFH
zWQ5HPD!Qu^E{B!A5t36iPx=q<3xeescY~1nL|fFGRXSGiH^(*Ajmv7TPB;`3K&;@f
z;dcN$p@%gA!OSkHeBbslZX7Ts6$-|_Q8;R?uA`_-BCG5J)|<9>Op{la!<&G7y_p*;
zzdIe`+JxMiL?cMpn;N~?t`JH#pkYULcyN%LljFFB!N+%BuHW_L>ov?RIdG?@ihy|c
zda}#ijca3yi^uiSn0iaiK0FdYE}uQqSVC*_-yeLG-NL*z1EVcUw`WU7|Ffj9aEsa(
zka~q4I93mqt&-hgLl%sWx1;PP0$vX{W$bt)>^WC>#`yS&0XN|nNK|yRI^>CaEJ%(D
z3twJcffMoGy^kqTQeR(RiKlW&WE9qD%<<jbH5R-I^h3ujQq)(h;L8D?2Q?&SYP#9r
zxJ}-#K~Kz+;5#Q>?|P=yDJX<*J3+eIN&_nYA9^dt!~h=jJpq@;Qz3q~du&8bC(O`l
z`+$J7w6xEk(S#n{i3flt2?gjs#41(U&Uf+ij#9GZ)zYhi%^VN_JGN-;TUco3$-{H-
zDi&miS%{G@E<b<%oB<2w;rES%<>ynv_JXFqFN}c*K+2b@syAUA*IY}htALqa&BvKu
zIk&)KO!|;tNchJ`1+}f6BO?{r*+NbU6JI$<*t>T>p3^g$IHa$$?{kQ1_LsBc{pgYT
zlAmt^`fWTmplTgg;OO3+If|~xoCB3RcfW4EjwWZ&QgXO@?pmK1S<0=2R!M&RdDLti
zX?t3LwPXaH(w)Y(r@wmL(SEgT(khH-jX?QCcWI_V&uSW8;&J=<kBe3cia$@kh)qmP
zyh!R00<tQlMpm#bXtuVt>`VUxAFJ*E4Ihi*liC9xOFUDqVr-pUxWh!FZ!nxEDK0H|
zEq<KASx$kt76mpn+0sYcbIHu=Ytz2g7t`Z)?bywl?SA+@=YpR?do8PfxP9Orq4)?e
zZ^d_3<JLHHE*zH&ZjVN9?nkj-bdg6Ny>W8IpNSOCQtkU$=4yxZuHUnC{{SyfD8bdT
z*UXy~ZCBy~2b!+rTN4>c_CRSh@>J?BuX4-(ntR-w+lkUJo@oeJl6&ScG7}o6cK^yj
z1Ftm9UUYTn_<`;G8*+kL?DFm0Oz5{z(^qV!k9g69M%Xu16a6f_aG=IrS(^ex43l9B
z{e{_&1ZjA22|lU&D=lo#%rS>+SX>QvrFtvr5#YV{vrZXC{r4QD<Lb({y^*bD{hNo!
zg(%E5(*d!xoz5J5MuYZ$js<WdAJQ*wNT?F-SturcUaOHAW)smz=0hVmN<pqO>cgP8
z1TCH3Iygj4|B@+RA85+YEQm_d!Sd@<>cMri3ZKn)f*pOx!cB6AY4mT;ck9rK6C*X)
zSMIXFes(iylFbnxx|jC4>O^kk8H5K{1NjOlsEdrL+1WsUf7{IU>+}8dgM$nH&cpA=
zj3((rmR+I*w(ao!N0AuEIz(7<VpuWAMO1xlNaapku8G}|eCWlskdKOeMDXw#y)7}6
z)2XVn{Q5*<6MK&M_}`^4OK7NmM4u`PkLR<Q1p}fJjmFh`Gba`d3`TFYA2&6d8hewy
zM@to&aF^t(JJNbVi{<m{ezHIX2zU`d3h48}$%)XL&bC>dAc0H(QndJA5i${$Xw-hy
zDSq)Uyqn`xXla)wcOk>BiYMcS_l>Q^`d1M@u1cMBojrVx{UfFum}W7LrY^pW(bX2>
zU80Xw{>PkuNavT99sreV#ad^8dE?@8J9fQJm6(`_$EeTq$8~?qNATCPf%-=11DvNZ
z<Ws6=tahIUs~Wc6mv(@*1u6;(D^CL%fAd+1rog>F7!s%(6hI4jNEhk24yaj`vZ6%C
z#=dy^^us?SSMZ7FuO+3VfJV$?LT2_}%Sdrl?C0bZ6mb8rVZa9%$}ZN^z4sQH8#g5p
zaC`y%RmN?gkeW7AmXe1aL{{v}*AHG7ptpjTv0#oo2pk<L2IEu+Z<mX+TJOoU{mZk$
z!(c9ka;y~yy5>8(CGEM~h3_<uMb`gvMNW7IPUgC=*L!62=yjUiVgm#7inu{L%^(ki
z7cYbzKsEtBIx{09Dk|zxEF!qfpH)+iEKZ85rWZA-Mu@hciBhXClVIt+>wNP}DYf1(
z6+UurcuqLW@NXT)9Jhw1Fo-Uy+fT{!J;LC<@CehS+bIw|OZ4lXc6CdHgbddtq~p53
zq2)pAr!Ugo%ghxrir>mW;SNc1WoFnGR^|zjQSY_eF&K?gp?wXKTLQj^D_yfwQ-COZ
zz7IhH2dPj;JiX2rqrYbSX}C!5;NUuYEjal==I~0k3``xUS{t;SLkk85vYA#%MJ4d&
z_FzfSyPgrme@4*9ZzcK1pY^JYk6}zYIqb*@DMG3xH!l)Pl4Z-+1<qf`a&XoQze3lX
zu_1GY&P4u2(O*>G^eV?~4trZoj5GCr<v*PMcv?bgb7^4Q`$;~~^t)JE<RiDe+>XHg
zCVeb5mH-`DSsRdN1(lmZLc$;gi4<Ae($YdWNB{+uDzB*cqebmDF(@Pi_34wR&|tP1
zWT(Kf0SfRY)7>bEh<o_0(r30VcuDFrG9Hcg^4<am7YOL!IXB&Q8_^7d;>?;`#ukc=
zt*tZt^mNNv&<Xdf`?j$2noBOT-1hDE4cft9YCylQum6Fn!J~h*VL9rNJ2s)O)7Nco
zmnEabQo6q^1^jfAs1+G7w4Pb|()oTYK4uJ%Grk96LNN@1gZ8jMA<s$2``(z~?sI~~
zWQ3(rxhgU|u(I#p%4%wcnS_D;^Wvq|H-4+Uj(cB|u$UNhNu5?)%(zb&sPX<#8yrs_
z^9u@Q=|}>)QYxSK!Z7ov<jA1nqaOV9QbNxGbuBt33AKdtqP#V$>axcT$GciPN}wWw
zeCMGeLd&}%>QB9dwYm1b$eo!us=G<{S^uUTzhG-u?NqriedCf|*yIbE*f1{jgS*xB
z!Sr&~ohkC0+uLd31j)7bK%{9MJ))PKj_{FPz(vA-3=ie?hIqt#!(Rj$)n^9o!GC=U
zd^bi8GV}OW_HJX&aGbg}CqN<tqAUS8zl1<B@h>IEFw3~=Z0w8G4Q0L;(?*NQ7tiaL
zRt_m~!Uu%q88tk~Z7S)w2C1>GTa|y-2fsjDDxquN7^qQn!5-C2(NW5#a~S^0ajhkt
zcf^+SZ%pd5<I}Nj2vz>%%zt4TINem8`pi1*%~6q&jUNUf*PD<1h$0bH_k$z$MR_P_
zjEj$l`@56xhyV{1ws-YZhSj)!mo+XZKMUwIynSIu{N(YY!S-6J3)|~PZrdLg=~dHO
zSTT-G=!-4|mLUU2YyNEngU~#83K>%ReTWrYE#EE$(!*8p+)NK0+g3SMF)(7mIy*gJ
zKv~iq@{5c_Qse}-<H3#eO<=WDS5uoVk8uM${S`$H^iU;PStyq7;El~q0OG)KTV2>T
zemi<zZFeTW*ma&I<&5@<o8TOTYJa)>P!2!~O4Gm~dv<tMl@1TC-c$Gd=qPipu#h^R
zU-}p5o(%%u885QqKrim_Hku^*nUP3XPZi1HqOTwt{SO1&$|po@3#yGkzov)kuGQ6D
zCAI$~YGqMW%_`p7O|Bj%#?20f+9TD4(%oyV-5z`ajwnf2L@}WJA>wm_+80a?ct92e
z^$MAKL<(A3%?3TF`m?L@f@8oBJaJ!Ai(CPHeHidq#2=XY5BBp)4D`Rr0X3##r1GH$
zi)(vU69$iczR|zc-b5R=Rq#BVADiOJP4?}mzlN;$`A2)JxVs1TZwabBIJ>#IEiHE+
zVV<d@0zw$-1Hc0WRy16&U%upTw0Zl=kBDx&j_QN2NA76;qiqXN+3fR#;1Bf?48CAN
zVPV6=|9trHAvP9cWG#)|Rxk8GR$e}n8QbfS4Dx2<e}O|Z{|9^~J4$~0wB8_MtuDfZ
zAe^Gtap-XF1@96~v=ck2$Ll;6yb~Qy-MhA@#8@<A;%om6!^v}64J$&)48H)$--h#g
zz|}E9qoo3UMS~7(Y-|`97@(F8i+~)0P{y#k^_orGtd+j_^71k;_W^&;($-g?b&<8_
zG+;#eCciHw4&MO@q$beq?7gNf60K}SF;b^XWxN!Usf!M;PttyuHf;jVYOPFKv*f=b
zEU{F+_^+}%YhMB&COOxPt`$9AE`QU>G9!x&X$@DjwqH6YO<g5Y;B<-m*ZTT3OPjXv
z>xYc$$1P)^R}i0>Nem{)0zI(bSd={U#3OpY1u$_gsc2$X1|<3POike@L6IgfgNs<g
zd{|hi($?`8{jhA9p)5>H6}7ds<>gVIOi~Ol0SgFl9Bb67BPpjwUwT|9^;kMxj+46z
zFU1-D&ExSqetP+uvr?fKK)d_}m0J=OOeD9Jqx*c3q4__;r@Uz7&>?qdp^fpkA-oT`
zaAwp@FeQk&_ijaFVly%_Qd3hwnHz|0xER^kKodjM<*mF|<m8<OOiSDjKi3Btavppz
zuwSvimF1Bcg(74tO`1NmgEtip7eAgn?<L6E75$kiHKB~2fx+`U!Mu~C-p1c<#S6xE
z>Si=U(vs~t&MAjg3DCbLIArZesNT6Zhm>ls*i5;2$7QV+qQn(-hN<mV-`N<EbqK`_
zInh^R<isO2G&DBWj$&&ChhQ3xPe}k@@Zkmg!IO|+GzuUH{%Y40C5oUq2^krA@%ynw
zW6H26>u=v~&G5W2rY8B<BsX70uH*QUdIR;gmb%635FMw@5*%KO<6gwU>#C&49+|8`
zT`}KrNhDZF_Az;(UB$C#_b8cV;wnknOAHTRc~1##vTP9^-6ip8Gwp6OQSXVw;8$DS
zv$KsW<Zo~+_4K}DVu00${GjgkCv@K@!!(7QV*|EL;nvnS(XhKy9}P)`S;<O$&WE_&
zgM#Gv3!@OmXlXzqsvH4L=3LHgHJVU;T^*ohO>P2OuEY8Oe$YdB^xJd%Mo$E)vjJq8
zJise(KFTifJ5RDL+K68^(eMXp|Lr0E_WKyhzQ`8<+#)NlFG^)}m8e#SCs;<N&TD&u
z77r0F_Ms^F&ortc2Lp%gd(SrBZ>sw>y9^-3C2+ek{k9Fim*xBSjDYM!9_mVz`{8$J
z{Pfvp>$$o|zY`w+d<*Cj`I>^l`08itGTnvEr`ciSb4FZjAPwC*_c?m%#nL%%b#bPg
z30`_xc0*$MYVA7OPRM_<RC>jYJck>-301~fo_3p2BPv~>tY-jo252>4O-`2Tl;-CH
zm59$NDR`PXz=LK7HO8j8y03tOEf5p}%w+&^Z{}G)Yg`^~@gFRz$E$&hvMJ%x8|F)&
zYjGMXTrrByPD?KIs;4=5=RZ2&3w_{)_yDfly~>9lIG&yL{J<`sjDKu>llM(@)e`Oa
zJ4OAbda_?^e+<@^uhcbuo5}MU?;HK$^GFg*8dr;FQw4tQiUw(FJnF&RT!ux-(tIHZ
zbrB$FQu@9Bhog^H{6w|Ck5_T}C<>-c>*|E9CQ*0+CQMGJc7e{K@kFv~!dB>hZYS1>
zw9!q%HOFu7Sz16=3dTQVwVjP`!3>gikW~Be6`P2ZS_38RvI4y8UVR=cf`E?Rypz=-
z+IUS2*2>QoT>hvki2W)z66`IXzP8W&G{6EjmE`Dh6}lo-To1xD6h!N9I!ex69gaz+
z<J?B(6D+skEALXL2YQbJMuVhUo8b%TvB|2orYp}W73m2ih^WE3SZWJk*vXzG*t(+n
z)zvFM!Qp5}@;_45AUBqd2daMgYcXxhb!_l**>cO2=mpotLR`JKc6@LTA<sAQG8}4U
zoP(<3e_?IF6BSiOXPFx{^o5qNps$&GWi~l$uMoXPXSh9B+nyyDlQ?_>sY}y39qfKR
zJZ=F|DSRMp@VcHc{TtH8Q*0~465DY5h?pOA+_rShJA&lOj%%4(m^|E7p!J@1lM5p?
zb%;R5$yQ2!VPWPl&n~l(zP>$h&3IETK^evU@gSe)u+PVjz(e+=49b5E8cJan(UY*G
zbJ)Y0Zk%L%0oga8Wk`rnSX4woL2=$cu2iIann5JL2|n4qxhwb}iJg>`#EX8ze`tSk
zu=MTQx4UpcQY$h2dDTANj==$#np~1ARK92DhAp>q@fR<NV?B&kWP%9EKL_6^DJysC
zm3u~{6CfagqXDdGgH!k7CI1qS=pt!VvEusXouQmkZPBsS;*%5h8txWpjSumuM`Uhu
zH9f@JY$o^!KRG{*yj?cK<e;CR!n0K0*m^m<XLNUaZvQ)IPJ#P>Ac%MgB>yKt<Qn~@
z)%Hk%CpcIpx^Q2b=DB%M<_*<zvh@buZUu6x-?g(5nrsE+5*y{QC9>qk@TFA^r<BnI
zzY4i`YJDvz*e)2TPGM47QUcUhLCwc)?tu$HLuD>lS1i!@D!)d-$^we53}RV}xc|N`
zKN=`}gn{96M-Q=Dh8vauQgd*KcpK~PFbGPCHW$WSgPSj)xxJA41Lo*Jdt5Ran%UO0
zGGYJb=375_w3!I^7Ep}0U-Itx{Yd^+SnHTFw&rP2(}Y{^?LT}!Zx`Zx^Rfi^(vgt5
zr}|<gwdUy4Z&71ME*AgqL3(Zi-1Eu`ZM&4&?$Mmg%#ZQ$=j-#qVujrh4+Q+Emrpv%
zY%2uT&(k#Bx2)B43EZYz`I*D*Ve#j%uxahznYI7iP4=w)Y?JW)I&F4xC()&R$|_%s
z%Y68ZWOabtoF-%_gaEdWWhF|pPC)Is|JK~j9okCEk<h38F&zDjP5$jk`sKDS1()!~
ztP2!{d)u?j!RhJVB50n-1kgT6@oW|iX9boD(Sbz8tTPtV0uscSFR)x>Y?9Y0x3(G$
zV<=M)-hUa6&A+fUaci04a&32GE>R{tu`)w||C(NIva<Qs_Hhg6_0KdIpJmLb&pd%V
zczG4N9l7BP+5+5~m+)LpyPkgEku1kvh43^ao?qYM{l?>;0{HcNCcTeLLdt4Q=p%YU
z!okQ<Y+ub0Xs#x|MHC2LOhdh)qq57TB>AeonZ-MC7JdOwVM$u|=hTDOZ;U+KwzG%g
z(W|D0#`z1p%a7fKer3X!^s@(=Zx$9io0gYTdQIolnqAzPBRd<`(5muUri;D^`&nDM
zr&W-_1Y=OG49Pgr_idnA;wO%Rea{fX6AIsaRp4_sUX&Mx_E}7Gf=<oQ>!$$Uj1Q$}
zk1NIknOY_LHnG`u&H3j0H<&YTzI}KrE>1;Fy_FY?@MJc>&<URqMr7yNyZOn6i2u$`
zL``irbVf~;JLcWi6?1~Vf8Q+CMX-Od#=J}c8GHl+>;rQfJ3F(T*(B9oH7n~{6+?!t
z-d^w9(jAlM`|q`S+weyiUBqL{nP+O}{X1rJY(h=Vl1<O%EUvk-K4dl&EDv(X*BmT<
zA&MHYywTPPYOMDbm_9~pcT^q<h)CoX;JoJl(F*fxr9cL&5Fs3E`Ift@_?L7^YR{(=
z?}2*_x|)fP*8**_G%uny?AQ71apN!`OO|Fh9te;xdfp$nzxN5LQLZ@sDB|oex0WXl
zJxtSHM7}6&O0pgLzWg1sgrZq0?bwhdJtl`m$07^C4JjIl7rJw|c^tj_^F}vx-{hCa
zQ;cN+XQd}SXJ-}3Nm=Qc<v9^=R0LdvR*9}72M0fwnNV4MT`K3O?R;16jKgO5seSf^
zp|x_Ay?Y8#>939*7n)0}2y_l0OPtELD^XU(oargX?#=A^eZjbv&E$JH`i4#bD<c%X
zpgv6ObITacg%EfIoD~eW)3A%`Kep<|<{u8jfp?n=0WT;^TN5w+j-&>;EP#~4h%zC)
zGvt4AMj|N!gCt{Ky9o-;xaGKZgzI+~JNHk`pG8EoXh~yCa0aslx=l+MS0t55TdwR!
z-DGm<G@ESU+y1}`U+vXY`NXCESS`y%Y89=z@gm16>WR1O**n){eHDo<i|&5<p#^O`
zHpzF{3F?&xl0hyj=01*5QI((YQ5vSAI3X%X6;_(^>k488+1hFhVU#7jA*lXp>9U^6
zwb#VNFDnBFTCSRMwRc;ag`QlA^3o-iKnyAJYpiOe_SeLsR(~pP)59$6oG2BsbP{s-
z%q+q!0e|)(J)y2!t_n1av6<#%2R-QE>b2A~kx}aUdwV5-Y78}s$)k2Mu^g{)(?ZgA
zIYlYT%orxAgh2U`3tIA5B&*una~jNh3#qT>PE5(AzvSqPtY|#rF8}O>!$OF|2bD@Z
zR%jq3V$OhzI%2lf^~|o8)TuSA#)CQU&_m;=zy5h*n4khit6DbcB^p*_raCT}etpxi
z4_v}2WP@P3KZ>~bJLj*7;-Mk{4nN?E4u0ux7uU#&`C_}lPGE>vmXia`1fGo%4e`tg
zghRGl^K(d;1cgW}{4<9}FevtUZZssq0*_X8+@)6X1d-Mtgp-2i6-I1eU%o4%e<zKI
zNB-eH+RL!YYjNl@iWbMW!(zvE>KNZK{rE%TP8v7OJ5QK^v$nUAM<+MlX_r#Fxj{b)
z^Ot6G4Ek#BaqG44B3&_-3iPUtE!7YCX;DnAEY{Lw1Ilr+a89`$N-o<U%O}fuPg$KG
zh~0n82f+FRmORgF_e+f0|GRCvXJhiyrNavlItsjatkZFvb`rp|ixymwtX4d+e;V0B
zb#5$A014(%k)>r4yH)3>RT+&uJhW^*XgjD%<@8ngQR9da-l(nEtEWi15aFRI86}Hz
z=_6p>!$HP)eRSF_k%2O5N!vIhC)&`niITkCroH^~CO_n?pBS6&=rgrCboJsu$<TN8
zs>}&9%px^(qD`{W=u&O&E;4SH=zjiy+YIS>uLE8z63Us|s7~)&mYA=D$7X~<yniAC
z!nXi)FhSelXw8*Q1qG1PjLAU77T^)i{;k4sIw_7P=dC}A7@Vr2%Ap7wPAq?})Xici
znpzo`cB9odLbL{={B0E}@=iq{EB1ro7he-pRkVhOhK8gF$Ui3nryCX3?sRK>FddiT
z>kOzu1OOvQ&?%1ACS*~l-nBNnM}`!tFS7F@;rr{NMv0QrYh@BubPHjlduY+pVGXZJ
zky2F;p+g5Jix|lUsTOF_3abg3HJ}Rjy<w5-b$q5f5{>#RBbOWj8v*txAkp1GHTxs2
zZ2rqzue$_Cqx1(a?fN-$t8O~S9+JEun|`Ucw5exVNKOd6LRw=aTtg)JZ4cc@S{xdA
zwQW*Mfp6CvIHcip*2X-4VfQpD&^rD@VEQLAKOvBZ9LcagmynRadaZg8xw&MdTvODA
zgPssj!k5u<+dJ`XH8W(V7BgaZ6H^8GmzAU5zDRr2BWceh8II<ta-Px=VUC4Ey{)(v
zo&CE4<v3Ob?>ZgX^MKw>IQhV<f!FNl@s|oUs#d!@Xv+o=?(oLc*lr<~>FX7Hne~a_
z=hwWOY%}hfz0Xhjta%4{yhwR?w1_YDBN|1r<O&pL3>+_y3=u*8<!v7n=h|3-!pz_(
zz^IYrBNOZEO(9-)_m|d}rmamONtZfJqizC$dfgTEXYk1+SfPlP^Ot+Q%C-x%@EMd;
zdZPS(pOAOb5^e3>{R;KM)_-Y00wyL6EVWoET-)z9=oQkAqs+!)8+Eo??*v*|M3v^r
zGS3v{ff8DpbD7SO<9%@L*Kki;UnURDo<PWrUD44@(#7OGJ~A?qL74wDEQFq|NgeOi
zPasnG@qEz-l+gmBRd!xpI+yd;TXzgh%&*#>U1OWI5HF3_s9k#;-rK@Cb*G<GRm^c`
z=4z`7wdeXXvvyCWD{`Dil_WY)FMs(F%8O2s{?hjBD9}1RVMt|b_a3Z)8ugr)veXjc
zq|}L+o4i3&)y*QTEGr8N3qzMz28|B_nM43LR4WyT4Zet={5@_$z(u!Du8PrYc5?)h
z-`~6TtXprDORTOXt$@lE=s6bGsE{XJlAj+WQ98W9QYD_SX=}6n>lN#@9lqpVUEEo)
zYF<p{)na>Zl{3o=?=kNy&BTp`JPU(d?US^nVy%`$LYg^V(*hG?vRGrb*q4!-?l&~M
z+4)x^^nxNibMjx)pS`3W$qx+;t!5M-s*nKcnD5?&48i9~r#7g3=H}tC-haXm&=#P)
z#qq!eV}!0t_V@3&A*Uq+sd0e=F+eMD+ELwJJe+riz^8YwmoCgoopr(8pqSIwE2T(A
z;FOc^8ZRk<n8D0^yOC$@;-n~fCBDwI7XC$I!wp6<!!ds@EJhA$?WWglWDe^-9ifGK
zHM7a|F^vH0Es-^Kd9jDJkGowYuaVMZNRDSr2AmZlBcp{i8Lm?1O-9?-D3LWMCP%gD
z(|7CNEMdhZ-i6Y~#>dxA!viS6n#Gig!~LxI%p<n1PtsCcOAB8VGz=#)f;#UR8DYv6
zmk^hdI_9JE8)GyZ%WwOcr`1Hha62N;&En~}cH*vLj-OEVied9Cdg(X`>x%Ygi1ppD
zl$_@%lGY4wG6T&;<@MJe=e~H5*%~TtZ_XKSH$A;ffeaI43Xq6woz8ZANMnP&EIm|d
zeC+7)0tR8F%&|c0Ty-Q3ngXWlB)*rkNFZDUji-Nlex8StGGBr7fkKh+$N>k;eM7_5
zg}&BxPKyxhH;R1b(mwn(Lj%(IB1LKupyLL5!_T9lp?wCAy8J%Fi-qe9X0l;sE6I$7
z1Ry0rFF#lT7ZOyZB(bwK#24fcuv~9MIL6y~bK9>y|K}3}o3oQ!wp?%!4mr26wVx1)
z+S2DQ0PC@~*7ymy4@PKj=z2{$%utB;@XC@`QfIR}q)Qvv(g~BX4y@y~hLF?kOGH9m
zkNzYUEw`y4p=Gu^!{hE28$XqB(2BzQEflvwzzf{79*Xph`9^JuXfaYe*dNaw5W!u$
zYbAaSeGNLIj29~5go~VWQIx~Oi6;k!BY)d~gL3J9!j?e@j0$-D>pPSltiwQ=Zo4#4
zpi;?D_2}9(EK)9U+pV75paCD%gB_yda@x5#)=_qJcBat(Gx&(upnV0vG$9_x{j8HA
z9m_yYb@ZVx%8%z?^fBqRQ-Fze+lqO=`ZLza$;sh?BEks0QO24l&G=9~24-WfbwVL8
z;h_f#xEX>mt^fRmlfQS2Dpvvw7MSIMj8x8Q0GR*%U8Cjx&Rs_ba&&YAS1d{$i-_}P
zgWkMGy(Bm|*mWiR>BB67Z=aV}Qpy2R1s=u&ZpM{ho;;L0ujwPO*@x0l0FXvx^mhFO
zHGwBX=cQnl^0Sxvpz8?G>ehXjp`e#TwBQsq3peRdQ8_7^&PcP<*IxliALft0pKl|M
z!+uS(-flH(pBntxS_Xe^NETE;_df;R1Tym%jf#p2u!fAAE9(P&qQIr!>Pa5@&w;AK
zzxS3oYy#2~S6qN2H!zlEbgwO6MZrtW$vFcQ_xS(ZT`hp?d!GM7Ngk2$#;LQ?isru`
z-qX%<dl^!iu7J;Hr>e)oxHF3H6hh#w-IEik%`JaCB2ntjBPPMADDEet16Deqsr<bl
zsP9t;yVSeM>QvSD@o3p8#n|%f1S}nvl0j>=jfG5Y-A5wlA~bh4oQUT(Tl)E0?j-u|
z+Bu<iO)DK^v|X(Ww+bb%E#KeSw0_xp5kHEQjd+@>W<yDLoM%x!H#1G~qo*a(snkiC
z7;W5OKXQ;KRfB?Q=Q<l=vTqwbi-XBqQ!7c{3!0q#9bS;hyguwAjQ8@&<u-n&E!=-Y
zzkpIZBf@Aft$FR&9~CMbShMGGZdk;=ALl0<gI=CRBp~+GX|S)Zo<~55vD5y@Ucl8;
zYGT6BKALSO@{VbhgRnqEtNuFYZkzr@!(MS*i`Yztta@^J-4*VdOy@k^gxJFVKMu0Y
zdBgr2cqI%r99-O&{9sbS4v+u}Pe41M&NKs)^-L+A0PEe?3D&A+L#5`rWMR?mb8tx2
zN>aZLB0K7V=gW>L6Yp-m3Q=g>+SKK=4!BRVe3X0LkUY~6<{;aGSHBOlTiBP?Xdg)^
zE+Z2!M*5ui_xf+>n44=FL~|O&^dx<aJ44!c!d~Zigq-YIk4O$%YwbilL6wuwdvU<D
zt0m>zU^u>MZ<y(xUJ;e(HtZfy>2zu2Rcv#eos*dwtC%*gXvA%|dstc}p4oEJ`$>+e
zHfEdo^@|lODfp*P!2$RF?^pwYQsQ!h%Y|pOWF!;Xc17%ocr1ESGlSoKNdJF1Jouf+
zk7JoKR)K7B7|}n&0D;^Cryx|KwElWS?w?~}-{j#~IEaY6XkTXmlNf{1n(tcQvZ?M`
zGf3WtaJSP!b9hVCbv}TPbv+aEW(%~^`MbQM0echl7`Y_$7?zsI3MCMjv0uzyOt`aC
zqnN1QonIVTmH>6zf3GfFqU`SK%F!TW^OC1vn?RTtcM<JGnL=6B<?iI(sl)r`unP$G
z|J{e(t9F9^hefu5C;9z2;pA>fm5q(pfT#L*-GqWx8kUw?9_rQLwCL;Y7NY}l8h<ZE
z288Ya7~auF<2JE_rMYcZWL`M_&mTD{n3&}2Jm(urTUazb)+Bk`;h?StW@b3KxHNV+
zYt13dKlH*yMstvcizfBM$b(*fBOEikdj+uNS)kG}3=ixn7)@ZU{{8#ETKGRdgH;Yb
z`k^sjvG90(u$GR`?pIJ=9>wpkc^gO-`_9~ayLD&jzkgwaIuLD3OjO%G0{plo<=2>K
z4^kdb4q=*f^i)*CQJvt_1h4=5odA4yULAbuGj20d)L`|JT*42c;Rm^VfJkxm=)xTE
zf9K3&rJGT?_>2HwGHLMPG2W^Gi3C~d*anC+xfHO(4=%-jdB1Rf1DKb9%iXU8^x;QC
z(U^bV?+2@>cR5&ennUNt{QvcUdrR?7QWE@^mrz=<BDJN3g@*&|`AoI>4-jZW-Q<7<
zPb@euZV!;3At6!xzTQ70`)7Rr_5HsO&HpsYf5sZB`P|#5<_l)T`cC7+gNO@D3w;%M
H|MC9-RHu~K

literal 0
HcmV?d00001

diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/index.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/index.rst
new file mode 100644
index 00000000000..d7457ebced1
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/index.rst
@@ -0,0 +1,24 @@
+.. _sip:
+
+*****************************
+The Standard Imaging Pipeline
+*****************************
+
+The Standard Imaging Pipeline (or SIP) will accept raw data from the LOFAR
+correlator, pre-process it, calibrate it, image it, and update the sky model
+with the sources detected in the data. This section describes the components
+of the SIP and how they fit together.
+
+It should be emphasised that, at least during development stages, there is not
+a single, ideal imaging pipeline. Instead, the user is encouraged to compose a
+pipeline which meets their needs by selecting their preferred combination of
+individual recipes.
+
+Note that none of the SIP recipes make use of IPython, so configuration and
+usage should be relatively simple.
+
+.. toctree::
+   :maxdepth: 2
+
+   quickstart/index.rst
+   recipes/index.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/quickstart/index.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/quickstart/index.rst
new file mode 100644
index 00000000000..443b99d4165
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/quickstart/index.rst
@@ -0,0 +1,210 @@
+.. _sip-quickstart:
+
+==============
+CEP Quickstart
+==============
+
+This brief guide should take you from scratch to an operational imaging
+pipeline running on LOFAR CEP. It is still under development, and corrections
+(preferably in the form of patches) are very welcome.
+
+This guide assumes you already have a working installation of the pipeline
+framework; please see the :ref:`framework documentation
+<framework-quickstart>` for help with that.
+
+If you will be using the LOFAR offline cluster, it's worth taking a moment to
+ensure you have an SSH keychain that will enable you to log into all the
+cluster nodes you need (lfe001, lfe002, lce0XX, lse0XX) without needing to
+type a password: see the `LOFAR wiki
+<http://www.lofar.org/operations/doku.php?id=public:ssh-usage>`_ for help.
+
+Note that examples of many of the steps described here are available as part
+of the example data provided with the pipeline framework (`available from
+Subversion <http://usg.lofar.org/svn/code/trunk/src/pipeline/docs/examples/>`_ if
+required).
+
+For the sake of example, we will consider the ``L2009_16007`` dataset,
+available on LOFAR subcluster 3. 
+
+Set up your environment
+-----------------------
+Before starting, you should ensure that all
+the ``LofIm`` packages are available in your environment.  The typical way
+to add this package to one's start-up environment is to type the following
+at the command line:
+
+.. code-block:: bash
+
+    $ use LofIm
+     
+or add this to the .bashrc or .cshrc files so that it is automatically 
+added at login.  Note, there are some issues with paths when the daily build
+fails.  To ensure that there are no problems accessing the LOFAR imaging software,
+you may want to skip the ``use LofIm`` step and add ``/opt/LofIm/daily/lofar`` the 
+paths explicitly to your environment. You will also need to add the
+appropriate environment to the cluster setup in your configuration file: see
+the :ref:`framework quickstart <pipeline-config>` for details.
+
+Make a job directory
+--------------------
+
+This is dedicated to the output of a specific pipeline "job" -- that is, a run
+with a given set of input data, pipeline configuration and so on. Note that
+each job is given a specific, user-defined name, and the job directory should
+be named after that. The name is entirely up to the user, but I generally use
+the name of the input dataset with an appended number to differentiate
+configurations. Hence, something like:
+
+.. code-block:: bash
+
+    $ mkdir -p ~/pipeline_runtime/jobs/L2009_16007_1
+
+Prepare parsets describing how to run the pipeline components
+-------------------------------------------------------------
+
+Any individual pipeline component you plan to run that needs a parset -- such
+as DPPP, BBS or the MWImager -- will need one provided by the pipeline
+framework. Drop them into ``~/pipeline_runtime/jobs/L2009_16007_1/parsets``.
+
+Note that some parameters will be added to the parset by the framework as part
+of the run. At present, these are:
+
+* NDPPP: ``msin``, ``msout``
+* MWImager: ``dataset``
+
+Prepare your task definitions
+-----------------------------
+
+Refer to the :ref:`framework overview <framework-overview>` and :ref:`recipe
+documentation <task-definition>` for more on tasks; recall that they are
+basically the combination of a recipe and a set of parameters. We will use the
+:ref:`vdsreader-recipe` and :ref:`dppp-recipe` recipes, and define the following task:
+
+.. code-block:: none
+
+   [vdsreader]
+   recipe = vdsreader
+   gvds = %(runtime_directory)s/jobs/%(job_name)s/vds/%(job_name)s.gvds
+
+   [ndppp]
+   recipe = dppp
+   executable = %(lofarroot)s/bin/NDPPP
+   initscript = %(lofarroot)s/lofarinit.sh
+   working_directory = %(default_working_directory)s
+   parset = %(runtime_directory)s/jobs/%(job_name)s/parsets/ndppp.parset
+   dry_run = False
+
+
+Prepare a pipeline definition
+-----------------------------
+
+The pipeline definition specifies how data should flow through the pipeline.
+It is a Python script, so you can use whatever logic you like to determine the
+flow. For now, I suggest you keep it simple!
+
+The :meth:`pipeline.master.control.run_task()` method is a shortcut to run the
+specific recipe configurations specified in the configuration file; it takes a
+configuration stanza and a list of datafiles as its input, and returns a list
+of processed datafiles. More complex configurations are also possible, but
+you'll have to define these by hand (ie, specifying the inputs and outputs of
+the underlying recipe manually).
+
+A very simple definition might be:
+
+.. code-block:: python
+
+    class sip(control):
+       def pipeline_logic(self):
+           with log_time(self.logger):
+               datafiles = self.run_task("vdsreader")['data']
+               datafiles = self.run_task("ndppp", datafiles)['data']
+
+Here, the ``vdsreader`` task reads a list of filenames to be processed from a
+VDS file, and then hands them to ``ndppp``. Note that the ``log_time``
+context simply writes an entry to the log recording how long it all took.
+
+Prepare a VDS file describing your data
+---------------------------------------
+
+A VDS file describes the location of all the datasets/measurement sets.  
+Preparing the VDS file is not strictly necessary: you can use the vdsreader task to
+obtain a list of filenames to process (as above, via run_task("vdsreader") in sip.py),
+or you can specify them by hand -- just write a list in a text file, then parse that and
+feed it to the DPPP task. You need to specify the full path to each
+measurement set, but don't need to worry about the specific hosts it's
+accessible on. Note that with the current cross-mount arrangement of the
+cluster compute and storage nodes, you need to be on the **lce0XX** nodes in order
+to see the paths to the MS files.  A list that looks like
+
+.. code-block:: python
+
+    ['/net/sub3/lse007/data2/L2009_16007/SB1.MS', '/net/sub3/lse007/data2/L2009_16007/SB2.MS', ...]
+
+is fine.  This method allows you to test the pipeline with a smaller set of
+files than the typical set in its entirety.  In order to **run on a list of
+files instead of running vdsreader**, the list would go into the sip.py file
+as such (otherwise, use the above setting for datafiles of
+run_task("vdsreader")):
+
+.. code-block:: python
+
+    class sip(control):
+       def pipeline_logic(self):
+           with log_time(self.logger):
+               datafiles = ['/net/sub3/lse007/data2/L2009_16007/SB1.MS', '/net/sub3/lse007/data2/L2009_16007/SB2.MS']
+               datafiles = self.run_task("ndppp", datafiles)
+
+
+Anyway, assuming you want to go the VDS route, something like
+
+For bash (on any imaging lce0XX node machine):
+
+.. code-block:: bash
+
+    $ ssh lce019
+    $ mkdir /tmp/16007
+    $ mkdir ~/pipeline_runtime/jobs/L2009_16007_1/vds/
+    $ for storage in `seq 7 9`; do for file in /net/sub3/lse00$storage/data2/L2009_16007/\*MS; do /opt/LofIm/daily/lofar/bin/makevds ~/Work/pipeline_runtime/sub3.clusterdesc $file /tmp/16007/`basename $file`.vds; done; done
+    $ /opt/LofIm/daily/lofar/bin/combinevds ~/pipeline_runtime/jobs/L2009_16007_1/vds/L2009_16007_1.gvds /tmp/16007/\*vds
+
+For tcsh (on any imaging lce0XX node machine):
+
+.. code-block:: tcsh
+
+    $ ssh lce019
+    $ mkdir /tmp/16007
+    $ echo "for storage in "\`"seq 7 9"\`"; do for file in /net/sub3/lse00"\$"storage/data2/L2009_16007/\*MS; do /opt/LofIm/daily/lofar/bin/makevds ~/Work/pipeline_runtime/sub3.clusterdesc "\$"file /tmp/16007/"\`"basename "\$"file"\`".vds; done; done" > run.sh
+    $ chmod 755 run.sh
+    $ ./run.sh
+    $ mkdir ~/pipeline_runtime/jobs/L2009_16007_1/vds/
+    $ /opt/LofIm/daily/lofar/bin/combinevds ~/pipeline_runtime/jobs/L2009_16007_1/vds/L2009_16007_1.gvds /tmp/16007/\*vds
+
+
+will do the trick.  Check to be sure that your global vds file was created
+(``~/pipeline_runtime/jobs/L2009_16007_1/vds/L2009_16007_1.gvds``).  Clean up
+the temporary location.
+
+Run the pipeline
+----------------
+
+The pipeline can take a long time to process all subbands, especially if you
+are running multiple passes of DPPP.  Since your login session with the head
+node is likely to be cut off by an auto-logout, it is recommended that you use a
+`screen <http://www.gnu.org/software/screen/manual/screen.html>`_ session when
+running the pipeline, so that you can re-attach to the session if you log
+out before the pipeline is finished.
+ 
+.. code-block:: bash
+
+    $ cd ~/pipeline_runtime/jobs/L2009_16007_1/
+    $ python sip.py -j L2009_16007_1 -d
+
+The ``-d`` flag specifies debugging mode (ie, more logging). The ``-j``
+argument just specifies the job we're running.  Intermediate pipeline files
+are placed in your default_working_directory (in ``pipeline.cfg``);  results
+are placed in the ``~/pipeline_runtime/jobs/L2009_16007_1/results`` directory;
+logs are placed in the ``~/pipeline_runtime/jobs/L2009_16007_1/logs``
+directory. ``DPPP`` leaves all the results in the default_working_directory;
+if you do not run any additional pipeline tasks after ``DPPP``, there will be
+no results directory created.  The pipeline log will indicate whether the
+pipeline completed successfully.
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/bbs.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/bbs.rst
new file mode 100644
index 00000000000..aeaa1e98525
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/bbs.rst
@@ -0,0 +1,8 @@
+.. _recipe-bbs:
+
+===
+BBS
+===
+
+.. autoclass:: bbs.bbs
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/cimager.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/cimager.rst
new file mode 100644
index 00000000000..6cb2cc99b5c
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/cimager.rst
@@ -0,0 +1,11 @@
+.. _recipe-cimager:
+
+=======
+cimager
+=======
+
+.. autoclass:: cimager.cimager
+   :show-inheritance:
+
+.. autoclass:: cimager.ParsetTypeField
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/datamapper.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/datamapper.rst
new file mode 100644
index 00000000000..98666515380
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/datamapper.rst
@@ -0,0 +1,8 @@
+.. _recipe-datamapper:
+
+==========
+datamapper
+==========
+
+.. autoclass:: datamapper.datamapper
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/dppp.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/dppp.rst
new file mode 100644
index 00000000000..2fc26c6a45d
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/dppp.rst
@@ -0,0 +1,8 @@
+.. _dppp-recipe:
+
+====
+DPPP
+====
+
+.. autoclass:: new_dppp.new_dppp
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/index.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/index.rst
new file mode 100644
index 00000000000..55a89e09812
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/index.rst
@@ -0,0 +1,23 @@
+=================================
+Standard Imaging Pipeline recipes
+=================================
+
+Here we outline the various components which make up LOFAR's Standard Imaging
+Pipeline and how they can be combined to form a coherent whole. These
+components are made available as pipeline recipes; the reader is encouraged to
+be familiar with the :ref:`recipe-docs` section.
+
+.. toctree::
+    :maxdepth: 1
+
+    sip
+    datamapper
+    storagemapper
+    dppp
+    rficonsole
+    bbs
+    sourcedb
+    parmdb
+    cimager
+    vdsmaker
+    vdsreader
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/parmdb.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/parmdb.rst
new file mode 100644
index 00000000000..4b7ecd066ac
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/parmdb.rst
@@ -0,0 +1,8 @@
+.. _recipe-parmdb:
+
+======
+parmdb
+======
+
+.. autoclass:: parmdb.parmdb
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/rficonsole.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/rficonsole.rst
new file mode 100644
index 00000000000..6197b4a9746
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/rficonsole.rst
@@ -0,0 +1,8 @@
+.. _recipe-rficonsole:
+
+==========
+rficonsole
+==========
+
+.. autoclass:: rficonsole.rficonsole
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sip.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sip.rst
new file mode 100644
index 00000000000..d0c4f1ac536
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sip.rst
@@ -0,0 +1,42 @@
+==============
+The SIP recipe
+==============
+
+.. todo::
+
+   Check if this section is up to date.
+
+There is no single SIP recipe: an imaging pipeline should be composed of
+components as required. However, various examples are available to help.
+
+``sip.py``
+----------
+
+.. todo::
+
+   Provide simpler example SIP.
+
+This recipe demonstrates the basic functionality of an imaging pipeline. In
+turn, it runs ``DPPP`` (data compression and flagging), ``BBS`` (calibration),
+``MWimager`` (imaging) and a custom-developed source finding step. The logs of
+all these steps are collected and stored centrally, images (in ``CASA``
+format) are made available.
+
+This should form a model for how a pipeline can be constructed. However, it
+does not contain logic for routines such as the "major cycle" (whereby
+``BBS``, ``MWimager`` and the source finder will iterate to find an optimum
+calibration). Such logic should be straightforward to add based on this
+framework.
+
+.. literalinclude:: ../../../../../examples/definition/sip2/sip.py
+
+``tasks.cfg``
+-------------
+
+.. todo::
+
+   Check task file for completeness/correctness.
+
+This task file defines the tasks referred to in the above example.
+
+.. literalinclude:: ../../../../../examples/definition/sip2/tasks.cfg
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sourcedb.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sourcedb.rst
new file mode 100644
index 00000000000..3561c7290a1
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/sourcedb.rst
@@ -0,0 +1,8 @@
+.. _recipe-sourcedb:
+
+========
+sourcedb
+========
+
+.. autoclass:: sourcedb.sourcedb
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/storagemapper.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/storagemapper.rst
new file mode 100644
index 00000000000..c70bbed3461
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/storagemapper.rst
@@ -0,0 +1,8 @@
+.. _recipe-storagemapper:
+
+=============
+storagemapper
+=============
+
+.. autoclass:: storagemapper.storagemapper
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsmaker.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsmaker.rst
new file mode 100644
index 00000000000..ce13707016b
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsmaker.rst
@@ -0,0 +1,6 @@
+========
+vdsmaker
+========
+
+.. autoclass:: new_vdsmaker.new_vdsmaker
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsreader.rst b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsreader.rst
new file mode 100644
index 00000000000..eb4e200b982
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/pipelines/sip/recipes/vdsreader.rst
@@ -0,0 +1,8 @@
+.. _vdsreader-recipe:
+
+=========
+vdsreader
+=========
+
+.. autoclass:: vdsreader.vdsreader
+   :show-inheritance:
diff --git a/CEP/Pipeline/docs/sphinx/source/todo.rst b/CEP/Pipeline/docs/sphinx/source/todo.rst
new file mode 100644
index 00000000000..a84e6853588
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/todo.rst
@@ -0,0 +1,7 @@
+.. _todo:
+
+************************
+Documentation to-do list
+************************
+
+.. todolist::
diff --git a/CEP/Pipeline/docs/sphinx/source/user/installation/index.rst b/CEP/Pipeline/docs/sphinx/source/user/installation/index.rst
new file mode 100644
index 00000000000..b8e04bd2adb
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/installation/index.rst
@@ -0,0 +1,15 @@
+.. _infrastructure-setup:
+
+**********************
+Installation and setup
+**********************
+
+This section provides information on how to set up the pipeline system. A
+quick-start guide is provided for users of LOFAR CEP, where the pipeline
+system is already installed. Others will need to install by hand.
+
+.. toctree::
+   :maxdepth: 1
+
+   quickstart.rst
+   installation.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/user/installation/installation.rst b/CEP/Pipeline/docs/sphinx/source/user/installation/installation.rst
new file mode 100644
index 00000000000..dead4204104
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/installation/installation.rst
@@ -0,0 +1,69 @@
+.. _framework-installation:
+
+**********************
+Framework installation
+**********************
+
+**Note**: These instructions were developed and tested using Ubuntu 10.04
+Server. They assume the user is using the ``bash`` shell.  Adjustment for
+other systems should be straightforward.
+
+Before starting, you will need to install the ``lofar.parameterset`` Python
+module. This must be available on your ``$PYTHONPATH``. Check by ensuring that
+you can replicate the following command:
+
+.. code-block:: bash
+
+  $ python -c 'import lofar.parameterset ; print "ok"'
+  ok
+
+The latest version of the framework is available by Subversion from the `USG
+repository <http://usg.lofar.org/>`_. Obtain a snapshot as follows:
+
+.. code-block:: bash
+
+  $ svn export http://usg.lofar.org/svn/code/trunk/src/pipeline/ ~/pipeline_framework
+
+This will create a ``pipeline_framework`` directory within your current working
+directory. That directory contains a number of subdirectories. Note first
+``docs``: this contains the source for the pipeline documentation. If you have
+`Sphinx <http://sphinx.pocoo.org/>`_ installed, you can run ``make`` in that
+directory to generate the documentation tree.
+
+The framework itself is a Python package named ``lofarpipe``. It is found in
+the ``framework`` directory, and may be installed using the setup.py script
+included. The output directory can be specified using the ``--prefix``
+option; in the example below, we install to ``/opt/pipeline/framework``.
+
+.. code-block:: bash
+
+  $ cd ~/pipeline_framework/framework
+  $ sudo python setup.py install --prefix=/opt/pipeline/framework
+  running install
+  running build
+  running build_py
+  [... many lines elided ...] 
+  running install_egg_info
+  Writing /opt/pipeline/framework/lib/python2.6/site-packages/Pipeline-0.1.dev-py2.6.egg-info
+
+After installation, ensure that the relevant ``site-packages`` directory
+appears on your ``$PYTHONPATH`` environment variable:
+
+.. code-block:: bash
+
+  $ export PYTHONPATH=$PYTHONPATH:/opt/pipeline/framework/lib/python2.6/site-packages/
+
+You may wish to add this to your shell startup sequence.
+
+The pipeline also comes with a collection of recipes in the
+``pipeline_framework/recipes`` directory. These are not required by the
+framework itself, but will be useful for building pipelines. Ensure the
+contents of this directory are conveniently accessible:
+
+.. code-block:: bash
+
+  $ sudo cp -r ~/pipeline_framework/recipes /opt/pipeline
+
+At this point, the basic framework code should be installed. The next step is
+to start running simple recipes: see the :ref:`running-basic` section for
+details.
diff --git a/CEP/Pipeline/docs/sphinx/source/user/installation/quickstart.rst b/CEP/Pipeline/docs/sphinx/source/user/installation/quickstart.rst
new file mode 100644
index 00000000000..b4320c3c891
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/installation/quickstart.rst
@@ -0,0 +1,155 @@
+.. _framework-quickstart:
+
+CEP quickstart
+==============
+
+.. todo::
+
+   Bring this quickstart guide in-line with the current situation.
+
+This section provides some quick notes on getting started with the pipeline
+system. More details are available in subsequent sections of this chapter.
+
+This section describes the basic requirements for setting up the pipeline
+framework. You may also need further configuration to run specific tools in
+your pipeline: see, for example, the Standard Imaging Pipeline
+:ref:`sip-quickstart` section.
+
+Locate the pipeline dependencies
+--------------------------------
+
+There are a number of Python packages which are required for the framework to
+operate: see :ref:`framework-dependencies`. On the LOFAR cluster, these are
+available under ``/opt/pipeline/dependencies``. Ensure the appropriate
+directories are available in the environment variables ``$PATH`` (should
+contain ``/opt/pipeline/dependencies/bin``)
+and ``$PYTHONPATH``
+(``/opt/pipeline/dependencies/lib/python2.5/site-packages``). To avoid any
+possible conflicts with system installations, it is best to list these paths
+early in the relevant variables.
+
+Ensure the framework modules are available
+------------------------------------------
+
+There are two Python packages which comprise the pipeline framework: :mod:`ep`
+and :mod:`lofarpipe`. These must both be available on your ``$PYTHONPATH``.
+The easiest way to achieve this is to use the system installations in
+``/opt/pipeline/framework``: add
+``/opt/pipeline/framework/lib/python2.5/site-packages`` to your
+``$PYTHONPATH``. Alternatively, you may wish to build and install your own
+copies for development purposes: see :ref:`building-modules` for details.
+
+Decide on a basic layout
+------------------------
+
+The pipeline will store all its logs, results, configuration data, etc in a
+centralised location or "runtime directory". This should be accessible from
+all nodes you will be using -- both the head node, and any compute nodes --
+and should be writable (at least) by the userid under which the pipeline will
+run. You should create this directory now.
+
+If you will be using the compute nodes to store data on their local disks, you
+will also need to create a "working directory" in a standard location on each
+of them. On the LOFAR cluster, ``/data/scratch/[username]`` is a good choice.
+This can be easily achieved using ``cexec``; for instance:
+
+.. code-block:: bash
+
+   $ cexec sub3:0-8 mkdir -p /data/scratch/swinbank
+
+Produce a ``clusterdesc`` file
+------------------------------
+
+The ``clusterdesc`` file describes the layout of the cluster -- the names of
+the various nodes, what disks they have access to, and so on. Some are already
+available in LOFAR Subversion. A minimal file for subcluster three could be:
+
+.. code-block:: bash
+
+   Head.Nodes = [ lfe001..2 ]
+   Compute.Nodes = [ lce019..027 ]
+
+It doesn't matter where you save this, but you might find it convenient to
+leave it in the runtime directory.
+
+.. _pipeline-config:
+
+Produce a pipeline configuration file
+-------------------------------------
+
+This file will contain all the standard information the pipeline framework
+needs to get going. For a basic pipeline, running only on the head node, you
+should have something like:
+
+.. literalinclude:: ../../../../../docs/examples/definition/dummy/pipeline.cfg
+
+Ensure that the ``runtime_directory`` and ``default_working_directory``
+directives match the directories you created above. The others can mostly be
+ignored for now, unless you know you need to change them.
+
+If you also want to use the cluster, you need to add another two stanzas:
+
+.. code-block:: none
+
+  [cluster]
+  clusterdesc = %(runtime_directory)s/sub3.clusterdesc
+  task_furl = %(runtime_directory)s/task.furl
+  multiengine_furl = %(runtime_directory)s/multiengine.furl
+
+  [deploy]
+  script_path = /opt/pipeline/framework/bin
+  controller_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages:/opt/pipeline/framework/lib/python2.5/site-packages
+  engine_ppath = /opt/pipeline/dependencies/lib/python2.5/site-packages/:/opt/pipeline/framework/lib/python2.5/site-packages
+  engine_lpath = /opt/pipeline/dependencies/lib
+
+You should ensure the ``clusterdesc`` directive points at the clusterdesc
+file you are using. Note that ``%(runtime_directory)s`` will be expanded to
+the path you've specified for the runtime directory.
+
+``engine_lpath`` and ``engine_ppath`` specify (respectively) the
+``$LD_LIBRARY_PATH`` and ``$PYTHONPATH`` that will be set for jobs on the
+compute nodes. These should (at least) point to the dependencies and the
+framework, as above, but should also include any necessary paths for code
+which you will be running on the engines (imaging tools, data processing,
+etc).
+
+The other variables can be left at the default settings for now, unless you
+know they need to be changed.
+
+When looking for a configuration file, the framework will look first in its
+current working directory for ``pipeline.cfg`` and, if nothing is there, look
+under ``~/.pipeline.cfg``. Save yours somewhere appropriate.
+
+At this point, the framework should be ready to run on the head node. If
+required, continue to :ref:`launch-cluster`.
+
+.. _launch-cluster:
+
+Setting up the IPython cluster
+------------------------------
+
+The IPython system consists of a controller, which runs on the head node, and
+engines, which run on the compute nodes. See the sections on :ref:`IPython
+<ipython-blurb>` and the :ref:`cluster layout <cluster-layout>` for details.
+Simple Python scripts make it easy to start and stop the cluster. This can be
+done independently of an individual pipeline run: one can start the engines
+once, run multiple pipelines using the same engines, and then shut it down.
+
+The relevant scripts are available in ``/opt/pipeline/framework/bin``, named
+``start_cluster.py`` and ``stop_cluster.py``. Each accepts the name of a
+pipeline configuration file as an optional argument: if one is not provided,
+it defaults to ``~/.pipeline.cfg``.
+
+Usage is very straightforward:
+
+.. code-block:: bash
+
+  $ /opt/pipeline/framework/bin/start_cluster.py --config /path/to/pipeline.cfg
+
+After the script has finished executing, you can continue to set up and run
+your pipeline. When finished, shut down the cluster:
+
+.. code-block:: bash
+
+  $ /opt/pipeline/framework/bin/stop_cluster.py --config /path/to/pipeline.cfg
+
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/configuration.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/configuration.rst
new file mode 100644
index 00000000000..2e6a0048e9c
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/configuration.rst
@@ -0,0 +1,115 @@
+.. _config-file:
+
+*************
+Configuration
+*************
+
+Various default parameters for pipeline operation are stored in a
+configuration file. The contents of this file are available to recipes as
+``self.config``, which is an instance of ``SafeConfigParser`` from the `Python
+Standard Library <http://docs.python.org/library/configparser.html>`_.
+
+Accepted parameters
+===================
+
+Section ``DEFAULT``
+-------------------
+
+This section contains global pipeline configuration parameters. They can be
+referred to in other sections of the configuration file using ``%()s`` syntax:
+see the Python documentation (linked above) for details.
+
+``runtime_directory``
+    Overall pipeline framework runtime directory. Pipeline framework metadata
+    which is shared between multiple jobs will be written here.
+
+    This parameter is **required**.
+
+``recipe_directories``
+    List of directories in which to search for recipes. Multiple directories
+    can be specified using a Python list-like syntax: ``[/path/to/dir/1,
+    /path/to/dir/2]``.
+
+``task_files``
+    List of task definition files. Multiple entries may be specified in the
+    same way as above.
+
+Section ``layout``
+------------------
+
+This section contains paths which may be referenced by individual pipeline
+recipes, for example to save state, locate parset files or write logs.
+
+``job_directory``
+    This will contain configuration information (parsets, etc) for a given
+    pipeline job. Metadata referring to an ongoing run may be written into
+    this directory (and will normally be removed when the run finishes),
+    and logs and output files are collected here.
+
+    This directory should be available (eg, NFS mounted) to **every** node
+    that is involved in the pipeline run.
+
+    This parameter is **required**.
+
+Section ``cluster``
+-------------------
+
+This section describes the layout of a cluster which can be used for
+distributed processing.
+
+``clusterdesc``
+    The full path to a ``clusterdesc`` file (see :ref:`distproc-blurb`)
+    which describes the cluster configuration to be used by the pipeline.
+
+    This parameter is **required** if remote jobs are being used.
+
+``task_furl`` and ``multiengine_furl``
+    Filenames which will be used for the FURL files needed for connection to an
+    :ref:`ipython-blurb` cluster.
+
+    These parameters are only required if IPython is being used within the
+    pipeline.
+
+Section ``deploy``
+------------------
+
+This section describes the environment used for starting up jobs on remote
+hosts.
+
+``engine_lpath`` and ``engine_ppath``
+    The values of ``$LD_LIBRARY_PATH`` and ``$PYTHONPATH`` which will be used
+    for all remote commands. Note that these are **not** inherited from the
+    environment on the pipeline head node.
+
+    These parameters are **required** if remote jobs are being used.
+
+``controller_ppath``
+    The value of ``$PYTHONPATH`` which will be used for an IPython controller
+    started on the head node. Note that this is not used (or required) if
+    IPython is not being used.
+
+``script_path``
+    The location of scripts needed for starting IPython engines on remote
+    hosts. This is not used (or required) if IPython is not being used, or if
+    a non-pipeline method is used for starting the engines.
+
+Section ``logging``
+-------------------
+
+This section enables the user to customise the pipeline logging output. Note
+that it is entirely optional: a log file with default settings will be written
+to the ``job_directory`` if this section is omitted.
+
+``log_file``
+    Output filename for logging.
+
+``format`` and ``datefmt``
+    Format for log entries and dates, respectively. These are used exactly as
+    per the Python logging system; see `its documentation
+    <http://docs.python.org/library/logging.html#formatters>`_ for details.
+
+Section ``remote``
+------------------
+
+This section contains parameters for configuring the remote command execution
+strategy. It is intended for expert use only.
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/index.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/index.rst
new file mode 100644
index 00000000000..bdbb8e3e8e3
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/index.rst
@@ -0,0 +1,17 @@
+.. _usage:
+
+*************************
+Using the pipeline system
+*************************
+
+This section provides a guide to the important concepts a pipeline-end user
+should understand.
+
+.. toctree::
+   :maxdepth: 1
+  
+   running.rst
+   layout.rst
+   jobs.rst
+   configuration.rst
+   tasks.rst
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/jobs.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/jobs.rst
new file mode 100644
index 00000000000..e0b68c1bde6
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/jobs.rst
@@ -0,0 +1,7 @@
+****
+Jobs
+****
+
+.. todo::
+
+   Details on jobs, job IDs, etc.
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/layout.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/layout.rst
new file mode 100644
index 00000000000..fbb1912b42d
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/layout.rst
@@ -0,0 +1,51 @@
+.. _pipeline-layout:
+
+Pipeline layout
+===============
+
+The pipeline system is designed to be organised in a standard directory
+structure. Insofar as is possible, this contains all information needed to
+manage a cluster and associated set of pipeline tasks. It is not designed to
+contain the actual data that is being processed. It is assumed that
+this directory will be available to all the various cluster nodes, presumably
+using NFS.
+
+The root directory of this structure is the ``runtime`` directory. This
+contains all the information about a given "cluster" -- that is, all the
+disks, compute nodes, management node, etc described by a given
+``clusterdesc`` file. This top level directory contains that ``clusterdesc``,
+and, if appropriate, information about an associated IPython cluster: 
+
+* A PID file (``ipc.pid``) and log files from the IPython controller (named
+  according to the pattern ``ipc.log${pid}.log``)
+
+* An ``engines`` directory, which contains PID (``ipengine${N}.pid``, where
+  ``N`` simply increments according to the number of engines on the host)
+  files and log files (``log${PID}.log``) from the various engines in the
+  cluster, organised into subdirectories by hostname.
+
+* The files ``engine.furl``, ``multiengine.furl`` and ``task.furl``, which
+  provide access to the IPython engines, multiengine client and task client
+  respectively. See the IPython documentation for details.
+
+Of course, a single cluster (and hence runtime directory) may process many
+different jobs. These are stored in the ``jobs`` subdirectory, and are
+organised by job name -- an arbitrary user-supplied string.
+
+Within each job directory, three further subdirectories are found:
+
+``logs``
+    Processing logs; where appropriate, these are filed by sub-band name.
+
+``parsets``
+    Parameter sets providing configuration information for the various
+    pipeline components. These should provide the static parameters used by
+    tasks such as ``DPPP`` and the imager; dynamic parameters, such as the
+    name of the files to be processed, can be added by the pipeline at
+    runtime.
+
+``vds``
+    Contains VDS and GDS files pointing to the location of the data to be
+    processed on the cluster.
+
+
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/running.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/running.rst
new file mode 100644
index 00000000000..1db151c90d0
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/running.rst
@@ -0,0 +1,108 @@
+.. _running-basic:
+
+***********************
+Running basic pipelines
+***********************
+
+This section will describe the minimal configuration needed to run a simple
+pipeline. Every pipeline recipe is independently runnable as a stand-alone
+module, so we start with that. We will then build a larger pipeline connecting
+multiple modules.
+
+A single recipe
+---------------
+
+We will start by running the :ref:`simple recipe <basic-recipe>` described in
+the :ref:`recipe-docs` section. This is included in the standard framework
+distribution: it will be available as
+``/opt/pipeline/recipes/master/example.py`` if you have followed the
+installation guide. When called in its simplest mode, this recipe will simply
+return the results of running ``/bin/ls`` in the current working directory.
+
+Before running, it is necessary to create a pipeline configuration file. The
+simplest possible configuration file defines two options:
+``runtime_directory``, which the pipeline framework uses for storing various
+information about the framework state, and ``job_directory``, which contains
+information relevant to a specific pipeline job. The former must exist; the
+latter will be created if required, and may contain a reference to the runtime
+directory. For example:
+
+.. code-block:: none
+
+  [DEFAULT]
+  runtime_directory = /home/username/pipeline_runtime
+  
+  [layout]
+  job_directory = %(runtime_directory)s/jobs/%(job_name)s
+
+On startup, the framework will first search for a file named ``pipeline.cfg``
+in your current working directory, before falling back to ``~/.pipeline.cfg``.
+The user can also specify a configuration file on the command line. See the
+:ref:`config-file` section for full details on the configuration system.
+
+With the configuration file in place, it is now possible to run the example
+recipe. **Note** that all pipeline runs must be supplied with a job identifier
+on startup: see the section on :ref:`pipeline-jobs` for more. The recipe can
+then be executed as follows:
+
+.. code-block:: bash
+
+  $ python /opt/pipeline/recipes/master/example.py --job-name EXAMPLE_JOB -d
+  $ python example.py -j EXAMPLE_JOB -d
+  2010-10-26 18:38:31 INFO    example: This is a log message
+  2010-10-26 18:38:31 DEBUG   example: /bin/ls stdout: bbs.py
+  [ls output elided]
+  2010-10-26 18:38:31 INFO    example: recipe example completed
+  Results:
+  stdout = [ls output elided]
+
+Congratulations: you have run your first LOFAR pipeline!
+
+A pipeline
+----------
+
+To turn this simple recipe into a fully-functional pipeline is simply a matter
+of wrapping it in a pipeline definition derived from the :class:`control`
+class. The :meth:`cook_recipe` method can then be used to run the recipe. Note
+that it needs to be provided with appropriate input and output objects. An
+appropriate pipeline definition might be:
+
+.. code-block:: python
+
+  import sys
+
+  from lofarpipe.support.control import control
+  from lofarpipe.support.lofaringredient import LOFARinput, LOFARoutput
+
+  class pipeline(control):
+      def pipeline_logic(self):
+          recipe_inputs = LOFARinput(self.inputs)
+          recipe_outputs = LOFARoutput()
+          recipe_inputs['executable'] = '/bin/true'
+          self.cook_recipe('example', recipe_inputs, recipe_outputs)
+
+  if __name__ == "__main__":
+      sys.exit(pipeline().main())
+
+In order to make it clear where to find the ``example`` recipe, we also need
+to edit ``pipeline.cfg``, adding a ``recipe_directories`` directive to the
+``DEFAULT`` section:
+
+.. code-block:: none
+
+    recipe_directories = [/opt/pipeline/recipes]
+
+Saving the above definition in ``pipeline.py``, we now have:
+
+.. code-block:: bash
+
+  $ python pipeline.py -j test_job -d
+  2010-10-27 18:17:31 INFO    pipeline: LOFAR Pipeline (pipeline) starting.
+  2010-10-27 18:17:31 INFO    pipeline.example: recipe example started
+  2010-10-27 18:17:31 INFO    pipeline.example: Starting example recipe run
+  2010-10-27 18:17:31 DEBUG   pipeline.example: Pipeline start time: 2010-10-27T16:17:31
+  2010-10-27 18:17:31 INFO    pipeline.example: This is a log message
+  2010-10-27 18:17:31 INFO    pipeline.example: recipe example completed
+  2010-10-27 18:17:31 INFO    pipeline: recipe pipeline completed
+  Results:
+
diff --git a/CEP/Pipeline/docs/sphinx/source/user/usage/tasks.rst b/CEP/Pipeline/docs/sphinx/source/user/usage/tasks.rst
new file mode 100644
index 00000000000..bb005c74288
--- /dev/null
+++ b/CEP/Pipeline/docs/sphinx/source/user/usage/tasks.rst
@@ -0,0 +1,43 @@
+*****
+Tasks
+*****
+
+Declaring the full inputs and outputs for a recipe every time it is run can be
+a chore, especially when the same set of parameters are used frequently. This
+not only complicates the pipeline definition, it inelegantly mixes
+configuration parameters into the code defining the pipeline.  Therefore, we
+introduce the concept of a "task": the combination of a recipe and a set of
+standard parameters.
+
+First, we define a task file in the :ref:`configuration file <config-file>`:
+
+.. code-block:: none
+
+  task_files = [/path/to/tasks.cfg]
+
+Within that file, tasks are delimited by blocks headed by the task name in
+square brackets. There then follows the recipe name and the parameters to be
+provided. For example:
+
+.. code-block:: none
+
+  [example_task]
+  recipe = example
+  parameter1 = value1
+  parameter2 = value2
+
+Within the pipeline definition, this task can then be used by invoking the
+:meth:`~lofarpipe.support.baserecipe.BaseRecipe.run_task` method:
+
+.. code-block:: python
+
+   self.run_task("example_task")
+
+If required, parameters can be overridden in the arguments to
+:meth:`~lofarpipe.support.baserecipe.BaseRecipe.run_task`. For example, we
+might over-ride the value of ``parameter2`` above, while leaving
+``parameter1`` intact:
+
+.. code-block:: python
+
+   self.run_task("example_task", parameter2="value3")
diff --git a/CEP/Pipeline/framework/lofarpipe/__init__.py b/CEP/Pipeline/framework/lofarpipe/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/COPYING b/CEP/Pipeline/framework/lofarpipe/cuisine/COPYING
new file mode 100644
index 00000000000..d60c31a97a5
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/COPYING
@@ -0,0 +1,340 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+	    How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year  name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/README b/CEP/Pipeline/framework/lofarpipe/cuisine/README
new file mode 100644
index 00000000000..176e12fe453
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/README
@@ -0,0 +1,8 @@
+This is free software under the GNU General Public License. See the file COPYING.
+Copyright (c) ASTRON
+
+If you are not using the Eric3 IDE you can ignore the .e3p files
+as these are the Eric3 project files (currently for version 3.6).
+
+These scripts are the base classes for the scripts in the
+WSRTpipeline_library. See the documentation in WSRTpipeline_library/doc.
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/WSRTrecipe.py b/CEP/Pipeline/framework/lofarpipe/cuisine/WSRTrecipe.py
new file mode 100644
index 00000000000..e659b9b1511
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/WSRTrecipe.py
@@ -0,0 +1,366 @@
+#!/usr/bin/env python
+import ingredient, cook, parset
+import sys
+
+from optparse import OptionParser
+
+####
+# Use the standard Python logging module for flexibility.
+# Standard error of external jobs goes to logging.WARN, standard output goes
+# to logging.INFO.
+import logging
+from lofarpipe.support.pipelinelogging import getSearchingLogger
+
+from traceback import format_exc
+
+class RecipeError(cook.CookError):
+    """Error raised by recipe helpers (e.g. cook_miriad on a non-zero exit)."""
+    pass
+
+class NullLogHandler(logging.Handler):
+    """
+    A handler for the logging module, which does nothing.
+    Provides a sink, so that loggers with no other handlers defined do
+    nothing rather than spewing unformatted garbage.
+    """
+    def emit(self, record):
+        # Intentionally swallow every record, whatever its level.
+        pass
+
+class WSRTrecipe(object):
+    """Base class for recipes, pipelines are created by calling the cook_*
+    methods.  Most subclasses should only need to reimplement go() and add
+    inputs and outputs.  Some might need to addlogger() to messages or
+    override main_results."""
+    def __init__(self):
+        ## List of inputs, self.inputs[key] != True is considered valid input
+        self.inputs   = ingredient.WSRTingredient()
+        ## List of outputs, should only be filled on succesful execution
+        self.outputs  = ingredient.WSRTingredient()
+        ## All of these should do something sensible in their __str__ for
+        ## simple print output
+
+        ## Try using the standard Python system for handling options
+        self.optionparser = OptionParser(
+            usage="usage: %prog [options]"
+        )
+        # Replace the default -h/--help so that help is handled by main_init()
+        # (store_true) instead of optparse exiting immediately.
+        self.optionparser.remove_option('-h')
+        self.optionparser.add_option(
+            '-h', '--help', action="store_true"
+        )
+        self.optionparser.add_option(
+            '-v', '--verbose',
+            action="callback", callback=self.__setloglevel,
+            help="verbose [Default: %default]"
+        )
+        self.optionparser.add_option(
+            '-d', '--debug',
+            action="callback", callback=self.__setloglevel,
+            help="debug [Default: %default]"
+        )
+
+        self.helptext = """LOFAR/WSRT pipeline framework"""
+        # Directories searched by cook_recipe() when importing sub-recipes.
+        self.recipe_path = ['.']
+
+    def _log_error(self, e):
+        """Log exception *e* through self.logger; if no handler is attached
+        anywhere on the logger chain, dump the message to stderr instead."""
+        # Sometimes, an exception will be thrown before we have any loggers
+        # defined that can handle it. Check if that's the case, and, if so,
+        # dump it to stderr.
+        handled = len(self.logger.handlers) > 0
+        my_logger = self.logger
+        while my_logger.parent:
+            my_logger = my_logger.parent
+            if len(my_logger.handlers) > 0 and my_logger is not my_logger.root:
+                handled = True
+        if handled:
+            self.logger.exception('Exception caught: ' + str(e))
+        else:
+            print >> sys.stderr, "***** Exception occurred with no log handlers"
+            print >> sys.stderr, "*****", str(e)
+
+    def help(self):
+        """Shows helptext and inputs and outputs of the recipe"""
+        print self.helptext
+        self.optionparser.print_help()
+        print '\nOutputs:'
+        # NOTE(review): self._outfields is never assigned in this class; it is
+        # presumably supplied by subclasses -- confirm before relying on help().
+        for k in self._outfields.keys():
+            print '  ' + k
+
+    def main_init(self):
+        """Main initialization for stand alone execution, reading input from
+        the command line"""
+        # The root logger has a null handler; we'll override in recipes.
+        logging.getLogger().addHandler(NullLogHandler())
+        self.logger = getSearchingLogger(self.name)
+        opts = sys.argv[1:]
+        try:
+            # Entries from an optional <name>.parset file are prepended to the
+            # command line as "--key value" pairs, so explicit command-line
+            # arguments take precedence.
+            myParset = parset.Parset(self.name + ".parset")
+            for p in myParset.keys():
+                opts[0:0] = "--" + p, myParset.getString(p)
+        except IOError:
+            logging.debug("Unable to open parset")
+        (options, args) = self.optionparser.parse_args(opts)
+        if options.help:
+            return 1
+        else:
+            for key, value in vars(options).iteritems():
+                if value is not None:
+                    self.inputs[key] = value
+            self.inputs['args'] = args
+            return 0
+
+    def main(self):
+        """This function is to be run in standalone mode."""
+        import os.path
+        # Recipe name is derived from the script filename (sans extension).
+        self.name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
+        status = self.main_init()
+        if not status:
+            status = self.run(self.name)
+            self.main_result()
+        else:
+            self.help()
+        logging.shutdown()
+        return status
+
+    def run(self, name):
+        """This code will run if all inputs are valid, and wraps the actual
+        functionality in self.go() with some exception handling, might need
+        another name, like try_execute, because it's to similar to go()."""
+        self.name = name
+        self.logger.info('recipe ' + name + ' started')
+        try:
+            status = self.go()
+            if not self.outputs.complete():
+                self.logger.warn("Note: recipe outputs are not complete")
+        except Exception, e:
+            self._log_error(e)
+            self.outputs = None ## We're not generating any results we have
+                                ## confidence in
+            # Non-zero return signals failure to main() / callers.
+            return 1
+        else:
+            self.logger.info('recipe ' + name + ' completed')
+            return status
+
+    def get_run_info(self, filepath):
+        """Return the {'inputs': ..., 'outputs': ...} record saved for this
+        recipe in <filepath>/pipeline.pickle, or None if unavailable."""
+        import pickle
+        try:
+            fd = open(filepath + '/pipeline.pickle')
+            results = pickle.load(fd)
+        except:
+            return None
+        fd.close()
+        if self.name in results.keys():
+            return results[self.name]
+        else:
+            return None
+
+    def set_run_info(self, filepath):
+        """Persist this recipe's inputs/outputs to <filepath>/pipeline.pickle."""
+        import pickle
+        try:
+            # NOTE(review): mode 'w' truncates the file on open, so the
+            # pickle.load() below always fails and previously saved results
+            # from other recipes are discarded -- confirm whether a
+            # read-then-rewrite was intended.
+            fd = open(filepath + '/' + 'pipeline.pickle', 'w')
+            try:
+                results = pickle.load(fd)
+            except:
+                results = {}
+            results[self.name] = {'inputs':self.inputs, 'outputs':self.outputs}
+            pickle.dump(results, fd)
+            fd.close()
+        except:
+            return None
+
+    def rerun(self, name, directory):
+        """Function that you can use to rerun a recipe from the point where it
+        ended.  Not working completely yet. [untested]"""
+        self.name = name
+        self.logger.info('recipe ' + name + ' started')
+        try:
+            results = self.get_run_info(directory)
+            if not results: return
+            # NOTE(review): get_run_info() already returns results[self.name],
+            # so indexing with self.name again here looks like a bug (KeyError
+            # unless the name is nested twice) -- confirm.
+            if not results[self.name]: return
+            self.inputs  = results[self.name]['inputs']
+            self.outputs = results[self.name]['outputs']
+            self.run(name)
+        except Exception, e:
+            self._log_error(e)
+            self.outputs = None ## We're not generating any results we have
+                                ## confidence in
+            # NOTE(review): returns 0 on failure / 1 on success, the opposite
+            # of run()'s convention -- confirm which callers expect.
+            return 0
+        else:
+            self.logger.info('recipe ' + name + ' completed')
+            return 1
+
+    def go(self):
+        """Main functionality, this empty placeholder only shows help"""
+        self.help()
+
+    def main_result(self):
+        """Main results display for stand alone execution, displaying results
+        on stdout"""
+        if self.outputs == None:
+            print 'No results'
+        else:
+            print 'Results:'
+            for o in self.outputs.keys():
+                print str(o) + ' = ' + str(self.outputs[o])
+
+    ## Maybe these cooks should go in some subclass?
+    ## Problem is you might need all of them in a recipe describing a pipeline
+    def cook_recipe(self, recipe, inputs, outputs):
+        """Execute another recipe/pipeline as part of this one"""
+        c = cook.PipelineCook(recipe, inputs, outputs, self.logger, self.recipe_path)
+        c.spawn()
+
+    def cook_system(self, command, options):
+        """Execute an arbitrairy system call, returns it's exitstatus"""
+        l = [command]
+        if type(options) == list:
+            l.extend(options)
+        else: ## we assume it's a string
+            l.extend(options.split())
+        self.print_debug('running ' + command + ' ' + str(options))
+        c = cook.SystemCook(command, l, {})
+        c.spawn()
+        while c.handle_messages():
+            pass ## we could have a timer here
+        c.wait()
+        return c.exitstatus ## just returning the exitstatus, the programmer
+                            ## must decide what to do
+
+    def cook_interactive(self, command, options, expect):
+        """Execute an arbitrairy system call, returns it's exitstatus, expect
+        can define some functions to call if certain strings are written to
+        the terminal stdout of the called program.
+        Whatever such functions return is written to the stdin of the called
+        program."""
+        commandline = [command]
+        if type(options) == list:
+            commandline.extend(options)
+        else: ## we assume it's a string
+            commandline.extend(options.split())
+        self.print_debug('running ' + command + ' ' + str(options))
+        c = cook.SystemCook(command, commandline, {})
+        c.set_expect(expect)
+        c.spawn()
+        while c.handle_messages():
+            pass ## we could have a timer here
+        c.wait()
+        return c.exitstatus ## just returning the exitstatus, the programmer
+                            ## must decide what to do
+
+    def cook_miriad(self, command, options):
+        """Execute a Miriad task, uses MRIBIN, returns it's exitstatus"""
+        l = [command]
+        if type(options) == list:
+            l.extend(options)
+        else: ## we assume it's a string
+            l.extend(options.split())
+        self.print_debug('running ' + command + str(options))
+        c = cook.MiriadCook(command, l, {})
+        c.spawn()
+        while c.handle_messages():
+            pass ## we could have a timer here
+        c.wait()
+        # should probably parse the messages on '### Fatal Error'
+        if c.exitstatus:
+            raise RecipeError('%s failed with error: %s' %
+                              (command, c.exitstatus))
+        return c.exitstatus ## just returning the exitstatus, the programmer
+                            ## must decide what to do
+
+##    def cook_aips(self, command, options):
+##        """Execute an AIPS task, returns it's exitstatus"""
+##        l = [command]
+##        if type(options) == list:
+##            l.extend(options)
+##        else: ## we assume it's a string
+##            l.extend(options.split())
+##        self.print_debug('running ' + command + str(options))
+##        c = cook.AIPSCook(command, l, {}, self.messages)
+##        c.spawn()
+##        while c.handle_messages():
+##            pass ## we could have a timer here
+##        c.wait()
+##        return c.exitstatus ## just returning the exitstatus, the programmer must decide what to do
+##
+##    def cook_aips2(self, command, options):
+##        """Execute an AIPS++ tool, returns it's exitstatus""" #?
+##        l = [command]
+##        if type(options) == list:
+##            l.extend(options)
+##        else: ## we assume it's a string
+##            l.extend(options.split())
+##        self.print_debug('running ' + command + str(options))
+##        c = cook.AIPS2Cook(command, l, {}, self.messages)
+##        c.spawn()
+##        while c.handle_messages():
+##            pass ## we could have a timer here
+##        c.wait()
+##        return c.exitstatus ## just returning the exitstatus, the programmer must decide what to do
+
+    def cook_glish(self, command, options):
+        """Execute a Glish script, uses AIPSPATH, returns it's exitstatus"""
+        l = ['glish', '-l', command + '.g']
+        if type(options) == list:
+            l.extend(options)
+        else: ## we assume it's a string
+            l.extend(options.split())
+        self.print_debug('running ' + command + str(options))
+        c = cook.GlishCook('glish', l, {})
+        c.spawn()
+        while c.handle_messages():
+            pass ## we could have a timer here
+        c.wait()
+        return c.exitstatus ## just returning the exitstatus, the programmer
+                            ## must decide what to do
+
+    def print_debug(self, text):
+        """Add a message at the debug level"""
+        self.logger.debug(text)
+
+    def print_message(self, text):
+        """Add a message at the verbose level"""
+        self.logger.info(text)
+    print_notification = print_message # backwards compatibility
+
+    def print_warning(self, text):
+        """Add a message at the warning level."""
+        self.logger.warn(text)
+
+    def print_error(self, text):
+        """Add a message at the error level"""
+        self.logger.error(text)
+
+    def print_critical(self, text):
+        """Add a message at the critical level"""
+        # NOTE(review): logging.Logger has no crit() method (it is critical()),
+        # so this would raise AttributeError if ever called -- confirm.
+        self.logger.crit(text)
+
+
+# The feeling is this needs to be part of the ingredient, or separate module,
+# not the recipe
+    def zap(self, filepath):
+        """Delete *filepath* (file or directory tree) by shelling out to rm."""
+        import os #, exception
+    #    if filepath == '/' or filepath == '~/':
+    #      raise Exception
+    #    else:
+    #      for root, dirs, files in os.walk(filepath, topdown=False):
+    #        for name in files:
+    #            os.remove(join(root, name))
+    #        for name in dirs:
+    #            os.rmdir(join(root, name))
+        if os.path.isdir(filepath):
+            self.cook_system('rm', ' -rf ' + filepath)
+        elif os.path.isfile(filepath):
+            self.cook_system('rm', ' -f ' + filepath)
+        else:
+            self.print_debug(filepath + ' doesn\'t seem to exist')
+
+    def __setloglevel(self, option, opt_str, value, parser):
+        """Callback for setting log level based on command line arguments"""
+        if str(option) == '-v/--verbose':
+            self.logger.setLevel(logging.INFO)
+        elif str(option) == '-d/--debug':
+            self.logger.setLevel(logging.DEBUG)
+
+
+# Stand alone execution code ------------------------------------------
+# Running this module directly executes the bare base recipe, whose go()
+# only shows the help text.
+if __name__ == '__main__':
+    standalone = WSRTrecipe()
+    sys.exit(standalone.main())
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/__init__.py b/CEP/Pipeline/framework/lofarpipe/cuisine/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/cook.py b/CEP/Pipeline/framework/lofarpipe/cuisine/cook.py
new file mode 100644
index 00000000000..cb49d282abd
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/cook.py
@@ -0,0 +1,264 @@
+#from message import ErrorLevel, NotifyLevel, VerboseLevel, DebugLevel
+import time, os, select, pty, fcntl, sys, logging, imp
+from lofarpipe.support.pipelinelogging import getSearchingLogger
+
class CookError(Exception):
    """Base class for all exceptions raised by this module."""
    def __init__(self, value):
        # Kept as a plain attribute because callers inspect .value directly.
        self.value = value
    def __str__(self):
        # repr() is the portable spelling of the Python-2-only backtick
        # operator used in the original.
        return repr(self.value)
+
class WSRTCook(object):
    """Common state shared by every cook: a (stripped) task name plus the
    input/output ingredient mappings and a logger."""

    def __init__(self, task, inputs, outputs, logger):
        self.task    = task.strip()
        self.logger  = logger
        self.inputs  = inputs
        self.outputs = outputs
+
class PipelineCook(WSRTCook):
    """
    A system for spawning a recipe, providing it with correct inputs, and
    collecting its outputs.
    """
    def __init__(self, task, inputs, outputs, logger, recipe_path):
        # task must name both a module on recipe_path and a class of the
        # same name inside that module.
        super(PipelineCook, self).__init__(task, inputs, outputs, logger)
        # Ensures the recipe to be run can be imported from the recipe path
        try:
            try:
                module_details = imp.find_module(task, recipe_path)
            except ImportError:
                # ...also support lower-cased file names.
                module_details = imp.find_module(task.lower(), recipe_path)
            module = imp.load_module(task, *module_details)
            # Instantiate the recipe class and give it a child logger that
            # inherits this cook's level.
            self.recipe = getattr(module, task)()
            self.recipe.logger = getSearchingLogger("%s.%s" % (self.logger.name, task))
            self.recipe.logger.setLevel(self.logger.level)
        except Exception, e:
            # Any failure (missing module, missing class, recipe __init__
            # error) is reported and converted into a CookError.
            self.logger.exception("Exception caught: " + str(e))
            self.recipe = None
            raise CookError (task + ' can not be loaded')

    def try_running(self):
        """Run the recipe, inputs should already have been checked."""
        self.recipe.name     = self.task
        # recipe.run() follows shell conventions: a falsy return value means
        # success, anything else is failure.
        if not self.recipe.run(self.task):
            self.copy_outputs()
        else:
            raise CookError (self.task + ' failed')

    def copy_inputs(self):
        """Ensure inputs are available to the recipe to be run"""
        for k in self.inputs.keys():
            self.recipe.inputs[k] = self.inputs[k]

    def copy_outputs(self):
        """Pass outputs from the recipe back to the rest of the pipeline"""
        if self.recipe.outputs == None:
            raise CookError (self.task + ' has no outputs') ## should it have??
        else:
            for k in self.recipe.outputs.keys():
                self.outputs[k] = self.recipe.outputs[k]

    def spawn(self):
        """Copy inputs to the target recipe then run it"""
        self.copy_inputs()
        self.try_running()
+
class SystemCook(WSRTCook):
    """Run an external executable on a pseudo-terminal.

    The child's pty output is logged at info level and its stderr (routed
    through a separate pipe) at warning level.

    Based on Parseltongue code by Mark Kettenis (JIVE)
    and subProcess from: Padraig Brady at www.pixelbeat.org
    and Pexpect from: Noah Spurrier on sourceforge"""
    def __init__(self, task, inputs, outputs, logger):
        super(SystemCook, self).__init__(task, inputs, outputs, logger)
        self._pid      = None ## spawned process ID
        self._child_fd = None ## child output file descriptor
        self._expect   = []
        self._fd_eof   = self._pipe_eof = 0
        ## We can only have a pipe for stderr as otherwise stdio changes its
        ## buffering strategy
        (self._errorpipe_end, self._errorpipe_front) = os.pipe()
##        self.poll = select.poll()

    def StripNoPrint(self, S):
        # Drop all non-printable characters before logging.
        from string import printable
        return "".join([ ch for ch in S if ch in printable ])

    def set_expect(self, expectlist):
        # expectlist: sequence of (trigger_text, callback) pairs; when
        # trigger_text appears in the child's output the callback is called
        # and its return value (if any) is written back to the child.
        self._expect = expectlist

    def spawn(self, env=None):
        """Try to start the task."""
        try:
            (self._pid, self._child_fd) = pty.fork()
        except OSError, e:
            self.logger.error('Unable to fork:' + str(e))
            raise CookError ('fork failed')
        if self._pid == 0: ## the new client
            try:
                #fcntl.fcntl(self.errw, fcntl.F_SETFL, os.O_NONBLOCK)
                #os.dup2(self.outputpipe_front, 1) ## This makes stdio screw
                #up buffering because a pipe is a block device

                # we hardcoded assume stderr of the pty has fd 2
                os.dup2(self._errorpipe_front, 2)

                os.close(self._errorpipe_end)
                os.close(self._errorpipe_front) ## close what we don't need
                self.logger.info("starting " + " ".join(self.inputs))
                if env:
                    os.execvpe(self.task, self.inputs, env)
                else:
                    os.execvp(self.task, self.inputs)
            except:
                # exec failed; the child must die, never return to the caller.
                sys.stderr.write('Process could not be started: ' + self.task)
                os._exit(1)
        else: ## the parent
##            self.poll.register(self._child_fd)
##            self.poll.register(self._errorpipe_end)
            os.close(self._errorpipe_front) ## close what we don't need
            fcntl.fcntl(self._child_fd, fcntl.F_SETFL, os.O_NONBLOCK)

    def finished(self):
        """Check whether the task has finished (pid is zeroed by wait())."""
        return self._pid == 0

    def handle_messages(self):
        """Read and log pending child output; return 1 while more may come,
        0 once both the pty and the stderr pipe have reached EOF."""
        tocheck=[]
        if not self._fd_eof:
            tocheck.append(self._child_fd)
        if not self._pipe_eof:
            tocheck.append(self._errorpipe_end)
        ready = select.select(tocheck, [], [], 0.25)
        for file in ready[0]:
            try:
                time.sleep(0.05)
                text = os.read(file, 32768)
            except: ## probably Input/Output error because the child died
                text = ''
            if text:
                for x in self._expect:
                    if x[0] in text: ## we need to do something if we see this text
                        returntext = x[1](text)
                        if returntext:
                            os.write(file, returntext)
                # NOTE(review): str.replace returns a new string, so these two
                # results are discarded and have no effect — confirm intent.
                text.replace('\r','\n') ## a pty returns '\r\n' even on Unix
                text.replace('\n\n','\n')
                for line in text.split('\n'): ## still have odd behaviour for gear output
                    if file == self._child_fd:
                        self.logger.info(self.StripNoPrint(line))
                    elif file == self._errorpipe_end:
                        self.logger.warn(self.StripNoPrint(line))
            else:
                if file == self._child_fd:
                    self._fd_eof   = 1
                elif file == self._errorpipe_end:
                    self._pipe_eof = 1
            # NOTE(review): this return sits inside the for loop, so only the
            # first ready descriptor is serviced per call — confirm intended.
            return 1
##        if self._child_fd in ready[0]:
##            try:
##                text = os.read(self._child_fd, 1024)
##            except: ## probably Input/Output error because the child died
##                text = ''
##            if text == '':
##                self._fd_eof   = 1
##            else: # should probably do some kind of line buffering
##                if text.find('(O)k/(C)ancel (Ok)') >= 0:
##                    os.write(self._child_fd, 'C\n')
##                else:
##                    self.messages.append(VerboseLevel, self.StripNoPrint(text))
##            return 1
##        if self._errorpipe_end in ready[0]:
##            try:
##                time.sleep(0.002) ## stderr isn't buffered
##                text = os.read(self._errorpipe_end, 1024)
##            except: ## probably Input/Output error because the child died
##                text = ''
##            if text == '':
##                self._pipe_eof = 1
##            else: # should probably do some kind of line buffering
##                self.messages.append(NotifyLevel, self.StripNoPrint(text))
##            return 1
        if self._fd_eof and self._pipe_eof: # should be an and not an or, but python 2.3.5 doesn't like it
            return 0
        if len(ready[0]) == 0: ## no data in 0.25 second timeout
            return 1
        return 0
##        if self._fd_eof and self._pipe_eof:
##            return 0
##        ready = self.poll.poll(1000)
##        for x in ready:
##            text = ''
##            if (x[1] & select.POLLOUT) or (x[1] & select.POLLPRI):
##                try:
##                    text = os.read(x[0], 1024)
##                except:
##                    if x[0] == self._child_fd:
##                        self._fd_eof   = 1
##                    elif x[0] == self._errorpipe_end:
##                        self._pipe_eof = 1
##            if (x[1] & select.POLLNVAL) or (x[1] & select.POLLHUP) or (x[1] & select.POLLERR) or (text == ''):
##                if x[0] == self._child_fd:
##                    self._fd_eof   = 1
##                elif x[0] == self._errorpipe_end:
##                    self._pipe_eof = 1
##            elif text:
##                if x[0] == self._child_fd:
##                    self.messages.append(VerboseLevel, text)
##                elif x[0] == self._errorpipe_end:
##                    self.messages.append(NotifyLevel, text)
##        if self._fd_eof and self._pipe_eof:
##             return 0
##        return 1 ##else

    def wait(self):
        """Check if the task is finished and clean up."""
        ##self.__excpt = sys.exc_info() might want to check this in some way?
        try:
          (pid, status) = os.waitpid(self._pid, os.WNOHANG) ## clean up the zombie
          assert(pid == self._pid)
          if os.WIFEXITED(status) or os.WIFSIGNALED(status):
              self._pid       = 0
              self.exitstatus = status
          # NOTE(review): if the child has not exited yet, self.exitstatus is
          # unbound below; the resulting AttributeError is swallowed by the
          # except clause and reported as a CookError — confirm intended.
          assert(self.finished())
          del self._pid
  ##        self.poll.unregister(self._child_fd)
  ##        self.poll.unregister(self._errorpipe_end)
          os.close(self._child_fd)
          os.close(self._errorpipe_end)
          ## Interpret the return value
          # A waitpid status > 255 carries the exit code in the high byte;
          # shift it down.  Any other nonzero status means a signal/abort.
          if (self.exitstatus == 0 or self.exitstatus > 255):
              if (self.exitstatus > 255):
                  self.exitstatus = self.exitstatus >> 8
              else: self.exitstatus = 0
              self.logger.info(self.task + ' has ended with exitstatus: ' + str(self.exitstatus))

          else:
              self.logger.warn(self.task + ' was aborted with exitstatus: ' + str(self.exitstatus))
        except Exception, e:
          self.logger.exception('Exception caught: ' + str(type(Exception)) + ' ' + str(e))
          raise CookError (self.task + ' critical error' + str(type(Exception)) + ' ' + str(e))
+
class MiriadCook(SystemCook):
    """Run a Miriad task, locating the executable via the MIRBIN
    environment variable (a KeyError propagates if it is not set)."""

    def __init__(self, task, inputs, outputs, logger):
        bindir = os.environ['MIRBIN'] + '/'
        full_task = bindir + task
        super(MiriadCook, self).__init__(full_task, inputs, outputs, logger)
        self.logger.debug('Using ' + task + ' found in ' + bindir)
+
class AIPSCook(SystemCook):
    """Placeholder for running AIPS tasks; currently identical to SystemCook."""
    def __init__(self, task, inputs, outputs, logger):
        # someting with the Parseltongue AIPSTask
        super(AIPSCook, self).__init__(task, inputs, outputs, logger)
+
class AIPS2Cook(SystemCook):
    """Placeholder for running AIPS++ tasks; currently identical to SystemCook."""
    def __init__(self, task, inputs, outputs, logger):
        # Don't know if we can do this right now, we might need a Python interface to AIPS++
        super(AIPS2Cook, self).__init__(task, inputs, outputs, logger)
+
class GlishCook(SystemCook):
    """Run a Glish (AIPS++) task; only checks that AIPSPATH is set."""
    def __init__(self, task, inputs, outputs, logger):
        # Don't know if we can do this right now, we might need a Python interface to AIPS++
        aipspath = os.environ['AIPSPATH'] ## can raise an exception if it doesn't exist
        super(GlishCook, self).__init__(task, inputs, outputs, logger)
        self.logger.debug('Using ' + task + ' with AIPSPATH ' + aipspath)
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/files.py b/CEP/Pipeline/framework/lofarpipe/cuisine/files.py
new file mode 100644
index 00000000000..ab51149e16f
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/files.py
@@ -0,0 +1,19 @@
# The feeling is this needs to be part of the ingredient, not the recipe.
# Not used currently; maybe integrate it in some way with the data returned
# by MSinfo?
def zap(filepath):
    """Remove *filepath* from disk: directories recursively, files directly.

    Bug fix: the original body referenced ``self`` (and an undefined
    ``DebugLevel``) even though this is a module-level function, so every
    reachable branch raised NameError.  The removal is now done with the
    standard library instead of shelling out to ``rm``.

    A path that does not exist is silently ignored (best-effort semantics).
    """
    import os
    import shutil
    if os.path.isdir(filepath):
        shutil.rmtree(filepath)
    elif os.path.isfile(filepath):
        os.remove(filepath)
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/ingredient.py b/CEP/Pipeline/framework/lofarpipe/cuisine/ingredient.py
new file mode 100644
index 00000000000..96e12bce0dd
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/ingredient.py
@@ -0,0 +1,7 @@
class WSRTingredient(dict):
    """Mapping of named data items passed between recipes.

    Behaves exactly like a built-in dict; the subclass exists only to give
    ingredient collections a domain-specific name.
    """
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/job_parser.py b/CEP/Pipeline/framework/lofarpipe/cuisine/job_parser.py
new file mode 100644
index 00000000000..334d7e5e16e
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/job_parser.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+from WSRTrecipe import *
+
# Job status codes shared by job_parser and the pipeline manager.
JobError     = -1  ## parsing or execution failed
JobHold      =  0  ## job exists but must not be scheduled
JobScheduled =  1  ## parsed successfully, ready to run
JobProducing =  2  ## currently running
JobProduced  =  3  ## finished successfully
+
class job_parser(WSRTrecipe):
    """Recipe that parses an XML export-job description into self.outputs."""
    def __init__(self):
        WSRTrecipe.__init__(self)
        ##inputs
        self.inputs['Job'] = ''
        self.name = 'job parser'
        # We have no outputs
        ## Help text
        self.helptext = """
        Script to parse an XML job file for use by the pipeline_manager.
        See the exportjob.dtd for a definition of the format of a valid file."""

    ## Code to generate results ---------------------------------------------
    def go(self):
        """Parse the XML file named by self.inputs['Job'].

        On success fills self.outputs with 'ExportID', 'scriptname',
        'repository' and one entry per <input>/<inputlist> element, and sets
        outputs['Status'] to JobScheduled; any failure results in JobError.
        """
        try:
            from xml.dom import minidom, Node
            doc = minidom.parse(self.inputs['Job'])
            if doc.documentElement.nodeName == 'exportjob':
                self.outputs['ExportID'] = str(doc.documentElement.attributes.get('exportID').nodeValue)
                for node in doc.documentElement.childNodes:
                    if node.nodeName == 'scriptname':
                        value = node.childNodes[0].nodeValue
                        self.outputs['scriptname'] = value
                    elif node.nodeName == 'repository':
                        for itemnode in node.childNodes:
                            if itemnode.nodeName == 'server':
                                name = itemnode.childNodes[0].nodeValue
                            elif itemnode.nodeName == 'resultdir':
                                res  = itemnode.childNodes[0].nodeValue
                        # NOTE(review): 'name'/'res' are unbound if the child
                        # elements are missing; the NameError is swallowed by
                        # the except clause below — confirm intended.
                        if res and name: 
                            self.outputs['repository'] = (name, res)
                    elif node.nodeName == 'inputlist':
                        # SECURITY NOTE: the exec(eval(...)) pattern below
                        # executes text taken from the job file as Python;
                        # job files must come from a trusted source only.
                        name  = "'" + node.attributes.get('name').nodeValue + "'"
                        exec(eval("'self.outputs[%s] = []' % (name)"))
                        for itemnode in node.childNodes:
                            if itemnode.nodeName == 'listitem':
                                value = itemnode.childNodes[0].nodeValue
                                exec(eval("'self.outputs[%s].append(%s)' % (name, value)"))
                    elif node.nodeName == 'input':
                        name  = "'" + node.attributes.get('name').nodeValue + "'"
                        value = node.childNodes[0].nodeValue
                        #try: # we should just interpret the value, and have the cook/script worry about if it's an int or string.
                        if value == 'True' or value == 'False':
                            exec(eval("'self.outputs[%s] = %s' % (name, value)"))
                        #except:
                        else:
                            value = "'''" + value + "'''" ## tripple quotes because a value could be "8 O'clock" for example
                            exec(eval("'self.outputs[%s] = %s' % (name, value)"))
            # Missing 'ExportID' raises KeyError here, which the except
            # clause converts into a JobError status.
            if self.outputs['ExportID']: ## we need an export ID to identify the job
                self.outputs['Status'] = JobScheduled
                return
        except Exception, inst:
            self.print_notification('Failed importing job: ' + self.inputs['Job'] + '; Error: ' + str(inst))
        self.outputs['Status'] = JobError
+
## Stand alone execution code ------------------------------------------
if __name__ == '__main__':
    # Parse the job file given on the command line (handled by main()).
    standalone = job_parser()
    standalone.main()
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/message.py b/CEP/Pipeline/framework/lofarpipe/cuisine/message.py
new file mode 100644
index 00000000000..a85642e6d4f
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/message.py
@@ -0,0 +1,78 @@
+import sys, time
+
##global Message Levels
## A message is echoed to an output when its level is >= that output's
## threshold in WSRTmessages.log; higher numbers are more severe.
ErrorLevel   = 70 ## Error
WarningLevel = 60 ## Warning
NotifyLevel  = 50 ## Always visible, stderr of external processes
VerboseLevel = 30 ## Visible on verbose, stdout of external processes
DebugLevel   = 10 ## Visible on debug
+
class WSRTmessages(list):
    """Class for handling message logging redirection and reporting thresholds.

    The list itself holds (time, level, text) tuples while store() is
    active; self.log maps writable objects to the minimum level echoed to
    them.
    """
    def __init__(self):
        list.__init__(self)
        self.log   = {sys.stdout:NotifyLevel} ## NotifyLevel and above are always sent to stdout
        self._store = False

    def store(self):
        """Start keeping messages above debug level in the list."""
        self._store = True

    def pause(self):
        """Stop keeping messages in the list (already stored ones remain)."""
        self._store = False

    def clear(self):
        """Drop all stored messages and stop storing."""
        list.__init__(self)
        self._store = False

    def append(self, level, item):
        """level determines if the message gets reported, item should basically be a string

        NOTE: deliberately overrides list.append with a different signature.
        """
        t = time.gmtime()
        if self._store and level > DebugLevel:
            list.append(self, (t, level, item)) ## storing the item for parsing by the caller.
        for output in self.log.keys():
            if self.log[output] <= level:
                if level >= ErrorLevel:
                    e = ' Error   : '
                elif level >= WarningLevel:
                    e = ' Warning : '
                elif level >= NotifyLevel:
                    e = ' Notification: '
                elif level >= VerboseLevel:
                    e = '     Message : '
                elif level >= DebugLevel:
                    e = '        Debug: '
                # NOTE(review): 'e' is unbound if level < DebugLevel; all
                # module levels are >= DebugLevel, so this only bites for
                # out-of-range custom levels.
                output.write('%04d-%02d-%02d %02d:%02d:%02d' % (t[0], t[1], t[2], t[3], t[4], t[5])
                             + e + item.strip() + '\n')
                output.flush()

    def __repr__(self):
        # Render the stored (time, level, text) tuples in the same format
        # append() writes to its outputs.
        text = ''
        for i in self:
            t     = i[0]
            level = i[1]
            item  = i[2]
            if level >= ErrorLevel:
                e = ' Error   : '
            elif level >= WarningLevel:
                e = ' Warning : '
            elif level >= NotifyLevel:
                e = ' Notification: '
            elif level >= VerboseLevel:
                e = '     Message : '
            elif level >= DebugLevel:
                e = '        Debug: '
            text += ('%04d-%02d-%02d %02d:%02d:%02d' % (t[0], t[1], t[2], t[3], t[4], t[5])
                         + e + item.strip() + '\n')
        return text

    def addlogger(self, level, logger):
        """The level should be one of the above 
        global levels and the logger should support
        a write(string) method."""
        self.log[logger] = level

    def setloglevel(self, level, logger):
        """Changes the level at which logging info is written to the logger."""
        for output in self.log.keys():
            if logger == output:
                self.log[logger] = level
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/parset.py b/CEP/Pipeline/framework/lofarpipe/cuisine/parset.py
new file mode 100644
index 00000000000..ec4aab70205
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/parset.py
@@ -0,0 +1,87 @@
class Parset(dict):
    """A minimal LOFAR parameter set: a dict of string keys to string values.

    Provides file round-tripping plus typed accessors for scalar and vector
    values.
    """

    def __init__(self, fileName=None):
        """Create an empty parset; optionally load it from fileName."""
        if fileName: self.readFromFile(fileName)

    def readFromFile(self, fileName):
        """Load 'key = value' pairs from fileName.

        '#' starts a comment; a trailing backslash continues the entry on
        the next line.  Existing keys are overwritten.
        """
        lastline = ''
        inf = open(fileName, 'r')
        try:
            for line in inf:
                lastline = lastline + line.split('#')[0]
                lastline = lastline.rstrip()
                if len(lastline) > 0 and lastline[-1] == '\\':
                    # Continuation: drop the backslash and keep accumulating.
                    lastline = lastline[:-1]
                elif '=' in lastline:
                    # Bug fix: split on the FIRST '=' only, so values may
                    # themselves contain '=' (the original raised ValueError).
                    key, value = lastline.split('=', 1)
                    self[key.strip()] = value.strip()
                    lastline = ''
        finally:
            inf.close()  # the original leaked the file handle

    def writeToFile(self, fileName):
        """Write all keys, alphabetically sorted, as 'key = value' lines."""
        outf = open(fileName, 'w')
        try:
            for key in sorted(self.keys()):
                outf.write(key + ' = ' + str(self[key]) + '\n')
        finally:
            outf.close()

    def getString(self, key):
        """Return the raw (string) value stored for key."""
        return self[key]

    def getInt(self, key):
        """Return the value for key converted to int."""
        return int(self[key])

    def getFloat(self, key):
        """Return the value for key converted to float."""
        return float(self[key])

    def _elements(self, key):
        # Helper: split a stored '[a, b, c]' string into its raw elements;
        # values that are already sequences pass through unchanged.
        if type(self[key]) is str:
            return self[key].strip('[]').split(',')
        return self[key]

    def getStringVector(self, key):
        """Return the value for key as a list of stripped strings."""
        return [lp.strip() for lp in self._elements(key)]

    def getIntVector(self, key):
        """Return the value for key as a list of ints."""
        return [int(lp) for lp in self._elements(key)]

    def getFloatVector(self, key):
        """Return the value for key as a list of floats."""
        return [float(lp) for lp in self._elements(key)]
+
+
### Self tests ###
# Round-trip check: write a parset to disk, read it back, and require the
# two dicts to compare equal.

if __name__ == '__main__':
    import sys
    import os
    # First create a parset in memory.
    p = Parset()
    p['anInt'] = str(42)
    p['aFloat'] = str(3.141592653589793)
    p['aString'] = str('hello world')
    p['anIntVec'] = str([1, 2, 3, 4, 5])
    p['aFloatVec'] = str([2.5, 4.25,
                          8.125, 16.0625])
    p['aStringVec'] = str(['aap', 'noot', 'mies', 'wim', 'zus', 'jet',
                           'teun', 'vuur', 'gijs', 'lam', 'kees', 'bok',
                           'weide', 'does', 'hok', 'duif', 'schapen'])
    # Write the parset out to file
    p.writeToFile('p.parset');

    # Create another parset by reading the written parset file
    q = Parset()
    q.readFromFile('p.parset')

    # Clean-up temporary parset file
    os.remove('p.parset')

    # Parsets p and q must be equal
    sys.stdout.write('Comparing parameter sets ...   ')
    if p == q:
        print 'ok'
    else:
        print 'FAIL: Expected equal parameter sets!'
+
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline.py b/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline.py
new file mode 100644
index 00000000000..65d3095af2c
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline.py
@@ -0,0 +1,324 @@
+#!/usr/bin/env python
+from WSRTrecipe import *
+from job_parser import *
+import os, os.path, time, threading, types, thread, sys
+
# Event tags placed at position 0 of each tuple on server_wrapper.events.
NewJob     = 1  ## a new job file has arrived
UpdateJob  = 2  ## an existing job's status must change
RestartCom = 3  ## retry previously failed status communication

##--------------------- Server Code, should maybe be in separate module? --------------------------
class server_wrapper(threading.Thread):
    """Base class to create a server to listen for new jobs in a separate thread.
    Mainly implements how to talk with the pipeline manager: events are
    queued on self.events and self.event is pulsed to wake the manager."""
    def __init__(self, server): ## might need to be overridden
        threading.Thread.__init__(self)
        self.server     = server
        self.stop       = False
        self.stop_lock  = threading.Lock()
        self.events     = [] ## list of received events. Main thread will remove them
        self.event_lock = threading.Lock()  ## lock to make sure only one is using this list
        self.event      = threading.Event() ## event to signal the main thread
        self.setDaemon(True) ## this kills the server when the main thread finishes

    def run(self):
        """Empty function override in a subclass"""
        pass ## override in sub-class

    def _post(self, event):
        # Append one event tuple and pulse the Event, all under the lock.
        self.event_lock.acquire()
        self.events.append(event)
        self.event.set() ## signal the main thread we've done something
        self.event.clear()
        self.event_lock.release()

    def new_job(self, fileName, fileContent):
        """Send an event to the pipeline manager that a new job is available."""
        self._post((NewJob, fileName, fileContent))

    def update_job(self, exportID, status):
        """Send an event to the pipeline manager that a job needs to be updated."""
        self._post((UpdateJob, exportID, status))

    def restart(self):
        """Ask the manager to retry communication of job statuses.

        Bug fix: the original appended '(RestartCom)', which is a bare int,
        not a tuple; the manager indexes event[0] and therefore raised
        TypeError.  A one-element tuple is appended now.
        """
        self._post((RestartCom,))

    def serve_forever(self):
        """Accept requests until the pipeline manager signals to stop."""
        self.server.socket.settimeout(60)
        while 1:
            if self.stop:
                break
            self.server.handle_request()
+##--------------------- End Server Code --------------------------
+
+class pipeline_manager(WSRTrecipe):
+    def __init__(self):
+        WSRTrecipe.__init__(self)
+        ## inputs
+        self.inputs['ConfigurationFile'] = 'pipeline_manager_config' ## without the .py
+        self.inputs['NewJobsDirectory']  = ''
+        self.inputs['JobsDirectory']     = ''
+        self.inputs['LogDirectory']      = ''
+
+        ##outputs
+        self.outputs['failed_communication'] = []
+
+        ## Help text
+        self.helptext = """
+        Script to run as a server for executing individial jobs
+        ConfigurationFile should be given without the '.py' extention.
+        NewJobs is where new jobs should be written, the JobsDirectory
+        contains unfinished Jobs"""
+        
+        self.jobs                 = []
+        self.parser               = job_parser()
+        self.parser.messages      = self.messages
+        self.running_job          = None
+    
+    ## Code to generate results ---------------------------------------------
+    def startup(self):
+        """Tell the user we stared, read the configuration and try to read unfinished jobs from JobDirectory"""
+        print 'WSRT pipeline manager version 0.5'
+        print 'Press Ctrl-C to abort'
+        exec(eval("'from %s import *' % self.inputs['ConfigurationFile']"))
+        self.log     = file(self.inputs['LogDirectory'] + '/pipeline_manager.log', 'a', 1)
+        self.messages.addlogger(message.DebugLevel, self.log)
+        self.print_message('----- Logging started -----')
+        ExistingJobs = os.listdir(self.inputs['JobsDirectory'])
+        self.server  = server
+        self.client  = client
+        self.restart_communication()
+        for e in ExistingJobs:
+            self.new_job(e, "")
+
+    def communicate_job(self, job): 
+        """function to write to log and communicate with GUI"""
+        if   job['Status'] == JobError:     self.print_notification('Job:' + str(job['ExportID']) + ' Failed')
+        elif job['Status'] == JobHold:      self.print_notification('Job:' + str(job['ExportID']) + ' is on Hold')
+        elif job['Status'] == JobScheduled: self.print_notification('Job:' + str(job['ExportID']) + ' Scheduled')
+        elif job['Status'] == JobProducing: self.print_notification('Job:' + str(job['ExportID']) + ' Started')
+        elif job['Status'] == JobProduced:  self.print_notification('Job:' + str(job['ExportID']) + ' Produced')
+        try:
+            if not isinstance(self.client, types.NoneType):
+                (status, message) = self.client.setStatus(str(job['ExportID']), str(job['Status']))
+                if status: ## we retry, because the client does not do an internal retry, but only reports the problem
+                    count = 1
+                    while (status and (count < 10)):
+                        self.print_notification("Got some error, retrying " + str(job['ExportID']) + ": " + message)
+                        time.sleep(60)
+                        (status, message) = self.client.setStatus(str(job['ExportID']), str(job['Status']))
+                        count += 1
+                if status:
+                    self.print_error(message)
+                else:
+                    self.print_message(message)
+        except:
+            self.outputs['failed_communication'].append((job['ExportID'], job['Status']))
+            self.set_run_info(self.inputs['LogDirectory'])
+            self.print_error('Could not update job %s status to %s.' % (str(job['ExportID']), str(job['Status'])))
+
+    def restart_communication(self):
+        """Try to tell the client what we failed to tell earlier."""
+        results = self.get_run_info(self.inputs['LogDirectory'])
+        if not results: return
+        if not results[self.name]: return
+        for i in results[self.name]['outputs']['failed_communication']:
+            try:
+                if not isinstance(self.client, types.NoneType):
+                    self.print_message(self.client.setStatus(i[0], i[1]))
+            except:
+                self.print_error('Could not update job %s status to %s.' % (str(job['ExportID']), str(job['Status'])))
+                return
+        self.outputs['failed_communication'] = []
+        self.set_run_info(self.inputs['LogDirectory'])
+
+    def new_job(self, filename, fileContent):
+        """Read filename and add to the list of jobs if it is a valid file."""
+        import shutil
+        try:
+            if fileContent:
+                f = open(self.inputs['NewJobsDirectory'] + '/' + filename, 'w')
+                f.write(fileContent)
+                f.close()
+            shutil.move(self.inputs['NewJobsDirectory'] + '/' + filename, self.inputs['JobsDirectory'] + '/' + filename)
+        except:
+            self.print_debug('file not found (existing job?): ' + self.inputs['NewJobsDirectory'] + '/' + filename)
+        self.parser.inputs['Job'] = self.inputs['JobsDirectory'] + '/' + filename
+        self.parser.outputs       = ingredient.WSRTingredient() ## empty ingredient to be able to run more than once
+        self.parser.go()
+        job             = self.parser.outputs.copy()
+        job['filename'] = filename
+        if job['Status'] == JobScheduled:
+            self.jobs.append(job)
+            self.communicate_job(job)
+        else:
+            self.print_notification('Parsing ' + self.inputs['JobsDirectory'] + '/' + filename + ' failed') ## Not implemented yet
+
+    def find_job(self, exportID):
+        for j in self.jobs:
+            if j['ExportID'] == exportID:
+                return j
+        return None
+
+    def update_job(self, exportID, status): 
+        """for communicating job status with GUI, mainly to put on Hold."""
+        j = self.find_job(exportID)
+        if j:
+            j['Status'] = status
+            self.communicate_job(j)
+        else:
+            self.print_debug('Job ' + str(exportID) + ' not found, ignoring message.')
+
    def check_server(self):
        """Check if there are any new jobs communicated to the server."""
        # Block (up to 10 seconds) until the server signals that events have
        # arrived, then drain the event queue under the server's lock.
        self.server.event.wait(10)
        self.server.event_lock.acquire()
        while len(self.server.events) > 0:
            # Each event is a tuple whose first element selects the action
            # (NewJob / UpdateJob / RestartCom constants); the remaining
            # elements are the action's arguments.
            job = self.server.events.pop(0)
            if job[0] == NewJob:
                self.new_job(job[1], job[2])
            elif job[0] == UpdateJob:
                self.update_job(job[1], job[2])
            elif job[0] == RestartCom:
                self.restart_communication()
        self.server.event_lock.release()
+
    def next_job(self):
        """See if there is another job scheduled, then start it."""
        import shutil
        for j in self.jobs:
            # Status values appear to be ordered numerically: >= JobScheduled
            # means runnable, > JobScheduled means it ran before (a restart).
            # TODO confirm against the status constants defined elsewhere.
            if j['Status'] >= JobScheduled:
                if j['Status'] > JobScheduled:
                    self.print_notification('Restarting job: ' + j['ExportID'])
                j['Status'] = JobProducing
                self.running_job = j
                self.communicate_job(j)
                self.cook_job(j)
                self.communicate_job(j)
                # Archive the job file next to the job's log output.
                shutil.move(self.inputs['JobsDirectory'] + '/' + j['filename'], 
                            self.inputs['LogDirectory'] + '/' + str(j['ExportID']) + '/' + j['filename'])
                self.jobs.remove(j) ## we can not use .pop() because the first job might be on hold
                self.running_job = None ## tell ourselves that we're doing nothing
                return ## we did a jobs.remove() so we don't what to stay in the for loop!
+    
+    def prepare_recipe_parameters(self, job):
+        """Prepare ingedients and message handler for the cook to cook the recipe."""
+        import sys
+        logfile = file(self.inputs['LogDirectory'] + '/' + str(job['ExportID']) + '/pipeline_manager.log', 'a', 1)
+        messages = message.WSRTmessages()
+        results  = ingredient.WSRTingredient()
+        if self.messages.log[sys.stdout] == message.DebugLevel:
+            messages.addlogger(message.DebugLevel, logfile)
+            messages.setloglevel(message.DebugLevel, sys.stdout)
+        else:
+            messages.addlogger(message.VerboseLevel, logfile)
+            messages.setloglevel(message.NotifyLevel, sys.stdout)
+        inputs = job.copy()
+        del inputs['scriptname']
+        del inputs['Status']
+        del inputs['filename']
+        return (logfile, inputs, results, messages)
+
    def cook_job(self, job):
        """This starts a recipe with the inputs as defined in the job file."""
        # Make sure the per-job log directory exists.
        if not os.path.isdir(self.inputs['LogDirectory'] + '/' + str(job['ExportID'])):
            os.makedirs(self.inputs['LogDirectory'] + '/' + str(job['ExportID']))
        logfile, inputs, results, messages = self.prepare_recipe_parameters(job)
        try:
            self.cook_recipe(job['scriptname'], inputs, results, messages)
        except Exception, e:
            # Any recipe failure marks the job as errored; results is cleared
            # so the branch below records the failure consistently.
            messages.append(message.ErrorLevel, str(e))
            job['Status'] = JobError
            results       = None
        if results:
            job['Status'] = JobProduced # something more elaborate?
            messages.append(message.VerboseLevel, 'Results:')
            for o in results.keys():
                messages.append(message.VerboseLevel, str(o) + ' = ' + str(results[o]))
        else: # should a recipe always have results?
            messages.append(message.VerboseLevel, 'No Results!')
            job['Status'] = JobError
        logfile.close()
        ## dump the logfile to the webdav as a dataproduct.
        if 'repository' in job.keys():
            try:
                temp = ingredient.WSRTingredient()
                temp['server']        = job['repository'][0]
                temp['resultdir']     = job['repository'][1]
                temp['filepath']      = self.inputs['LogDirectory'] + '/' + str(job['ExportID'])
                temp['filename']      = 'pipeline_manager.log'
                temp['resultfilename']= str(job['ExportID']) + '_pipeline.log'
                # NOTE(review): 'temp' is passed as both inputs and outputs
                # here -- confirm that put_pipeline_log expects that.
                self.cook_recipe('put_pipeline_log', temp, temp)
            except:
                # Best effort: failure to archive the log must not fail the job.
                self.print_notification('failed writing pipeline log.')
+
+    def print_time(self):
+        t = time.gmtime()
+        timestring = '\r%04d-%02d-%02d %02d:%02d:%02d' % (t[0], t[1], t[2], t[3], t[4], t[5])
+        if self.running_job:
+            timestring += ' Busy running job: ' + self.running_job['ExportID']
+        else:
+            if self.jobs:
+                timestring += ' checking for new jobs.'
+            else:
+                ##timestring += ' No jobs available, waiting for next job.'
+                timestring = '.'
+        sys.stdout.write(timestring)
+        sys.stdout.flush()
+
+    def go(self):
+        self.startup()
+        try:
+            while True: ##run forever
+                try:
+                    if not self.running_job:
+                        thread.start_new_thread((self.next_job), ())
+                    self.print_time()
+                    if not isinstance(self.server, types.NoneType):
+                        self.check_server()
+                        time.sleep(1) # temp fix as apparantly check_server can return fast enough to re-enter
+                                      # next_job before the previous one gets to self.running_job = j
+                                      # should be something with a lock like self.stop = False  self.stop_lock = threading.Lock()
+                    else:
+                        if self.jobs:
+                            time.sleep(10)
+                        else:
+                            raise Exception ("No more jobs and no Server, ending manager.")
+                except KeyboardInterrupt:
+                    self.print_notification('Pipeline Manager: Keyboard interupt detected, asking user...')
+                    reply = raw_input('Do you want to end the pipeline manager (y/n)?')
+                    if 'y' in reply:
+                        raise KeyboardInterrupt ('Pipeline Manager: User wants to end program')
+        except KeyboardInterrupt, k:
+            self.print_notification(str(k))
+        except Exception, inst:
+            self.print_error('Pipeline Manager: Exception caught: ' + str(type(Exception)) + ' ' + str(inst))
+            raise inst
+        if not isinstance(self.server, types.NoneType): ## check if the server is alive
+            self.server.stop_lock.acquire()
+            self.server.stop = True ## tell the server to stop
+            self.server.stop_lock.release()
+
+## Stand alone execution code ------------------------------------------
if __name__ == '__main__':
    # Run the pipeline manager as a stand-alone program; main() handles the
    # command line and eventually calls go().
    standalone = pipeline_manager()
    standalone.main()
diff --git a/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline_manager_config.py b/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline_manager_config.py
new file mode 100644
index 00000000000..d80dabe98f5
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/cuisine/pipeline_manager_config.py
@@ -0,0 +1,2 @@
# Pluggable communication endpoints for the pipeline manager; the default
# configuration runs without a client or server attached.
client = None
server = None
diff --git a/CEP/Pipeline/framework/lofarpipe/support/__init__.py b/CEP/Pipeline/framework/lofarpipe/support/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/CEP/Pipeline/framework/lofarpipe/support/baserecipe.py b/CEP/Pipeline/framework/lofarpipe/support/baserecipe.py
new file mode 100644
index 00000000000..7b26c7322b5
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/baserecipe.py
@@ -0,0 +1,231 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                              Base LOFAR Recipe
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
import errno
import inspect
import logging
import os
import sys

from ConfigParser import NoOptionError, NoSectionError
from ConfigParser import SafeConfigParser as ConfigParser
from functools import partial
from threading import Event

import lofarpipe.support.utilities as utilities
import lofarpipe.support.lofaringredient as ingredient
from lofarpipe.cuisine.WSRTrecipe import WSRTrecipe
from lofarpipe.support.lofarexceptions import PipelineException, PipelineRecipeFailed
from lofarpipe.support.lofaringredient import RecipeIngredients, LOFARinput, LOFARoutput
from lofarpipe.support.remotecommand import run_remote_command
+
class BaseRecipe(RecipeIngredients, WSRTrecipe):
    """
    Provides standard boiler-plate used in the various LOFAR pipeline recipes.
    """
    # Class ordering is important here.
    # WSRTrecipe.__init__ does not call a superclass, hence BaseIngredients
    # must go first.
    # Further, BaseIngredients.__init__ overwrites the inputs dict provided by
    # WSRTrecipe, so it must call super() before setting up inputs.
    inputs = {} # No inputs to add to defaults
    def __init__(self):
        """
        Subclasses should define their own parameters, but remember to call
        this __init__() method to include the required defaults.
        """
        super(BaseRecipe, self).__init__()
        # Shared error flag: set by cooperating code to signal failure.
        self.error = Event()
        self.error.clear()

    @property
    def __file__(self):
        """
        Provides the file name of the currently executing recipe.
        """
        # inspect is imported at module level; no local import needed.
        full_location = os.path.abspath(inspect.getsourcefile(self.__class__))
        # DANGER WILL ROBINSON!
        # On the lofar cluster frontend (lfe001), home directories are in
        # /data/users, but on the nodes they are in /home. This workaround
        # means things work like one might expect for now, but this is not a
        # good long-term solution.
        return full_location.replace('/data/users', '/home')

    def _setup_logging(self):
        """
        Set up logging.

        We always produce a log file and log to stdout. The name of the file
        and the logging format can be set in the pipeline configuration file.
        """
        try:
            logfile = self.config.get("logging", "log_file")
        except (NoSectionError, NoOptionError):
            # Fall back to a default log file inside the job directory.
            logfile = os.path.join(
                self.config.get("layout", "job_directory"), 'logs/pipeline.log'
            )

        try:
            format = self.config.get("logging", "format", raw=True)
        except (NoSectionError, NoOptionError):
            format = "%(asctime)s %(levelname)-7s %(name)s: %(message)s"

        try:
            datefmt = self.config.get("logging", "datefmt", raw=True)
        except (NoSectionError, NoOptionError):
            datefmt = "%Y-%m-%d %H:%M:%S"

        try:
            os.makedirs(os.path.dirname(logfile))
        except OSError as failure:
            # An already-existing logs directory is fine; anything else
            # (permissions, bad path) is a real error.
            if failure.errno != errno.EEXIST:
                raise

        stream_handler = logging.StreamHandler(sys.stdout)
        file_handler = logging.FileHandler(logfile)

        formatter = logging.Formatter(format, datefmt)

        stream_handler.setFormatter(formatter)
        file_handler.setFormatter(formatter)
        self.logger.addHandler(stream_handler)
        self.logger.addHandler(file_handler)

    def run_task(self, configblock, datafiles=None, **kwargs):
        """
        A task is a combination of a recipe and a set of parameters.
        Tasks can be predefined in the task file set in the pipeline
        configuration (default: tasks.cfg).

        Here, we load a task configuration and execute it.
        This is a "shorthand" version of
        :meth:`lofarpipe.cuisine.WSRTrecipe.WSRTrecipe.cook_recipe`.
        """
        self.logger.info("Running task: %s" % (configblock,))

        # Does the task definition exist?
        try:
            recipe = self.task_definitions.get(configblock, "recipe")
        except NoSectionError:
            raise PipelineException(
                "%s not found -- check your task definitions" % configblock
            )

        # Build inputs dict.
        # First, take details from caller.
        inputs = LOFARinput(self.inputs)
        # None (rather than a mutable []) as the default avoids sharing one
        # list object between calls.
        inputs['args'] = datafiles if datafiles is not None else []

        # Add parameters from the task file.
        # Note that we neither need the recipe name nor any items from the
        # DEFAULT config.
        parameters = dict(self.task_definitions.items(configblock))
        del parameters['recipe']
        for key in dict(self.config.items("DEFAULT")).keys():
            del parameters[key]
        inputs.update(parameters)

        # Update inputs with provided kwargs, if any.
        inputs.update(kwargs)

        # Default outputs dict.
        outputs = LOFARoutput()

        # Cook the recipe and return the results.
        if self.cook_recipe(recipe, inputs, outputs):
            self.logger.warn(
                "%s reports failure (using %s recipe)" % (configblock, recipe)
            )
            # The original passed configblock as a second positional argument
            # instead of interpolating it into the message, and the exception
            # class was never imported (NameError at raise time).
            raise PipelineRecipeFailed("%s failed" % configblock)
        return outputs

    def _read_config(self):
        """
        Locate, read and parse the pipeline configuration file.

        If no "config" input was given, look for pipeline.cfg next to the
        script, then ~/.pipeline.cfg. Raises PipelineException if neither
        is readable.
        """
        # If a config file hasn't been specified, use the default
        if "config" not in self.inputs:
            # Possible config files, in order of preference:
            conf_locations = (
                os.path.join(sys.path[0], 'pipeline.cfg'),
                os.path.join(os.path.expanduser('~'), '.pipeline.cfg')
            )
            for path in conf_locations:
                if os.access(path, os.R_OK):
                    self.inputs["config"] = path
                    break
            if "config" not in self.inputs:
                raise PipelineException("Configuration file not found")

        # job_name, start_time and cwd are made available for interpolation
        # inside the configuration file.
        config = ConfigParser({
            "job_name": self.inputs["job_name"],
            "start_time": self.inputs["start_time"],
            "cwd": os.getcwd()
        })
        config.read(self.inputs["config"])
        return config

    def go(self):
        """
        This is where the work of the recipe gets done.
        Subclasses should define their own go() method, but remember to call
        this one to perform necessary initialisation.
        """
        # Every recipe needs a job identifier
        if "job_name" not in self.inputs:
            raise PipelineException("Job undefined")

        if "start_time" not in self.inputs:
            import datetime
            self.inputs["start_time"] = datetime.datetime.utcnow().replace(microsecond=0).isoformat()

        self.logger.debug("Pipeline start time: %s" % self.inputs['start_time'])

        # Config is passed in from spawning recipe. But if this is the start
        # of a pipeline, it won't have one.
        if not hasattr(self, "config"):
            self.config = self._read_config()

        # Ensure we have a runtime directory
        if 'runtime_directory' not in self.inputs:
            self.inputs["runtime_directory"] = self.config.get(
                "DEFAULT", "runtime_directory"
            )
        else:
            self.config.set('DEFAULT', 'runtime_directory', self.inputs['runtime_directory'])
        if not os.access(self.inputs['runtime_directory'], os.F_OK):
            raise IOError("Runtime directory doesn't exist")

        # ...and task files, if applicable
        if "task_files" not in self.inputs:
            try:
                self.inputs["task_files"] = utilities.string_to_list(
                    self.config.get('DEFAULT', "task_files")
                )
            except NoOptionError:
                self.inputs["task_files"] = []
        self.task_definitions = ConfigParser(self.config.defaults())
        self.task_definitions.read(self.inputs["task_files"])

        # Recipes are looked up in the "master" subdirectory of each
        # configured recipe directory.
        try:
            self.recipe_path = [
                os.path.join(root, 'master') for root in utilities.string_to_list(
                    self.config.get('DEFAULT', "recipe_directories")
                )
            ]
        except NoOptionError:
            self.recipe_path = []

        # At this point, the recipe inputs must be complete. If not, exit.
        if not self.inputs.complete():
            raise PipelineException(
                "Required inputs not available: %s" %
                " ".join(self.inputs.missing())
            )

        # Only configure handlers if our parent is the root logger.
        # Otherwise, our parent should have done it for us.
        if isinstance(self.logger.parent, logging.RootLogger):
            self._setup_logging()
diff --git a/CEP/Pipeline/framework/lofarpipe/support/clusterdesc.py b/CEP/Pipeline/framework/lofarpipe/support/clusterdesc.py
new file mode 100644
index 00000000000..dbefb0a8b5a
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/clusterdesc.py
@@ -0,0 +1,78 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                     Cluster description handler
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+import os.path
+import lofar.parameterset
+from lofarpipe.support.lofarexceptions import ClusterError
+
class ClusterDesc(object):
    """
    Wrap a clusterdesc file, providing a more convenient, Pythonic interface
    for accessing its contents.
    """
    def __init__(self, filename):
        """
        Parse filename as a LOFAR parameterset and record the keys it
        defines. If the parset defines "SubClusters", each named subcluster
        file (resolved relative to this file) is loaded recursively.
        """
        self.filename = filename
        self.parameterset = lofar.parameterset.parameterset(self.filename)
        self.name = self.parameterset.getString('ClusterName')
        # Scan the file itself to collect the keys defined locally,
        # skipping blank lines and '#' comments.
        self.keylist = []
        with open(filename, 'r') as descfile:   # don't shadow the 'file' builtin
            for line in descfile:
                if len(line.strip()) == 0 or line.strip()[0] == '#': continue
                self.keylist.append(line.split('=')[0].strip())
        try:
            subclusters = self.parameterset.get("SubClusters").expand().getStringVector()
            self.subclusters = [
                ClusterDesc(
                    os.path.join(os.path.dirname(self.filename), subcluster)
                )
                for subcluster in subclusters
            ]
        except RuntimeError:
            # No SubClusters key: this is a leaf cluster description.
            self.subclusters = []

    def get(self, key, recursive=True):
        """
        Return all values for key defined in this file and -- if recursive --
        in all subclusters, concatenated into a single list.
        """
        values = []
        if key in self.keylist:
            values.extend(self.parameterset.get(key).expand().getStringVector())
        if recursive:
            for subcluster in self.subclusters:
                values.extend(subcluster.get(key, recursive=True))
        return values

    def keys(self, recursive=True):
        """
        Return the de-duplicated list of keys defined here and -- if
        recursive -- in all subclusters.
        """
        values = self.keylist[:]
        if recursive:
            for subcluster in self.subclusters:
                values.extend(subcluster.keys(recursive=True))
        return list(set(values))

    def __str__(self):
        return "ClusterDesc: " + self.name
+
def get_compute_nodes(clusterdesc):
    """
    Return a list of all compute nodes defined (under the key "Compute.Nodes")
    in the ClusterDesc class object clusterdesc.

    Raises ClusterError if the lookup fails.
    """
    try:
        return clusterdesc.get('Compute.Nodes')
    except Exception:
        # Narrowed from a bare except, which would also have swallowed
        # KeyboardInterrupt and SystemExit.
        raise ClusterError("Unable to find compute nodes in clusterdesc.")
+
def get_head_node(clusterdesc):
    """
    Return the head node, defined by the key "Head.Nodes" in the ClusterDesc
    class object clusterdesc.

    Always return the first head node, even if there are several defined.
    Raises ClusterError if the lookup fails or the list is empty.
    """
    try:
        return [clusterdesc.get('Head.Nodes')[0]]
    except Exception:
        # Narrowed from a bare except; an IndexError (empty list) and a
        # failed lookup both mean "no head node available".
        raise ClusterError("Unable to find head node in clusterdesc.")
diff --git a/CEP/Pipeline/framework/lofarpipe/support/clusterhandler.py b/CEP/Pipeline/framework/lofarpipe/support/clusterhandler.py
new file mode 100644
index 00000000000..5317045fa57
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/clusterhandler.py
@@ -0,0 +1,127 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                        Management of IPython cluster processes
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+import shlex
+import subprocess
+import threading
+import logging
+import os
+import time
+
+from contextlib import contextmanager
+
+from lofarpipe.support.lofarexceptions import ClusterError
+from lofarpipe.support.clusterdesc import ClusterDesc
+from lofarpipe.support.clusterdesc import get_compute_nodes, get_head_node
+
class ClusterHandler(object):
    """
    ClusterHandler provides a convenient interface for setting up and tearing
    down an IPython cluster -- engines & controller -- over a network topology
    described by a clusterdesc file.
    """
    def __init__(self, config, logger=None):
        # config: pipeline configuration object; the 'cluster', 'deploy' and
        # DEFAULT sections are read. Without an explicit logger, a basic
        # root logger is configured and used.
        if not logger:
            logging.basicConfig()
            self.logger = logging.getLogger()
        else:
            self.logger = logger
        clusterdesc = ClusterDesc(config.get('cluster', 'clusterdesc'))
        self.head_node = get_head_node(clusterdesc)[0]
        self.compute_nodes = get_compute_nodes(clusterdesc)
        self.script_path = config.get('deploy', 'script_path')
        self.config = config

    def start_cluster(self, nproc=""):
        """Start the controller on the head node, then engines on all
        compute nodes."""
        # Optional nproc argument specifies number of engines per node
        self.__start_controller()
        self.__start_engines(nproc)

    def stop_cluster(self):
        """Stop the controller, then the engines."""
        self.__stop_controller()
        self.__stop_engines()

    def __execute_ssh(self, host, command):
        # Run command on host via ssh (-x disables X forwarding); raises
        # CalledProcessError if the remote command exits non-zero.
        ssh_cmd = shlex.split("ssh -x %s -- %s" % (host, command))
        subprocess.check_call(ssh_cmd)
        self.logger.info("  * %s" % (host))

    def __multinode_ssh(self, nodes, command):
        # Run the same command on all nodes concurrently (one thread per
        # node) and wait for every connection to finish.
        ssh_connections = [
            threading.Thread(
                target = self.__execute_ssh,
                args = (node, command)
            ) for node in nodes
        ]
        [thread.start() for thread in ssh_connections]
        [thread.join() for thread in ssh_connections]

    def __start_controller(self):
        # Launch ipcontroller.sh on the head node, then wait for the
        # engine.furl file to appear: engines need it to connect.
        self.logger.info("Starting controller:")
        controlpath = self.config.get('DEFAULT', 'runtime_directory')
        controller_ppath = self.config.get('deploy', 'controller_ppath')
        # Check that there isn't an existing pidfile
        if os.path.isfile(os.path.join(controlpath, "ipc.pid")):
            raise ClusterError("Controller already running")
        # Before starting, ensure that the old engine.furl isn't lying about
        # to cause confusion
        try:
            os.unlink(os.path.join(controlpath, 'engine.furl'))
        except OSError:
            pass
        self.__execute_ssh(self.head_node, "bash %s/ipcontroller.sh %s start %s" % (self.script_path, controlpath, controller_ppath))
        # Wait until an engine.furl file has been created before moving on to
        # start engines etc.
        while not os.path.isfile(os.path.join(controlpath, 'engine.furl')):
            time.sleep(1)
        self.logger.info("done.")

    def __stop_controller(self):
        # Stop ipcontroller on the head node and remove its engine.furl so a
        # later start does not pick up a stale file.
        self.logger.info("Stopping controller:")
        controlpath = self.config.get('DEFAULT', 'runtime_directory')
        controller_ppath = self.config.get('deploy', 'controller_ppath')
        self.__execute_ssh(self.head_node, "bash %s/ipcontroller.sh %s stop %s" % (self.script_path, controlpath, controller_ppath))
        os.unlink(os.path.join(controlpath, 'engine.furl'))
        self.logger.info("done.")

    def __start_engines(self, nproc):
        # Launch ipengine.sh on every compute node, passing the controller's
        # FURL so the engines can connect.
        self.logger.info("Starting engines:")
        controlpath = self.config.get('DEFAULT', 'runtime_directory')
        engine_ppath = self.config.get('deploy', 'engine_ppath')
        engine_lpath = self.config.get('deploy', 'engine_lpath')
        with open(os.path.join(controlpath, 'engine.furl')) as furlfile:
            furl = furlfile.readline().strip()
        command = "bash %s/ipengine.sh %s start %s %s %s %s" % (self.script_path, controlpath, engine_ppath, engine_lpath, furl, str(nproc))
        self.__multinode_ssh(self.compute_nodes, command)
        self.logger.info("done.")

    def __stop_engines(self):
        # Stop ipengine on every compute node.
        self.logger.info("Stopping engines:")
        controlpath = self.config.get('DEFAULT', 'runtime_directory')
        command= "bash %s/ipengine.sh %s stop" % (self.script_path, controlpath)
        self.__multinode_ssh(self.compute_nodes, command)
        self.logger.info("done.")
+
@contextmanager
def ipython_cluster(config, logger, nproc=""):
    """
    Provides a context in which an IPython cluster is available. Designed for
    use within LOFAR pipeline recipes.

    The optional nproc argument specifies the number of engines which will be
    started per compute node.
    """
    cluster_handler = ClusterHandler(config, logger)
    cluster_handler.start_cluster(nproc)
    # If an exception is raised during the pipeline run, make sure we catch it
    # and shut the cluster down cleanly.
    try:
        yield
    finally:
        cluster_handler.stop_cluster()
diff --git a/CEP/Pipeline/framework/lofarpipe/support/clusterlogger.py b/CEP/Pipeline/framework/lofarpipe/support/clusterlogger.py
new file mode 100644
index 00000000000..191ef1c12f1
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/clusterlogger.py
@@ -0,0 +1,38 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                         Network logging system
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from contextlib import contextmanager
+
+import socket
+import threading
+
+from lofarpipe.support.jobserver import JobSocketReceiver
+
@contextmanager
def clusterlogger(
    logger,
    host=None,
    port=0
):
    """
    Provides a context in which a network logging server is available. Note
    that the logging server is just a JobSocketReceiver with no jobpool or
    error flag.

    Yields a host name & port to which log messages can be sent.
    """
    if not host:
        host = socket.gethostname()
    logserver = JobSocketReceiver(logger, {}, threading.Event(), host=host, port=port)
    logserver.start()
    try:
        yield logserver.server_address
    finally:
        # Always stop the receiver thread. The original only stopped it on
        # KeyboardInterrupt or a clean exit, so any other exception raised in
        # the body leaked a running server thread.
        logserver.stop()
    # Flush handlers only after a clean exit (matches the original behaviour).
    [handler.flush() for handler in logger.handlers]
diff --git a/CEP/Pipeline/framework/lofarpipe/support/control.py b/CEP/Pipeline/framework/lofarpipe/support/control.py
new file mode 100644
index 00000000000..953496aae83
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/control.py
@@ -0,0 +1,45 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                        Pipeline control recipe
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from lofarpipe.support.stateful import StatefulRecipe
+from lofarpipe.support.lofarexceptions import PipelineException
+
+#                                             Standalone Pipeline Control System
+# ------------------------------------------------------------------------------
+
class control(StatefulRecipe):
    """
    Basic pipeline control framework.

    Define a pipeline by subclassing and providing a body for the
    :meth:`pipeline_logic`.

    This class provides little, but can be specialised to eg provide a
    MAC/SAS interface etc.
    """
    inputs = {}

    def pipeline_logic(self):
        """Subclasses implement the actual pipeline steps here."""
        raise NotImplementedError

    def go(self):
        """
        Initialise via the superclass, then hand control to
        pipeline_logic(). Returns 0 on success, 1 if the pipeline raised
        PipelineException.
        """
        super(control, self).go()

        self.logger.info("LOFAR Pipeline (%s) starting." % (self.name,))

        try:
            self.pipeline_logic()
        except PipelineException as error:
            self.logger.error(error)
            return 1
        return 0
diff --git a/CEP/Pipeline/framework/lofarpipe/support/group_data.py b/CEP/Pipeline/framework/lofarpipe/support/group_data.py
new file mode 100644
index 00000000000..8eaaba58eca
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/group_data.py
@@ -0,0 +1,99 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                              Group data into appropriate chunks for processing
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from collections import defaultdict
+import subprocess
+
+from lofar.parameterset import parameterset
+
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.clusterdesc import get_compute_nodes
+from lofarpipe.support.parset import Parset
+
def group_files(logger, clusterdesc, node_directory, group_size, filenames):
    """
    Group a list of files into blocks suitable for simultaneous
    processing, such that a limited number of processes run on any given
    host at a time.

    All node_directory on all compute nodes specified in clusterdesc is
    searched for any of the files listed in filenames. A generator is
    produced; on each call, no more than group_size files per node
    are returned.
    """
    # Given a limited number of processes per node, the first task is to
    # partition up the data for processing.
    logger.debug('Listing data on nodes')
    data = {}
    for node in get_compute_nodes(clusterdesc):
        logger.debug("Node: %s" % (node))
        # "-maxdepth" and "1" must be separate argv elements: the original
        # passed "-maxdepth 1" as one argument, which find(1) rejects as an
        # unknown predicate.
        exec_string = ["ssh", node, "--", "find",
            node_directory,
            "-maxdepth", "1",
            "-print0"
            ]
        logger.debug("Executing: %s" % (" ".join(exec_string)))
        my_process = subprocess.Popen(exec_string, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = my_process.communicate()
        # find prints NUL-separated paths; keep only the requested files and
        # pre-chunk them into groups of at most group_size per node.
        data[node] = sout.split('\x00')
        data[node] = utilities.group_iterable(
            [element for element in data[node] if element in filenames],
            group_size,
        )

    # Now produce an iterator which steps through the various chunks of
    # data to image, and image each chunk
    data_iterator = utilities.izip_longest(*list(data.values()))
    for data_chunk in data_iterator:
        to_process = []
        for node_data in data_chunk:
            if node_data: to_process.extend(node_data)
        yield to_process
+
def gvds_iterator(gvds_file, nproc=4):
    """
    Reads a GVDS file.

    Provides a generator, which successively returns the contents of the GVDS
    file in the form (host, filename), in chunks suitable for processing
    across the cluster. Ie, no more than nproc files per host at a time.
    """
    parset = Parset(gvds_file)

    # Collect (filename, vds-part-name) pairs per host.
    data = defaultdict(list)
    for part in range(parset.getInt('NParts')):
        host = parset.getString("Part%d.FileSys" % part).split(":")[0]
        file = parset.getString("Part%d.FileName" % part)
        vds  = parset.getString("Part%d.Name" % part)
        data[host].append((file, vds))

    # Replace each host's list with an iterator over chunks of <= nproc files.
    for host, values in data.items():
        data[host] = utilities.group_iterable(values, nproc)

    while True:
        yieldable = []
        for host, values in data.items():
            try:
                # next() builtin instead of the Python-2-only .next() method.
                for filename, vds in next(values):
                    yieldable.append((host, filename, vds))
            except StopIteration:
                pass
        if len(yieldable) == 0:
            # 'return' ends the generator cleanly; raising StopIteration
            # inside a generator body is an error under PEP 479 (Python 3.7+).
            return
        else:
            yield yieldable
+
def load_data_map(filename):
    """
    Load a mapping of filename <-> compute node from a parset on disk.
    """
    datamap = Parset(filename)
    return [
        (host, entry)
        for host in datamap
        for entry in datamap.getStringVector(host)
    ]
diff --git a/CEP/Pipeline/framework/lofarpipe/support/ipython.py b/CEP/Pipeline/framework/lofarpipe/support/ipython.py
new file mode 100644
index 00000000000..dbc33f1b482
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/ipython.py
@@ -0,0 +1,56 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                   Extensions to IPython system
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+from ConfigParser import NoSectionError
+from IPython.kernel.task import StringTask
+from IPython.kernel import client as IPclient
+from lofarpipe.support.lofarexceptions import ClusterError
+
+class LOFARTask(StringTask):
+    """
+    Extended version of IPython's StringTask, allowing external
+    arguments for dependency resolution.
+    """
+    def __init__(self, expression, pull=None, push=None,
+        clear_before=False, clear_after=False, retries=0,
+        recovery_task=None, depend=None, dependargs=None
+    ):
+        self.dependargs = dependargs
+        return super(LOFARTask, self).__init__(
+            expression, pull, push, clear_before, clear_after,
+            retries, recovery_task, depend
+        )
+
+    def check_depend(self, properties):
+        """
+        Calls self.depend(properties, self.dependargs)
+        to see if a task should be run.
+        """
+        if self.depend is not None:
+            return self.depend(properties, self.dependargs)
+        else:
+            return True
+
+class IPythonRecipeMixIn(object):
+    """
+    Mix-in for recipes to provide access to an IPython cluster.
+    """
+    def _get_cluster(self):
+        """
+        Return task and multiengine clients connected to the running
+        pipeline's IPython cluster.
+        """
+        self.logger.info("Connecting to IPython cluster")
+        try:
+            tc  = IPclient.TaskClient(self.config.get('cluster', 'task_furl'))
+            mec = IPclient.MultiEngineClient(self.config.get('cluster', 'multiengine_furl'))
+        except NoSectionError:
+            self.logger.error("Cluster not definied in configuration")
+            raise ClusterError
+        except:
+            self.logger.error("Unable to initialise cluster")
+            raise ClusterError
+        return tc, mec
diff --git a/CEP/Pipeline/framework/lofarpipe/support/jobserver.py b/CEP/Pipeline/framework/lofarpipe/support/jobserver.py
new file mode 100644
index 00000000000..138bfd61d61
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/jobserver.py
@@ -0,0 +1,168 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                          Network services for sending/receiving job parameters
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from contextlib import contextmanager
+
+import signal
+import threading
+import struct
+import socket
+import select
+import logging
+import logging.handlers
+import Queue
+import SocketServer
+import cPickle as pickle
+
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.utilities import spawn_process
+
+class JobStreamHandler(SocketServer.StreamRequestHandler):
+    """
+    Networked job server.
+
+    This will listen for:
+
+    * GET <jobid>           -- reply with a list of all job arguments
+    * PUT <jobid> <results> -- receive and unpickle job results
+    * Pickled log records
+
+    Log records are logged using whatever local logger is supplied to the
+    SocketReceiver.
+    """
+    def handle(self):
+        """
+        Each request is expected to be a 4-byte length followed by either a
+        GET/PUT request or a pickled LogRecord.
+        """
+        while True:
+            chunk = self.request.recv(4)
+            try:
+                slen = struct.unpack(">L", chunk)[0]
+            except:
+                break
+            chunk = self.connection.recv(slen)
+            while len(chunk) < slen:
+                chunk = chunk + self.connection.recv(slen - len(chunk))
+            input_msg = chunk.split(" ", 2)
+            try:
+                # Can we handle this message type?
+                if input_msg[0] == "GET":
+                    self.send_arguments(int(input_msg[1]))
+                elif input_msg[0] == "PUT":
+                    self.read_results(input_msg[1], input_msg[2])
+                else:
+                    self.handle_log_record(chunk)
+            except:
+                # Otherwise, fail.
+                self.server.error.set()
+                self.server.logger.error("Protocol error; received %s" % chunk)
+                self.server.logger.error("Aborting.")
+
+    def send_arguments(self, job_id):
+        job_id = int(job_id)
+        self.server.logger.debug(
+            "Request for job %d from %s" %
+            (job_id, self.request.getpeername())
+        )
+        args = self.server.jobpool[job_id].arguments
+        pickled_args = pickle.dumps(args)
+        length = struct.pack(">L", len(pickled_args))
+        self.request.send(length + pickled_args)
+
+    def read_results(self, job_id, pickled_results):
+        job_id = int(job_id)
+        self.server.logger.debug(
+            "Results for job %d submitted by %s" %
+            (job_id, self.request.getpeername())
+        )
+        results = pickle.loads(pickled_results)
+        self.server.jobpool[job_id].results = results
+
+    def handle_log_record(self, chunk):
+        record = logging.makeLogRecord(pickle.loads(chunk))
+        self.server.queue.put_nowait(record)
+
+class JobSocketReceiver(SocketServer.ThreadingTCPServer):
+    """
+    Simple TCP socket-based job dispatch and results collection as well as
+    network logging.
+    """
+    def __init__(
+        self,
+        logger,
+        jobpool,
+        error,
+        host=None,
+        port=logging.handlers.DEFAULT_TCP_LOGGING_PORT
+    ):
+        if not host:
+            host = socket.gethostname()
+        SocketServer.ThreadingTCPServer.__init__(self, (host, port), JobStreamHandler)
+        self.abort = False
+        self.timeout = 1
+        self.queue = Queue.Queue()
+        self.logger = logger
+        self.jobpool = jobpool
+        self.error = error
+
+    def start(self):
+        # Log messages are received in one thread, and appended to an instance
+        # of Queue.Queue. Another thread picks messages off the queue and
+        # sends them to the log handlers. We keep the latter thread running
+        # until the queue is empty, thereby avoiding the problem of falling
+        # out of the logger threads before all log messages have been handled.
+        def loop_in_thread():
+            while True:
+                rd, wr, ex = select.select(
+                    [self.socket.fileno()], [], [], self.timeout
+                )
+                if rd:
+                    self.handle_request()
+                elif self.abort:
+                    break
+
+        def log_from_queue():
+            while True:
+                try:
+                    record = self.queue.get(True, 1)
+                    # Manually check the level against the pipeline's root logger.
+                    # Not sure this should be necessary, but it seems to work...
+                    if self.logger.isEnabledFor(record.levelno):
+                        self.logger.handle(record)
+                except Queue.Empty:
+                    if self.abort:
+                        break
+
+        self.runthread = threading.Thread(target=loop_in_thread)
+        self.logthread = threading.Thread(target=log_from_queue)
+        self.runthread.start()
+        self.logthread.start()
+
+    def stop(self):
+        self.abort = True
+        self.runthread.join()
+        self.logthread.join()
+        self.server_close()
+
+@contextmanager
+def job_server(logger, jobpool, error):
+    """
+    Provides a context in which job dispatch is available.
+
+    Yields a host name & port which clients can connect to for job details.
+    """
+    jobserver = JobSocketReceiver(logger, jobpool, error, port=0)
+    jobserver.start()
+    try:
+        yield jobserver.server_address
+    except KeyboardInterrupt:
+        jobserver.stop()
+        raise
+    jobserver.stop()
+    [handler.flush() for handler in logger.handlers]
diff --git a/CEP/Pipeline/framework/lofarpipe/support/lofarexceptions.py b/CEP/Pipeline/framework/lofarpipe/support/lofarexceptions.py
new file mode 100644
index 00000000000..2df127fe0d9
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/lofarexceptions.py
@@ -0,0 +1,28 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                                     Exceptions
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+class ExecutableMissing(Exception):
+    pass
+
+class PipelineException(Exception):
+    pass
+
+class PipelineRecipeFailed(PipelineException):
+    pass
+
+class PipelineReceipeNotFound(PipelineException):
+    pass
+
+class PipelineQuit(PipelineException):
+    """
+    If this exception is raised during a pipeline run, we skip over all
+    subsequent steps and exit cleanly.
+    """
+    pass
+
+class ClusterError(PipelineException):
+    pass
diff --git a/CEP/Pipeline/framework/lofarpipe/support/lofaringredient.py b/CEP/Pipeline/framework/lofarpipe/support/lofaringredient.py
new file mode 100644
index 00000000000..5d3fddc16de
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/lofaringredient.py
@@ -0,0 +1,353 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                                    Ingredients
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import os
+from optparse import make_option
+from UserDict import DictMixin
+
+from lofarpipe.cuisine.ingredient import WSRTingredient
+from lofarpipe.support.utilities import string_to_list, is_iterable
+
+#       These are currently only used by lofarrecipe.run_task to provide default
+#              input and output dicts based on copying metadata from the parent.
+# ------------------------------------------------------------------------------
+class LOFARinput(WSRTingredient):
+    """
+    All LOFAR pipeline ingredients are required to provide a few basic
+    parameters:
+
+    * job_name
+    * runtime_directory
+    * config
+    * task_files
+    * dry_run
+    * start_time
+
+    These are defined in the RecipeIngredients class; all options (but not
+    arguments) defined there are required.
+    """
+    def __init__(self, defaults):
+        super(LOFARinput, self).__init__(self)
+        for param in RecipeIngredients.inputs.iterkeys():
+            if param != "args" and defaults.has_key(param):
+                self[param] = defaults[param]
+
+class LOFARoutput(WSRTingredient):
+    """
+    LOFARoutput makes no changes to WSRTingredient.
+    """
+    pass
+
+class Field(object):
+    """
+    Fields provide validation and type checking of input/output.
+
+    Unlimited user-defined fields are possible; they should all derive from
+    this class.
+    """
+    def __init__(self, *opts, **attrs):
+        self.optionstrings = opts
+        if attrs.has_key("help"):
+            self.help = attrs['help']
+        else:
+            self.help = ""
+        if attrs.has_key("default"):
+            self.default = attrs['default']
+        elif attrs.has_key("optional") and attrs["optional"]:
+            self.optional = True
+
+    def is_valid(self, value):
+        """
+        Check whether ``value`` is a valid value for this field.
+
+        This must be defined in subclasses.
+
+        :param value: value to be checked
+        :rtype: bool
+        """
+        raise NotImplementedError
+
+    def coerce(self, value):
+        """
+        Try to convert value into the appropriate type for this sort of field.
+        Results should be checked with ``is_valid()``.
+
+        :param value: value to be coerced
+        :rtype: coerced value
+        """
+        return value
+
+    def generate_option(self, name):
+        """
+        Generate an :mod:`optparse` option.
+
+        :param name: Destination
+        :rtype: :class:`optparse.Option`
+        """
+        if hasattr(self, "default"):
+            help = self.help + " [Default: %s]" % str(self.default)
+        elif hasattr(self, "optional"):
+            help = self.help + " [Optional]"
+        else:
+            help = self.help
+        return make_option(help=help, dest=name, *self.optionstrings)
+
+class StringField(Field):
+    """
+    A Field which accepts any string as its value.
+    """
+    def is_valid(self, value):
+        return isinstance(value, str)
+
+class IntField(Field):
+    """
+    A Field which accepts any int as its value.
+    """
+    def is_valid(self, value):
+        return isinstance(value, int)
+
+    def coerce(self, value):
+        try:
+            return int(value)
+        except:
+            return value
+
+class FloatField(Field):
+    """
+    A Field which accepts any float as its value.
+    """
+    def is_valid(self, value):
+        return isinstance(value, float)
+
+    def coerce(self, value):
+        try:
+            return float(value)
+        except:
+            return value
+
+class FileField(Field):
+    """
+    A Field which accepts the name of an extant file.
+    """
+    def is_valid(self, value):
+        return os.path.exists(str(value))
+
+class ExecField(Field):
+    """
+    A Field which accepts the name of an executable file.
+    """
+    def is_valid(self, value):
+        return os.access(value, os.X_OK)
+
+class DirectoryField(Field):
+    """
+    A Field which accepts the name of an extant directory.
+    """
+    def is_valid(self, value):
+        return os.path.isdir(str(value))
+
+    def coerce(self, value):
+        try:
+            os.makedirs(str(value))
+        except:
+            pass
+        finally:
+            return value
+
+class BoolField(Field):
+    """
+    A Field which accepts a bool.
+    """
+    def is_valid(self, value):
+        return isinstance(value, bool)
+
+    def coerce(self, value):
+        if value == "False" or value == "None" or value == "":
+            return False
+        elif value == "True":
+            return True
+        else:
+            return value
+
+class ListField(Field):
+    """
+    A Field which accepts a non-string iterable (ie, list or tuple).
+    """
+    def is_valid(self, value):
+        return not isinstance(value, str) and is_iterable(value)
+
+    def coerce(self, value):
+        if isinstance(value, str):
+            return string_to_list(value)
+        else:
+            return value
+
+class DictField(Field):
+    """
+    A Field which accepts a dict.
+    """
+    def is_valid(self, value):
+        return isinstance(value, dict)
+
+class FileList(ListField):
+    """
+    A Field which accepts a list of extant filenames.
+    """
+    def is_valid(self, value):
+        if super(FileList, self).is_valid(value) and \
+        not False in [os.path.exists(file) for file in value]:
+            return True
+        else:
+            return False
+
+#                                            The magic that makes it all work...
+#                      RecipeIngredients should be mixed in to any recipes which
+#             need to deal with input or output. BaseRecipe already includes it,
+#                             so that will almost always be the case by default.
+# ------------------------------------------------------------------------------
+
+class LOFARingredient(DictMixin):
+    """
+    LOFARingredient provides dict-like access to a group of instances of
+    :class:`Field`.  If a field is defined which does not have a value set,
+    but which does have a default, that is returned.
+    """
+    def __init__(self, fields):
+        self._fields = fields
+        self._values = {}
+
+    def __getitem__(self, key):
+        # If we don't have the value for this key, but we do have a field with
+        # a valid default, return that.
+        if (
+            not self._values.has_key(key) and
+            self._fields.has_key(key) and
+            hasattr(self._fields[key], "default")
+        ):
+            field = self._fields[key]
+            value = field.coerce(field.default)
+            if not field.is_valid(value):
+                raise TypeError(
+                    "%s is an invalid value for %s %s" %
+                    (str(value), type(field).__name__, key)
+                )
+        elif self._values.has_key(key):
+            value = self._values[key]
+        else:
+            raise KeyError(key)
+        return value
+
+    def __setitem__(self, key, value):
+        if key in self._fields:
+            field = self._fields[key]
+            converted_value = field.coerce(value)
+            if not field.is_valid(converted_value):
+                raise TypeError(
+                    "%s is an invalid value for %s %s" %
+                    (str(value), type(field).__name__, key)
+                )
+            self._values[key] = converted_value
+        else:
+            raise TypeError("Ingredient %s not defined" % key)
+
+    def keys(self):
+        # We want to return a list of everything we have a value for. That's
+        # everything in _values, plus things in _fields which have a default.
+        return list(
+            set(self._values.keys()).union(
+                set(k for k, v in self._fields.items() if hasattr(v, "default"))
+            )
+        )
+
+    def make_options(self):
+        return [value.generate_option(key) for key, value in self._fields.iteritems()]
+
+    def missing(self):
+        return [
+            key for key in self._fields.iterkeys()
+            if not self._values.has_key(key)
+            and not hasattr(self._fields[key], "optional")
+            and not hasattr(self._fields[key], "default")
+        ]
+
+    def complete(self):
+        return False if self.missing() else True
+
+    def update(self, args, **kwargs):
+        for key, value in args.iteritems():
+            self._values[key] = value
+        for key, value in kwargs.iteritems():
+            self._values[key] = value
+
+class RecipeIngredientsMeta(type):
+    """
+    This metaclass ensures that the appropriate instances of :class:`Field`
+    are available in the inputs of every LOFAR recipe.
+    """
+    def __init__(cls, name, bases, ns):
+        # Inputs are inherited from the superclass.
+        # Need to do some gymnastics here, as we don't want to update the
+        # superclass's _infields -- thus we replace it and copy the contents.
+        new_inputs = {}
+        if hasattr(cls, "_infields"):
+            new_inputs.update(cls._infields)
+        if ns.has_key("inputs"):
+            new_inputs.update(ns["inputs"])
+        cls._infields = new_inputs
+
+        # Outputs are not inherited.
+        if ns.has_key('outputs'):
+            cls._outfields = ns['outputs']
+
+class RecipeIngredients(object):
+    """
+    All LOFAR recipes ultimately inherit from this. It provides the basic
+    ingredient structure, as well as the default fields which are available in
+    every recipe.
+    """
+    __metaclass__ = RecipeIngredientsMeta
+
+    inputs = {
+        'job_name': StringField(
+            '-j', '--job-name',
+            help="Job name"
+        ),
+        'runtime_directory': FileField(
+            '-r', '--runtime-directory',
+            help="Runtime directory"
+        ),
+        'config': FileField(
+            '-c', '--config',
+            help="Configuration file"
+        ),
+        'task_files': FileList(
+            '--task-file',
+            help="Task definition file"
+        ),
+        'start_time': StringField(
+            '--start-time',
+            help="[Expert use] Pipeline start time"
+        ),
+        'dry_run': BoolField(
+            '-n', '--dry-run',
+            help="Dry run",
+            default=False
+        ),
+        'args': ListField(
+            '--args', help="Args", default=[]
+        )
+    }
+
+    outputs = {}
+
+    def __init__(self):
+        super(RecipeIngredients, self).__init__()
+        #                  Must run the following *after* WSRTrecipe.__init__().
+        # ----------------------------------------------------------------------
+        self.inputs = LOFARingredient(self._infields)
+        self.outputs = LOFARingredient(self._outfields)
+        self.optionparser.add_options(self.inputs.make_options())
diff --git a/CEP/Pipeline/framework/lofarpipe/support/lofarnode.py b/CEP/Pipeline/framework/lofarpipe/support/lofarnode.py
new file mode 100644
index 00000000000..1886ec86781
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/lofarnode.py
@@ -0,0 +1,119 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                            Compute node system
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import os
+import socket
+import struct
+import platform
+import logging
+import logging.handlers
+import cPickle as pickle
+
+def run_node(*args):
+    """
+    Run on node to automatically locate, instantiate and execute the
+    correct LOFARnode class.
+    """
+    import imp
+    control_script = getattr(
+        imp.load_module(recipename, *imp.find_module(recipename, [nodepath])),
+        recipename
+    )
+    return control_script(loghost=loghost, logport=logport).run_with_logging(*args)
+
+class LOFARnode(object):
+    """
+    Base class for node jobs called through IPython or directly via SSH.
+
+    Sets up TCP based logging.
+    """
+    def __init__(
+        self,
+        loghost=None,
+        logport=logging.handlers.DEFAULT_TCP_LOGGING_PORT
+    ):
+        self.logger = logging.getLogger(
+            'node.%s.%s' % (platform.node(), self.__class__.__name__)
+        )
+        self.logger.setLevel(logging.DEBUG)
+        self.loghost = loghost
+        self.logport = int(logport)
+        self.outputs = {}
+
+    def run_with_logging(self, *args):
+        """
+        Calls the run() method, ensuring that the logging handler is added
+        and removed properly.
+        """
+        if self.loghost:
+            my_tcp_handler = logging.handlers.SocketHandler(self.loghost, self.logport)
+            self.logger.addHandler(my_tcp_handler)
+        try:
+            return self.run(*args)
+        finally:
+            if self.loghost:
+                my_tcp_handler.close()
+                self.logger.removeHandler(my_tcp_handler)
+
+    def run(self):
+        # Override in subclass.
+        raise NotImplementedError
+
+class LOFARnodeTCP(LOFARnode):
+    """
+    This node script extends :class:`~lofarpipe.support.lofarnode.LOFARnode`
+    to receive instructions via TCP from a
+    :class:`~lofarpipe.support.jobserver.JobSocketReceiver`.
+    """
+    def __init__(self, job_id, host, port):
+        self.job_id, self.host, self.port = int(job_id), host, int(port)
+        self.__fetch_arguments()
+        super(LOFARnodeTCP, self).__init__(self.host, self.port)
+
+    def run_with_stored_arguments(self):
+        """
+        After fetching arguments remotely, use them to run the standard
+        run_with_logging() method.
+        """
+        returnvalue = self.run_with_logging(*self.arguments)
+        self.__send_results()
+        return returnvalue
+
+    def __fetch_arguments(self):
+        """
+        Connect to a remote job dispatch server (an instance of
+        jobserver.JobSocketReceiver) and obtain all the details necessary to
+        run this job.
+        """
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        try:
+            s.connect((self.host, self.port))
+        except Exception, e:
+            print "Could not connect to %s:%s (got %s)" % (self.host, str(self.port), str(e))
+            raise
+        message = "GET %d" % self.job_id
+        s.send(struct.pack(">L", len(message)) + message)
+        chunk = s.recv(4)
+        slen = struct.unpack(">L", chunk)[0]
+        chunk = s.recv(slen)
+        while len(chunk) < slen:
+            chunk += s.recv(slen - len(chunk))
+        self.arguments = pickle.loads(chunk)
+
+    def __send_results(self):
+        """
+        Send the contents of self.outputs to the originating job dispatch
+        server.
+        """
+        message = "PUT %d %s" % (self.job_id, pickle.dumps(self.outputs))
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        try:
+            s.connect((self.host, int(self.port)))
+        except Exception, e:
+            print "Could not connect to %s:%s (got %s)" % (self.host, str(self.port), str(e))
+            raise
+        s.send(struct.pack(">L", len(message)) + message)
diff --git a/CEP/Pipeline/framework/lofarpipe/support/lofarrecipe.py b/CEP/Pipeline/framework/lofarpipe/support/lofarrecipe.py
new file mode 100644
index 00000000000..b4431dbfcb6
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/lofarrecipe.py
@@ -0,0 +1,13 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                           IPython and RemoteCommand all in one
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.ipython import IPythonRecipeMixIn
+from lofarpipe.support.stateful import BaseRecipe
+
+class LOFARrecipe(BaseRecipe, IPythonRecipeMixIn, RemoteCommandRecipeMixIn):
+    pass
diff --git a/CEP/Pipeline/framework/lofarpipe/support/mac.py b/CEP/Pipeline/framework/lofarpipe/support/mac.py
new file mode 100644
index 00000000000..7890b27f552
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/mac.py
@@ -0,0 +1,202 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                       Pipeline MAC integration
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import threading
+import collections
+
+from lofarpipe.support.control import control
+from lofarpipe.support.lofarexceptions import PipelineException, PipelineQuit
+
+#                                            Required by MAC EventPort Interface
+# ------------------------------------------------------------------------------
+
+from ep.control import *
+from ep.control import OK as controlOK
+
+#                                                           Integration with MAC
+# ------------------------------------------------------------------------------
+
+class MAC_control(control):
+    """
+    This extends the control framework to interface with MAC.
+    """
+    def __init__(self):
+        super(MAC_control, self).__init__()
+        self.optionparser.add_option('--controllername')
+        self.optionparser.add_option('--servicemask')
+        self.optionparser.add_option('--targethost')
+        self.optionparser.add_option('--treeid')
+
+    def pipeline_logic(self):
+        """
+        Define pipeline logic in subclasses.
+        """
+        raise NotImplementedError
+
+    def run_task(self, configblock, datafiles=[]):
+        self.logger.info( "Waiting for run state...")
+        self.state['run'].wait()
+        self.logger.info( "Running.")
+        self.logger.debug("Quit is %s" % (str(self.state['quit'].isSet())))
+        if self.state['quit'].isSet():
+            self.logger.info("Pipeline instructed to quit; bailing out")
+            raise PipelineQuit
+        try:
+            super(MAC_control, self).run_task(configblock, datafiles)
+        except PipelineException, message:
+            self.logger.error(message)
+#            raise PipelineQuit
+
+    def go(self):
+        #     Pipeline logic proceeds as in a standard recipe in its own thread
+        #                          MAC control takes place in a separate thread
+        # ---------------------------------------------------------------------
+        super(MAC_control, self).go()
+
+        self.logger.info(
+            "LOFAR Pipeline (%s) starting." %
+            (self.name,)
+        )
+
+        self.state = {
+            'run':      threading.Event(),
+            'quit':     threading.Event(),
+            'pause':    threading.Event(),
+            'finished': threading.Event()
+        }
+
+        control_thread = threading.Thread(target=self.control_loop)
+        pipeline_thread = threading.Thread(target=self.pipeline_logic)
+
+        pipeline_thread.setDaemon(True)
+        control_thread.start()
+        pipeline_thread.start()
+        control_thread.join()
+        self.logger.info("Control loop finished; shutting down")
+        return 0
+
+    def control_loop(self):
+        """
+        Loop until the pipeline finishes, receiving and responding to messages
+        sent by MAC.
+        """
+        #                                             Connect to the MAC server
+        # ---------------------------------------------------------------------
+        try:
+            my_interface = ControllerPort_Interface(
+                self.inputs['servicemask'], self.inputs['targethost']
+            )
+        except:
+            self.logger.info("Control interface not connected; quitting")
+            self.state['quit'].set()
+            self.state['run'].set()
+            return
+        my_interface.send_event(
+            ControlConnectEvent(self.inputs['controllername'])
+        )
+
+        #                    Buffer events received from the EventPort interface
+        # ----------------------------------------------------------------------
+        class ReceiverThread(threading.Thread):
+            def __init__(self, interface):
+                super(ReceiverThread, self).__init__()
+                self.interface = interface
+                self.event_queue = collections.deque()
+                self.active = True
+            def run(self):
+                while self.active:
+                    self.event_queue.append(self.interface.receive_event())
+                    self.logger.debug("Got a new event")
+            def next_event(self):
+                try:
+                    return self.event_queue.popleft()
+                except IndexError:
+                    return None
+        event_receiver = ReceiverThread(my_interface)
+        event_receiver.setDaemon(True)
+        event_receiver.start()
+        controllername = self.inputs['controllername']
+
+        #           The main control loop continues until the pipeline finishes
+        # ---------------------------------------------------------------------
+        while True:
+            #                               Handle any events received from MAC
+            # -----------------------------------------------------------------
+            current_event = event_receiver.next_event()
+
+            if isinstance(current_event, ControlConnectedEvent):
+                self.logger.debug("Received ConnectedEvent")
+            elif isinstance(current_event, ControlClaimEvent):
+                self.logger.debug("Received ClaimEvent")
+                my_interface.send_event(
+                    ControlClaimedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlPrepareEvent):
+                self.logger.debug("Received PrepareEvent")
+                my_interface.send_event(
+                    ControlPreparedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlSuspendEvent):
+                self.logger.debug("Received SuspendEvent")
+                self.logger.debug("Clearing run state; pipeline must pause")
+                self.state['run'].clear()
+                my_interface.send_event(
+                    ControlSuspendedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlResumeEvent):
+                self.logger.debug("Received ResumeEvent")
+                self.logger.debug("Setting run state: pipeline may run")
+                self.state['run'].set()
+                my_interface.send_event(
+                    ControlResumedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlReleaseEvent):
+                self.logger.debug("Received ReleaseEvent")
+                my_interface.send_event(
+                    ControlReleasedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlQuitEvent):
+                self.logger.debug("Received QuitEvent")
+                self.logger.debug("Setting quit state: pipeline must exit")
+                self.state['quit'].set()
+                self.state['run'].set()
+                my_interface.send_event(
+                    ControlQuitedEvent(
+                        controllername,
+                        self.inputs['treeid'],
+                        controlOK,
+                        "no error"
+                    )
+                )
+            elif isinstance(current_event, ControlResyncEvent):
+                self.logger.debug("Received ResyncEvent")
+                my_interface.send_event(
+                    ControlResyncedEvent(controllername, controlOK)
+                )
+            elif isinstance(current_event, ControlScheduleEvent):
+                self.logger.debug("Received ScheduleEvent")
+                my_interface.send_event(
+                    ControlScheduledEvent(controllername, controlOK)
+                )
+
+            #                  Shut everything down if the pipeline is finished
+            # -----------------------------------------------------------------
+            if self.state['finished'].isSet():
+                self.logger.debug("Got finished state: control loop exiting")
+                my_interface.send_event(
+                    ControlQuitedEvent(
+                        controllername,
+                        self.inputs['treeid'],
+                        controlOK,
+                        "pipeline finished"
+                    )
+                )
+                event_receiver.active = False
+                break
+
+            self.logger.debug("Control looping...")
+            time.sleep(1)
diff --git a/CEP/Pipeline/framework/lofarpipe/support/parset.py b/CEP/Pipeline/framework/lofarpipe/support/parset.py
new file mode 100644
index 00000000000..dee9ae3643f
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/parset.py
@@ -0,0 +1,114 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                          Parameterset Handling
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import os
+from tempfile import mkstemp
+from contextlib import contextmanager
+
+from lofar.parameterset import parameterset
+
+class Parset(parameterset):
+    """
+    This wraps lofar.parameterset to provide a convenient means of iterating
+    over the parameterset's keys.
+
+    It should be replaced (eventually) by rolling this functionality directly
+    into the C++ implementation.
+    """
+    def __init__(self, filename=None, caseInsensitive=False):
+        super(Parset, self).__init__(filename, caseInsensitive)
+        self.keys = []
+        if filename:
+            self._append_file(filename)
+
+    def add(self, key, value):
+        super(Parset, self).add(key, value)
+        self.keys.append(key)
+
+    def adoptFile(self, filename, prefix=''):
+        super(Parset, self).adoptFile(filename, prefix)
+        self._append_file(filename, prefix)
+
+    def clear(self):
+        super(Parset, self).clear()
+        self.keys = []
+
+    def remove(self, key):
+        super(Parset, self).remove(key)
+        self.keys.remove(key)
+
+    def replace(self, key, value):
+        super(Parset, self).replace(key, value)
+        if not key in self.keys:
+            self.keys.append(key)
+
+    def subtractSubset(self, baseKey):
+        super(Parset, self).subtractSubset(baseKey)
+        self.keys = filter(
+            lambda key: False if key[:len(baseKey)] == baseKey else True,
+            self.keys
+        )
+
+    def makeSubset(self, baseKey, prefix=None):
+        newps = Parset()
+        for key in self.keys:
+            if key[:len(baseKey)] == baseKey:
+                if prefix:
+                    newkey = key.replace(baseKey, prefix)
+                else:
+                    newkey = key
+                newps.add(newkey, self[key].get())
+        return newps
+
+    def addStringVector(self, key, vector):
+        super(Parset, self).add(key, "[ %s ]" % ", ".join(vector))
+        self.keys.append(key)
+
+    def _append_file(self, filename, prefix=''):
+        file = open(filename, 'r')
+        for line in file:
+            key = line.split("=")[0].strip()
+            if key:
+                self.keys.append(prefix + key)
+        file.close()
+
+    def __iter__(self):
+        return iter(self.keys)
+
+def get_parset(parset_filename):
+    """
+    Returns an instance of Parset with the given file loaded.
+    """
+    return Parset(parset_filename)
+
+def patch_parset(parset, data, output_dir=None):
+    """
+    Generate a parset file by adding the contents of the data dictionary to
+    the specified parset object. Write it to file, and return the filename.
+    """
+    temp_parset = get_parset(parset)
+    for key, value in data.iteritems():
+        temp_parset.replace(key, value)
+    fd, output = mkstemp(dir=output_dir)
+    temp_parset.writeFile(output)
+    os.close(fd)
+    return output
+
+@contextmanager
+def patched_parset(parset, data, output_dir=None, unlink=True):
+    """
+    Wrap patch_parset() in a contextmanager which removes the generated parset
+    when it finishes.
+
+    The unlink flag is to facilitate debugging -- pass ``unlink=False`` to
+    leave the patched parset in place for manual checking if required.
+    """
+    filename = patch_parset(parset, data, output_dir)
+    try:
+        yield filename
+    finally:
+        if unlink: os.unlink(filename)
diff --git a/CEP/Pipeline/framework/lofarpipe/support/pipelinelogging.py b/CEP/Pipeline/framework/lofarpipe/support/pipelinelogging.py
new file mode 100644
index 00000000000..1c53f7ab129
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/pipelinelogging.py
@@ -0,0 +1,269 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                               Logging routines
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+from contextlib import contextmanager
+from string import Template
+
+import os
+import time
+import resource
+import threading
+import logging
+import re
+
+class SearchPattern(object):
+    """
+    Match the contents of LogRecords against a regular expression, keeping
+    track of matching records.
+    """
+    def __init__(self, pattern):
+        self.pattern = re.compile(pattern)
+        self.zero()
+
+    def check(self, record):
+        """
+        If the message attached to LogRecords record matches our pattern,
+        store the record.
+        """
+        if self.pattern.search(record.getMessage()):
+            self.results.append(record)
+
+    def zero(self):
+        """
+        Reset our list of stored messages.
+        """
+        self.results = []
+
+class SearchPatterns(dict):
+    """
+    A dictionary of SearchPattern objects.
+
+    When a new entry is appended, it's automatically compiled into a
+    SearchPattern. Other access patterns are as for a dictionary.
+    """
+    def __init__(self):
+        # We only support "bare" init, ie no arguments.
+        super(SearchPatterns, self).__init__()
+
+    def __setitem__(self, name, pattern):
+        # Compile supplied string to a SearchPattern and add it to our
+        # dictionary.
+        super(SearchPatterns, self).__setitem__(name, SearchPattern(pattern))
+
+    def check(self, record):
+        """
+        Check the supplied LogRecord against all
+        registered SearchPatetrn objects.
+        """
+        for pattern in self.itervalues():
+            pattern.check(record)
+
+    def zero(self, name):
+        """
+        Zero the counter on a given SearchPattern.
+        """
+        self[name].zero()
+
+    def zero_all(self, name):
+        """
+        Zero the counter on all SearchPatterns registered.
+        """
+        for name in self.iterkeys():
+            self.zero(name)
+
+class SearchingLogger(logging.Logger):
+    """
+    A SearchingLogger will act as a normal logger object, forwarding
+    LogRecords to the appropriate handlers. In addition, it will check the
+    LogRecord against a SearchPatterns object and save any useful results.
+    """
+    def __init__(self, *args, **kwargs):
+        logging.Logger.__init__(self, *args, **kwargs)
+        self.searchpatterns = SearchPatterns()
+
+    def handle(self, record):
+        logging.Logger.handle(self, record)
+        self.searchpatterns.check(record)
+
+def getSearchingLogger(name):
+    """
+    Return an instance of SearchingLogger with the given name.
+
+    Equivalent to logging.getLogger, but returns a SearchingLogger.
+    """
+    old_class = logging.getLoggerClass()
+    logging.setLoggerClass(SearchingLogger)
+    try:
+        return logging.getLogger(name)
+    finally:
+        logging.setLoggerClass(old_class)
+
+def log_file(filename, logger, killswitch):
+    """
+    Do the equivalent of tail -f on filename -- ie, watch it for updates --
+    and send any lines written to the file to the logger.
+
+    killswitch is an instance of threading.Event: when set, we bail out of the
+    loop.
+
+    :param filename: Full path to file to watch
+    :param logger: Logger to which to send updates
+    :param killswitch: instance of :class:`threading.Event` -- stop watching file when set
+    """
+    # Create the file if it doesn't exist yet, so it can be opened for
+    # reading below.
+    if not os.path.exists(filename):
+        open(filename, 'w').close()
+    with open(filename, 'r') as f:
+        while not killswitch.isSet():
+            line = f.readline()
+            if not line:
+                # Nothing new: seek to EOF (presumably to reset the file
+                # object's end-of-file state before the next read -- TODO
+                # confirm) and poll again in a second.
+                f.seek(0, 2)
+                time.sleep(1)
+            else:
+                logger.debug(line.strip())
+
+class LogCatcher(object):
+    """
+    Sets up a context in which we can catch logs from individual pipeline
+    process in a file, then send then to the pipeline logger.
+
+    This provides the basic mechanism, but requires subclassing to define
+    self.log_prop and self.log_prop_filename (the name & contents of the log
+    configuration file).
+    """
+    def __init__(self):
+        # Abstract: subclasses must set log_prop, log_prop_filename,
+        # working_dir and logger_name (see CatchLog4CPlus / CatchLog4CXX).
+        raise NotImplementedError
+
+    def __enter__(self):
+        # The file the monitored process will write to, as named in the
+        # generated logging configuration.
+        log_filename = os.path.join(
+            self.working_dir, "pipeline_process.log"
+        )
+        # Write the logging configuration, filling in the log file name.
+        with open(self.log_prop_filename, 'w') as log_prop_file:
+            log_prop_file.write(
+                self.log_prop.substitute(log_filename=log_filename)
+            )
+        local_logger = logging.getLogger(self.logger_name)
+        self.killswitch = threading.Event()
+        # Tail the log file in a background thread, forwarding each line to
+        # local_logger (see log_file() in this module).
+        self.logging_thread = threading.Thread(
+            target=log_file,
+            args=(log_filename, local_logger, self.killswitch)
+        )
+        self.logging_thread.start()
+        return local_logger
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        time.sleep(2)   # Wait in case any remaining data is to be flushed to log
+        self.killswitch.set()
+        self.logging_thread.join()
+
+class CatchLog4CPlus(LogCatcher):
+    """
+    Implement a LogCatcher for log4cplus (as used by most LOFAR pipeline
+    tools).
+    """
+    def __init__(self, working_dir, logger_name, executable_name):
+        # log4cplus configuration template; $log_filename is filled in by
+        # LogCatcher.__enter__.
+        self.log_prop = Template("""
+log4cplus.rootLogger=DEBUG, FILE
+log4cplus.logger.TRC=TRACE9
+
+log4cplus.appender.FILE=log4cplus::RollingFileAppender
+log4cplus.appender.FILE.File=$log_filename
+log4cplus.appender.FILE.ImmediateFlush=true
+log4cplus.appender.FILE.MaxFileSize=10MB
+#log4cplus.appender.FILE.MaxBackupIndex=1
+#log4cplus.appender.FILE.layout=log4cplus::PatternLayout
+log4cplus.appender.FILE.layout.ConversionPattern=%l [%-3p] - %m%n
+        """)
+        # The monitored executable is pointed at this configuration file.
+        self.log_prop_filename = os.path.join(
+            working_dir, executable_name + ".log_prop"
+        )
+        self.working_dir = working_dir
+        self.logger_name = logger_name
+
+class CatchLog4CXX(LogCatcher):
+    """
+    Implement a LogCatcher for log4cxx (as used by ASKAP tools, eg cimager).
+    """
+    def __init__(self, working_dir, logger_name):
+        # log4j-style configuration template, as consumed by log4cxx;
+        # $log_filename is filled in by LogCatcher.__enter__.
+        self.log_prop = Template("""
+log4j.rootLogger=DEBUG, FILE
+
+log4j.appender.FILE=org.apache.log4j.RollingFileAppender
+log4j.appender.FILE.File=$log_filename
+log4j.appender.FILE.ImmediateFlush=true
+log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.FILE.layout.ConversionPattern=%l [%-3p] - %m%n
+        """)
+        # ASKAP tools read their logging configuration from this fixed name.
+        self.log_prop_filename = os.path.join(
+            working_dir, "askap.log_cfg"
+        )
+        self.working_dir = working_dir
+        self.logger_name = logger_name
+
+@contextmanager
+def log_time(logger):
+    """
+    Send information about the processing time used by code in this context to
+    the specified logger.
+
+    :param logger: logger to which timing information should be sent.
+    """
+    def get_rusage():
+        # Elementwise sum of our own resource usage and that of our
+        # (waited-for) children.
+        return [
+            x + y for x, y in zip(
+                resource.getrusage(resource.RUSAGE_CHILDREN),
+                resource.getrusage(resource.RUSAGE_SELF)
+            )
+        ]
+
+    start_time = time.time()
+    start_rusage = get_rusage()
+    try:
+        yield
+    finally:
+        # Report even when the managed block raises.
+        total_rusage = [x - y for x, y in zip(get_rusage(), start_rusage)]
+        # Fields 0 and 1 of struct_rusage are ru_utime and ru_stime.
+        logger.info(
+            "Total time %.4fs; user time: %.4fs; system time: %.4fs" % (
+                time.time() - start_time, total_rusage[0], total_rusage[1]
+            )
+        )
+        logger.debug(
+            "Start time was %.4fs; end time was %.4fs" % (
+                start_time, time.time()
+            )
+        )
+
+def log_process_output(process_name, sout, serr, logger):
+    """
+    Log stdout/stderr from a process if they contain anything interesting --
+    some line-noise produced by many CEP executables is stripped.
+
+    :param process_name: Name to be used for logging purposes
+    :param sout: Standard out to log (string)
+    :param serr: Standard error to log (string)
+    :param logger: Logger to which messages should be sent
+
+    The ``sout`` and ``serr`` params are intended to be used with the output
+    of :meth:`subprocess.Popen.communicate`, but any string-a-like should
+    work.
+    """
+    #     These lines are never logged, since they don't tell us anything useful
+    # --------------------------------------------------------------------------
+    excludepatterns = (
+        "Debug: registered context Global=0\n",
+        "tcgetattr: Invalid argument\n",
+    )
+
+    for pattern in excludepatterns:
+        sout = sout.replace(pattern, "")
+        serr = serr.replace(pattern, "")
+
+    if len(sout.strip()) > 0:
+        logger.debug("%s stdout: %s" % (process_name, sout))
+    if len(serr.strip()) > 0:
+        logger.debug("%s stderr: %s" % (process_name, serr))
diff --git a/CEP/Pipeline/framework/lofarpipe/support/remotecommand.py b/CEP/Pipeline/framework/lofarpipe/support/remotecommand.py
new file mode 100644
index 00000000000..3570565b30a
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/remotecommand.py
@@ -0,0 +1,307 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                           Run a remote command
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from collections import defaultdict
+from threading import BoundedSemaphore
+
+import re
+import os
+import signal
+import threading
+import time
+
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.utilities import spawn_process
+from lofarpipe.support.jobserver import job_server
+
+# By default, Linux allocates lots more memory than we need(?) for a new stack
+# frame. When multiplexing lots of threads, that will cause memory issues.
+threading.stack_size(1048576)
+
+class ParamikoWrapper(object):
+    """
+    Sends an SSH command to a host using paramiko, then emulates a Popen-like
+    interface so that we can pass it back to pipeline recipes.
+    """
+    def __init__(self, paramiko_client, command):
+        self.returncode = None
+        self.client = paramiko_client
+        self.chan = paramiko_client.get_transport().open_session()
+        self.chan.get_pty()
+        self.chan.exec_command(command)
+        self.stdout = self.chan.makefile('rb', -1)
+        self.stderr = self.chan.makefile_stderr('rb', -1)
+
+    def communicate(self):
+        if not self.returncode:
+            self.returncode = self.chan.recv_exit_status()
+        stdout = "\n".join(line.strip() for line in self.stdout.readlines()) + "\n"
+        stderr = "\n".join(line.strip() for line in self.stdout.readlines()) + "\n"
+        return stdout, stderr
+
+    def poll(self):
+        if not self.returncode and self.chan.exit_status_ready():
+            self.returncode = self.chan.recv_exit_status()
+        return self.returncode
+
+    def wait(self):
+        if not self.returncode:
+            self.returncode = self.chan.recv_exit_status()
+        return self.returncode
+
+    def kill(self):
+        self.chan.close()
+
+def run_remote_command(config, logger, host, command, env, arguments=None):
+    """
+    Run command on host, passing it arguments from the arguments list and
+    exporting key/value pairs from env(a dictionary).
+
+    Returns an object with poll() and communicate() methods, similar to
+    subprocess.Popen.
+
+    This is a generic interface to potentially multiple ways of running
+    commands (SSH, mpirun, etc). The appropriate method is chosen from the
+    config block supplied (with SSH as a fallback).
+    """
+    try:
+        method = config.get('remote', 'method')
+    except:
+        method = None
+
+    if method=="paramiko":
+        try:
+            key_filename = config.get('remote', 'key_filename')
+        except:
+            key_filename = None
+        return run_via_paramiko(logger, host, command, env, arguments, key_filename)
+    elif method=="mpirun":
+        return run_via_mpirun(logger, host, command, env, arguments)
+    else:
+        return run_via_ssh(logger, host, command, env, arguments)
+
+def run_via_mpirun(logger, host, command, environment, arguments):
+    """
+    Dispatch a remote command via mpirun.
+
+    Return a Popen object pointing at the MPI command, to which we add a kill
+    method for shutting down the connection if required.
+    """
+    logger.debug("Dispatching command to %s with mpirun" % host)
+    mpi_cmd = ["/usr/bin/mpirun", "-host", host]
+    for key in environment.keys():
+        mpi_cmd.extend(["-x", key])
+    mpi_cmd.append("--")
+    mpi_cmd.extend(command.split()) # command is split into (python, script)
+    mpi_cmd.extend(str(arg) for arg in arguments)
+    env = os.environ
+    env.update(environment)
+    process = spawn_process(mpi_cmd, logger, env=env)
+    # mpirun should be killed with a SIGTERM to enable it to shut down the
+    # remote command.
+    process.kill = lambda : os.kill(process.pid, signal.SIGTERM)
+    return process
+
+def run_via_ssh(logger, host, command, environment, arguments):
+    """
+    Dispatch a remote command via SSH.
+
+    We return a Popen object pointing at the SSH session, to which we add a
+    kill method for shutting down the connection if required.
+    """
+    logger.debug("Dispatching command to %s with ssh" % host)
+    ssh_cmd = ["ssh", "-n", "-tt", "-x", host, "--", "/bin/sh", "-c"]
+
+    commandstring = ["%s=%s" % (key, value) for key, value in environment.items()]
+    commandstring.append(command)
+    commandstring.extend(re.escape(str(arg)) for arg in arguments)
+    ssh_cmd.append('"' + " ".join(commandstring) + '"')
+    process = spawn_process(ssh_cmd, logger)
+    process.kill = lambda : os.kill(process.pid, signal.SIGKILL)
+    return process
+
+def run_via_paramiko(logger, host, command, environment, arguments, key_filename):
+    """
+    Dispatch a remote command via paramiko.
+
+    We return an instance of ParamikoWrapper.
+    """
+    logger.debug("Dispatching command to %s with paramiko" % host)
+    # Imported here so paramiko is only required when this dispatch method
+    # is actually selected.
+    import paramiko
+    client = paramiko.SSHClient()
+    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    client.connect(host, key_filename=key_filename)
+    # Build a single command line: "KEY=value ... command arg1 arg2 ...".
+    commandstring = ["%s=%s" % (key, value) for key, value in environment.items()]
+    commandstring.append(command)
+    commandstring.extend(re.escape(str(arg)) for arg in arguments)
+    return ParamikoWrapper(client, " ".join(commandstring))
+
+class ProcessLimiter(defaultdict):
+    """
+    Provide a dictionary-like structure of bounded semaphores with arbitrary
+    keys.
+
+    This gives a convenient way to keep tabs on the number of simultaneous
+    jobs running on a given host.
+
+    :param nproc: Bound value for semaphore (ie, maximum number of jobs)
+    :type nproc: integer or none
+    """
+    def __init__(self, nproc=None):
+        if nproc:
+            super(ProcessLimiter, self).__init__(
+                lambda: BoundedSemaphore(int(nproc))
+            )
+        else:
+            class Unlimited(object):
+                """
+                Dummy semaphore for the unlimited case.
+                Acquire and release always succeed.
+                """
+                def acquire(self):
+                    return True
+                def release(self):
+                    return True
+            super(ProcessLimiter, self).__init__(Unlimited)
+
+class ComputeJob(object):
+    """
+    Container for information about a job to be dispatched to a compute node.
+
+    :param host: Target host for job
+    :param command: Full path to command to be run on target host
+    :param arguments: List of arguments which will be passed to command
+    """
+    def __init__(self, host, command, arguments=[]):
+        # NOTE(review): mutable default argument; safe only as long as the
+        # stored list is never mutated in place -- confirm.
+        self.host = host
+        self.command = command
+        self.arguments = arguments
+        self.results = {}
+
+    def dispatch(self, logger, config, limiter, id, jobhost, jobport, error, killswitch):
+
+        """
+        Dispatch this job to the relevant compute node.
+
+        Note that error is an instance of threading.Event, which will be set
+        if the remote job fails for some reason.
+
+        Returns the remote process's exit status, or 1 if dispatch itself
+        failed.
+        """
+        self.id = id
+        # Block until the per-host job limit (see ProcessLimiter) lets us run.
+        limiter[self.host].acquire()
+        try:
+            if killswitch.isSet():
+                logger.debug("Shutdown in progress: not starting remote job")
+                error.set()
+                return 1
+            process = run_remote_command(
+                config,
+                logger,
+                self.host,
+                self.command,
+                {
+                    "PYTHONPATH": config.get('deploy', 'engine_ppath'),
+                    "LD_LIBRARY_PATH": config.get('deploy', 'engine_lpath')
+                },
+                arguments=[id, jobhost, jobport]
+            )
+            # Wait for process to finish. In the meantime, if the killswitch
+            # is set (by an exception in the main thread), forcibly kill our
+            # job off.
+            while process.poll() == None:
+                if killswitch.isSet():
+                    process.kill()
+                else:
+                    time.sleep(1)
+            sout, serr = process.communicate()
+
+            # Strip the line-noise ssh prints when its connection closes.
+            serr = serr.replace("Connection to %s closed.\r\n" % self.host, "")
+            log_process_output("Remote command", sout, serr, logger)
+        except Exception, e:
+            logger.exception("Failed to run remote process %s (%s)" % (self.command, str(e)))
+            error.set()
+            return 1
+        finally:
+            # Always free our slot on this host, whatever happened.
+            limiter[self.host].release()
+        if process.returncode != 0:
+            logger.error(
+                "Remote process %s failed (status: %d)" % (self.command, process.returncode)
+            )
+            error.set()
+        return process.returncode
+
+def threadwatcher(threadpool, logger, killswitch):
+    """
+    Start and watch a pool of threads. If an exception is thrown during
+    processing, set the killswitch so that all threads can shut down cleanly,
+    then join all the threads to wait for them to finish.
+
+    :param threadpool: Pool of threads to handle
+    :param logger: Logger
+    :type logger: logging.Logger or descendant
+    :param killswitch: Indication for threads to abort
+    :type killswitch: threading.Event
+    """
+    # If we receive a SIGTERM, shut down processing.
+    signal.signal(signal.SIGTERM, killswitch.set)
+    try:
+        # Start all the threads, but don't just join them, as that
+        # blocks all exceptions in the main thread. Instead, we wake
+        # up every second to handle exceptions.
+        [thread.start() for thread in threadpool]
+        logger.info("Waiting for compute threads...")
+
+        while True in [thread.isAlive() for thread in threadpool]:
+            time.sleep(1)
+    except:
+        # If something throws an exception (normally a
+        # KeyboardException, ctrl-c) set the kill switch to tell the
+        # comput threads to terminate, then wait for them.
+        logger.warn("Processing interrupted: shutting down")
+        killswitch.set()
+    finally:
+        # Always make sure everything has finished. Note that if an exception
+        # is thrown before all the threads have started, they will not all be
+        # alive (and hence not join()-able).
+        [thread.join() for thread in threadpool if thread.isAlive()]
+
+class RemoteCommandRecipeMixIn(object):
+    """
+    Mix-in for recipes to dispatch jobs using the remote command mechanism.
+    """
+    def _schedule_jobs(self, jobs, max_per_node=None):
+        """
+        Schedule a series of compute jobs. Blocks until completion.
+
+        :param jobs: iterable of :class:`~lofarpipe.support.remotecommand.ComputeJob` to be scheduled
+        :param max_per_node: maximum number of simultaneous jobs on any given node
+        :type max_per_node: integer or none
+        :rtype: dict mapping integer job id to :class:`~lofarpipe.support.remotecommand.ComputeJob`
+        """
+        threadpool = []
+        jobpool = {}
+        # Shared per-host semaphores limiting simultaneous jobs.
+        limiter = ProcessLimiter(max_per_node)
+        # Set to tell all dispatch threads to shut down (see threadwatcher).
+        killswitch = threading.Event()
+
+        if max_per_node:
+            self.logger.info("Limiting to %d simultaneous jobs/node" % max_per_node)
+
+        # Remote jobs connect back to the job server at (jobhost, jobport);
+        # presumably it serves them the jobpool contents -- see jobserver
+        # module for details.
+        with job_server(self.logger, jobpool, self.error) as (jobhost, jobport):
+            self.logger.debug("Job dispatcher at %s:%d" % (jobhost, jobport))
+            for job_id, job in enumerate(jobs):
+                jobpool[job_id] = job
+                # One thread per job; dispatch() blocks on the limiter, so
+                # starting them all at once is safe.
+                threadpool.append(
+                    threading.Thread(
+                        target=job.dispatch,
+                        args=(
+                            self.logger, self.config, limiter, job_id,
+                            jobhost, jobport, self.error, killswitch
+                        )
+                    )
+                )
+            threadwatcher(threadpool, self.logger, killswitch)
+        return jobpool
diff --git a/CEP/Pipeline/framework/lofarpipe/support/stateful.py b/CEP/Pipeline/framework/lofarpipe/support/stateful.py
new file mode 100644
index 00000000000..90e8bc3d97b
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/stateful.py
@@ -0,0 +1,96 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                          Stateful LOFAR Recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from functools import wraps
+
+import os.path
+import cPickle
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.lofarexceptions import PipelineException
+
+def stateful(run_task):
+    @wraps(run_task)
+    def wrapper(self, configblock, datafiles=[], **kwargs):
+        try:
+            my_state = self.completed.pop()
+        except (AttributeError, IndexError):
+            my_state = ('','')
+
+        if configblock == my_state[0]:
+            # We have already run this task and stored its state, or...
+            self.logger.info("Task already exists in saved state; skipping")
+            return my_state[1]
+        elif my_state[0] != '':
+            # There is a stored task, but it doesn't match this one, or...
+            self.logger.error("Stored state does not match pipeline definition; bailing out")
+            raise PipelineException("Stored state does not match pipeline definition")
+        else:
+            # We need to run this task now.
+            outputs = run_task(self, configblock, datafiles, **kwargs)
+            self.state.append((configblock, outputs))
+            self._save_state()
+            return outputs
+    return wrapper
+
+class StatefulRecipe(BaseRecipe):
+    """
+    Enables recipes to save and restore state.
+
+    This is used exactly as :class:`~lofarpipe.support.baserecipe.BaseRecipe`,
+    but will write a ``statefile`` in the job directory, recording the current
+    state of the pipeline after each recipe completes. If the pipeline is
+    interrupted, it can automatically resume where it left off.
+
+    To reset the pipeline and start from the beginning again, just remove the
+    ``statefile``.
+    """
+    inputs = {} # No non-default inputs
+    def __init__(self):
+        super(StatefulRecipe, self).__init__()
+        self.state = []
+        self.completed = []
+
+    def _save_state(self):
+        """
+        Dump pipeline state to file.
+        """
+        statefile = open(
+            os.path.join(
+                self.config.get('layout', 'job_directory'),
+                'statefile'
+            ),
+        'w')
+        state = [self.inputs, self.state]
+        cPickle.dump(state, statefile)
+
+    def go(self):
+        super(StatefulRecipe, self).go()
+        statefile = os.path.join(
+            self.config.get('layout', 'job_directory'),
+            'statefile'
+        )
+        try:
+            statefile = open(statefile, 'r')
+            inputs, self.state = cPickle.load(statefile)
+            statefile.close()
+
+            # What's the correct thing to do if inputs differ from the saved
+            # state? start_time will always change.
+            for key, value in inputs.iteritems():
+                if key != "start_time" and self.inputs[key] != value:
+                    raise PipelineException(
+                        "Input %s (%s) differs from saved state (%s)" %
+                        (key, str(self.inputs[key]), inputs[key])
+                    )
+
+            self.completed = list(reversed(self.state))
+        except (IOError, EOFError):
+            # Couldn't load state
+            self.completed = []
+
+    run_task = stateful(BaseRecipe.run_task)
diff --git a/CEP/Pipeline/framework/lofarpipe/support/utilities.py b/CEP/Pipeline/framework/lofarpipe/support/utilities.py
new file mode 100644
index 00000000000..09cbd93ccf3
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/support/utilities.py
@@ -0,0 +1,231 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                               Utility routines
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+from subprocess import Popen, CalledProcessError, PIPE
+from itertools import islice, repeat, chain, izip
+from contextlib import closing, contextmanager
+from time import sleep
+from random import randint
+
+import os
+import errno
+import shutil
+import subprocess
+
+from lofarpipe.support.pipelinelogging import log_process_output
+
+#                                                                  Compatibility
+#                               The following used to be defined in this module;
+#                        they are now included so as not to break existing code.
+# ------------------------------------------------------------------------------
+from lofarpipe.support.pipelinelogging import log_time
+from lofarpipe.support.parset import get_parset, patch_parset
+
+#                                                File and Directory Manipulation
+# ------------------------------------------------------------------------------
+
+def get_mountpoint(path):
+    """
+    Return the path to the mount point containing the given path.
+
+    :param path: Path to check
+    """
+    return path if os.path.ismount(path) else get_mountpoint(
+        os.path.abspath(os.path.join(path, os.pardir))
+    )
+
+def create_directory(dirname):
+    """
+    Recursively create a directory, without failing if it already exists.
+
+    Any other OSError (eg permission denied) is propagated to the caller.
+    """
+    try:
+        os.makedirs(dirname)
+    except OSError, failure:
+        # EEXIST just means the path is already present; anything else is
+        # a real error.
+        if failure.errno != errno.EEXIST:
+            raise
+
+#                                                    IPython Dependency Checking
+# ------------------------------------------------------------------------------
+
+def build_available_list(listname):
+    """
+    This can be pushed to an IPython engine and run there to generate a list
+    of data which is locally stored (and hence available for processing).
+
+    NOTE(review): ``filenames`` and ``id`` are not defined in this function
+    or module -- this code is executed remotely in an engine namespace where
+    the caller has pushed ``filenames`` and ``id`` names the engine. It
+    cannot be called directly here; confirm against the scheduling caller.
+    """
+    import os
+    from IPython.kernel.engineservice import get_engine
+    # Keep only the files this engine can actually read from local storage.
+    available = [
+        filename for filename in filenames if os.access(filename, os.R_OK)
+    ]
+    # Publish the result as an engine property under the given name.
+    properties = get_engine(id).properties
+    properties[listname] = available
+
+def clear_available_list(listname):
+    """
+    Delete lists of locally stored data to free up resources on the engine.
+
+    NOTE(review): like build_available_list(), this runs on an IPython
+    engine where ``id`` identifies the engine; not usable stand-alone.
+    """
+    from IPython.kernel.engineservice import get_engine
+    del(get_engine(id).properties[listname])
+
+def check_for_path(properties, dependargs):
+    """
+    Run on IPython engine to check for the existence of a given path in the
+    locally available data, as recorded by build_available_list().
+
+    Used for dependency checking when scheduling jobs.
+    """
+    try:
+        return dependargs[0] in properties[dependargs[1]]
+    except NameError:
+        return False
+
+#                                                       Iterators and Generators
+# ------------------------------------------------------------------------------
+
+def is_iterable(obj):
+    """
+    Return True if the given object is iterable, else False.
+    """
+    try:
+        iter(obj)
+    except:
+        return False
+    else:
+        return True
+
+try:
+    from itertools import izip_longest
+except ImportError:
+    def izip_longest(*args, **kwds):
+        """
+        This code is lifted from the Python 2.6 manual, since izip_longest
+        isn't available in the 2.5 standard library.
+        izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
+        """
+        fillvalue = None
+        def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
+            yield counter()         # yields the fillvalue, or raises IndexError
+        fillers = repeat(fillvalue)
+        iters = [chain(it, sentinel(), fillers) for it in args]
+        try:
+            for tup in izip(*iters):
+                yield tup
+        except IndexError:
+            pass
+
+def group_iterable(iterable, size):
+    """
+    Group the iterable into tuples of given size.  Returns a generator.
+
+    Example:
+    >>> for x in group_iterable([1,2,3,4,5], 3): print x
+    (1, 2, 3)
+    (4, 5)
+    """
+    # NOTE(review): this assumes `iterable` supports multiple independent
+    # iterations (eg a list) -- each islice below iterates it separately;
+    # a one-shot generator would be consumed incorrectly. Also, None
+    # elements in the input are indistinguishable from izip_longest's
+    # padding and are dropped from the output.
+    return (
+        filter(lambda x: x is not None, x)
+        for x in izip_longest(
+            # One slice per position within a group; izip_longest pads the
+            # final, possibly short, group with None.
+            *[islice(iterable, n, None, size) for n in xrange(size)]
+        )
+    )
+
+#                                                                  Miscellaneous
+# ------------------------------------------------------------------------------
+
+def read_initscript(logger, filename, shell="/bin/sh"):
+    """
+    Return a dict of the environment after sourcing the given script in a shell.
+    """
+    if not os.path.exists(filename):
+        logger.warn("Environment initialisation script not found!")
+        return {}
+    else:
+        logger.debug("Reading environment from %s" % filename)
+        p = subprocess.Popen(
+            ['. %s ; env' % (filename)],
+            shell=True,
+            executable=shell,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            close_fds=True
+        )
+        so, se = p.communicate()
+        environment = [x.split('=', 1) for x in so.strip().split('\n')]
+        environment = filter(lambda x: len(x) == 2, environment)
+        return dict(environment)
+
+def string_to_list(my_string):
+    """
+    Convert a list-like string (as in pipeline.cfg) to a list of values.
+    """
+    return [x.strip() for x in my_string.strip('[] ').split(',') if x.strip()]
+
+def spawn_process(cmd, logger, cwd=None, env=None, max_tries=2, max_timeout=30):
+    """
+    Tries to spawn a process.
+
+    If it hits an OSError due to lack of memory or too many open files, it
+    will keep trying max_tries times, waiting timeout seconds between each.
+
+    If successful, the process object is returned. Otherwise, we eventually
+    propagate the exception.
+
+    :param cmd: sequence of command arguments, passed straight to Popen
+    :param cwd: working directory for the child, or None
+    :param env: environment for the child, or None to inherit ours
+    :param max_tries: number of retries after the first failed attempt
+    :param max_timeout: upper bound (seconds) on the random retry delay
+    """
+    trycounter = 0
+    while True:
+        try:
+            process = Popen(
+                cmd, cwd=cwd, env=env, stdin=PIPE, stdout=PIPE, stderr=PIPE
+            )
+        except OSError, e:
+            logger.warn(
+                "Failed to spawn external process %s (%s)" % (" ".join(cmd), str(e))
+            )
+            if trycounter < max_tries:
+                # Random backoff so many processes on a loaded node don't
+                # all retry in lock-step.
+                timeout = randint(1, max_timeout)
+                logger.warn(
+                    "Retrying in %d seconds (%d more retries)." %
+                    (timeout, max_tries - trycounter - 1)
+                )
+                trycounter += 1
+                sleep(timeout)
+            else:
+                # Retries exhausted: let the OSError propagate.
+                raise
+        else:
+            # Popen succeeded; stop retrying.
+            break
+    return process
+
+def catch_segfaults(cmd, cwd, env, logger, max=1, cleanup=lambda: None):
+    """
+    Run cmd in cwd with env, sending output to logger.
+
+    If it segfaults, retry upto max times.
+    """
+    tries = 0
+    while tries <= max:
+        if tries > 0:
+            logger.debug("Retrying...")
+        logger.debug("Running: %s" % (' '.join(cmd),))
+        process = spawn_process(cmd, logger, cwd, env)
+        sout, serr = process.communicate()
+        log_process_output(cmd[0], sout, serr, logger)
+        if process.returncode == 0:
+            break
+        elif process.returncode == -11:
+            logger.warn("%s process segfaulted!" % cmd[0])
+            cleanup()
+            tries += 1
+            continue
+        else:
+            raise CalledProcessError(
+                process.returncode, cmd[0]
+            )
+    if tries > max:
+        logger.error("Too many segfaults from %s; aborted" % (cmd[0]))
+        raise CalledProcessError(process.returncode, cmd[0])
+    return process
diff --git a/CEP/Pipeline/framework/lofarpipe/tests/__init__.py b/CEP/Pipeline/framework/lofarpipe/tests/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/CEP/Pipeline/framework/lofarpipe/tests/lofaringredient.py b/CEP/Pipeline/framework/lofarpipe/tests/lofaringredient.py
new file mode 100644
index 00000000000..32e5e81af9d
--- /dev/null
+++ b/CEP/Pipeline/framework/lofarpipe/tests/lofaringredient.py
@@ -0,0 +1,216 @@
+#                                                       LOFAR PIPELINE FRAMEWORK
+#
+#                                                             Tests: ingredients
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+import unittest
+import os
+
+class StringFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.StringField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import StringField
+        self.stringfield = StringField(default="a string")
+
+    def test_validator(self):
+        """
+        Check that strings are correctly regarded as valid, and integers
+        aren't.
+        """
+        self.assertFalse(self.stringfield.is_valid(1))
+        self.assertTrue(self.stringfield.is_valid("1"))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.stringfield.default, "a string")
+
+class IntFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.IntField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import IntField
+        self.intfield = IntField(default=1)
+
+    def test_validator(self):
+        """
+        Check that integers are correctly regarded as valid, and strings
+        aren't.
+        """
+        self.assertFalse(self.intfield.is_valid("1"))
+        self.assertTrue(self.intfield.is_valid(1))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.intfield.default, 1)
+
+    def test_coerce(self):
+        """
+        Check that a string is correctly coerced to an integer.
+        """
+        self.assertEqual(self.intfield.coerce("1"), 1)
+
+class FloatFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.FloatField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import FloatField
+        self.floatfield = FloatField(default=1.0)
+
+    def test_validator(self):
+        """
+        Check that floats are correctly regarded as valid, and strings
+        aren't.
+        """
+        self.assertFalse(self.floatfield.is_valid("1"))
+        self.assertTrue(self.floatfield.is_valid(1.0))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.floatfield.default, 1.0)
+
+    def test_coerce(self):
+        """
+        Check that a string is correctly coerced to an float.
+        """
+        self.assertEqual(self.floatfield.coerce("1"), 1.0)
+
+class FileFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.FileField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import FileField
+        self.filefield = FileField(default='/')
+
+    def test_validator(self):
+        """
+        Integers are not valid as filenames, and certainly don't exist on
+        disk.
+
+        ``/`` should, though.
+        """
+        self.assertFalse(self.filefield.is_valid(1))
+        self.assertTrue(self.filefield.is_valid("/"))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.filefield.default, "/")
+
+class ExecFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.ExecField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import ExecField
+        self.execfield = ExecField(default='/bin/ls')
+
+    def test_validator(self):
+        """
+        ``/etc/passwd`` should always exist as a file on disk, but not be
+        executable.
+
+        ``/bin/ls`` should always exist, and must be executable.
+        """
+        self.assertFalse(self.execfield.is_valid("/etc/passwd"))
+        self.assertTrue(self.execfield.is_valid("/bin/ls"))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.execfield.default, "/bin/ls")
+
+class DirectoryFieldTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.DirectoryField`
+    """
+    def setUp(self):
+        from lofarpipe.support.lofaringredient import DirectoryField
+        self.directoryfield = DirectoryField(default='/tmp')
+
+    def test_validator(self):
+        """
+        An integer is not a valid directory.
+
+        ``/tmp`` should always be valid.
+        """
+        self.assertFalse(self.directoryfield.is_valid(1))
+        self.assertTrue(self.directoryfield.is_valid("/tmp"))
+
+    def test_default(self):
+        """
+        Check that default is correctly set.
+        """
+        self.assertEqual(self.directoryfield.default, "/tmp")
+
+    def test_coerce(self):
+        """
+        Coercing a create-able directory name should cause it to exist. We
+        should always be able to write in ``/tmp``.
+        """
+        self.directoryfield.coerce("/tmp/foo")
+        self.assertTrue(os.path.exists("/tmp/foo"))
+
+class LOFARIngredientTest(unittest.TestCase):
+    """
+    Tests for :class:`lofarpipe.support.lofaringredient.LOFARingredient`
+    """
+    def setUp(self):
+        """
+        An instance of
+        :class:`~lofarpipe.support.lofaringredient.LOFARingredient` is defined
+        which contains three instances of
+        :class:`~lofarpipe.support.lofaringredient.StringField`.
+        """
+        from lofarpipe.support.lofaringredient import StringField
+        from lofarpipe.support.lofaringredient import LOFARingredient
+        f = StringField(default="foo")
+        g = StringField()
+        h = StringField(default=1)
+        self.lofaringredient = LOFARingredient({"f": f, "g": g, "h": h})
+
+    def test_keys(self):
+        """
+        ``self.lofaringredient`` should contain keys for the two fields
+        which have default parameters, but not for the one which is unset.
+        """
+        self.assertEqual(len(self.lofaringredient.keys()), 2)
+        self.assertRaises(KeyError, lambda: self.lofaringredient['g'])
+
+    def test_values(self):
+        """
+        Prior to setting, the value of the fields should be equal to
+        the default value.
+        """
+        self.assertEqual(self.lofaringredient['f'], "foo")
+
+    def test_set(self):
+        """
+        When set, the value of the fields should be equal to the new value.
+        """
+        self.lofaringredient['g'] = "bar"
+        self.assertEqual(self.lofaringredient['g'], "bar")
+
+    def test_bad_values(self):
+        """
+        Unacceptable values should raise an exception.
+        """
+        self.assertRaises(TypeError, lambda: self.lofaringredient['h'])
+        self.lofaringredient['h'] = "bar"
+        self.assertEqual(self.lofaringredient['h'], "bar")
+
+# Allow running this test module directly with the unittest runner.
+if __name__ == "__main__":
+    unittest.main()
diff --git a/CEP/Pipeline/framework/setup.py b/CEP/Pipeline/framework/setup.py
new file mode 100644
index 00000000000..1ef0cc273a5
--- /dev/null
+++ b/CEP/Pipeline/framework/setup.py
@@ -0,0 +1,16 @@
+from distutils.core import setup
+
+setup(
+    name="Pipeline Framework",
+    version="0.1.dev",
+    packages=[
+        'lofarpipe',
+        'lofarpipe.cuisine',
+        'lofarpipe.support',
+        'lofarpipe.tests',
+    ],
+    description="LOFAR Pipeline System",
+    author="John Swinbank",
+    author_email="swinbank@transientskp.org",
+    url="http://www.transientskp.org/",
+)
diff --git a/CEP/Pipeline/mac/Makefile b/CEP/Pipeline/mac/Makefile
new file mode 100644
index 00000000000..62c5e49d3ab
--- /dev/null
+++ b/CEP/Pipeline/mac/Makefile
@@ -0,0 +1,45 @@
+# ------------------------------------------------------------------------------
+# Build the Boost.Python EventPort bindings (_ep_interface, _ep_echo,
+# _ep_control) against a LOFAR source tree.
+#
+# NOTE(review): LOFARROOT, LOG4CPLUS and BPYTHON contain machine-specific
+# paths -- adjust these for your environment before building.
+# ------------------------------------------------------------------------------
+LOFARROOT = /home/swinbank/Work/LOFAR
+
+# MACIO event/port sources and headers.
+MACPRTSRC = $(LOFARROOT)/MAC/MACIO/src/EventPort.cc
+MACEVTSRC = $(LOFARROOT)/MAC/MACIO/src/GCF_Event.cc
+MACINC    = -I$(LOFARROOT)/MAC/MACIO/include
+
+# LCS/Common support code compiled directly into each module.
+COMMONSRC = $(LOFARROOT)/LCS/Common/src/SymbolTable.cc $(LOFARROOT)/LCS/Common/src/AddressTranslator.cc $(LOFARROOT)/LCS/Common/src/hexdump.cc $(LOFARROOT)/LCS/Common/src/SystemUtil.cc $(LOFARROOT)/LCS/Common/src/StringUtil.cc $(LOFARROOT)/LCS/Common/src/Backtrace.cc $(LOFARROOT)/LCS/Common/src/Net/Socket.cc $(LOFARROOT)/LCS/Common/src/LofarLogger.cc
+COMMONINC = -I$(LOFARROOT)/LCS/Common/include
+
+# Extra include paths needed by the controller protocol.
+APLINC    = -I$(LOFARROOT)/MAC/APL/APLCommon/include
+GCFINC    = -I$(LOFARROOT)/MAC/GCF/TM/include
+
+# Third-party dependencies: log4cplus and Boost.Python/libpython.
+LOG4CPLUS = -I/data/sys/opt/LofIm/external/log4cplus/builds/log4cplus-1.0.2-3/include -L/data/sys/opt/LofIm/external/log4cplus/builds/log4cplus-1.0.2-3/lib/ -llog4cplus
+BPYTHON   = -I/usr/include/python2.5 -lboost_python -lpython2.5
+
+# Package directory the built _ep_*.so extension modules are written into.
+OUTPUTDIR = ep
+
+default: ep_interface ep_echo ep_control
+
+# Generic EventPort interface module (needs the generated SB protocol).
+ep_interface: sb_protocol
+	g++ -fPIC -shared -o $(OUTPUTDIR)/interface/_ep_interface.so src/ep_interface.cc src/SB_Protocol.cc -I./include $(MACPRTSRC) $(MACEVTSRC) $(COMMONSRC) $(LOFARCONF) $(COMMONINC) $(MACINC) $(LOG4CPLUS) $(BPYTHON) -lbfd
+
+# Echo protocol demonstration module.
+ep_echo: echo_protocol ep_interface
+	g++ -fPIC -shared -o $(OUTPUTDIR)/echo/_ep_echo.so src/ep_echo.cc src/eventwrappers.cc src/Echo_Protocol.cc -I./include $(MACEVTSRC) $(COMMONSRC) $(LOFARCONF) $(COMMONINC) $(MACINC) $(LOG4CPLUS) $(BPYTHON) -lbfd
+
+# Controller protocol module.
+ep_control: controller_protocol ep_interface
+	g++ -fPIC -shared -o $(OUTPUTDIR)/control/_ep_control.so src/ep_control.cc src/Controller_Protocol.cc -I./include $(MACEVTSRC) $(COMMONSRC) $(LOFARCONF) $(COMMONINC) $(MACINC) $(APLINC) $(GCFINC) $(LOG4CPLUS) $(BPYTHON) -lbfd
+
+# The *_protocol targets run MACIO's autogen on the .prot definitions and
+# move the generated source/header into this tree.
+sb_protocol:
+	autogen -L $(LOFARROOT)/MAC/MACIO/autogen/ $(LOFARROOT)/MAC/MACIO/src/SB_Protocol.prot
+	mv -f SB_Protocol.cc  src/
+	mv -f SB_Protocol.ph include/
+	
+echo_protocol:
+	autogen -L $(LOFARROOT)/MAC/MACIO/autogen/ $(LOFARROOT)/MAC/MACIO/test/Echo_Protocol.prot
+	mv -f Echo_Protocol.cc  src/
+	mv -f Echo_Protocol.ph include/
+
+controller_protocol:
+	autogen -L $(LOFARROOT)/MAC/MACIO/autogen/ $(LOFARROOT)/MAC/APL/APLCommon/src/Controller_Protocol.prot
+	mv -f Controller_Protocol.cc  src/
+	mv -f Controller_Protocol.ph include/
+
+# Remove all generated protocol sources/headers and built modules.
+clean:
+	\rm -f src/Echo_Protocol.cc src/SB_Protocol.cc src/Controller_Protocol.cc include/Echo_Protocol.ph include/Controller_Protocol.ph include/SB_Protocol.ph ep/interface/_ep_interface.so ep/echo/_ep_echo.so ep/control/_ep_control.so
diff --git a/CEP/Pipeline/mac/README b/CEP/Pipeline/mac/README
new file mode 100644
index 00000000000..34d893d45e6
--- /dev/null
+++ b/CEP/Pipeline/mac/README
@@ -0,0 +1 @@
+Simple demonstration of the Echo Protocol over an EventPort using Python.
diff --git a/CEP/Pipeline/mac/ep/__init__.py b/CEP/Pipeline/mac/ep/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/CEP/Pipeline/mac/ep/control/__init__.py b/CEP/Pipeline/mac/ep/control/__init__.py
new file mode 100644
index 00000000000..ed2ec0c75b7
--- /dev/null
+++ b/CEP/Pipeline/mac/ep/control/__init__.py
@@ -0,0 +1,69 @@
+# Note that we require GenericEvent, so ep.interface must be available first
+# We'll use it to create a customised EventPort interface
+from ep.interface import EventPort_Interface, EventNotFoundException
+
+# Module level constants
+
+# Protocol ID
+from _ep_control import PROTOCOL
+
+# Result/error states
+from _ep_control import OK, LOST_CONN
+
+# Event signals
+from _ep_control import CONTROL_STARTED
+from _ep_control import CONTROL_CONNECT
+from _ep_control import CONTROL_CONNECTED
+from _ep_control import CONTROL_RESYNC
+from _ep_control import CONTROL_RESYNCED
+from _ep_control import CONTROL_SCHEDULE
+from _ep_control import CONTROL_SCHEDULED
+from _ep_control import CONTROL_CLAIM
+from _ep_control import CONTROL_CLAIMED
+from _ep_control import CONTROL_PREPARE
+from _ep_control import CONTROL_PREPARED
+from _ep_control import CONTROL_RESUME
+from _ep_control import CONTROL_RESUMED
+from _ep_control import CONTROL_SUSPEND
+from _ep_control import CONTROL_SUSPENDED
+from _ep_control import CONTROL_RELEASE
+from _ep_control import CONTROL_RELEASED
+from _ep_control import CONTROL_QUIT
+from _ep_control import CONTROL_QUITED
+
+# Events we might receive
+from _ep_control import ControlConnectEvent
+from _ep_control import ControlClaimEvent
+from _ep_control import ControlPrepareEvent
+from _ep_control import ControlResumeEvent
+from _ep_control import ControlSuspendEvent
+from _ep_control import ControlReleaseEvent
+from _ep_control import ControlQuitEvent
+from _ep_control import ControlResyncEvent
+from _ep_control import ControlScheduleEvent
+
+# Events we can send
+from _ep_control import ControlConnectedEvent
+from _ep_control import ControlResyncedEvent
+from _ep_control import ControlClaimedEvent
+from _ep_control import ControlPreparedEvent
+from _ep_control import ControlResumedEvent
+from _ep_control import ControlSuspendedEvent
+from _ep_control import ControlReleasedEvent
+from _ep_control import ControlQuitedEvent
+from _ep_control import ControlScheduledEvent
+
+class ControllerPort_Interface(EventPort_Interface):
+    """
+    EventPort interface speaking the MAC controller protocol.
+
+    The mapping below translates incoming signal codes into the wrapper
+    class used to decode them.
+    """
+    def __init__(self, servicemask, hostname):
+        # NOTE(review): every other entry maps a signal listed above under
+        # "events we might receive" to its matching *Event class, but
+        # CONTROL_CONNECTED maps to ControlConnectedEvent, which is listed
+        # under "events we can send" (the receivable counterpart would be
+        # CONTROL_CONNECT: ControlConnectEvent). Confirm this is intended.
+        event_mapping = {
+            CONTROL_CONNECTED:  ControlConnectedEvent,
+            CONTROL_RESYNC:   ControlResyncEvent,
+            CONTROL_SCHEDULE: ControlScheduleEvent,
+            CONTROL_CLAIM:    ControlClaimEvent,
+            CONTROL_PREPARE:  ControlPrepareEvent,
+            CONTROL_RESUME:   ControlResumeEvent,
+            CONTROL_SUSPEND:  ControlSuspendEvent,
+            CONTROL_RELEASE:  ControlReleaseEvent,
+            CONTROL_QUIT:     ControlQuitEvent
+        }
+        super(ControllerPort_Interface, self).__init__(servicemask, PROTOCOL, event_mapping, hostname)
diff --git a/CEP/Pipeline/mac/ep/echo/__init__.py b/CEP/Pipeline/mac/ep/echo/__init__.py
new file mode 100644
index 00000000000..d36d3344078
--- /dev/null
+++ b/CEP/Pipeline/mac/ep/echo/__init__.py
@@ -0,0 +1,17 @@
+# We'll create a customized EventPort interface
+# Note that we require GenericEvent, so ep.interface must be available first
+from ep.interface import EventPort_Interface, EventNotFoundException
+
+# Relevant protocol & event names
+from _ep_echo import PROTOCOL
+from _ep_echo import ECHO, PING, CLOCK
+
+# Events we can handle
+from _ep_echo import EchoPingEvent, EchoEchoEvent
+
+class EchoPort_Interface(EventPort_Interface):
+    def __init__(self):
+        event_mapping = {
+            ECHO: EchoEchoEvent
+        }
+        super(EchoPort_Interface, self).__init__("EchoServer:test", PROTOCOL, event_mapping)
diff --git a/CEP/Pipeline/mac/ep/interface/__init__.py b/CEP/Pipeline/mac/ep/interface/__init__.py
new file mode 100644
index 00000000000..9218a05c338
--- /dev/null
+++ b/CEP/Pipeline/mac/ep/interface/__init__.py
@@ -0,0 +1,22 @@
+from _ep_interface import *
+
+class EventPortException(Exception):
+    """Base class for errors raised by the EventPort interface."""
+    pass
+
+class EventNotFoundException(EventPortException):
+    """Raised when a received signal has no registered event class."""
+    pass
+
+class EventPort_Interface(EP_Interface):
+
+    def __init__(self, servicemask, protocol, mapping, hostname='localhost'):
+        super(EventPort_Interface, self).__init__(servicemask, protocol, hostname)
+        self.__event_mapping = mapping
+
+    def receive_event(self):
+        gev = super(EventPort_Interface, self).receive_event()
+        try:
+            return self.__event_mapping[gev.signal](gev)
+        except KeyError:
+            raise EventNotFoundException
+
+
diff --git a/CEP/Pipeline/mac/include/controlwrappers.h b/CEP/Pipeline/mac/include/controlwrappers.h
new file mode 100644
index 00000000000..eb0c7f595d5
--- /dev/null
+++ b/CEP/Pipeline/mac/include/controlwrappers.h
@@ -0,0 +1,313 @@
+#ifndef EP_CONTROLWRAPPERS_H
+#define EP_CONTROLWRAPPERS_H
+
+/*!
+  \file controlwrappers.h
+  \ingroup pipeline
+*/
+
+#include "Controller_Protocol.ph"
+#include "eventwrappers.h"
+
+typedef LOFAR::TYPES::uint16 uint16;
+typedef LOFAR::TYPES::uint32 uint32;
+
+// === Sendable messages ========================================================
+
+/*!
+  \class CONTROLConnectEventWrapper
+  \ingroup pipeline
+*/
+class CONTROLConnectEventWrapper : public GenericEventWrapper {  // sendable: CONNECT request carrying the controller name
+private:
+    CONTROLConnectEvent* my_event;  // NOTE(review): allocated here, never freed; wrapper lifetime is left to the Python binding -- confirm
+public:
+    CONTROLConnectEventWrapper(std::string cntlrName) {
+        this->my_event = new CONTROLConnectEvent;
+        this->my_event->cntlrName = cntlrName;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLConnectEvent* get_event_ptr() { return this->my_event; }
+};
+
+/*!
+  \class CONTROLResyncedEventWrapper
+  \ingroup pipeline
+*/
+class CONTROLResyncedEventWrapper : public GenericEventWrapper {  // sendable: RESYNCED acknowledgement (name + result code)
+private:
+    CONTROLResyncedEvent* my_event;
+public:
+    CONTROLResyncedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLResyncedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLResyncedEvent* get_event_ptr() { return this->my_event; }
+};
+
+/*!
+  \class CONTROLClaimedEventWrapper
+  \ingroup pipeline
+*/
+class CONTROLClaimedEventWrapper : public GenericEventWrapper {  // sendable: CLAIMED acknowledgement
+private:
+    CONTROLClaimedEvent* my_event;
+public:
+    CONTROLClaimedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLClaimedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLClaimedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLPreparedEventWrapper : public GenericEventWrapper {  // sendable: PREPARED acknowledgement
+private:
+    CONTROLPreparedEvent* my_event;
+public:
+    CONTROLPreparedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLPreparedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLPreparedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLScheduledEventWrapper : public GenericEventWrapper {  // sendable: SCHEDULED acknowledgement
+private:
+    CONTROLScheduledEvent* my_event;
+public:
+    CONTROLScheduledEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLScheduledEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLScheduledEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLResumedEventWrapper : public GenericEventWrapper {  // sendable: RESUMED acknowledgement
+private:
+    CONTROLResumedEvent* my_event;
+public:
+    CONTROLResumedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLResumedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLResumedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLSuspendedEventWrapper : public GenericEventWrapper {  // sendable: SUSPENDED acknowledgement
+private:
+    CONTROLSuspendedEvent* my_event;
+public:
+    CONTROLSuspendedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLSuspendedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLSuspendedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLReleasedEventWrapper : public GenericEventWrapper {  // sendable: RELEASED acknowledgement
+private:
+    CONTROLReleasedEvent* my_event;
+public:
+    CONTROLReleasedEventWrapper(std::string cntlrName, uint16 result) {
+        this->my_event = new CONTROLReleasedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLReleasedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLQuitedEventWrapper : public GenericEventWrapper {  // sendable: QUITED notification (adds tree ID and error message)
+private:
+    CONTROLQuitedEvent* my_event;
+public:
+    CONTROLQuitedEventWrapper(std::string cntlrName, uint32 treeID, uint16 result, std::string errorMsg) {
+        this->my_event = new CONTROLQuitedEvent;
+        this->my_event->cntlrName = cntlrName;
+        this->my_event->result = result;
+        this->my_event->treeID = treeID;
+        this->my_event->errorMsg = errorMsg;
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    uint32 get_treeID() { return this->my_event->treeID; }
+    std::string get_errorMsg() { return this->my_event->errorMsg; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLQuitedEvent* get_event_ptr() { return this->my_event; }
+};
+
+// Receivable messages
+
+// First the simple: connected, claim, prepare, resume, suspend, release, quit
+class CONTROLConnectedEventWrapper : public GenericEventWrapper {  // receivable: CONNECTED reply
+private:
+    CONTROLConnectedEvent* my_event;
+public:
+    CONTROLConnectedEventWrapper(GenericEventWrapper& my_gev) {  // specialise a received GenericEvent
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLConnectedEvent(*event_ptr);
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_result() { return this->my_event->result; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLConnectedEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLClaimEventWrapper : public GenericEventWrapper {  // receivable: CLAIM command
+private:
+    CONTROLClaimEvent* my_event;
+public:
+    CONTROLClaimEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLClaimEvent(*event_ptr);
+    }
+    // CONTROLClaimEventWrapper();  // removed: declared but never defined -- any use would fail at link time
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLClaimEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLPrepareEventWrapper : public GenericEventWrapper {  // receivable: PREPARE command
+private:
+    CONTROLPrepareEvent* my_event;
+public:
+    CONTROLPrepareEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLPrepareEvent(*event_ptr);
+    }
+    // CONTROLPrepareEventWrapper();  // removed: declared but never defined -- any use would fail at link time
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLPrepareEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLResumeEventWrapper : public GenericEventWrapper {  // receivable: RESUME command
+private:
+    CONTROLResumeEvent* my_event;
+public:
+    CONTROLResumeEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLResumeEvent(*event_ptr);
+    }
+    // CONTROLResumeEventWrapper();  // removed: declared but never defined -- any use would fail at link time
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLResumeEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLSuspendEventWrapper : public GenericEventWrapper {  // receivable: SUSPEND command
+private:
+    CONTROLSuspendEvent* my_event;
+public:
+    CONTROLSuspendEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLSuspendEvent(*event_ptr);
+    }
+    // CONTROLSuspendEventWrapper();  // removed: declared but never defined -- any use would fail at link time
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLSuspendEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLReleaseEventWrapper : public GenericEventWrapper {  // receivable: RELEASE command
+private:
+    CONTROLReleaseEvent* my_event;
+public:
+    CONTROLReleaseEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLReleaseEvent(*event_ptr);
+    }
+    // CONTROLReleaseEventWrapper();  // removed: declared but never defined -- any use would fail at link time
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLReleaseEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLQuitEventWrapper : public GenericEventWrapper {  // receivable: QUIT command
+private:
+    CONTROLQuitEvent* my_event;
+public:
+    CONTROLQuitEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLQuitEvent(*event_ptr);
+    }
+    // CONTROLQuitEventWrapper();  // removed: declared but never defined -- any use would fail at link time
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLQuitEvent* get_event_ptr() { return this->my_event; }
+};
+
+// ...then the more awkward: resync, schedule.
+
+class CONTROLResyncEventWrapper : public GenericEventWrapper {  // receivable: RESYNC command (adds state + hostname)
+private:
+    CONTROLResyncEvent* my_event;
+public:
+    CONTROLResyncEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLResyncEvent(*event_ptr);
+    }
+    // CONTROLResyncEventWrapper();  // removed: declared but never defined -- any use would fail at link time
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    uint16 get_curState() { return this->my_event->curState; }
+    std::string get_hostname() { return this->my_event->hostname; }
+
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLResyncEvent* get_event_ptr() { return this->my_event; }
+};
+
+class CONTROLScheduleEventWrapper : public GenericEventWrapper {  // receivable: SCHEDULE command (adds start/stop times)
+private:
+    CONTROLScheduleEvent* my_event;
+public:
+    CONTROLScheduleEventWrapper(GenericEventWrapper& my_gev) {
+        LOFAR::MACIO::GCFEvent* event_ptr;
+        event_ptr = my_gev.get_event_ptr();
+        this->my_event = new CONTROLScheduleEvent(*event_ptr);
+    }
+    std::string get_cntlrName() { return this->my_event->cntlrName; }
+    time_t get_startTime() { return this->my_event->startTime; }
+    time_t get_stopTime() { return this->my_event->stopTime; }
+
+    virtual uint16 get_signal() { return this->my_event->signal; }
+    virtual CONTROLScheduleEvent* get_event_ptr() { return this->my_event; }
+};
+
+
+#endif
+
diff --git a/CEP/Pipeline/mac/include/echowrappers.h b/CEP/Pipeline/mac/include/echowrappers.h
new file mode 100644
index 00000000000..3ae0ee3ac68
--- /dev/null
+++ b/CEP/Pipeline/mac/include/echowrappers.h
@@ -0,0 +1,46 @@
+#ifndef EP_ECHOWRAPPERS_H
+#define EP_ECHOWRAPPERS_H
+
+/*!
+	\file echowrappers.h
+	\ingroup pipeline
+*/
+
+#include "Echo_Protocol.ph"
+#include "eventwrappers.h"
+
+/*!
+	\class EchoPingEventWrapper
+	\ingroup pipeline
+*/
+class EchoPingEventWrapper : public GenericEventWrapper {  // mutable ping event; member functions defined in eventwrappers.cc
+private:
+    EchoPingEvent* my_event;
+public:
+    EchoPingEventWrapper();
+    double get_pt();  // ping time
+    void set_pt(double);
+    LOFAR::TYPES::uint16 get_seqnr();  // sequence number
+    void set_seqnr(LOFAR::TYPES::uint16);
+    virtual LOFAR::TYPES::uint16 get_signal() { return this->my_event->signal; }
+    virtual EchoPingEvent* get_event_ptr() { return this->my_event; }
+};
+
+/*!
+	\class EchoEchoEventWrapper
+	\ingroup pipeline
+*/
+class EchoEchoEventWrapper : public GenericEventWrapper {  // read-only echo reply, built from a received event
+private:
+    EchoEchoEvent* my_event;
+public:
+    EchoEchoEventWrapper(LOFAR::MACIO::GCFEvent*);
+    EchoEchoEventWrapper(GenericEventWrapper&);
+    double get_pt();  // ping time
+    double get_et();  // echo time
+    LOFAR::TYPES::uint16 get_seqnr();
+    virtual LOFAR::TYPES::uint16 get_signal() { return this->my_event->signal; }
+    virtual EchoEchoEvent* get_event_ptr() { return this->my_event; }
+};
+
+#endif
diff --git a/CEP/Pipeline/mac/include/ep_interface.h b/CEP/Pipeline/mac/include/ep_interface.h
new file mode 100644
index 00000000000..29e38c922ac
--- /dev/null
+++ b/CEP/Pipeline/mac/include/ep_interface.h
@@ -0,0 +1,42 @@
+#ifndef EP_INTERFACE_H
+#define EP_INTERFACE_H
+
+/*!
+  \file ep_interface.h
+  \ingroup pipeline
+*/
+
+#include <boost/python.hpp>
+#include <lofar_config.h>
+#include <Common/LofarLogger.h>
+#include <MACIO/EventPort.h>
+#include <string>
+
+#include "eventwrappers.h"
+
+/*!
+  \class EP_Interface
+  \ingroup pipeline
+  \brief Event Port Interface
+*/
+class EP_Interface {
+private:
+    LOFAR::MACIO::EventPort* my_EventPort;  // owned; freed in destructor. NOTE(review): copy/assignment not disabled -- copying would double-delete
+public:
+    EP_Interface(std::string servicename, short protocol_id, std::string host = "") {
+        this->my_EventPort = new LOFAR::MACIO::EventPort(servicename, false, protocol_id, host, true);
+    }
+    GenericEventWrapper* receive_event() {
+        LOFAR::MACIO::GCFEvent* ackPtr;
+        Py_BEGIN_ALLOW_THREADS
+        ackPtr = my_EventPort->receive();  // blocking; the GIL is released so other Python threads can run
+        Py_END_ALLOW_THREADS
+        return new GenericEventWrapper(ackPtr);  // ownership passes to the caller (Python binding uses manage_new_object)
+    }
+    void send_event(GenericEventWrapper* wrapped_event) {
+        this->my_EventPort->send(wrapped_event->get_event_ptr());
+        }
+    ~EP_Interface() { delete this->my_EventPort; }
+};
+
+#endif
diff --git a/CEP/Pipeline/mac/include/eventwrappers.h b/CEP/Pipeline/mac/include/eventwrappers.h
new file mode 100644
index 00000000000..6ce4913d198
--- /dev/null
+++ b/CEP/Pipeline/mac/include/eventwrappers.h
@@ -0,0 +1,32 @@
+#ifndef EP_EVENTWRAPPERS_H
+#define EP_EVENTWRAPPERS_H
+
+/*!
+	\file eventwrappers.h
+	\ingroup pipeline
+*/
+
+#include <lofar_config.h>
+#include <Common/LofarLogger.h>
+#include <sys/time.h>
+
+/*!
+	\class GenericEventWrapper
+	\ingroup pipeline
+	\brief Interface definition class for a generic event
+*/
+class GenericEventWrapper {
+private:
+    LOFAR::MACIO::GCFEvent* my_event;  // NOTE(review): ownership is ambiguous -- default ctor allocates, pointer ctor adopts; neither is freed (no destructor)
+public:
+    GenericEventWrapper() {
+        this->my_event = new LOFAR::MACIO::GCFEvent;
+    }
+    GenericEventWrapper(LOFAR::MACIO::GCFEvent* event_ptr) {  // adopts a raw event, e.g. one returned by EventPort::receive()
+        this->my_event = event_ptr;
+    }
+    virtual LOFAR::TYPES::uint16 get_signal() { return this->my_event->signal; }
+    virtual LOFAR::MACIO::GCFEvent* get_event_ptr() { return my_event;}
+};
+
+#endif
diff --git a/CEP/Pipeline/mac/include/lofar_config.h b/CEP/Pipeline/mac/include/lofar_config.h
new file mode 100644
index 00000000000..d1d5a8a5a86
--- /dev/null
+++ b/CEP/Pipeline/mac/include/lofar_config.h
@@ -0,0 +1,167 @@
+/* $Id$ */
+
+/*!
+	\file lofar_config.h
+	\ingroup pipeline
+*/
+
+/*-------------------------------------------------------------------------*\
+|     Defines for the presence or absence of (system) header files          |
+\*-------------------------------------------------------------------------*/
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <net/ethernet.h> header file. */
+#define HAVE_NET_ETHERNET_H 1
+
+/* Define to 1 if you have the <netinet/in.h> header file. */
+#define HAVE_NETINET_IN_H 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#define HAVE_SYS_MMAN_H 1
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#define HAVE_SYS_RESOURCE_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+#define HAVE_SYS_TIME_H 1
+
+/* Define to 1 if you have the <sys/timepps.h> header file. */
+/* #undef HAVE_SYS_TIMEPPS_H */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+
+/*-------------------------------------------------------------------------*\
+|     Defines for the presence or absence of (system) types                 |
+\*-------------------------------------------------------------------------*/
+
+/* Define if `long long' is supported */
+#define HAVE_LONG_LONG 1
+
+/* Define if `uint' is supported */
+#define HAVE_UINT 1
+
+/* Define if `ulong' is supported */
+#define HAVE_ULONG 1
+
+/* Define if `ushort' is supported */
+#define HAVE_USHORT 1
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+   significant byte first (like Motorola and SPARC, unlike Intel). */
+/* #undef WORDS_BIGENDIAN */
+
+
+/*-------------------------------------------------------------------------*\
+|     Defines for the presence or absence of (system) libraries             |
+\*-------------------------------------------------------------------------*/
+
+/* Define if AIPS++ is installed */
+#define HAVE_AIPSPP
+
+/* Define if libbfd is available */
+#define HAVE_BFD 1
+
+/* Define if BG/L MPICH is installed */
+/* #undef HAVE_BGLMPICH */
+
+/* Define if Blitz is installed */
+/* #undef HAVE_BLITZ */
+
+/* Define if BOOST is installed */
+#define HAVE_BOOST 1
+
+/* Define if BOOST component regex is installed */
+#define HAVE_BOOST_REGEX 1
+
+/* Define if BOOST component date_time is installed */
+#define HAVE_BOOST_DATE_TIME 1
+
+/* Define if FFTW2 is installed */
+/* #undef HAVE_FFTW2 */
+
+/* Define if FFTW3 is installed */
+/* #undef HAVE_FFTW3 */
+
+/* Define if LAM is installed */
+/* #undef HAVE_LAM */
+
+/* Define if LOG4CPLUS is installed */
+#define HAVE_LOG4CPLUS 1
+
+/* Define if LOG4CXX is installed */
+/* #undef HAVE_LOG4CXX */
+
+/* Define if we have an MPI implementation installed */
+/* #undef HAVE_MPI */
+
+/* Define if MASS is installed */ 
+/* #undef HAVE_MASS */
+
+/* Define if MPICH is installed */
+/* #undef HAVE_MPICH */
+
+/* Define if libpqxx is installed */
+#define HAVE_PQXX
+
+/* Define if PVSS is installed */
+/* #undef HAVE_PVSS */
+
+/* Define if using Rational Purify */
+/* #undef HAVE_PURIFY */
+
+/* Define if readline is installed */
+#define HAVE_READLINE 1
+
+/* Define if ScaMPI is installed */
+/* #undef HAVE_SCAMPI */
+
+/* Defined if shared memory is used */
+#define HAVE_SHMEM 1
+
+
+/*-------------------------------------------------------------------------*\
+|  Defines for the presence or absence of (system) functions                |
+\*-------------------------------------------------------------------------*/
+
+/* Define to __PRETTY_FUNCTION__, __FUNCTION__, or "<unknown>" */
+#define AUTO_FUNCTION_NAME __PRETTY_FUNCTION__
+
+/* Define to 1 if you have the `backtrace' function. */
+#define HAVE_BACKTRACE 1
+
+/* Define to 1 if you have the `cplus_demangle' function. */
+/* #undef HAVE_CPLUS_DEMANGLE */
+
+/* Define to 1 if you have a declaration for the `basename' function. */
+/* #undef HAVE_DECL_BASENAME */
diff --git a/CEP/Pipeline/mac/mac_wrapper/pipeline_wrapper.py b/CEP/Pipeline/mac/mac_wrapper/pipeline_wrapper.py
new file mode 100644
index 00000000000..ef749b66383
--- /dev/null
+++ b/CEP/Pipeline/mac/mac_wrapper/pipeline_wrapper.py
@@ -0,0 +1,63 @@
+#!/usr/bin/python
+
+import sys, os
+import datetime
+import subprocess
+from ConfigParser import SafeConfigParser as ConfigParser
+from lofar.parameterset import parameterset
+from lofarpipe.support.utilities import create_directory, string_to_list
+
+# USER EDITABLE PARAMETERS ----------------------------------------------------
+
+# NB: task file is defined in configuration file, not here.
+pipeline_definition = '/please/provide/path/to/sip.py'
+config_file         = '/please/provide/path/to/pipeline.cfg'
+
+# Set up environment for pipeline run
+pipeline_environment = {
+    "PYTHONPATH": "/opt/pipeline/dependencies/lib/python2.5/site-packages:/opt/pipeline/framework/lib/python2.5/site-packages:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/lofar/lib/python2.5/site-packages:/opt/pythonlibs/lib/python/site-packages",
+    "LD_LIBRARY_PATH": "/opt/pipeline/dependencies/lib:/opt/LofIm/daily/pyrap/lib:/opt/LofIm/daily/casacore/lib:/opt/LofIm/daily/lofar/lib:/opt/wcslib/lib/:/opt/hdf5/lib:/opt/LofIm/daily/casarest/lib:/data/sys/opt/lofar/external/log4cplus/lib", 
+    "PATH": "/opt/pipeline/dependencies/bin:/home/swinbank/sw/bin:/opt/pipeline/dependencies/bin:/usr/local/bin:/usr/bin:/usr/X11R6/bin:/bin:/usr/games:/opt/LofIm/daily/casarest/bin:/opt/LofIm/daily/casarest/bin",
+}
+
+# -----------------------------------------------------------------------------
+
+# To ensure consistency in the configuration between this wrapper and the
+# pipeline, we will set the start time here.
+start_time = datetime.datetime.utcnow().replace(microsecond=0).isoformat()  # ISO-8601, second resolution
+
+# We should always be called with standard command line arguments:
+# tree ID, parset, ... others?
+input_parset = parameterset(sys.argv[1])
+tree_id      = sys.argv[2] # check this!
+
+# Extract runtime, working, results directories from input parset
+runtime_directory = input_parset.getString("ObsSW.Observation.ObservationControl.PythonControl.runtimeDirectory")
+working_directory = input_parset.getString("ObsSW.Observation.ObservationControl.PythonControl.workingDirectory")
+results_directory = input_parset.getString("ObsSW.Observation.ObservationControl.PythonControl.resultDirectory")
+
+# Set up configuration for later processing stages
+config = ConfigParser({  # these defaults are interpolated into pipeline.cfg values
+    "job_name": tree_id,
+    "cwd": os.getcwd(),
+    "start_time": start_time,
+})
+config.read(config_file)
+config.set('DEFAULT', 'runtime_directory', runtime_directory)
+config.set('DEFAULT', 'default_working_directory', working_directory)
+
+# Extract input file list from parset
+to_process = input_parset.getStringVector('ObsSW.Observation.DataProducts.measurementSets')  # NOTE(review): computed but unused here -- presumably for future use
+
+# Read config file to establish location of parset directory to use
+parset_directory = config.get("layout", "parset_directory")
+create_directory(parset_directory)
+
+# For each task (currently only ndppp), extract and write parset
+tasks = ConfigParser(config.defaults())
+tasks.read(string_to_list(config.get("DEFAULT", "task_files")))
+ndppp_parset_location = tasks.get("ndppp", "parset")
+input_parset.makeSubset("ObsSW.Observation.ObservationControl.PythonControl.DPPP.").writeFile(ndppp_parset_location)
+
+# Run pipeline & wait for result
+subprocess.check_call(['python', pipeline_definition, '-j', tree_id, '-d', '--config', config_file, '--runtime-directory', runtime_directory, '--default-working-directory', working_directory, '--start-time', start_time])  # blocks; raises CalledProcessError on nonzero exit
diff --git a/CEP/Pipeline/mac/src/ep_control.cc b/CEP/Pipeline/mac/src/ep_control.cc
new file mode 100644
index 00000000000..9d94cbdec1a
--- /dev/null
+++ b/CEP/Pipeline/mac/src/ep_control.cc
@@ -0,0 +1,131 @@
+#include "Controller_Protocol.ph"
+#include "controlwrappers.h"
+#include <boost/python.hpp>
+
+BOOST_PYTHON_MODULE(_ep_control)
+{
+    using namespace boost::python;
+    
+    // Module level attributes in Python
+    
+    // Protocol ID
+    scope().attr("PROTOCOL")          = (short) CONTROLLER_PROTOCOL;
+
+    // Possible errors
+    scope().attr("OK")                = (short) CONTROL_OK_ERR;
+    scope().attr("LOST_CONN")         = (short) CONTROL_LOST_CONN_ERR;
+
+    // Possible signals
+    scope().attr("CONTROL_STARTED")   = CONTROL_STARTED;
+    scope().attr("CONTROL_CONNECT")   = CONTROL_CONNECT;
+    scope().attr("CONTROL_CONNECTED") = CONTROL_CONNECTED;
+    scope().attr("CONTROL_RESYNC")    = CONTROL_RESYNC;
+    scope().attr("CONTROL_RESYNCED")  = CONTROL_RESYNCED;
+    scope().attr("CONTROL_SCHEDULE")  = CONTROL_SCHEDULE;
+    scope().attr("CONTROL_SCHEDULED") = CONTROL_SCHEDULED;
+    scope().attr("CONTROL_CLAIM")     = CONTROL_CLAIM;
+    scope().attr("CONTROL_CLAIMED")   = CONTROL_CLAIMED;
+    scope().attr("CONTROL_PREPARE")   = CONTROL_PREPARE;
+    scope().attr("CONTROL_PREPARED")  = CONTROL_PREPARED;
+    scope().attr("CONTROL_RESUME")    = CONTROL_RESUME;
+    scope().attr("CONTROL_RESUMED")   = CONTROL_RESUMED;
+    scope().attr("CONTROL_SUSPEND")   = CONTROL_SUSPEND;
+    scope().attr("CONTROL_SUSPENDED") = CONTROL_SUSPENDED;
+    scope().attr("CONTROL_RELEASE")   = CONTROL_RELEASE;
+    scope().attr("CONTROL_RELEASED")  = CONTROL_RELEASED;
+    scope().attr("CONTROL_QUIT")      = CONTROL_QUIT;
+    scope().attr("CONTROL_QUITED")    = CONTROL_QUITED;
+    scope().attr("CONTROL_COMMON")    = CONTROL_COMMON;
+
+    // Events
+
+    // These events may be sent
+    class_<CONTROLConnectEventWrapper, bases<GenericEventWrapper> >("ControlConnectEvent", init<std::string>())
+        .add_property("controller_name", &CONTROLConnectEventWrapper::get_cntlrName)  // fixed: was &CONTROLConnectedEventWrapper::get_cntlrName (wrong wrapper class)
+    ;
+
+    class_<CONTROLScheduledEventWrapper, bases<GenericEventWrapper> >("ControlScheduledEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLScheduledEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLScheduledEventWrapper::get_result)
+    ;
+
+    class_<CONTROLResyncedEventWrapper, bases<GenericEventWrapper> >("ControlResyncedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLResyncedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLResyncedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLClaimedEventWrapper, bases<GenericEventWrapper> >("ControlClaimedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLClaimedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLClaimedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLPreparedEventWrapper, bases<GenericEventWrapper> >("ControlPreparedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLPreparedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLPreparedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLResumedEventWrapper, bases<GenericEventWrapper> >("ControlResumedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLResumedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLResumedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLSuspendedEventWrapper, bases<GenericEventWrapper> >("ControlSuspendedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLSuspendedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLSuspendedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLReleasedEventWrapper, bases<GenericEventWrapper> >("ControlReleasedEvent", init<std::string, uint16>())
+        .add_property("controller_name", &CONTROLReleasedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLReleasedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLQuitedEventWrapper, bases<GenericEventWrapper> >("ControlQuitedEvent", init<std::string, uint32, uint16, std::string>())
+        .add_property("controller_name", &CONTROLQuitedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLQuitedEventWrapper::get_result)
+        .add_property("treeID", &CONTROLQuitedEventWrapper::get_treeID)
+        .add_property("error_message", &CONTROLQuitedEventWrapper::get_errorMsg)
+    ;
+
+    // These events may be received
+    class_<CONTROLConnectedEventWrapper, bases<GenericEventWrapper> >("ControlConnectedEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLConnectedEventWrapper::get_cntlrName)
+        .add_property("result", &CONTROLConnectedEventWrapper::get_result)
+    ;
+
+    class_<CONTROLClaimEventWrapper, bases<GenericEventWrapper> >("ControlClaimEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLClaimEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLPrepareEventWrapper, bases<GenericEventWrapper> >("ControlPrepareEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLPrepareEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLResumeEventWrapper, bases<GenericEventWrapper> >("ControlResumeEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLResumeEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLSuspendEventWrapper, bases<GenericEventWrapper> >("ControlSuspendEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLSuspendEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLReleaseEventWrapper, bases<GenericEventWrapper> >("ControlReleaseEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLReleaseEventWrapper::get_cntlrName)
+    ;
+        
+    class_<CONTROLQuitEventWrapper, bases<GenericEventWrapper> >("ControlQuitEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLQuitEventWrapper::get_cntlrName)
+    ;
+
+    class_<CONTROLResyncEventWrapper, bases<GenericEventWrapper> >("ControlResyncEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLResyncEventWrapper::get_cntlrName)
+        .add_property("current_state", &CONTROLResyncEventWrapper::get_curState)
+        .add_property("hostname", &CONTROLResyncEventWrapper::get_hostname)
+    ;
+
+    class_<CONTROLScheduleEventWrapper, bases<GenericEventWrapper> >("ControlScheduleEvent", init<GenericEventWrapper&>())
+        .add_property("controller_name", &CONTROLScheduleEventWrapper::get_cntlrName)
+        .add_property("start_time", &CONTROLScheduleEventWrapper::get_startTime)
+        .add_property("stop_time", &CONTROLScheduleEventWrapper::get_stopTime)
+    ;
+
+}
diff --git a/CEP/Pipeline/mac/src/ep_echo.cc b/CEP/Pipeline/mac/src/ep_echo.cc
new file mode 100644
index 00000000000..bfc84883781
--- /dev/null
+++ b/CEP/Pipeline/mac/src/ep_echo.cc
@@ -0,0 +1,24 @@
+#include "Echo_Protocol.ph"
+#include "echowrappers.h"
+#include <boost/python.hpp>
+
+BOOST_PYTHON_MODULE(_ep_echo) {
+    using namespace boost::python;
+
+    // These are exposed as module-level attributes in Python
+    scope().attr("PROTOCOL") = (short) ECHO_PROTOCOL;
+    scope().attr("PING") = ECHO_PING;
+    scope().attr("ECHO") = ECHO_ECHO;
+    scope().attr("CLOCK") = ECHO_CLOCK;
+
+    class_<EchoPingEventWrapper, boost::noncopyable, bases<GenericEventWrapper> >("EchoPingEvent")
+        .add_property("ping_time", &EchoPingEventWrapper::get_pt, &EchoPingEventWrapper::set_pt)
+        .add_property("seqnr", &EchoPingEventWrapper::get_seqnr, &EchoPingEventWrapper::set_seqnr)
+    ;
+
+    class_<EchoEchoEventWrapper, bases<GenericEventWrapper> >("EchoEchoEvent", init<GenericEventWrapper&>())
+        .add_property("ping_time", &EchoEchoEventWrapper::get_pt)
+        .add_property("echo_time", &EchoEchoEventWrapper::get_et)
+        .add_property("seqnr", &EchoEchoEventWrapper::get_seqnr)
+    ;
+}
diff --git a/CEP/Pipeline/mac/src/ep_interface.cc b/CEP/Pipeline/mac/src/ep_interface.cc
new file mode 100644
index 00000000000..36e2bfac877
--- /dev/null
+++ b/CEP/Pipeline/mac/src/ep_interface.cc
@@ -0,0 +1,17 @@
+#include "ep_interface.h"
+#include <boost/python.hpp>
+
+BOOST_PYTHON_MODULE(_ep_interface) {
+    using namespace boost::python;
+
+    // We export GenericEventWrapper here, but it is required by all the
+    // protocols. Hence, ep.interface must always be imported first.
+    class_<GenericEventWrapper>("GenericEvent")
+        .add_property("signal", &GenericEventWrapper::get_signal)
+    ;
+
+    class_<EP_Interface>("EP_Interface", "EP_Interface(ServiceMask, Protocol, Host=localhost)", init<std::string, short, optional<std::string> >())
+        .def("receive_event", &EP_Interface::receive_event, return_value_policy<manage_new_object>())
+        .def("send_event", &EP_Interface::send_event)
+    ;
+}
diff --git a/CEP/Pipeline/mac/src/eventwrappers.cc b/CEP/Pipeline/mac/src/eventwrappers.cc
new file mode 100644
index 00000000000..5542310e029
--- /dev/null
+++ b/CEP/Pipeline/mac/src/eventwrappers.cc
@@ -0,0 +1,53 @@
+#include "echowrappers.h"
+
+//
+// Specific event types
+//
+
+// Ping event
+EchoPingEventWrapper::EchoPingEventWrapper() {
+    this->my_event = new EchoPingEvent;
+    timeval ping_time;
+    gettimeofday(&ping_time, 0);
+    this->my_event->ping_time = ping_time;
+}
+
+double EchoPingEventWrapper::get_pt() {
+    return 1.0 * this->my_event->ping_time.tv_sec + (this->my_event->ping_time.tv_usec / 1000000.0); 
+}
+
+void EchoPingEventWrapper::set_pt(double my_time) {
+    this->my_event->ping_time.tv_sec = (int) my_time;
+    this->my_event->ping_time.tv_usec = (int) (1000000.0 * (my_time - (int) my_time));
+}
+
+LOFAR::TYPES::uint16 EchoPingEventWrapper::get_seqnr() {
+    return this->my_event->seqnr;
+}
+
+void EchoPingEventWrapper::set_seqnr(LOFAR::TYPES::uint16 my_seqnr) {
+    this->my_event->seqnr = my_seqnr;
+}
+
+// Echo event
+EchoEchoEventWrapper::EchoEchoEventWrapper(LOFAR::MACIO::GCFEvent* my_event) {
+    this->my_event = new EchoEchoEvent(*my_event);
+}
+
+EchoEchoEventWrapper::EchoEchoEventWrapper(GenericEventWrapper& my_gev) {
+    LOFAR::MACIO::GCFEvent* event_ptr;
+    event_ptr = my_gev.get_event_ptr();
+    this->my_event = new EchoEchoEvent(*event_ptr);
+}
+
+double EchoEchoEventWrapper::get_pt() {
+    return 1.0 * this->my_event->ping_time.tv_sec + (this->my_event->ping_time.tv_usec / 1000000.0); 
+}
+
+double EchoEchoEventWrapper::get_et() {
+    return 1.0 * this->my_event->echo_time.tv_sec + (this->my_event->echo_time.tv_usec / 1000000.0); 
+}
+
+LOFAR::TYPES::uint16 EchoEchoEventWrapper::get_seqnr() {
+    return this->my_event->seqnr;
+}
diff --git a/CEP/Pipeline/mac/test_ep.py b/CEP/Pipeline/mac/test_ep.py
new file mode 100644
index 00000000000..e8397d4caa4
--- /dev/null
+++ b/CEP/Pipeline/mac/test_ep.py
@@ -0,0 +1,97 @@
+import unittest
+import ep.echo
+import ep.control
+import time
+
+class TestPingEvent(unittest.TestCase):
+    def setUp(self):
+        self.epe = ep.echo.EchoPingEvent()
+
+    def test_init_time(self):
+        self.assertTrue(self.epe.ping_time < time.time())
+
+    def test_change_time(self):
+        now = time.time()
+        self.epe.ping_time = now
+        self.assertAlmostEqual(self.epe.ping_time, now, 5)
+
+    def test_changq_seqnr(self):
+        self.epe.seqnr = 10
+        self.assertEqual(self.epe.seqnr, 10)
+
+class TestReceiveEcho(unittest.TestCase):
+    def setUp(self):
+        interface = ep.echo.EchoPort_Interface()
+        self.epe = ep.echo.EchoPingEvent()
+        interface.send_event(self.epe)
+        self.eee = interface.receive_event()
+
+    def test_ping_time(self):
+        self.assertEqual(self.epe.ping_time, self.eee.ping_time)
+
+    def test_seqnr(self):
+        self.assertEqual(self.epe.seqnr, self.eee.seqnr)
+
+    def test_long_ping(self):
+        self.assertTrue(self.eee.echo_time > self.eee.ping_time)
+
+class TestControllerSendables(unittest.TestCase):
+    def test_control_started(self):
+        event = ep.control.ControlStartedEvent("controller name", True)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertTrue(event.successful)
+        self.assertEqual(event.signal, ep.control.CONTROL_STARTED)
+
+    def test_control_connected(self):
+        event = ep.control.ControlConnectedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_CONNECTED)
+
+    def test_control_resynced(self):
+        event = ep.control.ControlResyncedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_RESYNCED)
+
+    def test_control_claimed(self):
+        event = ep.control.ControlClaimedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_CLAIMED)
+
+    def test_control_prepared(self):
+        event = ep.control.ControlPreparedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_PREPARED)
+
+    def test_control_resumed(self):
+        event = ep.control.ControlResumedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_RESUMED)
+
+    def test_control_suspended(self):
+        event = ep.control.ControlSuspendedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_SUSPENDED)
+
+    def test_control_released(self):
+        event = ep.control.ControlReleasedEvent("controller name", ep.control.OK)
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.signal, ep.control.CONTROL_RELEASED)
+
+    def test_control_quited(self):
+        event = ep.control.ControlQuitedEvent("controller name", 1, ep.control.OK, "no error")
+        self.assertEqual(event.controller_name, "controller name")
+        self.assertEqual(event.result, ep.control.OK)
+        self.assertEqual(event.treeID, 1)
+        self.assertEqual(event.error_message, "no error")
+        self.assertEqual(event.signal, ep.control.CONTROL_QUITED)
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/CEP/Pipeline/recipes/examples/master/example.py b/CEP/Pipeline/recipes/examples/master/example.py
new file mode 100644
index 00000000000..bd9e2777d8e
--- /dev/null
+++ b/CEP/Pipeline/recipes/examples/master/example.py
@@ -0,0 +1,60 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                                 Example recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+import subprocess
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.lofaringredient import ExecField, StringField
+from lofarpipe.support.pipelinelogging import log_process_output
+
+class example(BaseRecipe):
+    inputs = {
+        'executable': ExecField(
+            '--executable',
+            help="Command to run",
+            default="/bin/ls"
+        )
+    }
+
+    outputs = {
+        'stdout': StringField()
+    }
+
+    def go(self):
+        self.logger.info("Starting example recipe run")
+        super(example, self).go()
+
+        self.logger.info("This is a log message")
+
+        my_process = subprocess.Popen(
+            [
+                self.inputs['executable']
+            ],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
+        )
+        sout, serr = my_process.communicate()
+        self.outputs['stdout'] = sout
+        log_process_output(
+            self.inputs['executable'],
+            sout,
+            serr,
+            self.logger
+        )
+
+        if my_process.returncode == 0:
+            return 0
+        else:
+            self.logger.warn(
+                "Return code (%d) is not 0." % my_process.returncode
+            )
+            return 1
+
+
+if __name__ == '__main__':
+    sys.exit(example().main())
diff --git a/CEP/Pipeline/recipes/examples/master/example_parallel.py b/CEP/Pipeline/recipes/examples/master/example_parallel.py
new file mode 100644
index 00000000000..fb2131cb4fd
--- /dev/null
+++ b/CEP/Pipeline/recipes/examples/master/example_parallel.py
@@ -0,0 +1,25 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                    Example recipe with simple job distribution
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+
+class example_parallel(BaseRecipe, RemoteCommandRecipeMixIn):
+    def go(self):
+        super(example_parallel, self).go()
+        node_command = "python %s" % (self.__file__.replace("master", "nodes"))
+        job = ComputeJob("localhost", node_command, arguments=["example_argument"])
+        self._schedule_jobs([job])
+        if self.error.isSet():
+            return 1
+        else:
+            return 0
+
+if __name__ == "__main__":
+    sys.exit(example_parallel().main())
diff --git a/CEP/Pipeline/recipes/examples/nodes/example_parallel.py b/CEP/Pipeline/recipes/examples/nodes/example_parallel.py
new file mode 100644
index 00000000000..85239c3b2c2
--- /dev/null
+++ b/CEP/Pipeline/recipes/examples/nodes/example_parallel.py
@@ -0,0 +1,20 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                Example of a simple node script
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import sys
+from lofarpipe.support.lofarnode import LOFARnode
+
+class example_parallel(LOFARnodeTCP):
+    def run(self, *args):
+        for arg in args:
+            self.logger.info("Received %s as argument" % str(arg))
+        return 0
+
+if __name__ == "__main__":
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(example_parallel(jobid, jobhost, jobport).run_with_stored_arguments())
+
diff --git a/CEP/Pipeline/recipes/sip/master/bbs.py b/CEP/Pipeline/recipes/sip/master/bbs.py
new file mode 100644
index 00000000000..9ee952631bf
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/bbs.py
@@ -0,0 +1,415 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                BBS (BlackBoard Selfcal) recipe
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from contextlib import closing
+import psycopg2, psycopg2.extensions
+import subprocess
+import sys
+import os
+import threading
+import tempfile
+import shutil
+import time
+import signal
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.group_data import gvds_iterator
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.remotecommand import run_remote_command
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.jobserver import job_server
+from lofarpipe.support.lofaringredient import LOFARoutput, LOFARinput
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+
+class bbs(BaseRecipe):
+    """
+    The bbs recipe coordinates running BBS on a group of MeasurementSets. It
+    runs both GlobalControl and KernelControl; as yet, SolverControl has not
+    been integrated.
+
+    The recipe will also run the sourcedb and parmdb recipes on each of the
+    input MeasuementSets.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'control_exec': ingredient.ExecField(
+            '--control-exec',
+            dest="control_exec",
+            help="BBS Control executable"
+        ),
+        'kernel_exec': ingredient.ExecField(
+            '--kernel-exec',
+            dest="kernel_exec",
+            help="BBS Kernel executable"
+        ),
+        'initscript': ingredient.FileField(
+            '--initscript',
+            dest="initscript",
+            help="Initscript to source (ie, lofarinit.sh)"
+        ),
+        'parset': ingredient.FileField(
+            '-p', '--parset',
+            dest="parset",
+            help="BBS configuration parset"
+        ),
+        'key': ingredient.StringField(
+            '--key',
+            dest="key",
+            help="Key to identify BBS session"
+        ),
+        'db_host': ingredient.StringField(
+            '--db-host',
+            dest="db_host",
+            help="Database host with optional port"
+        ),
+        'db_user': ingredient.StringField(
+            '--db-user',
+            dest="db_user",
+            help="Database user"
+        ),
+        'db_name': ingredient.StringField(
+            '--db-name',
+            dest="db_name",
+            help="Database name"
+        ),
+        'makevds': ingredient.ExecField(
+            '--makevds',
+            help="makevds executable",
+            default="/opt/LofIm/daily/lofar/bin/makevds"
+        ),
+        'combinevds': ingredient.ExecField(
+            '--combinevds',
+            help="combinevds executable",
+            default="/opt/LofIm/daily/lofar/bin/combinevds"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        ),
+        'makesourcedb': ingredient.ExecField(
+            '--makesourcedb',
+            help="makesourcedb executable",
+            default="/opt/LofIm/daily/lofar/bin/makesourcedb"
+        ),
+        'parmdbm': ingredient.ExecField(
+            '--parmdbm',
+            help="parmdbm executable",
+            default="/opt/LofIm/daily/lofar/bin/parmdbm"
+        ),
+        'skymodel': ingredient.FileField(
+            '-s', '--skymodel',
+            dest="skymodel",
+            help="Input sky catalogue"
+        )
+    }
+
+    def go(self):
+        self.logger.info("Starting BBS run")
+        super(bbs, self).go()
+
+        #             Generate source and parameter databases for all input data
+        # ----------------------------------------------------------------------
+        inputs = LOFARinput(self.inputs)
+        inputs['args'] = self.inputs['args']
+        inputs['executable'] = self.inputs['parmdbm']
+        outputs = LOFARoutput(self.inputs)
+        if self.cook_recipe('parmdb', inputs, outputs):
+            self.logger.warn("parmdb reports failure")
+            return 1
+        inputs['args'] = self.inputs['args']
+        inputs['executable'] = self.inputs['makesourcedb']
+        inputs['skymodel'] = self.inputs['skymodel']
+        outputs = LOFARoutput(self.inputs)
+        if self.cook_recipe('sourcedb', inputs, outputs):
+            self.logger.warn("sourcedb reports failure")
+            return 1
+
+        #              Build a GVDS file describing all the data to be processed
+        # ----------------------------------------------------------------------
+        self.logger.debug("Building VDS file describing all data for BBS")
+        vds_file = os.path.join(
+            self.config.get("layout", "job_directory"),
+            "vds",
+            "bbs.gvds"
+        )
+        inputs = LOFARinput(self.inputs)
+        inputs['args'] = self.inputs['args']
+        inputs['gvds'] = vds_file
+        inputs['unlink'] = False
+        inputs['makevds'] = self.inputs['makevds']
+        inputs['combinevds'] = self.inputs['combinevds']
+        inputs['nproc'] = self.inputs['nproc']
+        inputs['directory'] = os.path.dirname(vds_file)
+        outputs = LOFARoutput(self.inputs)
+        if self.cook_recipe('new_vdsmaker', inputs, outputs):
+            self.logger.warn("new_vdsmaker reports failure")
+            return 1
+        self.logger.debug("BBS GVDS is %s" % (vds_file,))
+
+
+        #      Iterate over groups of subbands divided up for convenient cluster
+        #          procesing -- ie, no more than nproc subbands per compute node
+        # ----------------------------------------------------------------------
+        for to_process in gvds_iterator(vds_file, int(self.inputs["nproc"])):
+            #               to_process is a list of (host, filename, vds) tuples
+            # ------------------------------------------------------------------
+            hosts, ms_names, vds_files = map(list, zip(*to_process))
+
+            #             The BBS session database should be cleared for our key
+            # ------------------------------------------------------------------
+            self.logger.debug(
+                "Cleaning BBS database for key %s" % (self.inputs["key"])
+            )
+            with closing(
+                psycopg2.connect(
+                    host=self.inputs["db_host"],
+                    user=self.inputs["db_user"],
+                    database=self.inputs["db_name"]
+                )
+            ) as db_connection:
+                db_connection.set_isolation_level(
+                    psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
+                )
+                with closing(db_connection.cursor()) as db_cursor:
+                    db_cursor.execute(
+                        "DELETE FROM blackboard.session WHERE key=%s",
+                        (self.inputs["key"],)
+                    )
+
+            #     BBS GlobalControl requires a GVDS file describing all the data
+            #          to be processed. We assemble that from the separate parts
+            #                                         already available on disk.
+            # ------------------------------------------------------------------
+            self.logger.debug("Building VDS file describing data for BBS run")
+            vds_dir = tempfile.mkdtemp()
+            vds_file = os.path.join(vds_dir, "bbs.gvds")
+            combineproc = utilities.spawn_process(
+                [
+                    self.inputs['combinevds'],
+                    vds_file,
+                ] + vds_files,
+                self.logger
+            )
+            sout, serr = combineproc.communicate()
+            log_process_output(self.inputs['combinevds'], sout, serr, self.logger)
+            if combineproc.returncode != 0:
+                raise subprocess.CalledProcessError(
+                    combineproc.returncode, command
+                )
+
+            #      Construct a parset for BBS GlobalControl by patching the GVDS
+            #           file and database information into the supplied template
+            # ------------------------------------------------------------------
+            self.logger.debug("Building parset for BBS control")
+            bbs_parset = utilities.patch_parset(
+                self.inputs['parset'],
+                {
+                    'Observation': vds_file,
+                    'BBDB.Key': self.inputs['key'],
+                    'BBDB.Name': self.inputs['db_name'],
+                    'BBDB.User': self.inputs['db_user'],
+                    'BBDB.Host': self.inputs['db_host'],
+    #                'BBDB.Port': self.inputs['db_name'],
+                }
+            )
+            self.logger.debug("BBS control parset is %s" % (bbs_parset,))
+
+            try:
+                #        When one of our processes fails, we set the killswitch.
+                #      Everything else will then come crashing down, rather than
+                #                                         hanging about forever.
+                # --------------------------------------------------------------
+                self.killswitch = threading.Event()
+                self.killswitch.clear()
+                signal.signal(signal.SIGTERM, self.killswitch.set)
+
+                #                           GlobalControl runs in its own thread
+                # --------------------------------------------------------------
+                run_flag = threading.Event()
+                run_flag.clear()
+                bbs_control = threading.Thread(
+                    target=self._run_bbs_control,
+                    args=(bbs_parset, run_flag)
+                )
+                bbs_control.start()
+                run_flag.wait()    # Wait for control to start before proceeding
+
+                #      We run BBS KernelControl on each compute node by directly
+                #                             invoking the node script using SSH
+                #      Note that we use a job_server to send out job details and
+                #           collect logging information, so we define a bunch of
+                #    ComputeJobs. However, we need more control than the generic
+                #     ComputeJob.dispatch method supplies, so we'll control them
+                #                                          with our own threads.
+                # --------------------------------------------------------------
+                command = "python %s" % (self.__file__.replace('master', 'nodes'))
+                env = {
+                    "LOFARROOT": utilities.read_initscript(self.logger, self.inputs['initscript'])["LOFARROOT"],
+                    "PYTHONPATH": self.config.get('deploy', 'engine_ppath'),
+                    "LD_LIBRARY_PATH": self.config.get('deploy', 'engine_lpath')
+                }
+                jobpool = {}
+                bbs_kernels = []
+                with job_server(self.logger, jobpool, self.error) as (jobhost, jobport):
+                    self.logger.debug("Job server at %s:%d" % (jobhost, jobport))
+                    for job_id, details in enumerate(to_process):
+                        host, file, vds = details
+                        jobpool[job_id] = ComputeJob(
+                            host, command,
+                            arguments=[
+                                self.inputs['kernel_exec'],
+                                self.inputs['initscript'],
+                                file,
+                                self.inputs['key'],
+                                self.inputs['db_name'],
+                                self.inputs['db_user'],
+                                self.inputs['db_host']
+                            ]
+                        )
+                        bbs_kernels.append(
+                            threading.Thread(
+                                target=self._run_bbs_kernel,
+                                args=(host, command, env, job_id,
+                                    jobhost, str(jobport)
+                                )
+                            )
+                        )
+                    self.logger.info("Starting %d threads" % len(bbs_kernels))
+                    [thread.start() for thread in bbs_kernels]
+                    self.logger.debug("Waiting for all kernels to complete")
+                    [thread.join() for thread in bbs_kernels]
+
+
+                #         When GlobalControl finishes, our work here is done
+                # ----------------------------------------------------------
+                self.logger.info("Waiting for GlobalControl thread")
+                bbs_control.join()
+            finally:
+                os.unlink(bbs_parset)
+                shutil.rmtree(vds_dir)
+                if self.killswitch.isSet():
+                    #  If killswitch is set, then one of our processes failed so
+                    #                                   the whole run is invalid
+                    # ----------------------------------------------------------
+                    return 1
+
+        return 0
+
+    def _run_bbs_kernel(self, host, command, env, *arguments):
+        """
+        Run command with arguments on the specified host using ssh. Return its
+        return code.
+
+        The resultant process is monitored for failure; see
+        _monitor_process() for details.
+        """
+        try:
+            bbs_kernel_process = run_remote_command(
+                self.config,
+                self.logger,
+                host,
+                command,
+                env,
+                arguments=arguments
+            )
+        except Exception, e:
+            self.logger.exception("BBS Kernel failed to start")
+            self.killswitch.set()
+            return 1
+        result = self._monitor_process(bbs_kernel_process, "BBS Kernel on %s" % host)
+        sout, serr = bbs_kernel_process.communicate()
+        serr = serr.replace("Connection to %s closed.\r\n" % host, "")
+        log_process_output("SSH session (BBS kernel)", sout, serr, self.logger)
+        return result
+
+    def _run_bbs_control(self, bbs_parset, run_flag):
+        """
+        Run BBS Global Control and wait for it to finish. Return its return
+        code.
+        """
+        env = utilities.read_initscript(self.logger, self.inputs['initscript'])
+        self.logger.info("Running BBS GlobalControl")
+        working_dir = tempfile.mkdtemp()
+        with CatchLog4CPlus(
+            working_dir,
+            self.logger.name + ".GlobalControl",
+            os.path.basename(self.inputs['control_exec'])
+        ):
+            with utilities.log_time(self.logger):
+                try:
+                    bbs_control_process = utilities.spawn_process(
+                        [
+                            self.inputs['control_exec'],
+                            bbs_parset,
+                            "0"
+                        ],
+                        self.logger,
+                        cwd=working_dir,
+                        env=env
+                    )
+                    # _monitor_process() needs a convenient kill() method.
+                    bbs_control_process.kill = lambda : os.kill(bbs_control_process.pid, signal.SIGKILL)
+                except OSError, e:
+                    self.logger.error("Failed to spawn BBS Control (%s)" % str(e))
+                    self.killswitch.set()
+                    return 1
+                finally:
+                    run_flag.set()
+
+            returncode = self._monitor_process(
+                bbs_control_process, "BBS Control"
+            )
+            sout, serr = bbs_control_process.communicate()
+        shutil.rmtree(working_dir)
+        log_process_output(
+            self.inputs['control_exec'], sout, serr, self.logger
+        )
+        return returncode
+
+    def _monitor_process(self, process, name="Monitored process"):
+        """
+        Monitor a process for successful exit. If it fails, set the kill
+        switch, so everything else gets killed too. If the kill switch is set,
+        then kill this process off.
+
+        Name is an optional parameter used only for identification in logs.
+        """
+        while True:
+            try:
+                returncode = process.poll()
+                if returncode == None:                   # Process still running
+                    time.sleep(1)
+                elif returncode != 0:                           # Process broke!
+                    self.logger.warn(
+                        "%s returned code %d; aborting run" % (name, returncode)
+                    )
+                    self.killswitch.set()
+                    break
+                else:                                   # Process exited cleanly
+                    self.logger.info("%s clean shutdown" % (name))
+                    break
+                if self.killswitch.isSet():        # Other process failed; abort
+                    self.logger.warn("Killing %s" % (name))
+                    process.kill()
+                    returncode = process.wait()
+                    break
+            except:
+                # An exception here is likely a ctrl-c or similar. Whatever it
+                # is, we bail out.
+                self.killswitch.set()
+        return returncode
+
+if __name__ == '__main__':
+    sys.exit(bbs().main())
diff --git a/CEP/Pipeline/recipes/sip/master/cimager.py b/CEP/Pipeline/recipes/sip/master/cimager.py
new file mode 100644
index 00000000000..9a04f9aecd0
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/cimager.py
@@ -0,0 +1,310 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                  cimager (ASKAP imager) recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from contextlib import contextmanager
+
+import os
+import sys
+import time
+import threading
+import collections
+import subprocess
+import tempfile
+import signal
+
+from pyrap.quanta import quantity
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.pipelinelogging import log_time, log_process_output
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.remotecommand import ProcessLimiter
+from lofarpipe.support.remotecommand import run_remote_command
+from lofarpipe.support.remotecommand import threadwatcher
+from lofarpipe.support.parset import Parset
+from lofarpipe.support.parset import get_parset
+from lofarpipe.support.parset import patched_parset, patch_parset
+from lofarpipe.support.utilities import spawn_process
+from lofarpipe.support.lofarexceptions import PipelineException
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.lofaringredient import LOFARoutput, LOFARinput
+
+class ParsetTypeField(ingredient.StringField):
+    """
+    Input field which accepts the string values either "cimager" or
+    "mwimager". Enables specification of type of parset supplied to the
+    cimager recipe.
+    """
+    def is_valid(self, value):
+        if value == "cimager" or value == "mwimager":
+            return True
+        else:
+            return False
+
+
+class cimager(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Provides a convenient, pipeline-based mechanism of running the cimager on
+    a dataset.
+
+    Can ingest either an MWimager-style parset, converting to cimager format
+    as required, or a cimager parset directly.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'imager_exec': ingredient.ExecField(
+            '--imager-exec',
+            help="cimager executable"
+        ),
+        'convert_exec': ingredient.ExecField(
+            '--convert-exec',
+            help="convertimagerparset executable"
+        ),
+        'parset': ingredient.FileField(
+            '--parset',
+            help="Imager configuration parset (mwimager or cimager format)"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        ),
+        'timestep': ingredient.FloatField(
+            '--timestep',
+            help="If non-zero, multiple images will be made, each using timestep seconds of data",
+            default=0.0
+        ),
+        'results_dir': ingredient.DirectoryField(
+            '--results-dir',
+            help="Directory in which resulting images will be placed",
+        ),
+        'parset_type': ParsetTypeField(
+            '--parset-type',
+            default="mwimager",
+            help="cimager or mwimager"
+        ),
+        'makevds': ingredient.ExecField(
+            '--makevds',
+            help="makevds executable",
+            default="/opt/LofIm/daily/lofar/bin/makevds"
+        ),
+        'combinevds': ingredient.ExecField(
+            '--comebinevds',
+            help="combinevds executable",
+            default="/opt/LofIm/daily/lofar/bin/combinevds"
+        )
+    }
+
+    outputs = {
+        'images': ingredient.ListField()
+    }
+
+    def go(self):
+        self.logger.info("Starting cimager run")
+        super(cimager, self).go()
+        self.outputs['images' ] = []
+
+        #              Build a GVDS file describing all the data to be processed
+        # ----------------------------------------------------------------------
+        self.logger.debug("Building VDS file describing all data for cimager")
+        gvds_file = os.path.join(
+            self.config.get("layout", "job_directory"),
+            "vds",
+            "cimager.gvds"
+        )
+        inputs = LOFARinput(self.inputs)
+        inputs['args'] = self.inputs['args']
+        inputs['gvds'] = gvds_file
+        inputs['unlink'] = False
+        inputs['makevds'] = self.inputs['makevds']
+        inputs['combinevds'] = self.inputs['combinevds']
+        inputs['nproc'] = self.inputs['nproc']
+        inputs['directory'] = os.path.dirname(gvds_file)
+        outputs = LOFARoutput(self.inputs)
+        if self.cook_recipe('new_vdsmaker', inputs, outputs):
+            self.logger.warn("new_vdsmaker reports failure")
+            return 1
+        self.logger.debug("cimager GVDS is %s" % (gvds_file,))
+
+        #                            Read data for processing from the GVDS file
+        # ----------------------------------------------------------------------
+        parset = Parset(gvds_file)
+
+        data = []
+        for part in range(parset.getInt('NParts')):
+            host = parset.getString("Part%d.FileSys" % part).split(":")[0]
+            vds  = parset.getString("Part%d.Name" % part)
+            data.append((host, vds))
+
+        #                                 Divide data into timesteps for imaging
+        #          timesteps is a list of (start, end, results directory) tuples
+        # ----------------------------------------------------------------------
+        timesteps = []
+        results_dir = self.inputs['results_dir']
+        if self.inputs['timestep'] == 0:
+            self.logger.info("No timestep specified; imaging all data")
+            timesteps = [(None, None, results_dir)]
+        else:
+            self.logger.info("Using timestep of %s s" % self.inputs['timestep'])
+            gvds = get_parset(gvds_file)
+            start_time = quantity(gvds['StartTime'].get()).get('s').get_value()
+            end_time = quantity(gvds['EndTime'].get()).get('s').get_value()
+            step = float(self.inputs['timestep'])
+            while start_time < end_time:
+                timesteps.append(
+                    (
+                        start_time, start_time+step,
+                        os.path.join(results_dir, str(start_time))
+                    )
+                )
+                start_time += step
+
+        #                          Run each cimager process in a separate thread
+        # ----------------------------------------------------------------------
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        for label, timestep in enumerate(timesteps):
+            self.logger.info("Processing timestep %d" % label)
+            jobs = []
+            parsets = []
+            start_time, end_time, resultsdir = timestep
+            for host, vds in data:
+                vds_data = Parset(vds)
+                frequency_range = [
+                    vds_data.getDoubleVector("StartFreqs")[0],
+                    vds_data.getDoubleVector("EndFreqs")[-1]
+                ]
+                parsets.append(
+                    self.__get_parset(
+                        os.path.basename(vds_data.getString('FileName')).split('.')[0],
+                        vds_data.getString("FileName"),
+                        str(frequency_range),
+                        vds_data.getString("Extra.FieldDirectionType"),
+                        vds_data.getStringVector("Extra.FieldDirectionRa")[0],
+                        vds_data.getStringVector("Extra.FieldDirectionDec")[0],
+                        'True', # cimager bug: non-restored image unusable
+                    )
+                )
+                jobs.append(
+                    ComputeJob(
+                        host, command,
+                        arguments=[
+                            self.inputs['imager_exec'],
+                            vds,
+                            parsets[-1],
+                            resultsdir,
+                            start_time,
+                            end_time
+                        ]
+                    )
+                )
+            self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+            for parset in parsets:
+                parset = Parset(parset)
+                image_names = parset.getStringVector("Cimager.Images.Names")
+                self.outputs['images'].extend(image_names)
+            [os.unlink(parset) for parset in parsets]
+
+        #                Check if we recorded a failing process before returning
+        # ----------------------------------------------------------------------
+        if self.error.isSet():
+            self.logger.warn("Failed imager process detected")
+            return 1
+        else:
+            return 0
+
+    def __get_parset(
+        self, name, dataset, frequency, ms_dir_type,
+        ms_dir_ra, ms_dir_dec, restore
+    ):
+        def convert_mwimager_parset(parset):
+            try:
+                with patched_parset(
+                    parset,
+                    {
+                        'dataset': dataset,
+                        'Images.frequency': frequency,
+                        'msDirType': ms_dir_type,
+                        'msDirRa': ms_dir_ra,
+                        'msDirDec': ms_dir_dec,
+                        'restore': restore # cimager bug: non-restored image unusable
+                    }
+                ) as cimager_parset:
+                    fd, converted_parset = tempfile.mkstemp(
+                        dir=self.config.get("layout", "job_directory")
+                    )
+                    convert_process = spawn_process(
+                        [
+                            self.inputs['convert_exec'],
+                            cimager_parset,
+                            converted_parset
+                        ],
+                        self.logger
+                    )
+                    os.close(fd)
+                    sout, serr = convert_process.communicate()
+                    log_process_output(self.inputs['convert_exec'], sout, serr, self.logger)
+                    if convert_process.returncode != 0:
+                        raise subprocess.CalledProcessError(
+                            convert_process.returncode, convert_exec
+                        )
+                    return converted_parset
+            except OSError, e:
+                self.logger.error("Failed to spawn convertimagerparset (%s)" % str(e))
+                raise
+            except subprocess.CalledProcessError, e:
+                self.logger.error(str(e))
+                raise
+
+        def populate_cimager_parset(parset):
+            input_parset = Parset(parset)
+            patch_dictionary = {
+                'Cimager.dataset': dataset,
+                'Cimager.restore': restore
+            }
+            image_names = []
+            for image_name in input_parset.getStringVector('Cimager.Images.Names'):
+                image_names.append("%s_%s" % (image_name, name))
+                subset = input_parset.makeSubset(
+                    "Cimager.Images.%s" % image_name,
+                    "Cimager.Images.%s" % image_names[-1]
+                )
+                patch_dictionary[
+                    "Cimager.Images.%s.frequency" % image_names[-1]
+                ] = frequency
+                patch_dictionary[
+                    "Cimager.Images.%s.direction" % image_names[-1]
+                ] = "[ %s,%s,%s ]" % (ms_dir_ra, ms_dir_dec, ms_dir_type)
+                for key in subset:
+                    patch_dictionary[key] = subset[key].get()
+            input_parset.subtractSubset('Cimager.Images.image')
+            for key in input_parset:
+                patch_dictionary[key] = input_parset[key].get()
+            patch_dictionary['Cimager.Images.Names'] = "[ %s ]" % ", ".join(image_names)
+            return patch_parset(
+                None, patch_dictionary,
+                self.config.get("layout", "job_directory")
+            )
+
+        try:
+            if self.inputs['parset_type'] == "mwimager":
+                cimager_parset = convert_mwimager_parset(self.inputs['parset'])
+            elif self.inputs['parset_type'] == "cimager":
+                cimager_parset = populate_cimager_parset(self.inputs['parset'])
+        except Exception, e:
+            self.logger.exception("Failed to generate imager parset")
+            raise
+
+        return cimager_parset
+
+if __name__ == '__main__':
+    # Allow this recipe to be invoked directly as a script.
+    sys.exit(cimager().main())
diff --git a/CEP/Pipeline/recipes/sip/master/count_timesteps.py b/CEP/Pipeline/recipes/sip/master/count_timesteps.py
new file mode 100644
index 00000000000..04311bb409e
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/count_timesteps.py
@@ -0,0 +1,62 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                             Return total length of observation
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import os
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.remotecommand import ComputeJob
+
+class count_timesteps(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Determine the time range covered by the input data.
+
+    For every MeasurementSet in the supplied mapfile a node script is
+    dispatched which reports that MS's start and end times; this recipe
+    outputs the minimum start time and maximum end time over all MSs.
+
+    NOTE(review): the original docstring described flagging baselines and
+    appears to have been copied from a flagging recipe.
+    """
+    inputs = {
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+    outputs = {
+        # Observation start/end times, in whatever units the node script
+        # reports (presumably seconds -- confirm against the node recipe).
+        'start_time': ingredient.FloatField(),
+        'end_time': ingredient.FloatField()
+    }
+
+    def go(self):
+        """
+        Load the data map, dispatch one compute job per MS, then reduce the
+        per-MS results to overall start/end times.
+        """
+        self.logger.info("Starting count_timesteps run")
+        super(count_timesteps, self).go()
+
+        # The first positional argument is the mapfile of (host, MS) pairs.
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+        # The node-side script lives at the same path with master -> nodes.
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host, ms in data:
+            jobs.append(
+                ComputeJob(
+                    host, command, arguments=[ms]
+                )
+            )
+        jobs = self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        # _schedule_jobs returns a mapping of jobs; each job's results dict
+        # carries 'start_time'/'end_time' as reported by the node script.
+        self.outputs['start_time'] = min(job.results['start_time'] for job in jobs.itervalues())
+        self.outputs['end_time'] = max(job.results['end_time'] for job in jobs.itervalues())
+
+        if self.error.isSet():
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(count_timesteps().main())
diff --git a/CEP/Pipeline/recipes/sip/master/datamapper.py b/CEP/Pipeline/recipes/sip/master/datamapper.py
new file mode 100644
index 00000000000..6f4a636dfd6
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/datamapper.py
@@ -0,0 +1,81 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                 Map subbands on storage nodes to compute nodes
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import os.path
+from itertools import cycle
+from collections import defaultdict
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.clusterdesc import ClusterDesc, get_compute_nodes
+from lofarpipe.support.parset import Parset
+import lofarpipe.support.lofaringredient as ingredient
+
+class datamapper(BaseRecipe):
+    """
+    Parses a list of filenames and attempts to map them to appropriate compute
+    nodes (ie, which can access the files) on the LOFAR CEP cluster. Mapping
+    by filename in this way is fragile, but is the best we can do for now.
+
+    **Arguments**
+
+    None.
+    """
+    inputs = {
+        'mapfile': ingredient.StringField(
+            '--mapfile',
+            help="Full path (including filename) of mapfile to produce (clobbered if exists)"
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField(
+            help="Full path (including filename) of generated mapfile"
+        )
+    }
+
+    def go(self):
+        """
+        Assign each input filename to a compute node in its subcluster,
+        round-robin, and write the resulting host -> filenames mapping to
+        the mapfile named in the inputs.
+        """
+        self.logger.info("Starting datamapper run")
+        super(datamapper, self).go()
+
+        #      We build lists of compute-nodes per cluster and data-per-cluster,
+        #          then match them up to schedule jobs in a round-robin fashion.
+        # ----------------------------------------------------------------------
+        clusterdesc = ClusterDesc(self.config.get('cluster', "clusterdesc"))
+        if clusterdesc.subclusters:
+            # One endlessly-cycling iterator of compute nodes per subcluster.
+            available_nodes = dict(
+                (cl.name, cycle(get_compute_nodes(cl)))
+                for cl in clusterdesc.subclusters
+            )
+        else:
+            available_nodes = {
+                clusterdesc.name: cycle(get_compute_nodes(clusterdesc))
+            }
+
+        data = defaultdict(list)
+        for filename in self.inputs['args']:
+            # For an absolute path /a/b/..., element 2 of the split is "b",
+            # taken as the subcluster name -- fragile, as noted above.
+            subcluster = filename.split(os.path.sep)[2]
+            try:
+                host = available_nodes[subcluster].next()
+            except KeyError, key:
+                self.logger.error("%s is not a known cluster" % str(key))
+                raise
+
+            data[host].append(filename)
+
+        #                                 Dump the generated mapping to a parset
+        # ----------------------------------------------------------------------
+        parset = Parset()
+        for host, filenames in data.iteritems():
+            parset.addStringVector(host, filenames)
+
+        parset.writeFile(self.inputs['mapfile'])
+        self.outputs['mapfile'] = self.inputs['mapfile']
+
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(datamapper().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/casapy.py b/CEP/Pipeline/recipes/sip/master/deprecated/casapy.py
new file mode 100644
index 00000000000..36332ca328d
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/casapy.py
@@ -0,0 +1,186 @@
+from __future__ import with_statement
+import sys, os
+
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.lofaringredient import LOFARoutput, LOFARinput
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.group_data import group_files
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.clusterdesc import ClusterDesc
+
+def run_casapy(infile, parset, start_time, end_time, increment):
+    """
+    Run on an IPython engine to process one MeasurementSet with CASApy.
+
+    NOTE: loghost and logport are not parameters; they are expected to be
+    present in the engine's global namespace (the master recipe below
+    push()es them with each task before this function runs).
+    """
+    from lofarrecipe.nodes.casapy import casapy_node
+    return casapy_node(loghost=loghost, logport=logport).run(
+        infile,
+        parset,
+        start_time,
+        end_time,
+        increment
+    )
+
+class casapy(LOFARrecipe):
+    """
+    Deprecated IPython-based recipe: schedules a CASApy imaging job on a
+    compute engine for each input MeasurementSet, grouped per cluster node.
+    """
+    def __init__(self):
+        super(casapy, self).__init__()
+        self.optionparser.add_option(
+            '--executable',
+            dest="executable",
+            help="CASApy executable"
+        )
+        self.optionparser.add_option(
+            '-p', '--parset',
+            dest="parset",
+            help="Parset containing configuration for CASAPY"
+        )
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+        self.optionparser.add_option(
+            '-t', '--increment',
+            dest="increment",
+            help="Length of each image in seconds"
+        )
+        self.optionparser.add_option(
+            '-g', '--g(v)ds-file',
+            dest="gvds",
+            help="G(V)DS file describing data to be processed"
+        )
+        self.optionparser.add_option(
+            '--makevds-exec',
+            dest="makevds_exec",
+            help="makevds executable"
+        )
+        self.optionparser.add_option(
+            '--combinevds-exec',
+            dest="combinevds_exec",
+            help="combinevds executable"
+        )
+        self.optionparser.add_option(
+            '--max-bands-per-node',
+            dest="max_bands_per_node",
+            help="Maximum number of subbands to farm out to a given cluster node",
+            default="8"
+        )
+
+    def go(self):
+        self.logger.info("Starting CASApy run")
+        super(casapy, self).go()
+
+        # NOTE(review): job_directory is never used below.
+        job_directory = self.config.get("layout", "job_directory")
+
+        # Connect to the IPython cluster and initialise it with
+        # the functions we need.
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                run_casapy=run_casapy,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        # Use build_available_list() to determine which SBs are available
+        # on each engine; we use this for dependency resolution later.
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (
+            self.inputs['job_name'], self.__class__.__name__
+        )
+        mec.push(dict(filenames=self.inputs['args']))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+
+        clusterdesc = ClusterDesc(
+            self.config.get('cluster', 'clusterdesc')
+        )
+
+        # Process the subbands in groups of at most max_bands_per_node per
+        # cluster node.
+        for data_group in group_files(
+            self.logger,
+            clusterdesc,
+            os.path.join(self.inputs['working_directory'], self.inputs['job_name']),
+            int(self.inputs['max_bands_per_node']),
+            self.inputs['args']
+        ):
+            self.logger.debug("Processing: " + str(data_group))
+            self.logger.info("Calling vdsmaker")
+            inputs = LOFARinput(self.inputs)
+            inputs['directory'] = self.config.get('layout', 'vds_directory')
+            inputs['gvds'] = self.inputs['gvds']
+            inputs['args'] = data_group
+            inputs['makevds'] = self.inputs['makevds_exec']
+            inputs['combinevds'] = self.inputs['combinevds_exec']
+            outputs = LOFARoutput()
+            if self.cook_recipe('vdsmaker', inputs, outputs):
+                self.logger.warn("vdsmaker reports failure")
+                return 1
+
+
+            # Read the observation's start/end times from the GVDS file the
+            # vdsmaker run just produced.
+            gvds = utilities.get_parset(
+                os.path.join(
+                    self.config.get('layout', 'vds_directory'), self.inputs['gvds']
+                )
+            )
+            start_time = gvds['StartTime']
+            end_time = gvds['EndTime']
+            # The option arrives as a string; coerce it to an int here.
+            self.inputs['increment'] = int(self.inputs['increment'])
+
+            # clusterlogger context manager accepts networked logging
+            # from compute nodes.
+            with clusterlogger(self.logger) as (loghost, logport):
+                # Timer for total casapy job execution
+                with utilities.log_time(self.logger):
+                    self.logger.debug("Logging to %s:%d" % (loghost, logport))
+                    tasks = []
+                    # Iterate over SB names, building and scheduling a casapy job
+                    # for each one.
+                    for ms_name in data_group:
+                        task = LOFARTask(
+                            "result = run_casapy(infile, parset, start_time, end_time, increment)",
+                            push=dict(
+                                infile=ms_name,
+                                parset=self.inputs['parset'],
+                                start_time=start_time,
+                                end_time=end_time,
+                                increment=self.inputs['increment'],
+                                loghost=loghost,
+                                logport=logport
+                            ),
+                            pull="result",
+                            depend=utilities.check_for_path,
+                            dependargs=(ms_name, available_list)
+                        )
+                        self.logger.info("Scheduling processing of %s" % (ms_name,))
+                        # dry_run may arrive as the string "False"; normalise.
+                        if self.inputs['dry_run'] == "False":
+                            self.inputs['dry_run'] = False
+                        if not self.inputs['dry_run']:
+                            tasks.append(tc.run(task))
+                        else:
+                            self.logger.info("Dry run: scheduling skipped")
+
+                    # Wait for all jobs to finish
+                    self.logger.info("Waiting for all CASApy tasks to complete")
+                    tc.barrier(tasks)
+
+            # NOTE(review): "failure" is re-initialised for every data_group
+            # and only tested after the loop, so failures in earlier groups
+            # are forgotten; it is also unbound if there were no groups.
+            failure = False
+            for task in tasks:
+                ##### Print failing tasks?
+                ##### Abort if all tasks failed?
+                res = tc.get_task_result(task)
+                if res.failure:
+                    self.logger.warn("Task %s failed" % (task))
+                    self.logger.warn(res)
+                    self.logger.warn(res.failure.getTraceback())
+                    failure = True
+
+        if failure:
+            return 1
+#            self.outputs['data'] = outnames
+        return 0
+
+if __name__ == '__main__':
+    # Allow this recipe to be invoked directly as a script.
+    sys.exit(casapy().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/collector.py b/CEP/Pipeline/recipes/sip/master/deprecated/collector.py
new file mode 100644
index 00000000000..5d87ba4845c
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/collector.py
@@ -0,0 +1,140 @@
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.clusterdesc import ClusterDesc, get_compute_nodes
+import lofarpipe.support.utilities as utilities
+import pyrap.images
+from skim.main import run as create_hdf5
+import os, os.path, glob, subprocess, sys, numpy
+import shutil, errno, re, logging, imp
+
+class collector(LOFARrecipe):
+    """
+    Collect images into results directory.
+    Convert to fits files.
+    Average.
+
+    Outstanding issue: breaks if the results directory is already
+    populated and the --clobber option isn't set.
+    """
+
+    def __init__(self):
+        super(collector, self).__init__()
+        self.optionparser.add_option(
+            '--image-re',
+            dest="image_re",
+            # NOTE(review): despite the name, this value is used as a
+            # shell/glob pattern below (ssh cp and glob.glob), not a regex.
+            help="Regular expression to match CASA image names",
+        )
+        self.optionparser.add_option(
+            '--working-directory',
+            dest="working_directory",
+            help="Working directory containing images on compute nodes",
+        )
+        self.optionparser.add_option(
+            '--image2fits',
+            dest="image2fits",
+            help="Location of image2fits tool (from casacore)"
+        )
+        self.optionparser.add_option(
+            '--averaged-name',
+            dest="averaged_name",
+            help="Base filename for averaged images"
+        )
+
+    def go(self):
+        """
+        Copy matching images from every compute node into the results
+        directory, average them into a single image (.img and .fits),
+        convert each image to FITS, and build an HDF5 summary file.
+        """
+        self.logger.info("Starting data collector run")
+        super(collector, self).go()
+
+        clusterdesc = ClusterDesc(
+            self.config.get('cluster', 'clusterdesc')
+        )
+        results_dir = self.config.get('layout', 'results_directory')
+        try:
+            os.makedirs(results_dir)
+        except OSError, failure:
+            # An already-existing directory is fine; anything else is not.
+            if failure.errno != errno.EEXIST:
+                raise
+
+        self.logger.debug("Copying CASA images to to %s"  % (results_dir))
+        for node in get_compute_nodes(clusterdesc):
+            self.logger.debug("Node: %s" % (node))
+            try:
+                exec_string = [
+                            "ssh",
+                            node,
+                            "--",
+                            "cp",
+                            "-r",
+                            "%s/%s/%s" % (
+                                self.inputs['working_directory'],
+                                self.inputs['job_name'],
+                                self.inputs['image_re']
+                            ),
+                            results_dir
+                    ]
+                self.logger.info(exec_string)
+                subprocess.check_call(exec_string, close_fds=True)
+            except subprocess.CalledProcessError:
+                # cp fails on a node with no matching images; carry on.
+                self.logger.warn("No images moved from %s" % (node))
+        
+        image_names = glob.glob("%s/%s" % (results_dir, self.inputs['image_re']))
+        if len(image_names) > 0:
+            self.logger.info("Averaging results")
+            # Pixel-wise mean over all collected images.
+            result = reduce(
+                numpy.add,
+                (pyrap.images.image(file).getdata() for file in image_names)
+            ) / len(image_names)
+
+            self.logger.info("Writing averaged files")
+            averaged_file = os.path.join(
+                        self.config.get('layout', 'results_directory'),
+                        self.inputs['averaged_name']
+            )
+            # Output for the averaged image.
+            # Use the coordinate system from SB0.
+            output = pyrap.images.image(
+                averaged_file + ".img", values=result,
+                coordsys=pyrap.images.image(image_names[0]).coordinates()
+            )
+            self.logger.info("Wrote: %s" % (averaged_file + ".img",))
+            output.tofits(averaged_file + ".fits")
+            self.logger.info("Wrote: %s" % (averaged_file + ".fits",))
+            self.outputs['data'] = (averaged_file + ".fits",)
+        else:
+            self.logger.info("No images found; not averaging")
+            self.outputs['data'] = None
+
+        self.logger.info("Generating FITS files")
+        fits_files = []
+        for filename in image_names:
+            self.logger.debug(filename)
+            # Name each FITS file after the subband (SBnnn) in the image name.
+            subband = re.search('(SB\d+)', os.path.basename(filename)).group()
+            output = os.path.join(
+                self.config.get('layout', 'results_directory'),
+                "%s.fits" % (subband)
+            )
+            fits_files.append(output)
+            subprocess.check_call(
+                [
+                    self.inputs['image2fits'],
+                    'in=%s' % (filename),
+                    'out=%s' % (output)
+                ],
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                close_fds=True
+            )
+
+        self.logger.info("Creating HDF5 file")
+        hdf5logger = logging.getLogger(self.logger.name + ".hdf5")
+        hdf5logger.setLevel(logging.INFO)
+        # NOTE(review): 'start_time' is not declared as an option by this
+        # recipe -- presumably supplied by the caller; verify.
+        create_hdf5(
+            self.config.get('layout', 'job_directory'),
+            self.inputs['start_time'],
+            hdf5logger
+        )
+
+        return 0
+
+if __name__ == '__main__':
+    # Allow this recipe to be invoked directly as a script.
+    sys.exit(collector().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/colmaker.py b/CEP/Pipeline/recipes/sip/master/deprecated/colmaker.py
new file mode 100644
index 00000000000..cf4bf423b56
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/colmaker.py
@@ -0,0 +1,67 @@
+from __future__ import with_statement
+import sys, os, tempfile
+
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.clusterlogger import clusterlogger
+
+def make_columns(file):
+    # Executed on the compute engines, not the master: ``loghost`` and
+    # ``logport`` are globals pushed into the engine namespace by the
+    # master (via LOFARTask's push dict) before this function is invoked.
+    # NOTE(review): imports "lofarrecipe.nodes" while the master-side code
+    # imports from "lofarpipe" -- presumably the engine-side package name;
+    # confirm against the deployment.
+    from lofarrecipe.nodes.colmaker import makecolumns_node
+    return makecolumns_node(loghost=loghost, logport=logport).run(file)
+
+class colmaker(LOFARrecipe):
+    """
+    Add imaging columns to inputs using pyrap.
+    """
+    def go(self):
+        super(colmaker, self).go()
+
+        ms_names = self.inputs['args']
+
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                make_columns=make_columns,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        # Build VDS files for each of the newly created MeasurementSets
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (self.inputs['job_name'], "colmaker")
+        mec.push(dict(filenames=ms_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+        clusterdesc = self.config.get('cluster', 'clusterdesc')
+        tasks = []
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            for ms_name in ms_names:
+                task = LOFARTask(
+                    "result = make_columns(ms_name)",
+                    push=dict(
+                        ms_name=ms_name,
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result",
+                    depend=utilities.check_for_path,
+                    dependargs=(ms_name, available_list)
+                )
+                self.logger.info("Scheduling processing of %s" % (ms_name,))
+                tasks.append(tc.run(task))
+            self.logger.info("Waiting for all colmaker tasks to complete")
+            tc.barrier(tasks)
+        for task in tasks:
+            res = tc.get_task_result(task)
+            if res.failure:
+                print res.failure
+
+        mec.execute("clear_available_list(\"%s\")" % (available_list,))
+
+if __name__ == '__main__':
+    sys.exit(colmaker().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/copier.py b/CEP/Pipeline/recipes/sip/master/deprecated/copier.py
new file mode 100644
index 00000000000..47434b3d242
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/copier.py
@@ -0,0 +1,50 @@
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+import lofarpipe.support.utilities as utilities
+import os.path
+
+class copier(LOFARrecipe):
+    """
+    Copy files to compute nodes.
+    """
+    def __init__(self):
+        super(copier, self).__init__()
+        self.optionparser.add_option(
+            '--destination',
+            dest="destination",
+            help="Destination directory on compute nodes"
+        )
+
+    def go(self):
+        self.logger.info("Starting copier run")
+        super(copier, self).go()
+
+        tc, mec = self._get_cluster()
+
+        mec.execute('import shutil')
+
+        self.logger.info("Compiling list of output destinations")
+        destinations = [
+            os.path.join(
+                self.inputs['destination'],
+                os.path.basename(file)
+            )
+            for file in self.inputs['args']
+        ]
+        self.logger.debug(destinations)
+
+        self.logger.info("Copying files on cluster")
+        try:
+            tc.map(
+                lambda x: shutil.copytree(x[0], x[1]),
+                zip(self.inputs['args'], destinations)
+            )
+        except Exception, e:
+            self.logger.exception('Failed to copy files on cluster')
+            return 1
+
+        self.outputs['ms_names'] = destinations
+
+        return 0
+
+
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/dppp.py b/CEP/Pipeline/recipes/sip/master/deprecated/dppp.py
new file mode 100644
index 00000000000..71795699c67
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/dppp.py
@@ -0,0 +1,151 @@
+from __future__ import with_statement
+import sys, os
+
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.lofarnode import run_node
+
+class dppp(LOFARrecipe):
+    def __init__(self):
+        super(dppp, self).__init__()
+        self.optionparser.add_option(
+            '--executable',
+            dest="executable",
+            help="DPPP executable"
+        )
+        self.optionparser.add_option(
+            '--initscript',
+            dest="initscript",
+            help="DPPP initscript"
+        )
+        self.optionparser.add_option(
+            '-p', '--parset',
+            dest="parset",
+            help="Parset containing configuration for DPPP"
+        )
+        self.optionparser.add_option(
+            '--suffix',
+            dest="suffix",
+            default=".dppp",
+            help="Suffix to add to trimmed data (default: overwrite existing)"
+        )
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+        self.optionparser.add_option(
+            '--data-start-time',
+            help="Start time to be passed to DPPP (optional)",
+        )
+        self.optionparser.add_option(
+            '--data-end-time',
+            help="End time to be passed to DPPP (optional)",
+        )
+        self.optionparser.add_option(
+            '--nthreads',
+            help="Number of threads per (N)DPPP process",
+            default="2"
+        )
+
+
+    def go(self):
+        self.logger.info("Starting DPPP run")
+        super(dppp, self).go()
+
+        job_directory = self.config.get("layout", "job_directory")
+        ms_names = self.inputs['args']
+
+        # Connect to the IPython cluster and initialise it with
+        # the funtions we need.
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                run_dppp=run_node,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.debug("Pushed functions to cluster")
+
+        # Use build_available_list() to determine which SBs are available
+        # on each engine; we use this for dependency resolution later.
+        self.logger.debug("Building list of data available on engines")
+        available_list = "%s%s" % (
+            self.inputs['job_name'], self.__class__.__name__
+        )
+        mec.push(dict(filenames=ms_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+        self.logger.debug("Data lists available. Starting processing loop.")
+
+        # clusterlogger context manager accepts networked logging
+        # from compute nodes.
+        with clusterlogger(self.logger) as (loghost, logport):
+            # Timer for total DPPP job execution
+            with utilities.log_time(self.logger):
+                self.logger.debug("Logging to %s:%d" % (loghost, logport))
+                tasks = []
+                outnames = []
+                # Iterate over SB names, building and scheduling a DPPP job
+                # for each one.
+                for ms_name in ms_names:
+                    outnames.append(
+                        os.path.join(
+                            self.inputs['working_directory'],
+                            self.inputs['job_name'],
+                            os.path.basename(ms_name) + self.inputs['suffix']
+                        )
+                    )
+                    task = LOFARTask(
+                        "result = run_dppp(ms_name, ms_outname, parset, executable, initscript, start_time, end_time, nthreads)",
+                        push=dict(
+                            recipename=self.name,
+                            nodepath=os.path.dirname(self.__file__.replace('master', 'nodes')),
+                            ms_name=ms_name,
+                            ms_outname=outnames[-1],
+                            parset=self.inputs['parset'],
+                            executable=self.inputs['executable'],
+                            initscript=self.inputs['initscript'],
+                            start_time=self.inputs['data_start_time'],
+                            end_time=self.inputs['data_end_time'],
+                            end_time=self.inputs['nthreads'],
+                            loghost=loghost,
+                            logport=logport
+                        ),
+                        pull="result",
+                        depend=utilities.check_for_path,
+                        dependargs=(ms_name, available_list)
+                    )
+                    self.logger.info("Scheduling processing of %s" % (ms_name,))
+                    if self.inputs['dry_run'] == "False":
+                        self.inputs['dry_run'] = False
+                    if not self.inputs['dry_run']:
+                        tasks.append((tc.run(task), ms_name))
+                    else:
+                        self.logger.info("Dry run: scheduling skipped")
+
+                # Wait for all jobs to finish
+                self.logger.debug("Waiting for all DPPP tasks to complete")
+                tc.barrier([task for task, subband in tasks])
+
+        failure = False
+        for task, subband in tasks:
+            ##### Print failing tasks?
+            ##### Abort if all tasks failed?
+            res = tc.get_task_result(task)
+            if res.failure:
+                self.logger.warn("Task %s failed (processing %s)" % (task, subband))
+                self.logger.warn(res)
+                self.logger.warn(res.failure.getTraceback())
+                failure = True
+        if failure:
+            return 1
+        self.outputs['data'] = outnames
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(dppp().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/dummy_echo_parallel.py b/CEP/Pipeline/recipes/sip/master/deprecated/dummy_echo_parallel.py
new file mode 100644
index 00000000000..efab8383573
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/dummy_echo_parallel.py
@@ -0,0 +1,63 @@
+from __future__ import with_statement
+import sys, os
+
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.lofarnode import run_node
+
+class dummy_echo_parallel(LOFARrecipe):
+    def __init__(self):
+        super(dummy_echo_parallel, self).__init__()
+        self.optionparser.add_option(
+            '--executable',
+            dest="executable",
+            help="Executable to be run (ie, dummy_echo.sh)",
+            default="/home/swinbank/sw/bin/dummy_echo.sh"
+        )
+
+    def go(self):
+        self.logger.info("Starting dummy_echo run")
+        super(dummy_echo_parallel, self).go()
+
+        # Connect to the IPython cluster and initialise it with the functions
+        # we need.
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                run_dummy_echo=run_node,
+            )
+        )
+        self.logger.info("Cluster initialised")
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            self.logger.debug("Logging to %s:%d" % (loghost, logport))
+            tasks = [] # this will be a list of scheduled jobs
+            for filename in self.inputs['args']:
+                task = LOFARTask(
+                    "result = run_dummy_echo(filename, executable)",
+                    push = dict(
+                        recipename=self.name,
+                        nodepath=os.path.dirname(self.__file__.replace('master', 'nodes')),
+                        filename=filename,
+                        executable=self.inputs['executable'],
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result"
+                )
+                self.logger.info("Scheduling processing of %s" % (filename))
+                tasks.append(tc.run(task))
+            self.logger.info("Waiting for all dummy_echo tasks to complete")
+            tc.barrier(tasks)
+
+        for task in tasks:
+            result = tc.get_task_result(task)
+            if result.failure:
+                self.logger.warn(result)
+                self.logger.warn(result.failure.getTraceback())
+
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(dummy_echo_parallel().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/excluder.py b/CEP/Pipeline/recipes/sip/master/deprecated/excluder.py
new file mode 100644
index 00000000000..7c023ea6c11
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/excluder.py
@@ -0,0 +1,18 @@
+import sys
+from lofarpipe.support.pyraprunner import pyraprunner
+from lofarpipe.support.utilities import string_to_list
+
+class excluder(pyraprunner):
+    def __init__(self):
+        super(excluder, self).__init__()
+        self.optionparser.add_option(
+            '--station',
+            dest="station",
+            help="Name of stations to exclude (e.g. DE001LBA)"
+        )
+
+    def _generate_arguments(self):
+        return "\"%s\"" % ('\", \"'.join(string_to_list(self.inputs['station'])))
+
+if __name__ == '__main__':
+    sys.exit(excluder().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/flagger.py b/CEP/Pipeline/recipes/sip/master/deprecated/flagger.py
new file mode 100644
index 00000000000..3a303aead78
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/flagger.py
@@ -0,0 +1,68 @@
+from __future__ import with_statement
+import sys, os
+from lofarpipe.support.pyraprunner import pyraprunner
+
+# Quick n dirty tool to read ASCII sourcelists used as input by BBS.
+# The format for these doesn't seem all that well specified: see the
+# makesourcedb tool, which vaguely refers to a format string which might have
+# spaces, commas, ...
+# We'll do our best.
+
+class Source(dict):
+    """A single sky-model source: a mapping of field name -> string value."""
+    pass
+
+class SourceList(list):
+    def __init__(self, filename):
+        # Default format if we can't read one from the file
+        format = (
+            "Name", "Type", "Ra", "Dec", "I", "Q", "U", "V",
+            "ReferenceFrequency='60e6'", "SpectralIndexDegree='0'",
+            "SpectralIndex:0='0.0'", "Major", "Minor", "Phi"
+        )
+        with open(filename, 'r') as file:
+            try:
+                # Maybe the first line is a comma-separated format string...
+                first_line = file.readline().strip().split()
+                if first_line.split()[-1] == "format":
+                    format = map(str.strip, first_line[3:-10].split(","))
+                else:
+                    raise
+            except:
+                # ...or maybe not.
+                file.seek(0)
+            for line in file:
+                if len(line.strip()) == 0 or line.strip()[0] == '#': continue
+                data = map(str.strip, line.split(','))
+                self.append(Source(zip(format, data)))
+
+class flagger(pyraprunner):
+    def __init__(self):
+        super(flagger, self).__init__()
+        self.optionparser.add_option(
+            '-s', '--skymodel',
+            dest="skymodel",
+            help="initial sky model (in makesourcedb format)"
+        )
+        self.optionparser.add_option(
+            '--n-factor',
+            dest="n_factor",
+            type="float",
+            help="Custom factor for flagging threshold"
+        )
+
+    def _generate_arguments(self):
+        self.inputs['skymodel'] = os.path.join(
+            self.config.get("layout", "parset_directory"),
+            self.inputs['skymodel']
+        )
+        self.logger.info("Using %s for %s skymodel" %
+            (self.inputs['skymodel'], "flagger")
+        )
+        if not os.access(self.inputs['skymodel'], os.R_OK):
+            raise IOError
+
+        sl = SourceList(self.inputs['skymodel'])
+        return float(self.inputs['n_factor']) * sum(float(s['I']) for s in sl)
+
+if __name__ == '__main__':
+    sys.exit(flagger().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/mwimager.py b/CEP/Pipeline/recipes/sip/master/deprecated/mwimager.py
new file mode 100644
index 00000000000..ec28e97b68b
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/mwimager.py
@@ -0,0 +1,214 @@
+from __future__ import with_statement
+import sys, os, tempfile, glob, subprocess, itertools
+from contextlib import closing
+from lofarpipe.support.clusterdesc import ClusterDesc
+
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.lofaringredient import LOFARinput, LOFARoutput
+from lofarpipe.support.group_data import group_files
+import lofarpipe.support.utilities as utilities
+
+class mwimager(LOFARrecipe):
+    def __init__(self):
+        super(mwimager, self).__init__()
+        self.optionparser.add_option(
+            '--executable',
+            dest="executable",
+            help="Executable to be run (ie, mwimager script)"
+        )
+        self.optionparser.add_option(
+            '--initscript',
+            dest="initscript",
+            help="Initscript to source (ie, lofarinit.sh)"
+        )
+        self.optionparser.add_option(
+            '-g', '--g(v)ds-file',
+            dest="gvds",
+            help="G(V)DS file describing data to be processed"
+        )
+        self.optionparser.add_option(
+            '-p', '--parset',
+            dest="parset",
+            help="MWImager configuration parset"
+        )
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+        self.optionparser.add_option(
+            '--log',
+            dest="log",
+            help="Log file"
+        )
+        self.optionparser.add_option(
+            '--askapsoft-path',
+            dest="askapsoft_path",
+            help="Path to cimager.sh"
+        )
+        self.optionparser.add_option(
+            '--casa',
+            dest="casa",
+            help="Use the CASA lwimager",
+            action="store_true"
+        )
+        self.optionparser.add_option(
+            '--makevds-exec',
+            dest="makevds_exec",
+            help="makevds executable"
+        )
+        self.optionparser.add_option(
+            '--combinevds-exec',
+            dest="combinevds_exec",
+            help="combinevds executable"
+        )
+        self.optionparser.add_option(
+            '--max-bands-per-node',
+            dest="max_bands_per_node",
+            help="Maximum number of subbands to farm out to a given cluster node",
+            default="8"
+        )
+
+    def go(self):
+        self.logger.info("Starting MWImager run")
+        super(mwimager, self).go()
+
+        clusterdesc = ClusterDesc(
+            self.config.get('cluster', 'clusterdesc')
+        )
+
+        self.outputs["data"] = []
+
+        # Given a limited number of processes per node, the first task is to
+        # partition up the data for processing.
+        for iteration, data_group in enumerate(group_files(
+            self.logger,
+            clusterdesc,
+            os.path.join(self.inputs['working_directory'], self.inputs['job_name']),
+            int(self.inputs['max_bands_per_node']),
+            self.inputs['args']
+        )):
+            self.logger.info("Calling vdsmaker")
+            vds_file = os.path.join(
+                self.config.get("layout", "vds_directory"), self.inputs['gvds']
+            )
+            self.run_task('vdsmaker', data_group, gvds=vds_file, unlink=False)
+
+            # Patch GVDS filename into parset
+            self.logger.debug("Setting up MWImager configuration")
+            temp_parset_filename = utilities.patch_parset(
+                self.inputs['parset'],
+                {
+                    'dataset': os.path.join(
+                        self.config.get('layout', 'vds_directory'), self.inputs['gvds']
+                    )
+                },
+                self.config.get('layout', 'parset_directory')
+            )
+
+            # Individual subband logs go in a temporary directory
+            # to be sorted out later.
+            log_root = os.path.join(tempfile.mkdtemp(), self.inputs['log'])
+            self.logger.debug("Logs dumped with root %s" % (log_root))
+
+            # Initscript for basic LOFAR utilities
+            env = utilities.read_initscript(self.inputs['initscript'])
+            # Also add the path for cimager.sh
+            env['PATH'] = "%s:%s" % (self.inputs['askapsoft_path'], env['PATH'])
+
+            # For the overall MWimgager log
+            log_location = "%s/%s" % (
+                self.config.get('layout', 'log_directory'),
+                self.inputs['log']
+            )
+            self.logger.debug("Logging to %s" % (log_location))
+
+            mwimager_cmd = [
+                self.inputs['executable'],
+                temp_parset_filename,
+                self.config.get('cluster', 'clusterdesc'),
+                os.path.join(
+                    self.inputs['working_directory'],
+                    self.inputs['job_name']
+                ),
+                log_root
+            ]
+            if self.inputs['casa'] is True or self.inputs['casa'] == "True":
+                mwimager_cmd.insert(1, '-casa')
+            try:
+                self.logger.info("Running MWImager")
+                self.logger.debug("Executing: %s" % " ".join(mwimager_cmd))
+                if not self.inputs['dry_run']:
+                    with utilities.log_time(self.logger):
+                        with closing(open(log_location + '-' + str(iteration), 'w')) as log:
+                            result = subprocess.check_call(
+                                mwimager_cmd,
+                                env=env,
+                                stdout=log,
+                                stderr=log,
+                                close_fds=True
+                            )
+                else:
+                    self.logger.info("Dry run: execution skipped")
+                    result = 0
+            except subprocess.CalledProcessError:
+                self.logger.exception("Call to mwimager failed")
+                result = 1
+            finally:
+                os.unlink(temp_parset_filename)
+
+            # Now parse the log files to:
+            # 1: find the name of the images that have been written
+            # 2: save the logs in appropriate places
+            # This is ugly!
+            self.logger.info("Parsing logfiles")
+            for log_file in glob.glob("%s%s" % (log_root, "*")):
+                self.logger.debug("Processing %s" % (log_file))
+                ms_name, image_name = "", ""
+                with closing(open(log_file)) as file:
+                    for line in file.xreadlines():
+                        if 'Cimager.Images.Names' in line.strip():
+                            try:
+                                image_name = line.strip().split("=")[1].lstrip("['").rstrip("]'")
+                                break
+                            except IndexError:
+                                pass
+                    file.seek(0)
+                    for line in file.xreadlines():
+                        split_line = line.split('=')
+                        if split_line[0] == "Cimager.dataset":
+                            ms_name = os.path.basename(split_line[1].rstrip())
+                            break
+                if not image_name:
+                    self.logger.info("Couldn't identify image for %s "% (log_file))
+                else:
+                    self.logger.debug("Found image: %s" % (image_name))
+                    self.outputs["data"].append(image_name)
+                if not ms_name:
+                    self.logger.info("Couldn't identify file for %s" % (log_file))
+                else:
+                    destination = "%s/%s/%s" % (
+                        self.config.get('layout', 'log_directory'),
+                        ms_name,
+                        self.inputs['log']
+                    )
+                    self.logger.debug(
+                        "Moving logfile %s to %s" % (log_file, destination)
+                    )
+                    utilities.move_log(log_file, destination)
+            try:
+                self.logger.debug("Removing temporary log directory")
+                os.rmdir(os.path.dirname(log_root))
+            except OSError, failure:
+                self.logger.info("Failed to remove temporary directory")
+                self.logger.debug(failure)
+                try:
+                    utilities.move_log(os.path.dirname(log_root), log_location)
+                except:
+                    pass
+
+        return result
+
+if __name__ == '__main__':
+    sys.exit(mwimager().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/pyraprunner.py b/CEP/Pipeline/recipes/sip/master/deprecated/pyraprunner.py
new file mode 100644
index 00000000000..e09b5560208
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/pyraprunner.py
@@ -0,0 +1,98 @@
+from __future__ import with_statement
+import sys, os
+
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.lofarnode import run_node
+
+class pyraprunner(LOFARrecipe):
+    """
+    Provides all the basic infrastructure for applying a pyrap-based filter to
+    code on the cluster, distributed using an IPython task client.
+    """
+    def __init__(self):
+        super(pyraprunner, self).__init__()
+        self.optionparser.add_option(
+            '--suffix',
+            dest="suffix",
+            help="Suffix to add to trimmed data (default: overwrite existing)"
+        )
+
+    # Hook for subclasses: returns a fragment of Python source spliced
+    # verbatim into the remote call's argument list (see execute_string
+    # below).  The base implementation supplies no extra arguments.
+    def _generate_arguments(self):
+        return ''
+
+    def go(self):
+        super(pyraprunner, self).go()
+
+        ms_names = self.inputs['args']
+
+        tc, mec = self._get_cluster()
+        # The node-runner is pushed under a per-subclass name so the exec
+        # string below can refer to it unambiguously.
+        function_name = self.__class__.__name__ + "_remote"
+        mec.push_function(
+            {
+                function_name: run_node,
+                "build_available_list": utilities.build_available_list,
+                "clear_available_list": utilities.clear_available_list
+            }
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        # Record which engines hold which inputs; used for dependency
+        # resolution when scheduling the tasks below.
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (
+            self.inputs['job_name'], self.__class__.__name__
+        )
+        mec.push(dict(filenames=ms_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            self.logger.debug("Logging to %s:%d" % (loghost, logport))
+            tasks = []
+            outnames = []
+            for ms_name in ms_names:
+                outnames.append(ms_name + self.inputs['suffix'])
+                # Source text executed on the engine; _generate_arguments()
+                # output is interpolated as literal Python source.
+                execute_string = "result = %s(ms_name, \"%s\", %s)" % (
+                    function_name, outnames[-1], self._generate_arguments()
+                )
+                task = LOFARTask(
+                    execute_string,
+                    push=dict(
+                        recipename=self.name,
+                        # NOTE(review): presumably __file__ is provided by
+                        # the LOFARrecipe base class -- confirm; instances
+                        # do not normally carry this attribute.
+                        nodepath=os.path.dirname(self.__file__.replace('master', 'nodes')),
+                        ms_name=ms_name,
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result",
+                    depend=utilities.check_for_path,
+                    dependargs=(ms_name, available_list)
+                )
+                self.logger.info("Scheduling processing of %s" % (ms_name,))
+                tasks.append(tc.run(task))
+            self.logger.info(
+                "Waiting for all %s tasks to complete" %
+                (self.__class__.__name__)
+            )
+            tc.barrier(tasks)
+
+
+        # Surface remote failures in the master log and reflect them in
+        # the exit status.
+        failure = False
+        for task in tasks:
+            res = tc.get_task_result(task)
+            if res.failure:
+                self.logger.warn("Task %s failed" % (task))
+                self.logger.warn(res)
+                self.logger.warn(res.failure.getTraceback())
+                failure = True
+        if failure:
+            return 1
+
+        mec.execute("clear_available_list(\"%s\")" % (available_list,))
+        self.outputs['data'] = outnames
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(pyraprunner().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/qcheck.py b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck.py
new file mode 100644
index 00000000000..567f6873875
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck.py
@@ -0,0 +1,91 @@
+from __future__ import with_statement
+import sys, os
+
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.clusterlogger import clusterlogger
+
+def run_qcheck(infile, pluginlist, outputdir):
+    # Executed on the compute engines, not the master: ``loghost`` and
+    # ``logport`` are globals pushed into the engine namespace by the
+    # master (via LOFARTask's push dict) before this function is called.
+    from lofarrecipe.nodes.qcheck import qcheck_node
+    return qcheck_node(loghost=loghost, logport=logport).run(
+        infile,
+        pluginlist,
+        outputdir
+    )
+
+class qcheck(LOFARrecipe):
+    def __init__(self):
+        super(qcheck, self).__init__()
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+        self.optionparser.add_option(
+            '--plugins',
+            dest="plugins",
+            help="[Expert use] Quality check plugins"
+        )
+
+    def go(self):
+        super(qcheck, self).go()
+        self.logger.info("Quality check system starting")
+
+        self.outputs['data'] = [
+            os.path.join(
+                self.inputs['working_directory'], self.inputs['job_name'], filename
+            )
+            for filename in self.inputs['args']
+        ]
+        plugins = utilities.string_to_list(self.inputs['plugins'])
+        self.logger.info("Using plugins: %s" % (str(plugins)))
+
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                run_qcheck=run_qcheck,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (self.inputs['job_name'], "qcheck")
+        mec.push(dict(filenames=self.outputs['data']))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+        clusterdesc = self.config.get('cluster', 'clusterdesc')
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            self.logger.debug("Logging to %s:%d" % (loghost, logport))
+            tasks = []
+            for image_name in self.outputs['data']:
+                task = LOFARTask(
+                    "result = run_qcheck(infile, pluginlist, outputdir)",
+                    push=dict(
+                        infile=image_name,
+                        pluginlist=plugins,
+                        outputdir=self.config.get('layout', 'results_directory'),
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result",
+                    depend=utilities.check_for_path,
+                    dependargs=(image_name, available_list)
+                )
+                self.logger.info("Scheduling processing of %s" % (image_name,))
+                tasks.append(tc.run(task))
+            self.logger.info("Waiting for all qcheck tasks to complete")
+            tc.barrier(tasks)
+
+            for task in tasks:
+                tc.get_task_result(task)
+
+            mec.execute("clear_available_list(\"%s\")" % (available_list,))
+            self.logger.info("qcheck done")
+
+if __name__ == '__main__':
+    sys.exit(eval(os.path.splitext(os.path.basename(sys.argv[0]))[0])().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/README b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/README
new file mode 100644
index 00000000000..e7152d38e65
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/README
@@ -0,0 +1,4 @@
+Simple image quality check, designed for use in (deprecated) pipeline qcheck
+recipe.
+
+Original by Evert Rol.
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/__init__.py b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/qcheck.py b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/qcheck.py
new file mode 100644
index 00000000000..eb99a3cf642
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/qcheck/qcheck.py
@@ -0,0 +1,200 @@
+from pyrap import tables as ptables
+import numpy
+import logging
+import sys
+import os.path
+import pylab
+from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+from matplotlib.figure import Figure
+
+
+DEFAULTS = {}
+DEFAULTS['fieldnames'] = ['logtable', 'coords', 'units']
+DEFAULTS['colnames'] = ['map']
+IMAGEDIMS = {'y': 3, 'x': 4, 'polarization': 2, 'channel': 1}
+
+"""
+
+TODO:
+
+  - check for NaNs
+
+"""
+
+
+
+def check_basics(image, loggers):
+    """Sanity-check the raw image array.
+
+    Currently only asserts that the image has exactly 5 dimensions, as
+    assumed by IMAGEDIMS; raises AssertionError otherwise.
+    """
+    # NOTE(review): 'logger' is bound but never used beyond this point --
+    # presumably reserved for future checks (e.g. the NaN check in TODO).
+    logger = loggers["main"]
+    assert image.ndim == 5, "image does not have 5 dimensions"
+
+
+def clip_image(image, niter=0, clip=(-3, 3)):
+    """Iteratively sigma-clip a (flattened) image array.
+
+    Each iteration keeps only pixels within (mean + clip[0]*sigma,
+    mean + clip[1]*sigma) and recurses with niter-1; when niter <= 0 the
+    input array is returned unchanged.
+    """
+    if niter > 0:
+        mean = image.mean()
+        sigma = numpy.sqrt(image.var())
+        return clip_image(image[(image > mean+clip[0]*sigma)&(image < mean+clip[1]*sigma)],
+                   niter=niter-1, clip=clip)
+    return image
+
+
+def check_stats(image, filename, loggers, plot=False):
+    """Compute and log statistics for each (polarisation, channel) plane.
+
+    For every plane: log min/max/mean/median/stddev, estimate the mode from
+    a histogram restricted to well-populated bins, then recompute the same
+    statistics on a sigma-clipped ("background") version of the data.
+    Warnings go to loggers["warn"] when mode, background mean and background
+    median disagree by more than NSIGMA background stddevs, or when the
+    background extrema look suspicious.  If ``plot`` is truthy it is used as
+    the output filename for a histogram figure (Agg backend).
+    """
+    BINCOUNTS = 1000
+    MINNBINS = 20
+    CLIPCOUNTS = 0.001
+    NSIGMA = 0.1
+    NMAXSIGMA = 10.0
+    POLAXIS = ['I', 'Q', 'U', 'V']  # NOTE(review): currently unused
+    if plot:
+        figure = Figure()
+        canvas = FigureCanvas(figure)
+    npols = image.shape[IMAGEDIMS['polarization']]
+    nchannels = image.shape[IMAGEDIMS['channel']]
+    # Arrange one subplot per polarisation on a near-square grid.
+    nsubplotrows = nsubplotcols = int(numpy.sqrt(npols))
+    if nsubplotcols * nsubplotrows < npols:
+        nsubplotrows += 1
+    for npol in range(npols):
+        if plot:
+            axes = figure.add_subplot(nsubplotrows, nsubplotcols, npol+1)
+        for nchan in range(nchannels):
+            twodimage = image[0, nchan, npol, :, :]
+            flatimage = twodimage.flatten()
+            mean = flatimage.mean()
+            variance = flatimage.var()
+            stddev = numpy.sqrt(variance)
+            median = numpy.median(flatimage)
+            imgmin, imgmax = min(flatimage), max(flatimage)
+            loggers["main"].info("%d.%d.minimum = %.4e" % (npol+1, nchan+1,
+                                                        imgmin))
+            loggers["main"].info("%d.%d.maximum = %.4e" % (npol+1, nchan+1, 
+                                                        imgmax))
+            loggers["main"].info("%d.%d.mean = %.4e" % (npol+1, nchan+1, mean))
+            loggers["main"].info("%d.%d.median = %.4e" % (
+                    npol+1, nchan+1, median))
+            loggers["main"].info("%d.%d.Standard deviation = %.4e" % (
+                    npol+1, nchan+1,stddev))
+            # Keep only the bins with a minimum number of counts,
+            # so we can 'fit' a Gaussian distribution to calculate the mode
+            nbins = (int(flatimage.size/BINCOUNTS)
+                     if flatimage.size > 1e5 else MINNBINS)
+            counts, bins = numpy.histogram(flatimage, nbins)
+            clipped = {}
+            clipped['indices'] = counts > max(counts)*CLIPCOUNTS
+            clipped['counts'] = counts[clipped['indices']]
+            clipped['bins'] = bins[clipped['indices']]
+            if plot:
+                # Plot the *rejected* (sparsely populated) bins.
+                axes.plot(bins[numpy.invert(clipped['indices'])],
+                          counts[numpy.invert(clipped['indices'])], 'ob')
+            clippedimage = flatimage[(flatimage >= min(clipped['bins'])) &
+                                     (flatimage <= max(clipped['bins']))]
+            nbins = (int(clippedimage.size/BINCOUNTS)
+                     if clippedimage.size > 1e5 else MINNBINS)
+            counts, bins = numpy.histogram(clippedimage, nbins)
+            bins = (bins[0:-1] + bins[1:])/2.  # new behaviour in numpy 1.2
+            # Histogram first moment = mode estimate; second moment about the
+            # mode = width of the (assumed Gaussian) distribution.
+            mode = sum(bins * counts)/sum(counts)
+            width = (numpy.sqrt(abs(sum( (bins - mode)**2 * counts) /
+                                    sum(counts))))
+            loggers["main"].info("%d.%d.mode = %.4e" % (npol+1, nchan+1, mode))
+            # Also calculate the statistics for a clipped image, ie
+            # only the background (no sources)
+            clippedimage = clip_image(flatimage, niter=3, clip=(-1, 1))
+            mean = clippedimage.mean()
+            variance = clippedimage.var()
+            stddev = numpy.sqrt(variance)
+            median = numpy.median(clippedimage)
+            imgmin, imgmax = min(clippedimage), max(clippedimage)
+            loggers["main"].info("%d.%d.background-minimum = %.4e" % (
+                    npol+1, nchan+1, imgmin))
+            loggers["main"].info("%d.%d.background-maximum = %.4e" % (
+                    npol+1, nchan+1, imgmax))
+            loggers["main"].info("%d.%d.background-mean = %.4e" % (
+                    npol+1, nchan+1, mean))
+            loggers["main"].info("%d.%d.background-median = %.4e" % (
+                    npol+1, nchan+1, median))
+            loggers["main"].info("%d.%d.background-stddev = %.4e" % (
+                    npol+1, nchan+1, stddev))
+            # Verify that mode, background mean & background median are within
+            # a few background sigma from each other:
+            if abs(mean-median) > NSIGMA*stddev:
+                loggers["warn"].warn(
+                        " Background mean and background median are more "
+                        "than %.1f standard deviations different" % NSIGMA)
+            if abs(mean-mode) > NSIGMA*stddev:
+                loggers["warn"].warn(
+                    " Mode and background mean are more than %.1f "
+                    "standard deviations different" % NSIGMA)
+            if abs(mode-median) > NSIGMA*stddev:
+                loggers["warn"].warn(
+                    " Mode and background median are more than %.1f "
+                    "standard deviations different" % NSIGMA)
+            if imgmax < 0:
+                loggers["warn"].warn(" Background maximum is negative")
+            if imgmin > 0:
+                loggers["warn"].warn(" Background minimum is positive")
+            # NOTE(review): the two warnings below pass no argument for the
+            # "%.1f" placeholder, so the literal "%.1f" is logged -- looks
+            # like a missing "% NMAXSIGMA" on each message.
+            if imgmax > NMAXSIGMA*stddev:
+                loggers["warn"].warn(
+                    " Background maximum is more than %.1f the "
+                    "standard deviation")
+            if imgmin < -NMAXSIGMA*stddev:
+                loggers["warn"].warn(
+                    " Background minimum is less than %.1f the "
+                    "standard deviation")
+    
+            if plot:
+                axes.plot(bins, counts, 'ob')
+                # Overlay the fitted Gaussian (amplitude pinned to max count).
+                axes.plot(bins, max(counts) * numpy.exp(-(bins-mode)**2 /
+                                                         (2 * width**2)), '-g')
+    if plot:
+        canvas.print_figure(plot)
+
+    
+def setup_logging(logfile):
+    """Create and return the 'main' (file) and 'warn' (console) loggers.
+
+    'main' writes plain statistics lines to ``logfile`` (truncating any
+    existing file); 'warn' writes "LEVEL: message" lines to stderr.
+    """
+    loggers = {'main': logging.getLogger('main'),
+               'warn': logging.getLogger('warn')}
+    handlers = {'main': logging.FileHandler(logfile, mode="w"),
+                'warn': logging.StreamHandler()}
+    formatters = {'main': logging.Formatter("%(message)s"),
+                  'warn': logging.Formatter("%(levelname)s: %(message)s")}
+    handlers['main'].setFormatter(formatters['main'])
+    handlers['warn'].setFormatter(formatters['warn'])
+    loggers['main'].addHandler(handlers['main'])
+    loggers['warn'].addHandler(handlers['warn'])
+    loggers['main'].setLevel(logging.INFO)
+    loggers['warn'].setLevel(logging.WARNING) # warnings only
+    return loggers
+
+    
+def run(filename, logfile=None, plot=False, outputdir=False, loggers=False):
+    """Open a CASA image table and run the basic and statistics checks.
+
+    filename  -- path of the image table to open with pyrap.
+    logfile   -- statistics log path; defaults to <filename>_stats.log.
+    plot      -- histogram output filename; any non-string value (including
+                 False) is replaced by <filename>_histo.pdf, so a histogram
+                 is always produced.
+    outputdir -- if set, the plot is written into this directory instead.
+    loggers   -- pre-built logger dict; created via setup_logging() if absent.
+    """
+    if not logfile:
+        logfile = filename + "_stats.log"
+    # NOTE(review): 'basestring' makes this Python 2 only.
+    if not isinstance(plot, basestring):
+        plot = filename + "_histo.pdf"
+    if outputdir:
+        plot = os.path.join(outputdir, os.path.basename(plot))
+    if not loggers:
+        loggers = setup_logging(logfile)
+    try:
+        table = ptables.table(filename, ack=False)
+    except RuntimeError:  # pyrap is just a wrapper around C++, so no proper exceptions are thrown
+        loggers['main'].error("Error: image %s not properly opened" % filename)
+        return
+    names = {}
+    # Verify the table provides the expected columns/fields before using them.
+    for part in ('col', 'field'):
+        partname = part + 'names'
+        names[part] = table.__getattribute__(partname)()
+        for defaultname in DEFAULTS[partname]:
+            if defaultname not in names[part]:
+                # use 'warn' logger instead? 
+                # But script can't continue with this fault,
+                # so should quit
+                raise KeyError("%s not in %snames" % (defaultname, part))
+    imgcol = table.col('map')
+    image = imgcol.getcol()
+    check_basics(image, loggers)
+    check_stats(image, filename, loggers, plot=plot)
+
+
+if __name__ == '__main__':
+    # Usage: qcheck.py <image>; exits with status 1 on incorrect usage.
+    args = sys.argv[1:]
+    if len(args) != 1:
+        sys.exit(1)
+    run(args[0], plot=True)
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/sextractor.py b/CEP/Pipeline/recipes/sip/master/deprecated/sextractor.py
new file mode 100644
index 00000000000..e4f566c383a
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/sextractor.py
@@ -0,0 +1,92 @@
+import sys, os
+
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.lofaringredient import LOFARinput, LOFARoutput
+from lofarpipe.support.ipython import LOFARTask
+import lofarpipe.support.utilities as utilities
+
+from tkp_lib.dataset import DataSet
+
+def sextract(image, dataset):
+    # Run on engine to source extract
+    # NOTE(review): imports from 'lofarrecipe.nodes' while the rest of this
+    # file imports from 'lofarpipe' -- confirm the package name is correct.
+    from lofarrecipe.nodes.sextractor import sextract
+    return sextract(image, dataset)
+
+class sextractor(LOFARrecipe):
+    """Distributed source extraction: one engine task per available image."""
+    def __init__(self):
+        super(sextractor, self).__init__()
+        self.optionparser.add_option(
+            '-w', '--working-directory',
+            dest="working_directory",
+            help="Working directory used on compute nodes"
+        )
+
+    def go(self):
+        """Schedule sextract() on the engines for every available image."""
+        self.logger.info("Starting source extraction run")
+        super(sextractor, self).go()
+
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                sextract=sextract,
+                build_available_list=utilities.build_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        # We read the GVDS file to find the names of all the data files we're
+        # going to process, then push this list out to the engines so they can
+        # let us know which we have available
+        image_names = [
+            "%s/%s" % (self._input_or_default('working_directory'), image)
+            for image in self.inputs['args']
+        ]
+
+        # Construct list of available files on engines
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (self.inputs['job_name'], "sextractor")
+        mec.push(dict(filenames=image_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+
+        tasks = []
+
+        # One dataset groups all extractions belonging to this pipeline job.
+        dataset = DataSet(self.inputs['job_name'])
+
+        for image_name in image_names:
+            task = LOFARTask(
+                "result = sextract(image_name, dataset)",
+                push=dict(
+                    image_name=image_name,
+                    dataset=dataset,
+                ),
+                pull="result",
+                # Only run on engines that reported having this image locally.
+                depend=utilities.check_for_path,
+                dependargs=(image_name, available_list)
+            )
+            self.logger.info("Scheduling processing of %s" % (image_name,))
+            tasks.append(tc.run(task))
+        self.logger.info("Waiting for all source extraction tasks to complete")
+        tc.barrier(tasks)
+        for task in tasks:
+            ##### Print failing tasks?
+            ##### Abort if all tasks failed?
+            res = tc.get_task_result(task)
+            self.logger.info(res)
+            if res.failure:
+                print res.failure
+
+        mec.push_function(
+            dict(
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        # Save space on engines by clearing out old file lists
+        mec.execute("clear_available_list(\"%s\")" % (available_list,))
+
+        self.logger.info("Source extraction done")
+
+if __name__ == '__main__':
+    # Standalone invocation: run the recipe and use its return code as exit status.
+    sys.exit(sextractor().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/simple_se.py b/CEP/Pipeline/recipes/sip/master/deprecated/simple_se.py
new file mode 100644
index 00000000000..57fdec55ff2
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/simple_se.py
@@ -0,0 +1,122 @@
+from __future__ import with_statement
+from contextlib import closing
+
+# Local helpers
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+import lofarpipe.support.utilities as utilities
+import os, os.path, glob, subprocess, sys, numpy, shutil, errno, re
+
+# SE tools
+from tkp_lib.dataset   import DataSet
+from tkp_lib.image     import ImageData
+from tkp_lib.accessors import FitsFile
+from tkp_lib.dbplots   import plotAssocCloudByXSource
+from tkp_lib.dbregion  import createRegionByImage
+import tkp_lib.database as database
+
+associations = """
+SELECT
+    x1.xtrsrcid, x1.ra, x1.decl, x1.i_peak, x1.i_int, c.catname, c1.ra, c1.decl, a1.assoc_distance_arcsec
+FROM
+    extractedsources x1
+LEFT OUTER JOIN
+    assoccatsources a1 ON x1.xtrsrcid = a1.xtrsrc_id
+LEFT OUTER JOIN
+    catalogedsources c1 ON a1.assoc_catsrc_id = c1.catsrcid
+LEFT OUTER JOIN
+    catalogs c ON c.catid = c1.cat_id
+WHERE
+    image_id = %d
+ORDER BY
+    x1.I_Peak;
+"""
+
+class simple_se(LOFARrecipe):
+    """
+    Run source extraction on FITS images on the front-end.
+    Dump ds9 region files of found sources & WENSS sources.
+    Dump text file of assocations with catalogue sources.
+
+    Designed to be run e.g. on an averaged image at the end of a pipeline run.
+    """
+
+    def __init__(self):
+        # Options for the three output filenames.
+        # NOTE(review): 'wenss_regions' is declared here but never used in
+        # go() -- confirm whether WENSS region dumping was dropped.
+        super(simple_se, self).__init__()
+        self.optionparser.add_option(
+            '--detected-regions',
+            dest="detected_regions",
+            help="Filename for region file of local detections",
+            default="detected.reg"
+        )
+        self.optionparser.add_option(
+            '--wenss-regions',
+            dest="wenss_regions",
+            help="Filename for region file of WENSS detections",
+            default="wenss.reg"
+        )
+        self.optionparser.add_option(
+            '--associations',
+            dest="associations",
+            help="Filename for association list",
+            default="association.list"
+        )
+
+    def go(self):
+        """Extract sources from each input FITS image, save them to the
+        database, dump region/association files next to each image, and
+        produce an association plot per unique extracted source."""
+        self.logger.info("Starting source identification")
+        super(simple_se, self).go()
+
+        ds_name = "%s-%s" % (self.inputs['job_name'], self.inputs['start_time'])
+        self.logger.info("Creating dataset %s" % (ds_name,))
+        dataset = DataSet(ds_name)
+        src_ids = []
+        for file in self.inputs['args']:
+            self.logger.info("Processing %s" % (file,))
+            image = ImageData(FitsFile(file), dataset=dataset)
+            self.logger.info("Running source finder")
+            sr = image.sextract(det=5, anl=2)
+            with closing(database.connection()) as con:
+                self.logger.debug("Saving results to database")
+                sr.savetoDB(con)
+                self.logger.info("Generating source associations")
+                database.assocXSrc2XSrc(image.id, con)
+                database.assocXSrc2Cat(image.id, con)
+                self.logger.info("Querying for region file")
+                # NOTE(review): image.id is indexed here (image.id[0]) but
+                # used directly in the "%d" query below -- presumably a
+                # 1-element sequence; verify against tkp_lib.ImageData.
+                createRegionByImage(image.id[0], con,
+                    os.path.join(
+                        os.path.dirname(file),
+                        self.inputs['detected_regions']
+                    ), logger=self.logger
+                )
+                with closing(con.cursor()) as cur:
+                    self.logger.info("Querying for association list")
+                    my_query = associations % (image.id)
+                    self.logger.debug(my_query)
+                    cur.execute(my_query)
+                    with open(
+                        os.path.join(
+                            os.path.dirname(file),
+                            self.inputs['associations']
+                        ),
+                        'w'
+                    ) as output_file:
+                        for line in cur.fetchall():
+                            output_file.write(str(line) + '\n')
+                            src_ids.append(line[0])
+
+        # Diagnostic plot for each extracted source
+        self.logger.info("Generating associations plots")
+        # Use set to uniqify the list of src_ids
+        src_ids = list(set(src_ids))
+        with closing(database.connection()) as con:
+            for src_id in src_ids:
+                self.logger.debug("Generating associations plot for src %d" % src_id)
+                plotAssocCloudByXSource(
+                    src_id, con, os.path.dirname(self.inputs['args'][0])
+                )
+
+            self.outputs['data'] = None
+        return 0
+
+if __name__ == '__main__':
+    # Resolve the recipe class from the script filename (eval-based dispatch).
+    sys.exit(eval(os.path.splitext(os.path.basename(sys.argv[0]))[0])().main())
+
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/trimmer.py b/CEP/Pipeline/recipes/sip/master/deprecated/trimmer.py
new file mode 100644
index 00000000000..71b5b16e8c0
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/trimmer.py
@@ -0,0 +1,27 @@
+import sys
+from lofarpipe.support.pyraprunner import pyraprunner
+
+class trimmer(pyraprunner):
+    """Trim a number of seconds from the start and/or end of each MS.
+
+    Thin wrapper around pyraprunner: adds the two trim options and
+    serialises them into the argument string for the node-side script.
+    """
+    def __init__(self):
+        super(trimmer, self).__init__()
+        self.optionparser.add_option(
+            '--start-seconds',
+            dest="start_seconds",
+            type="float",
+            help="Seconds to trim from start of data"
+        )
+        self.optionparser.add_option(
+            '--end-seconds',
+            dest="end_seconds",
+            type="float",
+            help="Seconds to trim from end of data"
+        )
+
+    def _generate_arguments(self):
+        """Return the node-script argument string: "<start>, <end>" seconds."""
+        return "%f, %f" %  (
+            float(self.inputs['start_seconds']),
+            float(self.inputs['end_seconds'])
+        )
+
+if __name__ == '__main__':
+    # Standalone invocation: run the recipe and use its return code as exit status.
+    sys.exit(trimmer().main())
diff --git a/CEP/Pipeline/recipes/sip/master/deprecated/vdsmaker.py b/CEP/Pipeline/recipes/sip/master/deprecated/vdsmaker.py
new file mode 100644
index 00000000000..0df58c549ff
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/deprecated/vdsmaker.py
@@ -0,0 +1,175 @@
+from __future__ import with_statement
+import sys, os, tempfile, errno
+import subprocess
+
+import lofarpipe.support.utilities as utilities
+from lofarpipe.support.ipython import LOFARTask
+from lofarpipe.support.lofarrecipe import LOFARrecipe
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.lofarnode import run_node
+
+class vdsmaker(LOFARrecipe):
+    """Create a VDS file per MeasurementSet on the engines, then combine
+    them into a single GDS file with combinevds on the head node.
+
+    Outputs: 'gvds' -- path of the combined GDS file.
+    """
+    def __init__(self):
+        super(vdsmaker, self).__init__()
+        self.optionparser.add_option(
+            '-g', '--gvds',
+            dest="gvds",
+            help="Output file name"
+        )
+        self.optionparser.add_option(
+            '--directory',
+            dest="directory",
+            help="Directory for output files"
+        )
+        self.optionparser.add_option(
+            '--makevds',
+            dest="makevds",
+            help="makevds executable",
+            default="/opt/LofIm/daily/lofar/bin/makevds"
+        )
+        self.optionparser.add_option(
+            '--combinevds',
+            dest="combinevds",
+            help="combinevds executable",
+            default="/opt/LofIm/daily/lofar/bin/combinevds"
+        )
+        self.optionparser.add_option(
+            '--unlink',
+            help="Unlink VDS files after combining",
+            default="True"
+        )
+
+    def go(self):
+        """Run makevds per MS on the engines, then combinevds locally."""
+        super(vdsmaker, self).go()
+
+        ms_names = self.inputs['args']
+        # Option values arrive as strings; only the literal "False" disables
+        # unlinking -- any other string (including "True") stays truthy.
+        if self.inputs['unlink'] == "False":
+            self.inputs['unlink'] = False
+
+        # Create the output directory; tolerate it already existing.
+        try:
+            os.makedirs(self.inputs['directory'])
+        except OSError, failure:
+            if failure.errno != errno.EEXIST:
+                raise
+
+        tc, mec = self._get_cluster()
+        mec.push_function(
+            dict(
+                make_vds=run_node,
+                build_available_list=utilities.build_available_list,
+                clear_available_list=utilities.clear_available_list
+            )
+        )
+        self.logger.info("Pushed functions to cluster")
+
+        # Build VDS files for each of the newly created MeasurementSets
+        self.logger.info("Building list of data available on engines")
+        available_list = "%s%s" % (self.inputs['job_name'], "dppp-vds")
+        mec.push(dict(filenames=ms_names))
+        mec.execute(
+            "build_available_list(\"%s\")" % (available_list,)
+        )
+        clusterdesc = self.config.get('cluster', 'clusterdesc')
+
+        with clusterlogger(self.logger) as (loghost, logport):
+            self.logger.debug("Logging to %s:%d" % (loghost, logport))
+            tasks = []
+            vdsnames = []
+            for ms_name in ms_names:
+                vdsnames.append(
+                    "%s/%s.vds" % (self.inputs['directory'], os.path.basename(ms_name))
+                )
+                # NOTE(review): 'self.__file__' -- presumably set by the
+                # recipe framework to this file's path; confirm.
+                task = LOFARTask(
+                    "result = make_vds(ms_name, clusterdesc, vds_name, executable)",
+                    push=dict(
+                        recipename=self.name,
+                        nodepath=os.path.dirname(self.__file__.replace('master', 'nodes')),
+                        ms_name=ms_name,
+                        vds_name=vdsnames[-1],
+                        clusterdesc=clusterdesc,
+                        executable=self.inputs['makevds'],
+                        loghost=loghost,
+                        logport=logport
+                    ),
+                    pull="result",
+                    depend=utilities.check_for_path,
+                    dependargs=(ms_name, available_list)
+                )
+                self.logger.info("Scheduling processing of %s" % (ms_name,))
+                tasks.append(tc.run(task))
+            self.logger.info("Waiting for all makevds tasks to complete")
+            tc.barrier(tasks)
+
+        # Save space on engines by clearing out old file lists
+        mec.execute("clear_available_list(\"%s\")" % (available_list,))
+        failure = False
+        for task in tasks:
+            res = tc.get_task_result(task)
+            if res.failure:
+                self.logger.warn("Task %s failed" % (task))
+                self.logger.warn(res)
+                self.logger.warn(res.failure.getTraceback())
+                failure = True
+        if failure:
+            return 1
+
+        # Combine VDS files to produce GDS
+        self.logger.info("Combining VDS files")
+        executable = self.inputs['combinevds']
+        gvds_out = self.inputs['gvds']
+        try:
+            command = [executable, gvds_out] + vdsnames
+            combineproc = subprocess.Popen(
+                command,
+                close_fds=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE
+            )
+            # NOTE(review): 'sour' is presumably a typo for 'sout'; stdout is
+            # captured but never used.
+            sour, serr = combineproc.communicate()
+            if combineproc.returncode != 0:
+                raise subprocess.CalledProcessError(combineproc.returncode, command)
+            self.outputs['gvds'] = gvds_out
+        except subprocess.CalledProcessError, cpe:
+            self.logger.exception("combinevds failed with status %d: %s" % (cpe.returncode, serr))
+            failure = True
+        except OSError, failure:
+            # Diagnostic branch: distinguish fd exhaustion / OOM from other
+            # OS errors before flagging the run as failed.
+            self.logger.warn("Caught OSError")
+            try:
+                if failure.errno == errno.EMFILE:
+                    count = 0
+                    for x in xrange(0, os.sysconf('SC_OPEN_MAX')):
+                        try:
+                            # NOTE(review): missing '%' between the format
+                            # string and its args -- this would raise
+                            # TypeError (swallowed by the bare except below).
+                            self.logger.debug("open file %d: %s" (x, str(os.fstat(x))))
+                            count += 1
+                        except:
+                            pass
+                    self.logger.info("Had %d open files" % (count,))
+                elif failure.errno == errno.ENOMEM:
+                    self.logger.info("Failed to run: %s" % str(command))
+                    import operator
+                    total = reduce(operator.add, (len(x) for x in command))
+                    self.logger.debug("Num args: %d, num characters: %d" % (len(command), total))
+                    try:
+                        p = subprocess.Popen(['free'], stdout=subprocess.PIPE)
+                        sout, serr = p.communicate()
+                        # NOTE(review): Logger has no 'free' method --
+                        # presumably .info(sout) was intended.
+                        self.logger.free(sout)
+                    except:
+                        self.logger.warn("Failed to spawn free")
+                    self.logger.exception(failure)
+                else:
+                    self.logger.exception(failure)
+            finally:
+                failure = True
+        finally:
+            # Runs on success and failure alike: optionally remove the
+            # per-MS VDS files now that (if successful) they are combined.
+            if self.inputs["unlink"]:
+                self.logger.debug("Unlinking temporary files")
+                for file in vdsnames:
+                    os.unlink(file)
+            self.logger.info("vdsmaker done")
+        if failure:
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    # Standalone invocation: run the recipe and use its return code as exit status.
+    sys.exit(vdsmaker().main())
diff --git a/CEP/Pipeline/recipes/sip/master/flag_baseline.py b/CEP/Pipeline/recipes/sip/master/flag_baseline.py
new file mode 100644
index 00000000000..7fc16ab9c48
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/flag_baseline.py
@@ -0,0 +1,87 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                        Baseline flagger recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from tempfile import mkstemp
+from cPickle import dump
+import os
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.remotecommand import ComputeJob
+
+class flag_baseline(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Accept a list of baselines (in the format used by NDPPP logging).
+
+    Flag them in all MeasurementSets.
+    """
+    inputs = {
+        'baselines': ingredient.ListField(
+            '--baselines',
+            help="Baselines (in NDPPP format, eg 1&1)"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField()
+    }
+
+    def go(self):
+        """Pickle the baseline list, fan a node job out per MS in the input
+        mapfile, and return the (unchanged) mapfile path as output."""
+        self.logger.info("Starting flag_baseline run")
+        super(flag_baseline, self).go()
+
+        #       Serialise list of baselines to disk for compute nodes to pick up
+        # ----------------------------------------------------------------------
+        fd, baseline_filename = mkstemp(
+            dir=self.config.get("layout", "job_directory")
+        )
+        baseline_file = os.fdopen(fd, "w")
+        dump(self.inputs["baselines"], baseline_file)
+        baseline_file.close()
+
+        #                 try block ensures baseline_filename is always unlinked
+        # ----------------------------------------------------------------------
+        try:
+            #                       Load file <-> compute node mapping from disk
+            # ------------------------------------------------------------------
+            self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+            data = load_data_map(self.inputs['args'][0])
+
+            command = "python %s" % (self.__file__.replace('master', 'nodes'))
+            jobs = []
+            for host, ms in data:
+                jobs.append(
+                    ComputeJob(
+                        host, command,
+                        arguments=[
+                            ms,
+                            baseline_filename
+                        ]
+                    )
+                )
+            self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        finally:
+            os.unlink(baseline_filename)
+
+        if self.error.isSet():
+            return 1
+        else:
+            # The data layout is unchanged, so the input mapfile is the output.
+            self.outputs['mapfile'] = self.inputs['args'][0]
+            return 0
+
+if __name__ == '__main__':
+    # NOTE(review): 'sys' is not imported anywhere in this file, so running
+    # it as a script would raise NameError -- confirm and add 'import sys'.
+    sys.exit(flag_baseline().main())
diff --git a/CEP/Pipeline/recipes/sip/master/make_flaggable.py b/CEP/Pipeline/recipes/sip/master/make_flaggable.py
new file mode 100644
index 00000000000..ecc1d7b9b58
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/make_flaggable.py
@@ -0,0 +1,68 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                Make an MS flaggable; wraps makeFLAGwritable (but doesn't fork)
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import os
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.remotecommand import ComputeJob
+
+class make_flaggable(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Update the storage manager on an MS to make the flag column writable.
+    """
+    inputs = {
+        'makeflagwritable': ingredient.ExecField(
+            '--makeFLAGwritable',
+            help="Path to makeFLAGwritable script",
+            default='/opt/LofIm/daily/lofar/bin/makeFLAGwritable'
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField()
+    }
+
+    def go(self):
+        """Run makeFLAGwritable on every MS in the input mapfile via one
+        node job per MS; return the (unchanged) mapfile path as output."""
+        self.logger.info("Starting make_flaggable run")
+        super(make_flaggable, self).go()
+
+        #                       Load file <-> compute node mapping from disk
+        # ------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host, ms in data:
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        ms,
+                        self.inputs['makeflagwritable']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        if self.error.isSet():
+            return 1
+        else:
+            # The data layout is unchanged, so the input mapfile is the output.
+            self.outputs['mapfile'] = self.inputs['args'][0]
+            return 0
+
+if __name__ == '__main__':
+    # NOTE(review): 'sys' is not imported anywhere in this file, so running
+    # it as a script would raise NameError -- confirm and add 'import sys'.
+    sys.exit(make_flaggable().main())
diff --git a/CEP/Pipeline/recipes/sip/master/new_dppp.py b/CEP/Pipeline/recipes/sip/master/new_dppp.py
new file mode 100644
index 00000000000..0fb9148975e
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/new_dppp.py
@@ -0,0 +1,172 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                         New DPPP recipe: fixed node allocation
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+from itertools import cycle
+from contextlib import nested
+from collections import defaultdict
+
+import collections
+import sys
+import os
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.parset import Parset
+
+class new_dppp(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Runs DPPP (either ``NDPPP`` or -- in the unlikely event it's required --
+    ``IDPPP``) on a number of MeasurementSets. This is used for compressing
+    and/or flagging data
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            help="The full path to the relevant DPPP executable"
+        ),
+        'initscript': ingredient.FileField(
+            '--initscript',
+            help="The full path to an (Bourne) shell script which will intialise the environment (ie, ``lofarinit.sh``)"
+        ),
+        'parset': ingredient.FileField(
+            '-p', '--parset',
+            help="The full path to a DPPP configuration parset. The ``msin`` and ``msout`` keys will be added by this recipe"
+        ),
+        'suffix': ingredient.StringField(
+            '--suffix',
+            default=".dppp",
+            help="Added to the input filename to generate the output filename"
+        ),
+        'working_directory': ingredient.StringField(
+            '-w', '--working-directory',
+            help="Working directory used on output nodes. Results will be written here"
+        ),
+        # NB times are read from vds file as string
+        'data_start_time': ingredient.StringField(
+            '--data-start-time',
+            default="None",
+            help="Start time to be passed to DPPP; used to pad data"
+        ),
+        'data_end_time': ingredient.StringField(
+            '--data-end-time',
+            default="None",
+            help="End time to be passed to DPPP; used to pad data"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            default=8,
+            help="Maximum number of simultaneous processes per output node"
+        ),
+        'nthreads': ingredient.IntField(
+            '--nthreads',
+            default=2,
+            help="Number of threads per (N)DPPP process"
+        ),
+        'mapfile': ingredient.StringField(
+            '--mapfile',
+            help="Filename into which a mapfile describing the output data will be written"
+        ),
+        'clobber': ingredient.BoolField(
+            '--clobber',
+            default=False,
+            help="If ``True``, pre-existing output files will be removed before processing starts. If ``False``, the pipeline will abort if files already exist with the appropriate output filenames"
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField(
+            help="The full path to a mapfile describing the processed data"
+        ),
+        'fullyflagged': ingredient.ListField(
+            help="A list of all baselines which were completely flagged in any of the input MeasurementSets"
+        )
+    }
+
+
+    def go(self):
+        self.logger.info("Starting DPPP run")
+        super(new_dppp, self).go()
+
+        #                Keep track of "Total flagged" messages in the DPPP logs
+        # ----------------------------------------------------------------------
+        # The pattern must be registered *before* the jobs run; matching log
+        # records are collected and harvested after _schedule_jobs returns.
+        self.logger.searchpatterns["fullyflagged"] = "Fully flagged baselines"
+
+        #                            Load file <-> output node mapping from disk
+        # ----------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'])
+        data = load_data_map(self.inputs['args'][0])
+
+
+        #       We can use the same node script as the "old" IPython dppp recipe
+        # ----------------------------------------------------------------------
+        command = "python %s" % (
+            self.__file__.replace('master', 'nodes').replace('new_dppp', 'dppp')
+        )
+        # outnames maps each host to the list of output MS paths produced
+        # there; it feeds both the job arguments and the final output mapfile.
+        # NOTE(review): 'job_name' is not declared in `inputs` above --
+        # presumably it is supplied by the framework (BaseRecipe); confirm.
+        outnames = collections.defaultdict(list)
+        jobs = []
+        for host, ms in data:
+            outnames[host].append(
+                os.path.join(
+                    self.inputs['working_directory'],
+                    self.inputs['job_name'],
+                    os.path.basename(ms.rstrip('/')) + self.inputs['suffix']
+                )
+            )
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        ms,
+                        outnames[host][-1],
+                        self.inputs['parset'],
+                        self.inputs['executable'],
+                        self.inputs['initscript'],
+                        self.inputs['data_start_time'],
+                        self.inputs['data_end_time'],
+                        self.inputs['nthreads'],
+                        self.inputs['clobber']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        #                                  Log number of fully flagged baselines
+        # ----------------------------------------------------------------------
+        matches = self.logger.searchpatterns["fullyflagged"].results
+        self.logger.searchpatterns.clear() # finished searching
+        # NOTE(review): str.strip() removes any of the *characters* in its
+        # argument from both ends, not the literal prefix string; this could
+        # also trim legitimate characters from baseline names -- confirm
+        # against the actual DPPP log message format.
+        stripchars = "".join(set("Fully flagged baselines: "))
+        baselinecounter = defaultdict(lambda: 0)
+        for match in matches:
+            for pair in (
+                pair.strip(stripchars) for pair in match.getMessage().split(";")
+            ):
+                baselinecounter[pair] += 1
+        self.outputs['fullyflagged'] = baselinecounter.keys()
+
+        if self.error.isSet():
+            self.logger.warn("Failed DPPP process detected")
+            return 1
+        else:
+            # Write a mapfile (host -> list of output MSes) describing the
+            # processed data and expose it as this recipe's output.
+            parset = Parset()
+            for host, filenames in outnames.iteritems():
+                parset.addStringVector(host, filenames)
+            parset.writeFile(self.inputs['mapfile'])
+            self.outputs['mapfile'] = self.inputs['mapfile']
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(new_dppp().main())
diff --git a/CEP/Pipeline/recipes/sip/master/new_vdsmaker.py b/CEP/Pipeline/recipes/sip/master/new_vdsmaker.py
new file mode 100644
index 00000000000..3f593a9de6d
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/new_vdsmaker.py
@@ -0,0 +1,140 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                     New vdsmaker recipe: fixed node allocation
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import sys
+import os
+import tempfile
+import errno
+import subprocess
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.pipelinelogging import log_process_output
+
+class new_vdsmaker(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Generate a GVDS file (and, optionally, individual VDS files per subband;
+    see the ``unlink`` input parameter) describing a collection of
+    MeasurementSets.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'gvds': ingredient.StringField(
+            '-g', '--gvds',
+            help="File name for output GVDS file"
+        ),
+        'directory': ingredient.DirectoryField(
+            '--directory',
+            help="Directory for output GVDS file"
+        ),
+        'makevds': ingredient.ExecField(
+            '--makevds',
+            help="Full path to makevds executable"
+        ),
+        'combinevds': ingredient.ExecField(
+            '--combinevds',
+            help="Full path to combinevds executable"
+        ),
+        'unlink': ingredient.BoolField(
+            '--unlink',
+            help="Unlink VDS files after combining",
+            default=True
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'gvds': ingredient.FileField()
+    }
+
+    def go(self):
+        super(new_vdsmaker, self).go()
+
+        #                           Load file <-> compute node mapping from disk
+        # ----------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+        command = "python %s" % (
+            self.__file__.replace('master', 'nodes').replace('new_vdsmaker', 'vdsmaker')
+        )
+        jobs = []
+        vdsnames = []
+        for host, ms in data:
+            vdsnames.append(
+                "%s/%s.vds" % (self.inputs['directory'], os.path.basename(ms.rstrip('/')))
+            )
+            jobs.append(
+                ComputeJob(
+                    host, command,
+                    arguments=[
+                        ms,
+                        self.config.get('cluster', 'clusterdesc'),
+                        vdsnames[-1],
+                        self.inputs['makevds']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        if self.error.isSet():
+            self.logger.warn("Failed vdsmaker process detected")
+            return 1
+
+        # Combine VDS files to produce GDS
+        failure = False
+        self.logger.info("Combining VDS files")
+        executable = self.inputs['combinevds']
+        gvds_out = self.inputs['gvds']
+        try:
+            command = [executable, gvds_out] + vdsnames
+            combineproc = subprocess.Popen(
+                command,
+                close_fds=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE
+            )
+            sout, serr = combineproc.communicate()
+            log_process_output(executable, sout, serr, self.logger)
+            if combineproc.returncode != 0:
+                raise subprocess.CalledProcessError(combineproc.returncode, command)
+            self.outputs['gvds'] = gvds_out
+        except subprocess.CalledProcessError, cpe:
+            self.logger.exception("combinevds failed with status %d: %s" % (cpe.returncode, serr))
+            failure = True
+        except OSError, e:
+            self.logger.error("Failed to spawn combinevds (%s)" % str(e))
+            failure = True
+        finally:
+            if self.inputs["unlink"]:
+                self.logger.debug("Unlinking temporary files")
+                for file in vdsnames:
+                    os.unlink(file)
+            self.logger.info("vdsmaker done")
+        if failure:
+            self.logger.info("Failure was set")
+            return 1
+        elif not self.outputs.complete():
+            self.logger.info("Outputs incomplete")
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(new_vdsmaker().main())
diff --git a/CEP/Pipeline/recipes/sip/master/parmdb.py b/CEP/Pipeline/recipes/sip/master/parmdb.py
new file mode 100644
index 00000000000..bcd8d3aa930
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/parmdb.py
@@ -0,0 +1,115 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                                  parmdb recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import os
+import subprocess
+import shutil
+import tempfile
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.pipelinelogging import log_process_output
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+
+template = """
+create tablename="%s"
+adddef Gain:0:0:Ampl  values=1.0
+adddef Gain:1:1:Ampl  values=1.0
+adddef Gain:0:0:Real  values=1.0
+adddef Gain:1:1:Real  values=1.0
+adddef DirectionalGain:0:0:Ampl  values=1.0
+adddef DirectionalGain:1:1:Ampl  values=1.0
+adddef DirectionalGain:0:0:Real  values=1.0
+adddef DirectionalGain:1:1:Real  values=1.0
+adddef AntennaOrientation values=5.497787144
+quit
+"""
+
+class parmdb(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Add a parameter database to input MeasurementSets.
+
+    This recipe is called by the :class:`bbs.bbs` recipe; it may also be used
+    standalone.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            help="Full path to parmdbm executable",
+            default="/opt/LofIm/daily/lofar/bin/parmdbm"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField()
+    }
+
+    def go(self):
+        self.logger.info("Starting parmdb run")
+        super(parmdb, self).go()
+
+        self.logger.info("Generating template parmdb")
+        pdbdir = tempfile.mkdtemp(
+            dir=self.config.get("layout", "job_directory")
+        )
+        pdbfile = os.path.join(pdbdir, 'instrument')
+
+        try:
+            parmdbm_process = subprocess.Popen(
+                [self.inputs['executable']],
+                stdin=subprocess.PIPE,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE
+            )
+            sout, serr = parmdbm_process.communicate(template % pdbfile)
+            log_process_output("parmdbm", sout, serr, self.logger)
+        except OSError, e:
+            self.logger.error("Failed to spawn parmdbm: %s" % str(e))
+            return 1
+
+        #                     try-finally block to always remove temporary files
+        # ----------------------------------------------------------------------
+        try:
+            #                       Load file <-> compute node mapping from disk
+            # ------------------------------------------------------------------
+            self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+            data = load_data_map(self.inputs['args'][0])
+
+            command = "python %s" % (self.__file__.replace('master', 'nodes'))
+            jobs = []
+            for host, ms in data:
+                jobs.append(
+                    ComputeJob(host, command, arguments=[ms, pdbfile])
+                )
+            self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        finally:
+            self.logger.debug("Removing template parmdb")
+            shutil.rmtree(pdbdir, ignore_errors=True)
+
+        if self.error.isSet():
+            self.logger.warn("Detected failed parmdb job")
+            return 1
+        else:
+            self.outputs['mapfile'] = self.inputs['args'][0]
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(parmdb().main())
diff --git a/CEP/Pipeline/recipes/sip/master/rficonsole.py b/CEP/Pipeline/recipes/sip/master/rficonsole.py
new file mode 100644
index 00000000000..a87a9d4dd17
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/rficonsole.py
@@ -0,0 +1,127 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                  rficonsole (AOflagger) recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+
+from contextlib import nested
+from collections import defaultdict
+
+import sys
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.remotecommand import ComputeJob
+from lofarpipe.support.group_data import load_data_map
+
+class rficonsole(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    The rficonsole recipe runs the rficonsole executable (flagger) across one
+    or more MeasurementSets.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            default="/opt/LofIm/daily/lofar/bin/rficonsole",
+            help="Full path to rficonsole executable"
+        ),
+        'strategy': ingredient.FileField(
+            '--strategy',
+            help="Full path to RFI strategy file",
+            optional=True
+        ),
+        'indirect_read': ingredient.BoolField(
+            '--indirect-read',
+            default=False,
+            help="Indirect baseline reader: re-write MS for efficiency"
+        ),
+        'skip_flagged': ingredient.BoolField(
+            '--skip-flagged',
+            default=True,
+            help="Ignore any MeasurementSet which has been flagged completely"
+        ),
+        'working_dir': ingredient.StringField(
+            '--working-dir',
+            default='/tmp',
+            help="Temporary rficonsole products are stored under this root on each of the remote machines. This directory should therefore be writable on each machine, but need not be shared across hosts"
+        ),
+        'nthreads': ingredient.IntField(
+            '--nthreads',
+            default=8,
+            help="Number of threads per rficonsole process"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            default=1,
+            help="Maximum number of simultaneous processes per node"
+        ),
+        'nmeasurementsets': ingredient.IntField(
+            '--nmeasurementsets',
+            optional=True,
+            help="Maximum number of MeasurementSets processed by a single rficonsole process"
+        ),
+    }
+
+    def go(self):
+        self.logger.info("Starting rficonsole run")
+        super(rficonsole, self).go()
+
+        #                           Load file <-> compute node mapping from disk
+        # ----------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'])
+        data = load_data_map(self.inputs['args'][0])
+
+        #        Jobs being dispatched to each host are arranged in a dict. Each
+        #          entry in the dict is a list of lists of filenames to process.
+        #         A new sublist is started whenever the current one has reached
+        #             'nmeasurementsets' entries (if that limit was supplied, a
+        #               sublist maps to one rficonsole invocation -- see below).
+        # ----------------------------------------------------------------------
+        hostlist = defaultdict(lambda: list([[]]))
+        for host, filename in data:
+            if (
+                self.inputs.has_key('nmeasurementsets') and
+                len(hostlist[host][-1]) >= self.inputs['nmeasurementsets']
+            ):
+                hostlist[host].append([filename])
+            else:
+                hostlist[host][-1].append(filename)
+
+        # 'strategy' is an optional input, so it may be absent from the dict.
+        if self.inputs.has_key('strategy'):
+            strategy = self.inputs['strategy']
+        else:
+            strategy = None
+
+        # The node-side script lives at the same path as this master recipe,
+        # with 'master' replaced by 'nodes'.
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host, file_lists in hostlist.iteritems():
+            # One ComputeJob (hence one rficonsole process) per batch of files.
+            for file_list in file_lists:
+                jobs.append(
+                    ComputeJob(
+                        host, command,
+                        arguments=[
+                            self.inputs['executable'],
+                            self.inputs['nthreads'],
+                            strategy,
+                            self.inputs['indirect_read'],
+                            self.inputs['skip_flagged'],
+                            self.inputs['working_dir']
+                        ] + file_list
+                    )
+                )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        if self.error.isSet():
+            self.logger.warn("Failed rficonsole process detected")
+            return 1
+        else:
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(rficonsole().main())
diff --git a/CEP/Pipeline/recipes/sip/master/skymodel.py b/CEP/Pipeline/recipes/sip/master/skymodel.py
new file mode 100644
index 00000000000..7cfc667ba82
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/skymodel.py
@@ -0,0 +1,198 @@
+from __future__ import with_statement
+from contextlib import closing
+
+import sys
+
+import monetdb.sql as db
+from monetdb.sql import Error as Error
+
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+
+header_line = """\
+#(Name, Type, Ra, Dec, I, Q, U, V, MajorAxis, MinorAxis, Orientation, ReferenceFrequency='60e6', SpectralIndexDegree='0', SpectralIndex:0='0.0', SpectralIndex:1='0.0') = format
+"""
+
+query_skymodel = """
+SELECT t0.catsrcname, t0.src_type, ra2bbshms(t0.ra), decl2bbsdms(t0.decl), t0.i, t0.q, t0.u, t0.v, t0.MajorAxis, t0.MinorAxis, t0.Orientation, t0.ReferenceFrequency, t0.SpectralIndexDegree, t0.SpectralIndex_0
+FROM (
+    SELECT CAST(
+        TRIM(c1.catsrcname) AS VARCHAR(20)
+    ) AS catsrcname,
+    CASE WHEN c1.pa IS NULL
+        THEN CAST('POINT' AS VARCHAR(20))
+        ELSE CAST('GAUSSIAN' AS VARCHAR(20))
+    END AS src_type,
+    CAST(c1.ra AS VARCHAR(20)) AS ra,
+    CAST(c1.decl AS VARCHAR(20)) AS decl,
+    CAST(c1.i_int_avg AS VARCHAR(20)) AS i,
+    CAST(0 AS VARCHAR(20)) AS q,
+    CAST(0 AS VARCHAR(20)) AS u,
+    CAST(0 AS VARCHAR(20)) AS v,
+    CASE WHEN c1.pa IS NULL
+        THEN CAST('' AS VARCHAR(20))
+        ELSE CASE WHEN c1.major IS NULL
+            THEN CAST('' AS VARCHAR(20))
+            ELSE CAST(c1.major AS varchar(20))
+        END
+    END AS MajorAxis,
+    CASE WHEN c1.pa IS NULL
+        THEN CAST('' AS VARCHAR(20))
+        ELSE CASE WHEN c1.minor IS NULL
+            THEN CAST('' AS VARCHAR(20))
+            ELSE CAST(c1.minor AS varchar(20))
+        END
+    END AS MinorAxis,
+    CASE WHEN c1.pa IS NULL
+        THEN CAST('' AS VARCHAR(20))
+        ELSE CAST(c1.pa AS varchar(20))
+    END AS Orientation,
+    CAST(c1.freq_eff AS VARCHAR(20)) AS ReferenceFrequency,
+    CASE WHEN si.spindx_degree IS NULL
+        THEN CAST('' AS VARCHAR(20))
+        ELSE CAST(si.spindx_degree AS VARCHAR(20))
+    END AS SpectralIndexDegree,
+    CASE WHEN si.spindx_degree IS NULL
+        THEN CASE WHEN si.c0 IS NULL
+            THEN CAST(0 AS varchar(20))
+            ELSE CAST(si.c0 AS varchar(20))
+        END
+        ELSE CASE WHEN si.c0 IS NULL
+            THEN CAST('' AS varchar(20))
+            ELSE CAST(si.c0 AS varchar(20))
+        END
+    END AS SpectralIndex_0,
+    CASE WHEN si.c1 IS NULL
+        THEN CAST('' AS varchar(20))
+        ELSE CAST(si.c1 AS varchar(20))
+    END AS SpectralIndex_1
+    FROM catalogedsources c1
+    LEFT OUTER JOIN spectralindices si ON c1.catsrcid = si.catsrc_id
+        WHERE c1.cat_id BETWEEN %s AND %s
+        AND c1.ra BETWEEN %s AND %s
+        AND c1.decl BETWEEN %s AND %s
+        AND c1.i_int_avg > %s
+) t0
+"""
+
+query_central = """
+SELECT
+    catsrcname, i_int
+FROM
+    nearestneighborincat(%s,%s,'%s')
+"""
+
+
+class skymodel(BaseRecipe):
+    """
+    Extract basic sky model information from database
+    """
+    inputs = {
+        'db_host': ingredient.StringField(
+            '--db-host',
+            help="Host with MonetDB database instance",
+            default="ldb001"
+        ),
+        'db_port': ingredient.IntField(
+            '--db-port',
+            help="Host with MonetDB database instance",
+            default=50000
+        ),
+        'db_dbase': ingredient.StringField(
+            '--db-dbase',
+            help="Database name",
+            default="gsm"
+        ),
+        'db_user': ingredient.StringField(
+            '--db-user',
+            help="Database user",
+            default="gsm"
+        ),
+        'db_password': ingredient.StringField(
+            '--db-password',
+            help="Database password",
+            default="msss"
+        ),
+        'ra': ingredient.FloatField(
+            '--ra',
+            help='RA of image centre (degrees)'
+        ),
+        'dec': ingredient.FloatField(
+            '--dec',
+            help='dec of image centre (degrees)'
+        ),
+        'search_size': ingredient.FloatField(
+            '--search-size',
+            help='Distance to search in each of RA/dec (degrees)'
+        ),
+        'min_flux': ingredient.FloatField(
+            '--min-flux',
+            help="Integrated flus threshold, in Jy, for source selection"
+        ),
+        'skymodel_file': ingredient.StringField(
+            '--skymodel-file',
+            help="Output file for BBS-format sky model definition"
+        )
+    }
+
+    outputs = {
+        'source_name': ingredient.StringField(),
+        'source_flux': ingredient.FloatField()
+    }
+
+    def go(self):
+        self.logger.info("Building sky model")
+        super(skymodel, self).go()
+
+        ra_min = self.inputs['ra'] - self.inputs['search_size']
+        ra_max = self.inputs['ra'] + self.inputs['search_size']
+        dec_min = self.inputs['dec'] - self.inputs['search_size']
+        dec_max = self.inputs['dec'] + self.inputs['search_size']
+
+        try:
+            with closing(
+                db.connect(
+                    hostname=self.inputs["db_host"],
+                    port=int(self.inputs["db_port"]),
+                    database=self.inputs["db_dbase"],
+                    username=self.inputs["db_user"],
+                    password=self.inputs["db_password"]
+                )
+            ) as db_connection:
+                with closing(db_connection.cursor()) as db_cursor:
+                    db_cursor.execute(
+                        query_central % (float(self.inputs['ra']), float(self.inputs['dec']), "VLSS")
+                    )
+                    self.outputs["source_name"], self.outputs["source_flux"] = db_cursor.fetchone()
+                    self.logger.info("Central source is %s; flux %f" %
+                        (self.outputs["source_name"], self.outputs["source_flux"])
+                    )
+                    db_cursor.execute(
+                        query_skymodel % (
+                            4, 4, # Only using VLSS for now
+                            float(ra_min),
+                            float(ra_max),
+                            float(dec_min),
+                            float(dec_max),
+                            float(self.inputs['min_flux'])
+                        )
+                    )
+                    results = db_cursor.fetchall()
+
+        except db.Error, my_error:
+            self.logger.warn("Failed to build sky model: %s " % (my_error))
+            return 1
+
+        try:
+            with open(self.inputs['skymodel_file'], 'w') as file:
+                file.write(header_line)
+                file.writelines(", ".join(line) + ",\n" for line in results)
+        except Exception, e:
+            self.logger.warn("Failed to write skymodel file")
+            self.logger.warn(str(e))
+            return 1
+
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(skymodel().main())
diff --git a/CEP/Pipeline/recipes/sip/master/sourcedb.py b/CEP/Pipeline/recipes/sip/master/sourcedb.py
new file mode 100644
index 00000000000..b75da9c7b5c
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/sourcedb.py
@@ -0,0 +1,80 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                                sourcedb recipe
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+import os
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn
+from lofarpipe.support.clusterlogger import clusterlogger
+from lofarpipe.support.group_data import load_data_map
+from lofarpipe.support.remotecommand import ComputeJob
+
+class sourcedb(BaseRecipe, RemoteCommandRecipeMixIn):
+    """
+    Add a source database to input MeasurementSets.
+
+    This recipe is called by the :class:`bbs.bbs` recipe; it may also be used
+    standalone.
+
+    **Arguments**
+
+    A mapfile describing the data to be processed.
+    """
+    inputs = {
+        'executable': ingredient.ExecField(
+            '--executable',
+            help="Full path to makesourcedb executable",
+            default="/opt/LofIm/daily/lofar/bin/makesourcedb"
+        ),
+        'skymodel': ingredient.FileField(
+            '-s', '--skymodel',
+            dest="skymodel",
+            help="Input sky catalogue"
+        ),
+        'nproc': ingredient.IntField(
+            '--nproc',
+            help="Maximum number of simultaneous processes per compute node",
+            default=8
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField()
+    }
+
+    def go(self):
+        self.logger.info("Starting sourcedb run")
+        super(sourcedb, self).go()
+
+        #                           Load file <-> compute node mapping from disk
+        # ----------------------------------------------------------------------
+        self.logger.debug("Loading map from %s" % self.inputs['args'][0])
+        data = load_data_map(self.inputs['args'][0])
+
+        command = "python %s" % (self.__file__.replace('master', 'nodes'))
+        jobs = []
+        for host, ms in data:
+            jobs.append(
+                ComputeJob(
+                    host, command, arguments=[
+                        self.inputs['executable'], ms, self.inputs['skymodel']
+                    ]
+                )
+            )
+        self._schedule_jobs(jobs, max_per_node=self.inputs['nproc'])
+
+        if self.error.isSet():
+            return 1
+        else:
+            self.outputs['mapfile'] = self.inputs['args'][0]
+            return 0
+
+if __name__ == '__main__':
+    sys.exit(sourcedb().main())
diff --git a/CEP/Pipeline/recipes/sip/master/storagemapper.py b/CEP/Pipeline/recipes/sip/master/storagemapper.py
new file mode 100644
index 00000000000..3aa90a67fbb
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/storagemapper.py
@@ -0,0 +1,63 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                        Generate a mapfile for processing data on storage nodes
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import os.path
+from collections import defaultdict
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.parset import Parset
+from lofarpipe.support.utilities import create_directory
+import lofarpipe.support.lofaringredient as ingredient
+
+class storagemapper(BaseRecipe):
+    """
+    Parses a list of filenames and generates a mapfile suitable for processing
+    on storage nodes.
+
+    **Arguments**
+
+    None.
+    """
+    inputs = {
+        'mapfile': ingredient.StringField(
+            '--mapfile',
+            help="Full path (including filename) of mapfile to produce (clobbered if exists)"
+        )
+    }
+
+    outputs = {
+        'mapfile': ingredient.FileField(
+            help="Full path (including filename) of generated mapfile"
+        )
+    }
+
+    def go(self):
+        self.logger.info("Starting storagemapper run")
+        super(storagemapper, self).go()
+
+        #                          We read the storage node name out of the path
+        #     and append the local filename (ie, on the storage node) to the map
+        # ----------------------------------------------------------------------
+        data = defaultdict(list)
+        for filename in self.inputs['args']:
+            host = filename.split(os.path.sep)[3]
+            data[host].append(filename.split(host)[-1])
+
+        #                                 Dump the generated mapping to a parset
+        # ----------------------------------------------------------------------
+        parset = Parset()
+        for host, filenames in data.iteritems():
+            parset.addStringVector(host, filenames)
+
+        create_directory(os.path.dirname(self.inputs['mapfile']))
+        parset.writeFile(self.inputs['mapfile'])
+        self.outputs['mapfile'] = self.inputs['mapfile']
+
+        return 0
+
+if __name__ == '__main__':
+    sys.exit(storagemapper().main())
diff --git a/CEP/Pipeline/recipes/sip/master/vdsreader.py b/CEP/Pipeline/recipes/sip/master/vdsreader.py
new file mode 100644
index 00000000000..968c62231fb
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/master/vdsreader.py
@@ -0,0 +1,69 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                  vdsreader recipe: extract filenames + metadata from GVDS file
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+import lofarpipe.support.utilities as utilities
+import lofarpipe.support.lofaringredient as ingredient
+
+from lofarpipe.support.baserecipe import BaseRecipe
+from lofarpipe.support.utilities import get_parset
+
+
+class vdsreader(BaseRecipe):
+    """
+    Read a GVDS file and return a list of the MS filenames referenced therein
+    together with selected metadata.
+
+    **Arguments**
+
+    None.
+    """
+    inputs = {
+        'gvds': ingredient.FileField(
+            '-g', '--gvds',
+            help="GVDS file to process"
+        )
+    }
+
+    outputs = {
+        'data': ingredient.ListField(help="List of MeasurementSet paths"),
+        'start_time': ingredient.StringField(help="Start time of observation"),
+        'end_time': ingredient.StringField(help="End time of observation"),
+        'pointing': ingredient.DictField(help="Observation pointing direction")
+    }
+
+    def go(self):
+        self.logger.info("Starting vdsreader run")
+        super(vdsreader, self).go()
+
+        try:
+            gvds = get_parset(self.inputs['gvds'])
+        except:
+            self.logger.error("Unable to read G(V)DS file")
+            raise
+
+        self.logger.info("Building list of measurementsets")
+        ms_names = [
+            gvds.getString("Part%d.FileName" % (part_no,))
+            for part_no in xrange(gvds.getInt("NParts"))
+        ]
+        self.logger.debug(ms_names)
+
+        self.outputs['data'] = ms_names
+        try:
+            self.outputs['start_time'] = gvds.getString('StartTime')
+            self.outputs['end_time'] = gvds.getString('EndTime')
+        except:
+            self.logger.warn("Failed to read start/end time from GVDS file")
+        try:
+            self.outputs['pointing'] = {
+                'type': gvds.getStringVector('Extra.FieldDirectionType')[0],
+                'dec': gvds.getStringVector('Extra.FieldDirectionDec')[0],
+                'ra': gvds.getStringVector('Extra.FieldDirectionRa')[0]
+            }
+        except:
+            self.logger.warn("Failed to read pointing information from GVDS file")
+        return 0
diff --git a/CEP/Pipeline/recipes/sip/nodes/bbs.py b/CEP/Pipeline/recipes/sip/nodes/bbs.py
new file mode 100644
index 00000000000..25fbeb4c86d
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/bbs.py
@@ -0,0 +1,101 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                  BBS (BlackBoard Selfcal) node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import Popen, CalledProcessError, PIPE, STDOUT
+from tempfile import mkstemp, mkdtemp
+import os
+import sys
+import shutil
+
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import read_initscript
+from lofarpipe.support.utilities import get_mountpoint
+from lofarpipe.support.utilities import log_time
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.parset import Parset
+
+class bbs(LOFARnodeTCP):
+    #                      Handles running a single BBS kernel on a compute node
+    # --------------------------------------------------------------------------
+    def run(
+        self, executable, initscript, infile, key, db_name, db_user, db_host
+    ):
+        #                           executable: path to KernelControl executable
+        #                           initscript:             path to lofarinit.sh
+        #                               infile:    MeasurementSet for processing
+        #       key, db_name, db_user, db_host:   database connection parameters
+        # ----------------------------------------------------------------------
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            #        Build a configuration parset specifying database parameters
+            #                                                     for the kernel
+            # ------------------------------------------------------------------
+            self.logger.debug("Setting up kernel parset")
+            filesystem = "%s:%s" % (os.uname()[1], get_mountpoint(infile))
+            fd, parset_filename = mkstemp()
+            kernel_parset = Parset()
+            for key, value in {
+                "ObservationPart.Filesystem": filesystem,
+                "ObservationPart.Path": infile,
+                "BBDB.Key": key,
+                "BBDB.Name": db_name,
+                "BBDB.User": db_user,
+                "BBDB.Host": db_host,
+                "ParmLog": "",
+                "ParmLoglevel": "",
+                "ParmDB.Sky": os.path.join(infile, "sky"),
+                "ParmDB.Instrument": os.path.join(infile, "instrument")
+            }.iteritems():
+                kernel_parset.add(key, value)
+            kernel_parset.writeFile(parset_filename)
+            os.close(fd)
+            self.logger.debug("Parset written to %s" % (parset_filename,))
+
+
+            #                                                     Run the kernel
+            #               Catch & log output from the kernel logger and stdout
+            # ------------------------------------------------------------------
+            working_dir = mkdtemp()
+            env = read_initscript(self.logger, initscript)
+            try:
+                cmd = [executable, parset_filename, "0"]
+                self.logger.debug("Executing BBS kernel")
+                with CatchLog4CPlus(
+                    working_dir,
+                    self.logger.name + "." + os.path.basename(infile),
+                    os.path.basename(executable),
+                ):
+                    bbs_kernel_process = Popen(
+                        cmd, stdout=PIPE, stderr=PIPE, cwd=working_dir
+                    )
+                    sout, serr = bbs_kernel_process.communicate()
+                log_process_output("BBS kernel", sout, serr, self.logger)
+                if bbs_kernel_process.returncode != 0:
+                    raise CalledProcessError(
+                        bbs_kernel_process.returncode, executable
+                    )
+            except CalledProcessError, e:
+                self.logger.error(str(e))
+                return 1
+            finally:
+                os.unlink(parset_filename)
+                shutil.rmtree(working_dir)
+            return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    #   argv[1:4]: job id, host and port -- presumably the master's return
+    #   channel used by run_with_stored_arguments(); confirm in lofarnode.
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(bbs(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/cimager.py b/CEP/Pipeline/recipes/sip/nodes/cimager.py
new file mode 100644
index 00000000000..4b1d715c61f
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/cimager.py
@@ -0,0 +1,138 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                         ASKAPsoft cimager node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import Popen, CalledProcessError, PIPE, STDOUT
+from tempfile import mkdtemp
+import os
+import sys
+import shutil
+
+from pyrap.quanta import quantity
+from pyrap.tables import table
+
+from lofarpipe.support.pipelinelogging import CatchLog4CXX
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+from lofarpipe.support.pipelinelogging import log_process_output
+from lofarpipe.support.parset import Parset, patch_parset, get_parset
+
+class cimager(LOFARnodeTCP):
+    #                 Handles running a single cimager process on a compute node
+    # --------------------------------------------------------------------------
+    def run(self, imager_exec, vds, parset, resultsdir, start_time, end_time):
+        #       imager_exec:                          path to cimager executable
+        #               vds:           VDS file describing the data to be imaged
+        #            parset:                                imager configuration
+        #        resultsdir:                         place resulting images here
+        #        start_time:                        )    time range to be imaged
+        #          end_time:                        )   in seconds (may be None)
+        # ----------------------------------------------------------------------
+        with log_time(self.logger):
+            self.logger.info("Processing %s" % (vds,))
+
+            #    Bail out if destination exists (can thus resume a partial run).
+            #                                            Should be configurable?
+            # ------------------------------------------------------------------
+            parset_data = Parset(parset)
+            image_names = parset_data.getStringVector("Cimager.Images.Names")
+            for image_name in image_names:
+                outputfile = os.path.join(resultsdir, image_name + ".restored")
+                self.logger.info(outputfile)
+                if os.path.exists(outputfile):
+                    self.logger.info("Image already exists: aborting.")
+                    return 0
+            try:
+                working_dir = mkdtemp()
+
+                #   If a time range has been specified, copy that section of the
+                #                                  input MS and only image that.
+                # --------------------------------------------------------------
+                query = []
+                if start_time:
+                    self.logger.debug("Start time is %s" % start_time)
+                    start_time = quantity(float(start_time), 's')
+                    query.append("TIME > %f" % start_time.get('s').get_value())
+                if end_time:
+                    self.logger.debug("End time is %s" % end_time)
+                    end_time = quantity(float(end_time), 's')
+                    query.append("TIME < %f" % end_time.get('s').get_value())
+                query = " AND ".join(query)
+                if query:
+                    #                             Select relevant section of MS.
+                    # ----------------------------------------------------------
+                    self.logger.debug("Query is %s" % query)
+                    output = os.path.join(working_dir, "timeslice.MS")
+                    vds_parset = get_parset(vds)
+                    t = table(vds_parset.getString("FileName"))
+                    t.query(query, name=output)
+                    #       Patch updated information into imager configuration.
+                    # ----------------------------------------------------------
+                    parset = patch_parset(parset,
+                        {
+                            'Cimager.dataset': output
+                        }
+                    )
+                else:
+                    self.logger.debug("No time range selected")
+
+                self.logger.debug("Running cimager")
+                with CatchLog4CXX(
+                    working_dir,
+                    self.logger.name + "." + os.path.basename(vds)
+                ):
+                    cimager_process = Popen(
+                        [imager_exec, "-inputs", parset],
+                        stdout=PIPE, stderr=PIPE, cwd=working_dir
+                    )
+                    sout, serr = cimager_process.communicate()
+                log_process_output("cimager", sout, serr, self.logger)
+                if cimager_process.returncode != 0:
+                    raise CalledProcessError(
+                        cimager_process.returncode, imager_exec
+                    )
+
+                #        Dump the resulting images in the pipeline results area.
+                #    I'm not aware of a foolproof way to predict the image names
+                #                that will be produced, so we read them from the
+                #                      parset and add standard cimager prefixes.
+                # --------------------------------------------------------------
+                parset_data = Parset(parset)
+                image_names = parset_data.getStringVector("Cimager.Images.Names")
+                prefixes = [
+                    "image", "psf", "residual", "weights", "sensitivity"
+                ]
+                self.logger.debug("Copying images to %s" % resultsdir)
+                for image_name in image_names:
+                    for prefix in prefixes:
+                        filename = image_name.replace("image", prefix, 1)
+                        shutil.move(
+                            os.path.join(working_dir, filename),
+                            os.path.join(resultsdir, filename)
+                        )
+                    if parset_data.getBool('Cimager.restore'):
+                        shutil.move(
+                            os.path.join(working_dir, image_name + ".restored"),
+                            os.path.join(resultsdir, image_name + ".restored")
+                        )
+            except CalledProcessError, e:
+                self.logger.error(str(e))
+                return 1
+            finally:
+                shutil.rmtree(working_dir)
+                if query:
+                    #                     If we created a new parset, delete it.
+                    # ----------------------------------------------------------
+                    os.unlink(parset)
+            return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    #   argv[1:4]: job id, host and port -- presumably the master's return
+    #   channel used by run_with_stored_arguments(); confirm in lofarnode.
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(cimager(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/count_timesteps.py b/CEP/Pipeline/recipes/sip/nodes/count_timesteps.py
new file mode 100644
index 00000000000..bbe77474176
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/count_timesteps.py
@@ -0,0 +1,47 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                           count_timesteps node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+from __future__ import with_statement
+import os.path
+import sys
+
+from pyrap.tables import taql
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+
+
+class count_timesteps(LOFARnodeTCP):
+    """
+    Return the first and last values in the TIME column.
+    """
+    def run(self, infile):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            try:
+                self.outputs['start_time'] = taql(
+                    "CALC MIN([SELECT TIME from %s])" % infile
+                )[0]
+                self.outputs['end_time'] = taql(
+                    "CALC MAX([SELECT TIME from %s])" % infile
+                )[0]
+            except Exception, e:
+                self.logger.error(str(e))
+                return 1
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    # --------------------------------------------------------------------------
+    #   argv[1:4]: job id, host and port -- presumably the master's return
+    #   channel used by run_with_stored_arguments(); confirm in lofarnode.
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(count_timesteps(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/casapy.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/casapy.py
new file mode 100644
index 00000000000..8478e836537
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/casapy.py
@@ -0,0 +1,68 @@
+# Python standard library
+from __future__ import with_statement
+from contextlib import closing
+from subprocess import check_call, CalledProcessError
+from dateutil.parser import parse as parse_date
+from datetime import timedelta
+import os.path, tempfile, shutil, time
+
+from pipeline.support.lofarnode import LOFARnode
+from pipeline.support.utilities import patch_parset, create_directory, log_time
+from pipeline.support.lofarexceptions import ExecutableMissing
+import pipeline.support.utilities as utilities
+
+CASA_DATE_FORMAT = "%Y/%m/%d/%H:%M:%S.000"
+
+class casapy_node(LOFARnode):
+    def run(self, infile, parset, start_time, end_time, increment):
+        # Time execution of this job
+        with log_time(self.logger):
+            self.logger.info("Processing %s" % (infile,))
+
+            start_time = parse_date(start_time)
+            end_time   = parse_date(end_time)
+
+            self.logger.debug("Start time: %s, end time: %s" % (str(start_time), str(end_time)))
+            increment = timedelta(0, increment)
+
+            process_start = start_time
+            while process_start < end_time:
+                process_end = process_start + increment
+                if process_end > end_time:
+                    td = end_time - process_start
+                    self.logger.info(
+                        "Note: final image is %.3f seconds long" % (
+                            td.days * 86400 + td.seconds + td.microseconds / 1e6
+                        )
+                    )
+                    process_end = end_time
+                time_range = "\'%s~%s\'" % (
+                    process_start.strftime(CASA_DATE_FORMAT),
+                    process_end.strftime(CASA_DATE_FORMAT)
+                )
+                self.logger.debug("Now processing %s" % (time_range))
+
+                tmp_parset_filename = patch_parset(
+                    parset, {
+                        'Selection.timerange': time_range,
+                        'Images.name': '-' + str(int(time.mktime(process_start.timetuple()))),
+                        'dataset': infile
+                    }
+                )
+
+                try:
+                    result = check_call([
+                        os.path.expanduser('~rol/sw/bin/casapy'),
+                        tmp_parset_filename,
+                    ])
+                except CalledProcessError, e:
+                    self.logger.error(str(e))
+                    self.logger.error("Failed dataset was %s %s" % (infile, time_range))
+                    raise Exception
+                finally:
+                    # Clean up tempoerary files.
+                    os.unlink(tmp_parset_filename)
+
+                process_start += increment
+
+            return result
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/colmaker.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/colmaker.py
new file mode 100644
index 00000000000..7a2a43cdf73
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/colmaker.py
@@ -0,0 +1,17 @@
+from __future__ import with_statement
+import pyrap.tables
+
+from pipeline.support.lofarnode import LOFARnode
+from pipeline.support.utilities import log_time
+
+class makecolumns_node(LOFARnode):
+    """
+    Add imaging columns to a given MS using pyrap.
+    """
+    def run(self, file):
+        with log_time(self.logger):
+            self.logger.info("Processing: %s" % (file))
+            try:
+                pyrap.tables.addImagingColumns(file)
+            except ValueError:
+                self.logger.debug('Add imaging columns failed: already exist?')
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/dummy_echo_parallel.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/dummy_echo_parallel.py
new file mode 100644
index 00000000000..c165e51626d
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/dummy_echo_parallel.py
@@ -0,0 +1,14 @@
+import subprocess
+from lofarpipe.support.lofarnode import LOFARnode
+
+class dummy_echo_parallel(LOFARnode):
+    def run(self, filename, executable):
+        self.logger.info("Processing %s" % (filename))
+        execute = [executable, filename]
+
+        my_process = subprocess.Popen(execute, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        sout, serr = my_process.communicate()
+        self.logger.info("stdout: " + sout)
+        self.logger.info("stderr: " + serr)
+
+        return filename
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/excluder.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/excluder.py
new file mode 100644
index 00000000000..2d212e9bee6
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/excluder.py
@@ -0,0 +1,27 @@
+from __future__ import with_statement
+from pyrap.tables import table
+
+from lofarpipe.support.lofarnode import LOFARnode
+from lofarpipe.support.utilities import log_time
+
+class excluder(LOFARnode):
+    """
+    Remove data from the given station from the input MS.
+    """
+    def run(self, input, output, *stations):
+        try:
+            t = table(input)
+        except Exception, e:
+            self.logger.error(str(e))
+            raise e
+        try:
+            a = table(t.getkeyword('ANTENNA').split()[1])
+            station_ids = [a.getcol('NAME').index(name) for name in stations]
+            selection = t.query(
+                "ANTENNA1 not in %s and ANTENNA2 not in %s" %
+                (str(station_ids), str(station_ids))
+            )
+            selection.copy(output, deep=True).close()
+        except Exception, e:
+            self.logger.error(str(e))
+            raise e
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/flagger.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/flagger.py
new file mode 100644
index 00000000000..b5a6c49d2c6
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/flagger.py
@@ -0,0 +1,31 @@
+from __future__ import with_statement
+from pyrap.tables import table
+import numpy
+
+from lofarpipe.support.lofarnode import LOFARnode
+from lofarpipe.support.utilities import log_time
+
+class flagger(LOFARnode):
+    """
+    Flag out CORRECTED_DATA greater than some maximum value.
+    """
+    def run(self, input, output, max_value):
+        with log_time(self.logger):
+            self.logger.info("Processing: %s" % (input))
+            try:
+                t = table(input)
+                t2 = t.copy(output, deep=True)
+                t2.close()
+                t = table(output, readonly=False)
+            except Exception, e:
+                self.logger.error(str(e))
+                raise e
+            try:
+                for i, data in enumerate(t.getcol('CORRECTED_DATA')):
+                    if max([abs(val) for val in data[0]]) > max_value:
+                        t.putcell('FLAG', i, numpy.array([[True, True, True, True]]))
+                        t.putcell('FLAG_ROW', i, True)
+                t.close()
+            except Exception, e:
+                self.logger.error(str(e))
+                raise e
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/qcheck.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/qcheck.py
new file mode 100644
index 00000000000..da2c5f6ad73
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/qcheck.py
@@ -0,0 +1,51 @@
+from __future__ import with_statement
+import os, imp, logging, errno
+
+from pipeline.support.lofarnode import LOFARnode
+from pipeline.support.utilities import log_time
+
+class qcheck_node(LOFARnode):
+    """
+    Run quality check modules on an image.
+    """
+    def run(self, infile, pluginlist, outputdir):
+        with log_time(self.logger):
+            self.logger.info("Processing: %s" % (infile))
+
+            try:
+                os.makedirs(outputdir)
+            except OSError, failure:
+                if failure.errno != errno.EEXIST:
+                    raise
+
+            file_handler = logging.FileHandler(os.path.join(
+                    outputdir,
+                    os.path.basename(infile) + ".qcheck.log"
+                ),
+                mode='w'
+            )
+            file_handler.setFormatter(logging.Formatter("%(message)s"))
+            file_logger = logging.getLogger('main')
+            file_logger.addHandler(file_handler)
+            file_logger.setLevel(logging.INFO)
+            pipeline_logger = logging.getLogger(self.logger.name + "." + os.path.basename(infile))
+            pipeline_logger.setLevel(logging.WARN)
+
+            loggers = {'main': file_logger, 'warn': pipeline_logger}
+
+            for plugin in pluginlist:
+                try:
+                    qcheck = imp.load_source('qcheck', plugin)
+                except ImportError:
+                    self.logger.warn("Quality check module (%s) not found" % (plugin))
+                try:
+                    qcheck.run(infile, outputdir=outputdir, loggers=loggers)
+                except Exception, e:
+                    self.logger.warn("Quality check failed on %s" % (infile))
+                    self.logger.exception(str(e))
+
+            # Tidy up for the next image
+            file_handler.flush()
+            loggers['main'].remove_handler(file_handler)
+
+        return 0
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/sextractor.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/sextractor.py
new file mode 100644
index 00000000000..bf87b51e9f7
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/sextractor.py
@@ -0,0 +1,59 @@
+# Python standard library
+from __future__ import with_statement
+from contextlib import closing
+from subprocess import check_call
+from tempfile import mkdtemp
+from ConfigParser import SafeConfigParser as ConfigParser
+from shutil import rmtree
+import os.path
+
+# Root directory for config file
+from pipeline import __path__ as config_path
+
+from tkp_lib.accessors import FitsFile
+from tkp_lib.image import ImageData
+from tkp_lib.database import connection
+
+from tkp_lib.dataset import DataSet
+
+def sextract(filename, dataset):
+    """
+    (Deprecated) Convert a CASA image to FITS and run source extraction.
+
+    NOTE(review): the first statement raises NotImplementedError
+    unconditionally, so everything below is dead code kept for reference.
+    """
+    raise NotImplementedError
+    # Hack around MonetDB concurrency issues(!)
+    import time, random
+    time.sleep(random.randint(0,60))
+
+    try:
+        config = ConfigParser()
+        config.read("%s/pipeline.cfg" % (config_path[0],))
+        image2fits = config.get('sextractor', 'image2fits')
+
+        tempdir = mkdtemp(dir='/data/swinbank')
+        fitsfile = os.path.join(tempdir, os.path.basename(filename) + ".fits")
+
+        command_line = [image2fits, "in=%s" % (os.path.basename(filename)), "out=%s" % (fitsfile)]
+        cwd = os.path.dirname(filename)
+
+        check_call(
+            command_line,
+            cwd=os.path.dirname(filename),
+            close_fds=True
+        )
+
+        image = ImageData(FitsFile(fitsfile), dataset=dataset)
+    except Exception, inst:
+        # NOTE(review): ``platform`` is referenced below but never imported in
+        # this module -- reviving this dead code would raise NameError here.
+        return "ERROR: %s on %s, %s" % (str((type(inst))), platform.node(), fitsfile)
+
+    sr = image.sextract()
+    with closing(connection()) as con:
+        sr.savetoDB(con)
+    
+    rmtree(tempdir)
+    return "%s found %d sources" % (filename, len(sr))
+
+if __name__ == "__main__":
+    from sys import argv
+    dataset = DataSet("command line")
+    try:
+        sextract(argv[1], dataset)
+    except:
+        print "Usage: sextractor [filename]"
diff --git a/CEP/Pipeline/recipes/sip/nodes/deprecated/trimmer.py b/CEP/Pipeline/recipes/sip/nodes/deprecated/trimmer.py
new file mode 100644
index 00000000000..391a76ec89f
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/deprecated/trimmer.py
@@ -0,0 +1,32 @@
+from __future__ import with_statement
+from pyrap.tables import table
+
+from lofarpipe.support.lofarnode import LOFARnode
+from lofarpipe.support.utilities import log_time
+
+class trimmer(LOFARnode):
+    """
+    Remove data from the start and/or end of a MeasurementSet.
+    """
+    def run(self, input, output, start_seconds, end_seconds):
+        # Remove data from the start and/or end of a MeasurementSet.
+        copy_columns = ",".join([
+            'UVW', 'FLAG', 'FLAG_CATEGORY', 'WEIGHT', 'SIGMA', 'ANTENNA1',
+            'ANTENNA2', 'ARRAY_ID', 'DATA_DESC_ID', 'EXPOSURE', 'FEED1', 'FEED2',
+            'FIELD_ID', 'FLAG_ROW', 'INTERVAL', 'OBSERVATION_ID', 'PROCESSOR_ID',
+            'SCAN_NUMBER', 'STATE_ID', 'TIME', 'TIME_CENTROID', 'DATA',
+            'WEIGHT_SPECTRUM'
+        ])
+        try:
+            t = table(input)
+            selection = t.query(
+                "TIME > %.16f AND TIME < %.16f" % (
+                    t.getcol('TIME')[0] + float(start_seconds),
+                    t.getcol('TIME')[-1] - float(end_seconds)
+                ),
+                columns=copy_columns
+            )
+            selection.copy(output, deep=True)
+        except Exception, e:
+            self.logger.error(str(e))
+            raise e
diff --git a/CEP/Pipeline/recipes/sip/nodes/dppp.py b/CEP/Pipeline/recipes/sip/nodes/dppp.py
new file mode 100644
index 00000000000..a2f6fc27da6
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/dppp.py
@@ -0,0 +1,122 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                        DPPP (Data Pre-Procesing Pipeline) node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import CalledProcessError
+from logging import getLogger
+import sys
+import os.path
+import tempfile
+import shutil
+
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+from lofarpipe.support.pipelinelogging import log_time
+from lofarpipe.support.utilities import patch_parset
+from lofarpipe.support.utilities import read_initscript
+from lofarpipe.support.utilities import create_directory
+from lofarpipe.support.utilities import catch_segfaults
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.lofarexceptions import ExecutableMissing
+
+class dppp(LOFARnodeTCP):
+    def run(
+        self, infile, outfile, parset, executable, initscript,
+        start_time, end_time, nthreads, clobber
+    ):
+        # Time execution of this job
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            if clobber == "True":
+                self.logger.info("Checking for and removing previous output")
+                shutil.rmtree(outfile, ignore_errors=True)
+
+            self.logger.debug("Time interval: %s %s" % (start_time, end_time))
+
+            #                                             Initialise environment
+            #                 Limit number of threads used, per request from GvD
+            # ------------------------------------------------------------------
+            env = read_initscript(self.logger, initscript)
+            if nthreads == "None": nthreads = 1
+            self.logger.debug("Using %s threads for NDPPP" % nthreads)
+            env['OMP_NUM_THREADS'] = str(nthreads)
+
+            #    If the input and output filenames are the same, DPPP should not
+            #       write a new MS, but rather update the existing one in-place.
+            #              This is achieved by setting msout to an empty string.
+            # ------------------------------------------------------------------
+            if outfile == infile:
+                outfile = "\"\""
+            else:
+                create_directory(os.path.dirname(outfile))
+
+            #       Patch the parset with the correct input/output MS names and,
+            #                                   if available, start & end times.
+            #                            The uselogger option enables log4cplus.
+            # ------------------------------------------------------------------
+            patch_dictionary = {
+                'msin': infile,
+                'msout': outfile,
+                'uselogger': 'True'
+            }
+            if start_time and start_time != "None":
+                patch_dictionary['msin.starttime'] = start_time
+            if end_time and end_time != "None":
+                patch_dictionary['msin.endtime'] = end_time
+            try:
+                temp_parset_filename = patch_parset(parset, patch_dictionary)
+            except Exception, e:
+                self.logger.error(e)
+
+            try:
+                if not os.access(executable, os.X_OK):
+                    raise ExecutableMissing(executable)
+
+                working_dir = tempfile.mkdtemp()
+                cmd = [executable, temp_parset_filename, '1']
+
+                with CatchLog4CPlus(
+                    working_dir,
+                    self.logger.name + "." + os.path.basename(infile),
+                    os.path.basename(executable),
+                ) as logger:
+                    #     Catch NDPPP segfaults (a regular occurance), and retry
+                    # ----------------------------------------------------------
+                    if outfile != infile:
+                        cleanup_fn = lambda : shutil.rmtree(outfile, ignore_errors=True)
+                    else:
+                        cleanup_fn = lambda : None
+                    catch_segfaults(
+                        cmd, working_dir, env, logger, cleanup=cleanup_fn
+                    )
+            except ExecutableMissing, e:
+                self.logger.error("%s not found" % (e.args[0]))
+                return 1
+            except CalledProcessError, e:
+                #        CalledProcessError isn't properly propagated by IPython
+                # --------------------------------------------------------------
+                self.logger.error(str(e))
+                return 1
+            except Exception, e:
+                self.logger.error(str(e))
+                return 1
+            finally:
+                os.unlink(temp_parset_filename)
+                shutil.rmtree(working_dir)
+
+            return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    #       (argv[1:4] supply the job identifier and the logging host and port).
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(dppp(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/flag_baseline.py b/CEP/Pipeline/recipes/sip/nodes/flag_baseline.py
new file mode 100644
index 00000000000..16ce3cd6842
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/flag_baseline.py
@@ -0,0 +1,78 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                             flag_baseline node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+from __future__ import with_statement
+from cPickle import load
+import os.path
+import sys
+
+from pyrap.tables import taql, table
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+
+
+class flag_baseline(LOFARnodeTCP):
+    """
+    Completely flag a series of baselines in a MeasurementSet.
+    """
+    def run(self, infile, baseline_filename):
+        """
+        Flag the listed baselines in infile.
+
+        baseline_filename points to a file containing a pickled array of
+        antenna pairs, each formatted as "ant1&ant2".
+
+        Returns 0 on success, 1 on failure.
+        """
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            if not os.path.exists(baseline_filename):
+                self.logger.error(
+                    "baseline file %s not found" % (baseline_filename)
+                )
+                return 1
+
+            with open(baseline_filename) as file:
+                baselines = load(file)
+
+            # Split the "ant1&ant2" strings into two parallel integer lists.
+            antenna1, antenna2 = [], []
+            for baseline in baselines:
+                ant1, ant2 = baseline.split("&")
+                antenna1.append(int(ant1))
+                antenna2.append(int(ant2))
+
+            # Interpolate the Python list reprs (e.g. "[1, 2]") straight
+            # into the TaQL update statement.
+            cmd = "UPDATE %s SET FLAG=True WHERE any(ANTENNA1=%s and ANTENNA2=%s)" % \
+                (infile, str(antenna1), str(antenna2))
+            self.logger.info("Running TaQL: " + cmd)
+
+            try:
+                taql(cmd)
+            except Exception, e:
+                self.logger.warn(str(e))
+                return 1
+
+            # QUICK HACK: Also flag last timestep
+            t = table(infile)
+            maxtime = t.getcol('TIME').max()
+            t.close()
+            cmd = "UPDATE %s SET FLAG=True WHERE TIME=%f" % (infile, maxtime)
+            self.logger.info("Running TaQL: " + cmd)
+            try:
+                taql(cmd)
+            except Exception, e:
+                self.logger.warn(str(e))
+                return 1
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    #       (argv[1:4] supply the job identifier and the logging host and port).
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(flag_baseline(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/make_flaggable.py b/CEP/Pipeline/recipes/sip/nodes/make_flaggable.py
new file mode 100644
index 00000000000..dd5048caf48
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/make_flaggable.py
@@ -0,0 +1,45 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                            make_flaggable node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+from __future__ import with_statement
+import os.path
+import sys
+import imp
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+
+
+class make_flaggable(LOFARnodeTCP):
+    def run(self, infile, makeFLAGwritable):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            if not os.path.exists(makeFLAGwritable):
+                self.logger.error(
+                    "file %s not found" % (makeFLAGwritable)
+                )
+                return 1
+
+            try:
+                mFw_module = imp.load_source('mFw_module', makeFLAGwritable)
+                mFw_module.makeFlagWritable(infile, '')
+            except Exception, e:
+                self.logger.warn(str(e))
+                return 1
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    #       (argv[1:4] supply the job identifier and the logging host and port).
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(make_flaggable(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/parmdb.py b/CEP/Pipeline/recipes/sip/nodes/parmdb.py
new file mode 100644
index 00000000000..1f65a80392b
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/parmdb.py
@@ -0,0 +1,31 @@
+from __future__ import with_statement
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+import shutil, os.path
+import sys
+
+class parmdb(LOFARnodeTCP):
+    def run(self, infile, pdb):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            output = os.path.join(infile, os.path.basename(pdb))
+
+            # Remove any old parmdb database
+            shutil.rmtree(output, ignore_errors=True)
+
+            # And copy the new one into place
+            shutil.copytree(pdb, output)
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    #       (argv[1:4] supply the job identifier and the logging host and port).
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(parmdb(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/rficonsole.py b/CEP/Pipeline/recipes/sip/nodes/rficonsole.py
new file mode 100644
index 00000000000..d9869abff65
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/rficonsole.py
@@ -0,0 +1,73 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                    rficonsole (AOflagger) node
+#                                                            John Swinbank, 2010
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import CalledProcessError
+import sys
+import os.path
+import shutil
+import tempfile
+
+from lofarpipe.support.pipelinelogging import log_time
+from lofarpipe.support.utilities import catch_segfaults
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.lofarexceptions import ExecutableMissing
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+
+class rficonsole(LOFARnodeTCP):
+    """
+    Run the rficonsole (AOFlagger) executable over one or more
+    MeasurementSets on this node.
+    """
+    def run(self, executable, nthreads, strategy, indirect, skip_flagged, wd, *infiles):
+        # executable:   path to the rficonsole binary
+        # nthreads:     passed to rficonsole's -j (thread count) option
+        # strategy:     optional path to a flagging strategy file
+        # indirect:     if true, pass -indirect-read
+        # skip_flagged: if true, pass -skip-flagged
+        # wd:           parent directory for the temporary working directory
+        # infiles:      the MeasurementSets to process
+        with log_time(self.logger):
+            self.logger.info("Processing %s" % " ".join(infiles))
+
+            try:
+                if not os.access(executable, os.X_OK):
+                    raise ExecutableMissing(executable)
+
+                working_dir = tempfile.mkdtemp(dir=wd)
+                cmd = [executable, "-j", str(nthreads)]
+                if strategy:
+                    if os.path.exists(strategy):
+                        cmd.extend(["-strategy", strategy])
+                    else:
+                        raise Exception("Strategy definition not available")
+                if indirect:
+                    cmd.extend(["-indirect-read"])
+                if skip_flagged:
+                    cmd.extend(["-skip-flagged"])
+                cmd.extend(infiles)
+                with CatchLog4CPlus(
+                    working_dir,
+                    self.logger.name,
+                    os.path.basename(executable)
+                ) as logger:
+                    catch_segfaults(cmd, working_dir, None, logger)
+            except ExecutableMissing, e:
+                self.logger.error("%s not found" % (e.args[0]))
+                return 1
+            except CalledProcessError, e:
+                # Log and signal failure via the return code.
+                self.logger.error(str(e))
+                return 1
+            except Exception, e:
+                self.logger.exception(e)
+                return 1
+            finally:
+                # Try and clean up the working directory, but don't worry if
+                # it fails -- it might not have been created before throwing
+                # the exception (working_dir would then be unbound, raising a
+                # NameError which this deliberately broad except also eats).
+                try:
+                    shutil.rmtree(working_dir)
+                except:
+                    pass
+
+            return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    #       (argv[1:4] supply the job identifier and the logging host and port).
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(rficonsole(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/sourcedb.py b/CEP/Pipeline/recipes/sip/nodes/sourcedb.py
new file mode 100644
index 00000000000..220fdb5f594
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/sourcedb.py
@@ -0,0 +1,52 @@
+from __future__ import with_statement
+from subprocess import Popen, CalledProcessError, PIPE, STDOUT
+import shutil
+import os.path
+import tempfile
+import sys
+
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+from lofarpipe.support.utilities import log_time
+from lofarpipe.support.pipelinelogging import CatchLog4CPlus
+from lofarpipe.support.utilities import catch_segfaults
+
+
+class sourcedb(LOFARnodeTCP):
+    def run(self, executable, infile, catalogue):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            output = os.path.join(infile, "sky")
+
+            # Remove any old sky database
+            shutil.rmtree(output, ignore_errors=True)
+
+            working_dir = tempfile.mkdtemp()
+            try:
+                cmd = [executable, "format=<", "in=%s" % (catalogue), "out=%s" % (output)]
+                with CatchLog4CPlus(
+                    working_dir,
+                    self.logger.name + "." + os.path.basename(infile),
+                    os.path.basename(executable)
+                ) as logger:
+                    catch_segfaults(cmd, working_dir, None, logger)
+            except CalledProcessError, e:
+                # For CalledProcessError isn't properly propagated by IPython
+                # Temporary workaround...
+                self.logger.error(str(e))
+                return 1
+            finally:
+                shutil.rmtree(working_dir)
+
+        return 0
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    #       (argv[1:4] supply the job identifier and the logging host and port).
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(sourcedb(jobid, jobhost, jobport).run_with_stored_arguments())
diff --git a/CEP/Pipeline/recipes/sip/nodes/vdsmaker.py b/CEP/Pipeline/recipes/sip/nodes/vdsmaker.py
new file mode 100644
index 00000000000..2152b1c414f
--- /dev/null
+++ b/CEP/Pipeline/recipes/sip/nodes/vdsmaker.py
@@ -0,0 +1,50 @@
+#                                                         LOFAR IMAGING PIPELINE
+#
+#                                                                  vdsmaker node
+#                                                         John Swinbank, 2009-10
+#                                                      swinbank@transientskp.org
+# ------------------------------------------------------------------------------
+
+from __future__ import with_statement
+from subprocess import Popen, CalledProcessError, PIPE, STDOUT
+import os
+import sys
+
+from lofarpipe.support.lofarexceptions import ExecutableMissing
+from lofarpipe.support.utilities import create_directory, log_time
+from lofarpipe.support.utilities import catch_segfaults
+from lofarpipe.support.lofarnode import LOFARnodeTCP
+
+class vdsmaker(LOFARnodeTCP):
+    """
+    Make a VDS file for the input MS in a specificed location.
+    """
+    def run(self, infile, clusterdesc, outfile, executable):
+        with log_time(self.logger):
+            if os.path.exists(infile):
+                self.logger.info("Processing %s" % (infile))
+            else:
+                self.logger.error("Dataset %s does not exist" % (infile))
+                return 1
+
+            try:
+                if not os.access(executable, os.X_OK):
+                    raise ExecutableMissing(executable)
+                cmd = [executable, clusterdesc, infile, outfile]
+                result = catch_segfaults(cmd, None, None, self.logger).returncode
+                self.outputs["result"] = result
+            except ExecutableMissing, e:
+                self.logger.error("%s not found" % (e.args[0]))
+                return 1
+            except CalledProcessError, e:
+                # For CalledProcessError isn't properly propagated by IPython
+                # Temporary workaround...
+                self.logger.error(str(e))
+                return 1
+
+if __name__ == "__main__":
+    #   If invoked directly, parse command line arguments for logger information
+    #                        and pass the rest to the run() method defined above
+    #       (argv[1:4] supply the job identifier and the logging host and port).
+    # --------------------------------------------------------------------------
+    jobid, jobhost, jobport = sys.argv[1:4]
+    sys.exit(vdsmaker(jobid, jobhost, jobport).run_with_stored_arguments())
-- 
GitLab