Skip to content
Snippets Groups Projects
Commit 738ef2a4 authored by Jan David Mol's avatar Jan David Mol
Browse files

Task #8437: Generate pipeline.cfg.CEP4, and propagate this file in PipelineControl

parent 81fc0a7b
No related branches found
No related tags found
No related merge requests found
...@@ -1614,8 +1614,8 @@ CEP/Pipeline/recipes/sip/nodes/selfcal_finalize.py eol=lf ...@@ -1614,8 +1614,8 @@ CEP/Pipeline/recipes/sip/nodes/selfcal_finalize.py eol=lf
CEP/Pipeline/recipes/sip/nodes/setupparmdb.py eol=lf CEP/Pipeline/recipes/sip/nodes/setupparmdb.py eol=lf
CEP/Pipeline/recipes/sip/nodes/setupsourcedb.py eol=lf CEP/Pipeline/recipes/sip/nodes/setupsourcedb.py eol=lf
CEP/Pipeline/recipes/sip/nodes/vdsmaker.py eol=lf CEP/Pipeline/recipes/sip/nodes/vdsmaker.py eol=lf
CEP/Pipeline/recipes/sip/pipeline.cfg.CEP4.docker-template -text
CEP/Pipeline/recipes/sip/pipeline.cfg.in eol=lf CEP/Pipeline/recipes/sip/pipeline.cfg.in eol=lf
CEP/Pipeline/recipes/sip/pipeline.cfg.thead01.cep4 -text
CEP/Pipeline/recipes/sip/plugins/PipelineStep_addMapfile.py -text CEP/Pipeline/recipes/sip/plugins/PipelineStep_addMapfile.py -text
CEP/Pipeline/recipes/sip/plugins/PipelineStep_changeMapfile.py -text CEP/Pipeline/recipes/sip/plugins/PipelineStep_changeMapfile.py -text
CEP/Pipeline/recipes/sip/plugins/PipelineStep_combineParsets.py -text CEP/Pipeline/recipes/sip/plugins/PipelineStep_combineParsets.py -text
......
...@@ -113,6 +113,7 @@ install(FILES ...@@ -113,6 +113,7 @@ install(FILES
install(FILES install(FILES
${CMAKE_CURRENT_BINARY_DIR}/pipeline.cfg ${CMAKE_CURRENT_BINARY_DIR}/pipeline.cfg
${CMAKE_CURRENT_BINARY_DIR}/pipeline.cfg.CEP4
${CMAKE_CURRENT_BINARY_DIR}/tasks.cfg ${CMAKE_CURRENT_BINARY_DIR}/tasks.cfg
DESTINATION share/pipeline) DESTINATION share/pipeline)
...@@ -127,6 +128,29 @@ configure_file( ...@@ -127,6 +128,29 @@ configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/pipeline.cfg.in ${CMAKE_CURRENT_SOURCE_DIR}/pipeline.cfg.in
${CMAKE_CURRENT_BINARY_DIR}/pipeline.cfg) ${CMAKE_CURRENT_BINARY_DIR}/pipeline.cfg)
# Convert configuration files through docker-template
foreach(_file pipeline.cfg.CEP4)
# _src -> _dst
set(_src ${CMAKE_CURRENT_SOURCE_DIR}/${_file}.docker-template)
set(_dst ${CMAKE_CURRENT_BINARY_DIR}/${_file})
# add generating command, and (any) target to force the generation
# when "all" is built.
add_custom_command(
OUTPUT ${_dst}
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/docker-template < ${_src} > ${_dst}
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/docker-template ${_src} ${CMAKE_CURRENT_BINARY_DIR}/versiondocker
)
add_custom_target(${_file}_target ALL DEPENDS ${_dst})
# install resulting file
install(FILES
${_dst}
DESTINATION share/pipeline
RENAME pipeline.cfg.CEP4
)
endforeach()
configure_file( configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/tasks.cfg.in ${CMAKE_CURRENT_SOURCE_DIR}/tasks.cfg.in
${CMAKE_CURRENT_BINARY_DIR}/tasks.cfg) ${CMAKE_CURRENT_BINARY_DIR}/tasks.cfg)
......
...@@ -15,9 +15,10 @@ ...@@ -15,9 +15,10 @@
# runPipeline.sh <obsid> || pipelineAborted.sh <obsid> # runPipeline.sh <obsid> || pipelineAborted.sh <obsid>
OBSID=$1 OBSID=$1
shift
if [ -z "$OBSID" ]; then if [ -z "$OBSID" ]; then
echo "Usage: $0 <obsid>" echo "Usage: $0 <obsid> <pipeline parameters>"
exit 1 exit 1
fi fi
...@@ -35,10 +36,7 @@ getParset.py -o $OBSID >$PARSET ...@@ -35,10 +36,7 @@ getParset.py -o $OBSID >$PARSET
PROGRAM_NAME=$(getparsetvalue $PARSET "ObsSW.Observation.ObservationControl.PythonControl.programName") PROGRAM_NAME=$(getparsetvalue $PARSET "ObsSW.Observation.ObservationControl.PythonControl.programName")
# Run pipeline # Run pipeline
OPTIONS=" \ OPTIONS=" -d $@"
-d \
-c ${LOFARROOT}/share/pipeline/pipeline.cfg \
-t ${LOFARROOT}/share/pipeline/tasks.cfg"
# Set up the environment (information to propagate to the node scripts for monitoring and logging) # Set up the environment (information to propagate to the node scripts for monitoring and logging)
export LOFAR_OBSID="$OBSID" export LOFAR_OBSID="$OBSID"
......
...@@ -22,10 +22,10 @@ hdf5root = ...@@ -22,10 +22,10 @@ hdf5root =
wcsroot = /opt/wcslib wcsroot = /opt/wcslib
pythonpath = /opt/lofar/lib/python2.7/site-packages pythonpath = /opt/lofar/lib/python2.7/site-packages
# runtime dir is a global FS (nfs, lustre) to exchange small files (parsets, vds, map files, etc) # runtime dir is a global FS (nfs, lustre) to exchange small files (parsets, vds, map files, etc)
runtime_directory = /shared/mol/regression_test/rundir runtime_directory = /data/share/pipeline
recipe_directories = [%(pythonpath)s/lofarpipe/recipes] recipe_directories = [%(pythonpath)s/lofarpipe/recipes]
# working dir is the local dir in which input/output dataproducts reside # working dir is the local dir in which input/output dataproducts reside
working_directory = /globalhome/mol/regression_test/working_dir working_directory = /data/scratch
task_files = [%(lofarroot)s/share/pipeline/tasks.cfg] task_files = [%(lofarroot)s/share/pipeline/tasks.cfg]
[layout] [layout]
...@@ -50,11 +50,9 @@ xml_stat_file = %(runtime_directory)s/%(job_name)s/logs/%(start_time)s/statistic ...@@ -50,11 +50,9 @@ xml_stat_file = %(runtime_directory)s/%(job_name)s/logs/%(start_time)s/statistic
method = none method = none
[docker] [docker]
image = lofar-patched image = lofar-pipeline:${LOFAR_TAG}
[remote] [remote]
#method = slurm_srun_cep3
#method = ssh_docker
method = custom_cmdline method = custom_cmdline
max_per_node = 1 max_per_node = 1
...@@ -85,5 +83,4 @@ max_per_node = 1 ...@@ -85,5 +83,4 @@ max_per_node = 1
# /bin/bash -c # /bin/bash -c
# #
# Required because the pipeline framework needs some bash functionality in the commands it starts. # Required because the pipeline framework needs some bash functionality in the commands it starts.
#cmdline = ssh -n -tt -x localhost srun -w {host} -N 1 -n 1 --jobid={slurm_job_id} docker run -t --rm -e LUSER={uid} -w g -v /home/mol/.ssh:/home/lofar/.ssh:ro -v /globalhome/mol/regression_test:/globalhome/mol/regression_test -v /shared:/shared --net=host {docker_image} /bin/bash -c cmdline = ssh -n -tt -x localhost srun -w {host} -N 1 -n 1 --jobid={slurm_job_id} docker run --rm -e LUSER={uid} -v %(runtime_directory)s:%(runtime_directory)s -v %(working_directory)s:%(working_directory)s -v /data:/data --net=host {docker_image} /bin/bash -c "\"{command}\""
cmdline = ssh -n -tt -x localhost srun -w {host} -N 1 -n 1 --jobid={slurm_job_id} docker run -t --rm -e LUSER={uid} -v %(runtime_directory)s:%(runtime_directory)s -v %(working_directory)s:%(working_directory)s --net=host {docker_image} /bin/bash -c "\"{command}\""
...@@ -351,10 +351,11 @@ class PipelineControl(OTDBBusListener): ...@@ -351,10 +351,11 @@ class PipelineControl(OTDBBusListener):
" -e LUSER=$UID" " -e LUSER=$UID"
" -v $HOME/.ssh:/home/lofar/.ssh:ro" " -v $HOME/.ssh:/home/lofar/.ssh:ro"
" -e SLURM_JOB_ID=$SLURM_JOB_ID" " -e SLURM_JOB_ID=$SLURM_JOB_ID"
" runPipeline.sh {obsid}" " runPipeline.sh {obsid} --config /opt/lofar/share/pipeline/pipeline.cfg.{cluster}"
.format( .format(
obsid = treeId, obsid = treeId,
tag = parset.dockerTag(), tag = parset.dockerTag(),
cluster = parset.processingCluster()
), ),
sbatch_params=sbatch_params sbatch_params=sbatch_params
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment