    rapthor.parset

    # This is an example parset listing all available options
    
    
    [global]
    # Full path to working dir where rapthor will run (required). All output will be
    # placed in this directory
    dir_working = /data/rapthor
    
    # Full path to input MS files (required). Wildcards can be used (e.g., /path/to/data/*.ms)
    # or a list can be given
    input_ms = /data/ms/*.ms
    
    # Automatically download a target sky model (default = True). This will have no effect
    # if input_skymodel is specified. The radius out to which a sky model should be
    # downloaded (default = 5 degrees) and the service from which it should be downloaded
    # (default = TGSS) can be specified, as can the option to overwrite an existing sky
    # model with the downloaded one.
    # download_initial_skymodel = True
    # download_initial_skymodel_radius = 5.0
    # download_initial_skymodel_server = TGSS
    # download_overwrite_skymodel = False
    
    # Full path to the input sky model file, with true-sky fluxes (required when not
    # automatically downloading). If you also have a sky model with apparent flux densities,
    # specify it with the apparent_skymodel option (note that the source names must be
    # identical in both sky models)
    # input_skymodel = /data/skymodel.txt
    # apparent_skymodel = /data/apparent_skymodel.txt
    
    # Regroup the input or downloaded sky model as needed to meet the target flux
    # (default = True). If False, the existing patches are used for the calibration
    # regroup_input_skymodel = True
    
    # Processing strategy to use (default = selfcal):
    # - selfcal: standard self calibration
    # - image: (not yet supported) image using the input solutions (no calibration is done)
    # - user-supplied file: full path to Python file defining custom strategy
    # strategy = selfcal
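    # For example, a custom strategy defined in a user-supplied Python file (hypothetical
    # path, for illustration only) could be given as:
    # strategy = /path/to/my_strategy.py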
    
    # Fraction of data to process (default = 0.2 for self calibration and 1.0 for the final pass).
    # If less than one, the input data are divided by time into chunks (each no shorter
    # than slow_timestep_separate_sec, set in the [calibration] section below) that sum
    # to the requested fraction, spaced out evenly over the full time range. A final
    # fraction can also be specified (default = selfcal_data_fraction) such that a final
    # processing pass (i.e., after selfcal finishes) is done with a different fraction
    # selfcal_data_fraction = 0.2
    # final_data_fraction = 1.0
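    # For example (illustrative numbers): for an 8-hour observation, selfcal_data_fraction = 0.2
    # selects roughly 1.6 hours of data in total, split into evenly spaced chunks of at
    # least slow_timestep_separate_sec (600 s by default) each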
    
    # Flagging ranges (default = no flagging). A range of times, baselines, and
    # frequencies to flag can be specified (see the DPPP documentation for details
    # of the syntax). By default, the ranges are AND-ed to produce the final flags,
    # but a set expression can be specified that controls how the selections are
    # combined
    # flag_abstime = [12-Mar-2010/11:31:00.0..12-Mar-2010/11:50:00.0]
    # flag_baseline = [CS013HBA*]
    # flag_freqrange = [125.2..126.4MHz]
    # flag_expr = flag_abstime and flag_baseline and flag_freqrange
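    # For example, to flag the full time range everywhere, but the selected baselines
    # only within the selected frequency range, an expression such as the following
    # (illustrative) could be used:
    # flag_expr = flag_abstime or (flag_baseline and flag_freqrange)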
    
    
    [calibration]
    # If one of the included sky models (see rapthor/skymodels) is within 2 * PB_FWHM of the
    # field center, include it in the calibration (default = False)
    # use_included_skymodels = False
    
    # General solver parameters (defaults shown):
    # llssolver = qr
    # maxiter = 150
    # propagatesolutions = True
    # solveralgorithm = hybrid
    # onebeamperpatch = False
    # stepsize = 0.02
    # tolerance = 5e-3
    # solve_min_uv_lambda = 350.0
    # parallelbaselines = False
    
    # Fast solve parameters (defaults shown):
    # fast_timestep_sec = 8.0
    # fast_freqstep_hz = 1e6
    # fast_smoothnessconstraint = 3e6
    # fast_smoothnessreffrequency = midpoint of frequency coverage
    # fast_smoothnessrefdistance = 0
    
    # Slow solve parameters (defaults shown):
    # slow_timestep_joint_sec = 0.0
    # slow_timestep_separate_sec = 600.0
    # slow_freqstep_hz = 1e6
    # slow_smoothnessconstraint_joint = 3e6
    # slow_smoothnessconstraint_separate = 3e6
    
    
    [imaging]
    # Imaging parameters: pixel size in arcsec (default = 1.25, suitable for HBA data), Briggs
    # robust parameter (default = -0.5), min and max uv distance in lambda (default = 0 =
    # no cut for either), taper in arcsec (default = none), and whether multiscale clean
    # should be used (default = True)
    # cellsize_arcsec = 1.25
    # robust = -0.5
    # min_uv_lambda = 0.0
    # max_uv_lambda = 0.0
    # taper_arcsec = 0.0
    # do_multiscale_clean = True
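    # For example, a lower-resolution, tapered imaging setup (illustrative values, not
    # the defaults) might look like:
    # cellsize_arcsec = 3.0
    # robust = 0.0
    # taper_arcsec = 10.0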
    
    # Method to use to correct for direction-dependent effects during imaging:
    # "none", "facets", or "screens" (default = facets). If "none", the solutions
    # closest to the image centers will be used. If "facets", Voronoi faceting is
    # used. If "screens", smooth 2-D screens are used; the type of screen to use can
    # be specified with screen_type: "tessellated" (simple, smoothed tessellated
    # screens) or "kl" (Karhunen-Loève screens) (default = kl)
    # dde_method = facets
    # screen_type = kl
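    # For example, to image with tessellated screens instead of the defaults
    # (illustrative):
    # dde_method = screens
    # screen_type = tessellated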
    
    # IDG (image domain gridder) mode to use in WSClean (default = cpu). The mode can be
    # "cpu" or "hybrid". Note that IDG is only used when dde_method = "none" or "screens"
    # idg_mode = cpu
    
    # Fraction of the total memory (per node) to use for WSClean jobs (default = 0.9 = 90%)
    # mem_fraction = 0.9
    
    # Use MPI to distribute WSClean jobs over multiple nodes (default = False)? If True and
    # more than one node can be allocated to each WSClean job (i.e., max_nodes / num_images
    # >= 2), then distributed imaging will be used (only available if batch_system = slurm).
    # Note that if MPI is activated, dir_local (under the [cluster] section below) must
    # not be set unless it is on a shared filesystem
    # use_mpi = False
    
    # Reweight the visibility data before imaging (default = False)
    # reweight = False
    
    # Size of area to image when using a grid (default = mean FWHM of the primary beam),
    # set with grid_center_ra, grid_center_dec, grid_width_ra_deg, and grid_width_dec_deg.
    # Number of sectors along RA to use in the imaging grid (default = 0). The number of
    # sectors in Dec will be determined automatically to ensure that the whole area
    # specified above is imaged. Set grid_nsectors_ra = 0 to force a single sector for
    # the full area. Multiple sectors are useful for parallelizing the imaging over
    # multiple nodes of a cluster or for computers with limited memory
    # grid_width_ra_deg = 5.0
    # grid_width_dec_deg = 7.0
    # grid_center_ra = 14h41m01.884
    # grid_center_dec = +35d30m31.52
    # grid_nsectors_ra = 3
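    # With the example values above, the 5.0 degree width in RA would be split into
    # 3 sectors of roughly 1.7 degrees each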
    
    # Instead of a grid, imaging sectors can be defined individually by specifying
    # their centers and widths. If sectors are specified in this way, they will be
    # used instead of the sector grid. Note that the sectors should not overlap
    # sector_center_ra_list = [14h41m01.884, 14h13m23.234]
    # sector_center_dec_list = [+35d30m31.52, +37d21m56.86]
    # sector_width_ra_deg_list = [0.532, 0.127]
    # sector_width_dec_deg_list = [0.532, 0.127]
    
    # Max desired peak flux density reduction at the center of the image edges due to
    # bandwidth smearing (at the mean frequency) and time smearing (default = 0.15 =
    # 15% reduction in peak flux). Higher values can result in shorter run times but
    # more smearing away from the sector centers
    # max_peak_smearing = 0.15
    
    
    [cluster]
    # Cluster batch system (default = single_machine). Use batch_system = slurm to
    # use a SLURM-based cluster
    # batch_system = single_machine
    
    # For batch_system = slurm, the maximum number of nodes of the cluster to use at
    # once can be specified with the max_nodes option (default = 12), and the number of
    # processors and amount of memory per node to request from SLURM can be specified
    # with the cpus_per_task and mem_per_node_gb options (default = 0 = all for both).
    # By setting cpus_per_task to the number of processors per node, one can ensure
    # that each task gets the entire node to itself, which is the recommended way of
    # running Rapthor
    # max_nodes = 12
    # cpus_per_task = 0
    # mem_per_node_gb = 0
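    # For example (illustrative cluster): on nodes with 24 cores and 192 GB of memory,
    # setting cpus_per_task = 24 reserves a whole node for each task, as recommended
    # above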
    
    # Maximum number of cores and threads per task to use on each node (default = 0 = all)
    # max_cores = 0
    # max_threads = 0
    
    # Number of threads for WSClean to use during deconvolution and parallel gridding
    # (default = 0 = 2/5 of max_threads). Higher values will speed up imaging at the
    # expense of higher memory usage
    # deconvolution_threads = 0
    # parallel_gridding_threads = 0
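    # For example, with max_threads = 20 (illustrative), the default of 0 corresponds
    # to 8 threads (2/5 of 20) for each of these two options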
    
    # Full path to a local disk on the nodes for IO-intensive processing (default =
    # not used). The path must exist on all nodes (but does not have to be on a
    # shared filesystem). This parameter is useful if you have a fast local disk
    # (e.g., an SSD) that is not the one used for dir_working. If this parameter is
    # not set, IO-intensive processing (e.g., WSClean) will use a default path in
    # dir_working instead. This parameter should not be set in the following
    # situations:
    #   - when batch_system = single_machine and multiple imaging sectors are
    #     used (as each sector will overwrite files from the other sectors)
    #   - when use_mpi = True under the [imaging] section above and dir_local is
    #     not on a shared filesystem
    # dir_local = /tmp
    
    # Run the pipelines inside a container (default = False)? If True, the pipeline
    # for each operation (such as calibrate or image) will be run inside a container.
    # The type of container can also be specified (one of docker, udocker, or
    # singularity; default = docker)
    # use_container = False
    # container_type = docker
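    # For example, to run each operation in a Singularity container (illustrative, not
    # the defaults):
    # use_container = True
    # container_type = singularity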
    
    # CWL runner to use. Currently supported runners are cwltool and toil (the default)
    # cwl_runner = toil
    
    # Debug workflow-related issues. Enabling this will require significantly more
    # disk space. The working directory will never be cleaned up, stdout and stderr
    # will not be redirected, and the log level of the CWL runner will be set to DEBUG.
    # debug_workflow = False