diff --git a/SAS/Scheduler/src/CMakeLists.txt b/SAS/Scheduler/src/CMakeLists.txt
index e82e50db8cb028dc0fe43acf712fec8e0530f820..5a1229fcca2445632d9611265c28f719c5d203bd 100644
--- a/SAS/Scheduler/src/CMakeLists.txt
+++ b/SAS/Scheduler/src/CMakeLists.txt
@@ -48,7 +48,6 @@ set(scheduler_SRCS
   pipeline.cpp
   publishdialog.cpp
   pulsarpipeline.cpp
-  qcustomplot.cpp
   qlofardatamodel.cpp
   redistributetasksdialog.cpp
   sasconnectdialog.cpp
@@ -74,7 +73,6 @@ set(scheduler_SRCS
   stationtreewidget.cpp
   Storage.cpp
   StorageNode.cpp
-  storageplot.cpp
   tablecolumnselectdialog.cpp
   tableview.cpp
   taskcopydialog.cpp
@@ -103,7 +101,6 @@ set(scheduler_MOC_HDRS
   ListWidget.h
   parsettreeviewer.h
   publishdialog.h
-  qcustomplot.h
   qlofardatamodel.h
   redistributetasksdialog.h
   sasconnectdialog.h
@@ -120,7 +117,6 @@ set(scheduler_MOC_HDRS
   statehistorydialog.h
   stationlistwidget.h
   stationtreewidget.h
-  storageplot.h
   tablecolumnselectdialog.h
   tableview.h
   taskcopydialog.h
@@ -147,7 +143,6 @@ set(scheduler_UIS
   statehistorydialog.ui
   stationlistwidget.ui
   stationtreewidget.ui
-  storageresourceview.ui
   tablecolumnselectdialog.ui
   taskcopydialog.ui
   taskdialog.ui
diff --git a/SAS/Scheduler/src/Controller.cpp b/SAS/Scheduler/src/Controller.cpp
index 635cbb692b93d03dd23c33b3f5382b1804d596da..5993c92f8105dbb6182719d466fc6ae5a6284e76 100644
--- a/SAS/Scheduler/src/Controller.cpp
+++ b/SAS/Scheduler/src/Controller.cpp
@@ -3,9 +3,6 @@
  *
  * Author         : Alwin de Jong
  * e-mail         : jong@astron.nl
- * Revision       : $Revision$
- * Last change by : $Author$
- * Change date	  : $Date$
  * First creation : 4-feb-2009
  * URL            : $URL: https://svn.astron.nl/ROD/trunk/LOFAR_Scheduler/Controller.cpp $
  *
@@ -53,8 +50,8 @@ SchedulerSettings Controller::theSchedulerSettings = SchedulerSettings();
 unsigned Controller::itsFileVersion = 0;
 
 Controller::Controller(QApplication &app) :
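+    // note: initialiser order changed, presumably to match the member declaration order (assumption; silences compiler reorder warnings)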
-    application(&app) , gui(0), itsSettingsDialog(0),
-    possiblySaveMessageBox(0),itsConflictDialog(0)
+    possiblySaveMessageBox(0), application(&app), gui(0),
+    itsSettingsDialog(0), itsConflictDialog(0)
 {
     itsAutoPublishAllowed = currentUser == "lofarsys" ? true : false;
 #if defined Q_OS_WINDOWS || _DEBUG_
@@ -1305,7 +1302,7 @@ void Controller::applyTableItemChange(unsigned taskID, data_headers property, co
 		QString newstatus(value.toString());
 		if ((newstatus == task_states_str[Task::PRESCHEDULED]) || (newstatus == task_states_str[Task::SCHEDULED])) {
 			std::pair<unscheduled_reasons, QString> errCode = doPreScheduleChecks(pTask);
-			if ((errCode.first == BEAM_DURATION_DIFFERENT)) {
+            if (errCode.first == BEAM_DURATION_DIFFERENT) {
 				if (QMessageBox::question(gui, tr("Beam duration different"),
 						errCode.second.replace('$','\n') + "\nDo you still want to set the task to " + newstatus + "?",
 						QMessageBox::Yes | QMessageBox::No, QMessageBox::Yes) == QMessageBox::No) {
@@ -3321,6 +3318,11 @@ std::pair<unscheduled_reasons, QString> Controller::doPreScheduleChecks(Task *ta
             }
         }
 
+        // TODO: setInputFilesForPipeline should probably not be called here; only set the input files when a task is downloaded from SAS or when it has just been loaded from disk.
+        // Calling setInputFilesForPipeline here also resets the enabled flags (user selection), which is a bug and should not happen.
+        error = setInputFilesForPipeline(pPipe);
+
+        // WK: code commented out
 //        if (pPipe->isCalibrationPipeline() &&
 //            !task->storage()->getEqualityInputOutputProducts())
 //        {
@@ -3339,13 +3341,13 @@ std::pair<unscheduled_reasons, QString> Controller::doPreScheduleChecks(Task *ta
             task->storage()->generateFileList();
         }
 	}
-//    // Check here if the input output locations are the same
-//    // Check added due to #8174
+    // Check here whether the input and output locations are the same
+    // Check added due to #8174
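+    // (the corresponding check below is still commented out)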
 //    if (task->isPipeline())
 //    {
 //        // TODO: This is incredibly ugly!!!
 //        Pipeline *pipeline = dynamic_cast<Pipeline *>(task);
-
+//
 //        if (pipeline->isCalibrationPipeline() &&
 //            !task->storage()->getEqualityInputOutputProducts())
 //        {
@@ -3849,7 +3851,7 @@ void Controller::copyTask(unsigned int taskID) {
                 }
                 if (newState == Task::PRESCHEDULED) {
                     std::pair<unscheduled_reasons, QString> errCode(doPreScheduleChecks(newTask));
-                    if ((errCode.first == BEAM_DURATION_DIFFERENT)) {
+                    if (errCode.first == BEAM_DURATION_DIFFERENT) {
                         if (QMessageBox::question(gui, tr("Beam duration different"),
                                                   errCode.second.replace('$','\n') + "\nDo you want to maximize the beam durations (Recommended)",
                                                   QMessageBox::Yes | QMessageBox::No, QMessageBox::Yes) == QMessageBox::Yes) {
@@ -4093,23 +4095,21 @@ bool Controller::doScheduleChecks(Task *pTask) {
 		return false;
 	}
 
+// WK: code commented out
 //    if (pTask->isPipeline())
 //    {
 //        // TODO: This is incredibly ugly!!!
 //        Pipeline *pipeline = dynamic_cast<Pipeline *>(pTask);
-
 //        if (pipeline->isCalibrationPipeline() &&
 //            !pTask->storage()->getEqualityInputOutputProducts())
 //        {
 //            QMessageBox::warning(gui,
 //              tr("Error during scheduling")
-//                     ,"Task input and output are different, #8174, LOC3. Retry assigning resources");
+//                     ,tr("Task input and output are different, #8174, LOC3. Retry assigning resources"));
 //            return false;
-
 //        }
 //    }
 
-
 	return true;
 }
 
@@ -4243,7 +4243,7 @@ void Controller::scheduleTask(unsigned taskID, Task::task_status new_status) {
                 gui->updateTask(pTask);
 				return;
 			}
-			if ((errCode.first == BEAM_DURATION_DIFFERENT)) {
+            if (errCode.first == BEAM_DURATION_DIFFERENT) {
 				if (QMessageBox::question(gui, tr("Beam duration different"),
 						errCode.second.replace('$','\n') + "\nDo you still want to set the task to PRESCHEDULED?",
 						QMessageBox::Yes | QMessageBox::No, QMessageBox::Yes) == QMessageBox::No) {
@@ -4390,7 +4390,7 @@ void Controller::scheduleSelectedTasks(Task::task_status new_status) {
 				if (errCode.first == NO_ERROR) {
 					apply_prescheduled = true;
 				}
-				else if ((errCode.first == BEAM_DURATION_DIFFERENT)) {
+                else if (errCode.first == BEAM_DURATION_DIFFERENT) {
 					if (apply_all_beam_duration_diffs) apply_prescheduled = true;
 					else {
 						int choice(QMessageBox::question(gui, tr("Beam duration different"),
@@ -4943,10 +4943,12 @@ bool Controller::assignStorageToTask(Task *pTask) {
                 }
 
                 if (dpit->second == -1) {  // the bandwidth required for a single file of this dataproduct exceeds the single storage node network bandwidth
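+                    // the conflict below is now skipped for CEP4 output (presumably because CEP4 storage is assigned outside this scheduler)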
-                    pTask->setConflict(CONFLICT_STORAGE_SINGLE_FILE_BW_TOO_HIGH);
-                    itsConflictDialog->addStorageConflict(pTask, dpit->first, CONFLICT_STORAGE_SINGLE_FILE_BW_TOO_HIGH);
-                    bResult = false;
-                    break;
+                    if (pTask->getOutputDataproductCluster() != "CEP4") {
+                        pTask->setConflict(CONFLICT_STORAGE_SINGLE_FILE_BW_TOO_HIGH);
+                        itsConflictDialog->addStorageConflict(pTask, dpit->first, CONFLICT_STORAGE_SINGLE_FILE_BW_TOO_HIGH);
+                        bResult = false;
+                        break;
+                    }
                 }
                 // get number of available nodes
                 unsigned nrOfAvailableNodes(Controller::theSchedulerSettings.getNrOfStorageNodesAvailable());
@@ -4967,41 +4969,45 @@ bool Controller::assignStorageToTask(Task *pTask) {
                     nrFilesPerNode = minNrFilesPerNode;
                     singleFileBW = dfit->second.first / (double) pTask->getDuration().totalSeconds() * 8; // kbit/sec
 
-			// calculate the mininum number of files that have to fit on one storage node
-			if (nrOfAvailableNodes >= minNrOfNodes) {
-				if (minNrFilesPerNode <= maxNrFilesPerNode) {
-					while (!found_sufficient_nodes) {
-						preferred_locations = data.getStorageLocationOptions(dpit->first, pTask->getScheduledStart(), pTask->getScheduledEnd(), dfit->second.first, singleFileBW, nrFilesPerNode, sort_mode, preferred_nodes);
-						if (preferred_locations.size() <= minNrOfNodes) {
-							extra_locations = data.getStorageLocationOptions(dpit->first, pTask->getScheduledStart(), pTask->getScheduledEnd(), dfit->second.first, singleFileBW, nrFilesPerNode, sort_mode, extra_nodes);
-                            if (((preferred_locations.size() + extra_locations.size()) * nrFilesPerNode >= nrFiles) && (preferred_locations.size() + extra_locations.size() >= minNrOfNodes)) {
-								found_sufficient_nodes = true;
-								break;
-							}
-						}
-						else {
-							if (preferred_locations.size() * nrFilesPerNode >= nrFiles) {
-								found_sufficient_nodes = true;
-								break;
-							}
-						}
-						if (++nrFilesPerNode > maxNrFilesPerNode) { // nr files per node too high will exceed bandwidth to node
-							break;
-						}
-					}
+                    // calculate the minimum number of files that have to fit on one storage node
+                    if (nrOfAvailableNodes >= minNrOfNodes) {
+                        if (minNrFilesPerNode <= maxNrFilesPerNode) {
+                            while (!found_sufficient_nodes) {
+                                preferred_locations = data.getStorageLocationOptions(dpit->first, pTask->getScheduledStart(), pTask->getScheduledEnd(), dfit->second.first, singleFileBW, nrFilesPerNode, sort_mode, preferred_nodes);
+                                if (preferred_locations.size() <= minNrOfNodes) {
+                                    extra_locations = data.getStorageLocationOptions(dpit->first, pTask->getScheduledStart(), pTask->getScheduledEnd(), dfit->second.first, singleFileBW, nrFilesPerNode, sort_mode, extra_nodes);
+                                    if (((preferred_locations.size() + extra_locations.size()) * nrFilesPerNode >= nrFiles) && (preferred_locations.size() + extra_locations.size() >= minNrOfNodes)) {
+                                        found_sufficient_nodes = true;
+                                        break;
+                                    }
+                                }
+                                else {
+                                    if (preferred_locations.size() * nrFilesPerNode >= nrFiles) {
+                                        found_sufficient_nodes = true;
+                                        break;
+                                    }
+                                }
+                                if (++nrFilesPerNode > maxNrFilesPerNode) { // nr files per node too high will exceed bandwidth to node
+                                    break;
+                                }
+                            }
                             if (!found_sufficient_nodes) {
-                                taskStorage->setStorageCheckResult(data.getLastStorageCheckResult());
-                                pTask->setConflict(CONFLICT_STORAGE_NO_OPTIONS);
-                                itsConflictDialog->addStorageConflict(pTask, dpit->first, CONFLICT_STORAGE_NO_OPTIONS);
-                                bResult = false;
-                                break;
+                                if (pTask->getOutputDataproductCluster() != "CEP4") {
+                                    taskStorage->setStorageCheckResult(data.getLastStorageCheckResult());
+                                    pTask->setConflict(CONFLICT_STORAGE_NO_OPTIONS);
+                                    itsConflictDialog->addStorageConflict(pTask, dpit->first, CONFLICT_STORAGE_NO_OPTIONS);
+                                    bResult = false;
+                                    break;
+                                }
                             }
                         }
                         else {
-                            pTask->setConflict(CONFLICT_STORAGE_TOO_FEW_NODES);
-                            itsConflictDialog->addStorageConflict(pTask, dpit->first, CONFLICT_STORAGE_TOO_FEW_NODES);
-                            bResult = false;
-                            break;
+                            if (pTask->getOutputDataproductCluster() != "CEP4") {
+                                pTask->setConflict(CONFLICT_STORAGE_TOO_FEW_NODES);
+                                itsConflictDialog->addStorageConflict(pTask, dpit->first, CONFLICT_STORAGE_TOO_FEW_NODES);
+                                bResult = false;
+                                break;
+                            }
                         }
 
                         if (bResult) {
@@ -5097,10 +5103,12 @@ bool Controller::assignStorageToTask(Task *pTask) {
                         }
                     }
                     else {
-                        pTask->setConflict(CONFLICT_STORAGE_TOO_FEW_NODES);
-                        itsConflictDialog->addStorageConflict(pTask, dpit->first, CONFLICT_STORAGE_TOO_FEW_NODES);
-                        bResult = false;
-                        break;
+                        if (pTask->getOutputDataproductCluster() != "CEP4") {
+                            pTask->setConflict(CONFLICT_STORAGE_TOO_FEW_NODES);
+                            itsConflictDialog->addStorageConflict(pTask, dpit->first, CONFLICT_STORAGE_TOO_FEW_NODES);
+                            bResult = false;
+                            break;
+                        }
                     }
                 }
             }
@@ -5110,15 +5118,15 @@ bool Controller::assignStorageToTask(Task *pTask) {
 }
 
 bool Controller::assignGroupedStorage(void) { // not for manual assignment of storage
-	bool bResult(true);
-	std::map<unsigned, std::vector<Task *> > groupedTasks = data.getGroupedTasks(Task::PRESCHEDULED);
+    bool bResult(true);
+    std::map<unsigned, std::vector<Task *> > groupedTasks = data.getGroupedTasks(Task::PRESCHEDULED);
 
-	std::vector<unsigned> emptyGroups;
-	if (!groupedTasks.empty()) {
-		std::vector<Task *> subGroupTasks;
+    std::vector<unsigned> emptyGroups;
+    if (!groupedTasks.empty()) {
+        std::vector<Task *> subGroupTasks;
         TaskStorage *tStorage;
-		for (std::map<unsigned, std::vector<Task *> >::iterator groupIt = groupedTasks.begin(); groupIt != groupedTasks.end(); ++groupIt) {
-			for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) {
+        for (std::map<unsigned, std::vector<Task *> >::iterator groupIt = groupedTasks.begin(); groupIt != groupedTasks.end(); ++groupIt) {
+            for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) {
                 if ((*taskit)->hasStorage()) {
                     tStorage = (*taskit)->storage();
                     if (tStorage->getStorageSelectionMode() != STORAGE_MODE_MANUAL) { // don't assign grouped storage to tasks that have manual storage assignment
@@ -5130,454 +5138,464 @@ bool Controller::assignGroupedStorage(void) { // not for manual assignment of st
                     }
                 }
             }
-			if (subGroupTasks.empty()) {
-				emptyGroups.push_back(groupIt->first);
-			}
-			else {
-				groupIt->second = subGroupTasks; // removes all 'manual storage' tasks
-				subGroupTasks.clear();
-			}
-		}
-		// remove groups that have no tasks with automatic storage assignment left
-		for (std::vector<unsigned>::const_iterator eit = emptyGroups.begin(); eit != emptyGroups.end(); ++eit) {
-			groupedTasks.erase(*eit);
-		}
-		if (groupedTasks.empty()) return bResult; // if nothing left return
-
-	// get number of available storage nodes
-	unsigned nrOfAvailableNodes(Controller::theSchedulerSettings.getNrOfStorageNodesAvailable());
-	if (nrOfAvailableNodes > 0) {
-		sortMode sort_mode;
-
-		// the distribution algorithm used
-		const storageNodeDistribution &distribution(theSchedulerSettings.getStorageDistribution());
-		if (distribution == STORAGE_DISTRIBUTION_FLAT_USAGE) {
-			sort_mode = SORT_USAGE;
-		}
-		else if (distribution == STORAGE_DISTIBUTION_LEAST_FRAGMENTED) {
-			sort_mode = SORT_SIZE;
-		}
-		else sort_mode = SORT_NONE;
-
-		const preferredDataProductStorageMap &pdps(theSchedulerSettings.getPreferredDataProductStorage());
-		const preferredProjectStorageMap &pps(theSchedulerSettings.getPreferredProjectStorage());
-		std::vector<int> extra_nodes, usable_storage_nodes(itsDMConnection->getUsableStorageNodeIDs()); // all existing storage node IDs
+            if (subGroupTasks.empty()) {
+                emptyGroups.push_back(groupIt->first);
+            }
+            else {
+                groupIt->second = subGroupTasks; // removes all 'manual storage' tasks
+                subGroupTasks.clear();
+            }
+        }
+        // remove groups that have no tasks with automatic storage assignment left
+        for (std::vector<unsigned>::const_iterator eit = emptyGroups.begin(); eit != emptyGroups.end(); ++eit) {
+            groupedTasks.erase(*eit);
+        }
+        if (groupedTasks.empty()) return bResult; // if nothing left return
 
+        // get number of available storage nodes
+        unsigned nrOfAvailableNodes(Controller::theSchedulerSettings.getNrOfStorageNodesAvailable());
+        if (nrOfAvailableNodes > 0) {
+            sortMode sort_mode;
 
-	//determine grouped tasks combined storage needs and settings
-	dataFileMap combinedOutput;
-    storage_selection_mode mode;
-	std::map<dataProductTypes, int> combinedMinimumNrNodes;
-	std::map<dataProductTypes, double> maxSingleFileBW;
-	std::map<dataProductTypes, double>::iterator bwit;
-	dataFileMap::iterator dfit;
-	std::map<dataProductTypes, int>::iterator mit;
-	int project_id(0);
-	std::pair<unscheduled_reasons, QString> error;
-	for (std::map<unsigned, std::vector<Task *> >::const_iterator groupIt = groupedTasks.begin(); groupIt != groupedTasks.end(); ++groupIt) { // 2
-		combinedOutput.clear();
-		combinedMinimumNrNodes.clear();
-		if (!groupIt->second.empty()) {
-            mode = groupIt->second.front()->storage()->getStorageSelectionMode();
-			project_id = theSchedulerSettings.getCampaignInfo(groupIt->second.front()->getProjectName()).id; // should be the same for all tasks within group (no check done)
-			for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) { // 3
-                TaskStorage *task_storage((*taskit)->storage());
-				if ((*taskit)->isPipeline()) {
-                    Pipeline *pipe(static_cast<Pipeline *>(*taskit));
-                    error = setInputFilesForPipeline(pipe);
-					if (error.first == NO_ERROR) {
-                        pipe->calculateDataFiles();
-                        const storageMap &inputStorageLocations(task_storage->getInputStorageLocations());
-						storageMap::const_iterator inpcorit = inputStorageLocations.find(DP_CORRELATED_UV);
-						if (inpcorit != inputStorageLocations.end()) {
-                            if (task_storage->isOutputDataProduktEnabled(DP_INSTRUMENT_MODEL)) { // set output storage nodes equal to input storage nodes for these type of data products
-                                std::vector<storageResult> result = data.addStorageToTask(pipe, DP_INSTRUMENT_MODEL, inpcorit->second, false);
-								if (!result.empty()) {
-									for (std::vector<storageResult>::const_iterator sit = result.begin(); sit != result.end(); ++sit) {
-										if (sit->conflict != CONFLICT_NO_CONFLICT) {
-                                            itsConflictDialog->addStorageConflict(pipe, sit->dataProductType, sit->conflict);
-										}
-                                        pipe->setConflict(sit->conflict);
-									}
-									bResult = false;
-								}
-							}
-                            if (task_storage->isOutputDataProduktEnabled(DP_CORRELATED_UV)) {
-
-								// inpcorit->second bevat alle storage nodes, dus ook die van de eerdere SAPs waardoor
-								// dit dataprodukt de storage nodes van SAP000 van de input krijgt en niet die van SAP001
-                                std::vector<storageResult> result = data.addStorageToTask(pipe, DP_CORRELATED_UV, inpcorit->second, false);
-								if (!result.empty()) {
-									for (std::vector<storageResult>::const_iterator sit = result.begin(); sit != result.end(); ++sit) {
-										if (sit->conflict != CONFLICT_NO_CONFLICT) {
-                                            itsConflictDialog->addStorageConflict(pipe, sit->dataProductType, sit->conflict);
-										}
-                                        pipe->setConflict(sit->conflict);
-									}
-									bResult = false;
-								}
-							}
+            // the distribution algorithm used
+            const storageNodeDistribution &distribution(theSchedulerSettings.getStorageDistribution());
+            if (distribution == STORAGE_DISTRIBUTION_FLAT_USAGE) {
+                sort_mode = SORT_USAGE;
+            }
+            else if (distribution == STORAGE_DISTIBUTION_LEAST_FRAGMENTED) {
+                sort_mode = SORT_SIZE;
+            }
+            else sort_mode = SORT_NONE;
 
-                            task_storage->generateFileList();
-						}
-					}
-				}
+            const preferredDataProductStorageMap &pdps(theSchedulerSettings.getPreferredDataProductStorage());
+            const preferredProjectStorageMap &pps(theSchedulerSettings.getPreferredProjectStorage());
+            std::vector<int> extra_nodes, usable_storage_nodes(itsDMConnection->getUsableStorageNodeIDs()); // all existing storage node IDs
 
-				double taskDuration((*taskit)->getDuration().totalSeconds() * 8);
-                const dataFileMap &dataFileSizes = task_storage->getOutputFileSizes();
-                if (task_storage->getStorageSelectionMode() != mode) {
-					itsConflictDialog->addConflict(*taskit, CONFLICT_GROUP_STORAGE_MODE_DIFFERENT);
-					bResult = false;
-				}
-				if (!dataFileSizes.empty()) {
-					// summing individual file size for same data product types
-					for (dataFileMap::const_iterator dit = dataFileSizes.begin(); dit != dataFileSizes.end(); ++dit) { // 4
-						if ((*taskit)->isObservation() || ((dit->first != DP_INSTRUMENT_MODEL) && (dit->first != DP_CORRELATED_UV))) { // skip this for instrument model and correlated, which is already set above
-							dfit = combinedOutput.find(dit->first);
-							if (dfit != combinedOutput.end()) {
-								dfit->second.first += dit->second.first;
 
-							}
-							else {
-								combinedOutput[dit->first] = std::pair<double, unsigned>(dit->second.first, dit->second.second);
-							}
-							//determine the maximum single file bandwidth for each data product type in the group of tasks
-							double currentBW = dit->second.first / taskDuration; // kbit/sec
-							bwit = maxSingleFileBW.find(dit->first);
-							if (bwit != maxSingleFileBW.end()) {
-								bwit->second = std::max(bwit->second, currentBW);
-							}
-							else {
-                                maxSingleFileBW[dit->first] = currentBW;
-							}
-						}
-					}
-					// determining the overall minimum number of nodes needed for each data product type
-                    std::map<dataProductTypes, int> minimumNrnodes = task_storage->getMinimumNrOfStorageNodes();
-					for (std::map<dataProductTypes, int>::const_iterator minit = minimumNrnodes.begin(); minit != minimumNrnodes.end(); ++minit) {
-						if ((*taskit)->isObservation() || ((minit->first != DP_INSTRUMENT_MODEL) && (minit->first != DP_CORRELATED_UV))) { // skip for instrument model and correlated,they have been set
-							if (minit->second == -1) {
-								(*taskit)->setConflict(CONFLICT_STORAGE_SINGLE_FILE_BW_TOO_HIGH);
-								itsConflictDialog->addStorageConflict((*taskit), minit->first, CONFLICT_STORAGE_SINGLE_FILE_BW_TOO_HIGH);
-								bResult = false;
-							}
-							else {
-								mit = combinedMinimumNrNodes.find(minit->first);
-								if (mit != combinedMinimumNrNodes.end()) {
-									mit->second = std::max(mit->second, minit->second);
-								}
-								else {
-									combinedMinimumNrNodes[minit->first] = minit->second;
-								}
-							}
-						}
-					}
-				}
-				else {
-					itsConflictDialog->addConflict(*taskit, CONFLICT_STORAGE_NO_DATA);
-					bResult = false;
-				}
-			} // END 3
-		}
-		else {
-			std::cout << "Controller::assignGroupedStorage: Warning: trying to assign storage to group:" << groupIt->first << " in which there are no tasks" << std::endl;
-			continue;
-		}
+            //determine grouped tasks combined storage needs and settings
+            dataFileMap combinedOutput;
+            storage_selection_mode mode;
+            std::map<dataProductTypes, int> combinedMinimumNrNodes;
+            std::map<dataProductTypes, double> maxSingleFileBW;
+            std::map<dataProductTypes, double>::iterator bwit;
+            dataFileMap::iterator dfit;
+            std::map<dataProductTypes, int>::iterator mit;
+            int project_id(0);
+            std::pair<unscheduled_reasons, QString> error;
+            for (std::map<unsigned, std::vector<Task *> >::const_iterator groupIt = groupedTasks.begin(); groupIt != groupedTasks.end(); ++groupIt) { // 2
+                combinedOutput.clear();
+                combinedMinimumNrNodes.clear();
+                if (!groupIt->second.empty()) {
+                    mode = groupIt->second.front()->storage()->getStorageSelectionMode();
+                    project_id = theSchedulerSettings.getCampaignInfo(groupIt->second.front()->getProjectName()).id; // should be the same for all tasks within group (no check done)
+                    for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) { // 3
+                        TaskStorage *task_storage((*taskit)->storage());
+                        if ((*taskit)->isPipeline()) {
+                            Pipeline *pipe(static_cast<Pipeline *>(*taskit));
+                            error = setInputFilesForPipeline(pipe);
+                            if (error.first == NO_ERROR) {
+                                pipe->calculateDataFiles();
+                                const storageMap &inputStorageLocations(task_storage->getInputStorageLocations());
+                                storageMap::const_iterator inpcorit = inputStorageLocations.find(DP_CORRELATED_UV);
+                                if (inpcorit != inputStorageLocations.end()) {
+                                    if (task_storage->isOutputDataProduktEnabled(DP_INSTRUMENT_MODEL)) { // set output storage nodes equal to input storage nodes for these types of data products
+                                        std::vector<storageResult> result = data.addStorageToTask(pipe, DP_INSTRUMENT_MODEL, inpcorit->second, false);
+                                        if (!result.empty()) {
+                                            for (std::vector<storageResult>::const_iterator sit = result.begin(); sit != result.end(); ++sit) {
+                                                if (sit->conflict != CONFLICT_NO_CONFLICT) {
+                                                    itsConflictDialog->addStorageConflict(pipe, sit->dataProductType, sit->conflict);
+                                                }
+                                                pipe->setConflict(sit->conflict);
+                                            }
+                                            bResult = false;
+                                        }
+                                    }
+                                    if (task_storage->isOutputDataProduktEnabled(DP_CORRELATED_UV)) {
+
+                                        // inpcorit->second contains all storage nodes, including those of the earlier SAPs, which means
+                                        // this data product gets the storage nodes of SAP000 of the input instead of those of SAP001
+                                        std::vector<storageResult> result = data.addStorageToTask(pipe, DP_CORRELATED_UV, inpcorit->second, false);
+                                        if (!result.empty()) {
+                                            for (std::vector<storageResult>::const_iterator sit = result.begin(); sit != result.end(); ++sit) {
+                                                if (sit->conflict != CONFLICT_NO_CONFLICT) {
+                                                    itsConflictDialog->addStorageConflict(pipe, sit->dataProductType, sit->conflict);
+                                                }
+                                                pipe->setConflict(sit->conflict);
+                                            }
+                                            bResult = false;
+                                        }
+                                    }
 
-	if (bResult) { // only do the actual group storage assignment if no conflicts are found
+                                    task_storage->generateFileList();
+                                }
+                            }
+                        }
 
-	if (!combinedOutput.empty()) {
+                        double taskDuration((*taskit)->getDuration().totalSeconds() * 8);
+                        const dataFileMap &dataFileSizes = task_storage->getOutputFileSizes();
+                        if (task_storage->getStorageSelectionMode() != mode) {
+                            itsConflictDialog->addConflict(*taskit, CONFLICT_GROUP_STORAGE_MODE_DIFFERENT);
+                            bResult = false;
+                        }
+                        if (!dataFileSizes.empty()) {
+                            // summing individual file size for same data product types
+                            for (dataFileMap::const_iterator dit = dataFileSizes.begin(); dit != dataFileSizes.end(); ++dit) { // 4
+                                if ((*taskit)->isObservation() || ((dit->first != DP_INSTRUMENT_MODEL) && (dit->first != DP_CORRELATED_UV))) { // skip this for instrument model and correlated, which are already set above
+                                    dfit = combinedOutput.find(dit->first);
+                                    if (dfit != combinedOutput.end()) {
+                                        dfit->second.first += dit->second.first;
 
-		// storage node selection by preferred project nodes?
-		//	storage_selection_mode mode(pTask->getStorageSelectionMode());
-		bool project_preferred_nodes;
-		if ((mode == STORAGE_MODE_MAXIMUM_PROJECT_PREFERRED) || (mode == STORAGE_MODE_MINIMUM_PROJECT_PREFERRED)) project_preferred_nodes = true;
-		else project_preferred_nodes = false;
+                                    }
+                                    else {
+                                        combinedOutput[dit->first] = std::pair<double, unsigned>(dit->second.first, dit->second.second);
+                                    }
+                                    //determine the maximum single file bandwidth for each data product type in the group of tasks
+                                    double currentBW = dit->second.first / taskDuration; // kbit/sec
+                                    bwit = maxSingleFileBW.find(dit->first);
+                                    if (bwit != maxSingleFileBW.end()) {
+                                        bwit->second = std::max(bwit->second, currentBW);
+                                    }
+                                    else {
+                                        maxSingleFileBW[dit->first] = currentBW;
+                                    }
+                                }
+                            }
+                            // determining the overall minimum number of nodes needed for each data product type
+                            std::map<dataProductTypes, int> minimumNrnodes = task_storage->getMinimumNrOfStorageNodes();
+                            for (std::map<dataProductTypes, int>::const_iterator minit = minimumNrnodes.begin(); minit != minimumNrnodes.end(); ++minit) {
+                                if ((*taskit)->isObservation() || ((minit->first != DP_INSTRUMENT_MODEL) && (minit->first != DP_CORRELATED_UV))) { // skip for instrument model and correlated, they have already been set
+                                    if (minit->second == -1) {
+                                        if ((*taskit)->getOutputDataproductCluster() != "CEP4") {
+                                            (*taskit)->setConflict(CONFLICT_STORAGE_SINGLE_FILE_BW_TOO_HIGH);
+                                            itsConflictDialog->addStorageConflict((*taskit), minit->first, CONFLICT_STORAGE_SINGLE_FILE_BW_TOO_HIGH);
+                                            bResult = false;
+                                        }
+                                    }
+                                    else {
+                                        mit = combinedMinimumNrNodes.find(minit->first);
+                                        if (mit != combinedMinimumNrNodes.end()) {
+                                            mit->second = std::max(mit->second, minit->second);
+                                        }
+                                        else {
+                                            combinedMinimumNrNodes[minit->first] = minit->second;
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                        else {
+                            itsConflictDialog->addConflict(*taskit, CONFLICT_STORAGE_NO_DATA);
+                            bResult = false;
+                        }
+                    } // END 3
+                }
+                else {
+                    std::cout << "Controller::assignGroupedStorage: Warning: trying to assign storage to group:" << groupIt->first << " in which there are no tasks" << std::endl;
+                    continue;
+                }
 
+                if (bResult) { // only do the actual group storage assignment if no conflicts are found
 
-//	double singleFileBW;
-	unsigned nrFiles(0), minNrOfNodes(0), nrFilesPerNode(0);
+                    if (!combinedOutput.empty()) {
 
-	std::vector<int> preferred_nodes;// = emptyVec;
+                        // storage node selection by preferred project nodes?
+                        //	storage_selection_mode mode(pTask->getStorageSelectionMode());
+                        bool project_preferred_nodes;
+                        if ((mode == STORAGE_MODE_MAXIMUM_PROJECT_PREFERRED) || (mode == STORAGE_MODE_MINIMUM_PROJECT_PREFERRED)) project_preferred_nodes = true;
+                        else project_preferred_nodes = false;
 
 
-	// ***********************************************************************
-	// ******************* SEARCH FOR SUITABLE LOCATIONS *********************
-	// ***********************************************************************
+                        //	double singleFileBW;
+                        unsigned nrFiles(0), minNrOfNodes(0), nrFilesPerNode(0);
 
-	// STEP2: search storageLocations for all dataProducts in sequence according to individual file size (large to small) of the dataProduct (i.e. sequence of sortedDataFiles)
+                        std::vector<int> preferred_nodes;// = emptyVec;
 
-	preferredDataProductStorageMap::const_iterator pnit;
-	preferredProjectStorageMap::const_iterator ppit;
 
-	if (project_preferred_nodes) {
-		ppit = pps.find(project_id);
-		if (ppit != pps.end()) {
-			if (ppit->second.empty()) {
-				preferred_nodes = usable_storage_nodes;
-			}
-			else {
-				preferred_nodes = ppit->second;
-				for (std::vector<int>::const_iterator asit = usable_storage_nodes.begin(); asit != usable_storage_nodes.end(); ++asit) {
-					if (std::find(preferred_nodes.begin(), preferred_nodes.end(), *asit) == preferred_nodes.end()) {
-						extra_nodes.push_back(*asit);
-					}
-				}
-			}
-		}
-		else {
-			preferred_nodes = usable_storage_nodes;
-		}
-	}
+                        // ***********************************************************************
+                        // ******************* SEARCH FOR SUITABLE LOCATIONS *********************
+                        // ***********************************************************************
 
-	storageLocationOptions preferred_locations, extra_locations, common_pref_locations, common_extra_locations;
-	for (std::map<dataProductTypes, int>::const_iterator dpit = combinedMinimumNrNodes.begin(); dpit != combinedMinimumNrNodes.end(); ++dpit) { // 5
-		for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) { //6
+                        // STEP2: search storageLocations for all dataProducts in sequence according to individual file size (large to small) of the dataProduct (i.e. sequence of sortedDataFiles)
 
-		if (!project_preferred_nodes) {
-			pnit = pdps.find(dpit->first); // are there preferred storage nodes for this data product type specified?
-			if (pnit != pdps.end()) {
-				if (pnit->second.empty()) {
-					preferred_nodes = usable_storage_nodes;
-				}
-				else {
-					preferred_nodes = pnit->second;
-					for (std::vector<int>::const_iterator asit = usable_storage_nodes.begin(); asit != usable_storage_nodes.end(); ++asit) {
-						if (std::find(preferred_nodes.begin(), preferred_nodes.end(), *asit) == preferred_nodes.end()) {
-							extra_nodes.push_back(*asit);
-						}
-					}
-				}
-			}
-			else {
-				preferred_nodes = usable_storage_nodes;
-			}
-		}
+                        preferredDataProductStorageMap::const_iterator pnit;
+                        preferredProjectStorageMap::const_iterator ppit;
 
-		// now search for storage locations using the combined storage requirements (only search ones, not for all tasks separately,
-		// final check will be done when really assigning the storage to each grouped task
-		minNrOfNodes = dpit->second; // the minimum number of storage nodes REQUIRED for this data product
-		dfit = combinedOutput.find(dpit->first);
-		bwit = maxSingleFileBW.find(dpit->first);
-		if (dfit != combinedOutput.end()) {
-			nrFiles = dfit->second.second;
-			unsigned minNrFilesPerNode = static_cast<unsigned>(ceil((float)nrFiles / nrOfAvailableNodes)); // the minimum number of files the selected nodes should be able to hold
-			unsigned maxNrFilesPerNode = static_cast<unsigned>(floor((float)nrFiles / minNrOfNodes)); // the maximum number of files that can be written to one storage node according to bandwidth limitations
-			nrFilesPerNode = minNrFilesPerNode;
+                        if (project_preferred_nodes) {
+                            ppit = pps.find(project_id);
+                            if (ppit != pps.end()) {
+                                if (ppit->second.empty()) {
+                                    preferred_nodes = usable_storage_nodes;
+                                }
+                                else {
+                                    preferred_nodes = ppit->second;
+                                    for (std::vector<int>::const_iterator asit = usable_storage_nodes.begin(); asit != usable_storage_nodes.end(); ++asit) {
+                                        if (std::find(preferred_nodes.begin(), preferred_nodes.end(), *asit) == preferred_nodes.end()) {
+                                            extra_nodes.push_back(*asit);
+                                        }
+                                    }
+                                }
+                            }
+                            else {
+                                preferred_nodes = usable_storage_nodes;
+                            }
+                        }
 
-			// calculate the minimum number of files that have to fit on one storage node
-			if (nrOfAvailableNodes >= minNrOfNodes) {
-				if (minNrFilesPerNode <= maxNrFilesPerNode) {
-					preferred_locations = data.getStorageLocationOptions(dpit->first, (*taskit)->getScheduledStart(), (*taskit)->getScheduledEnd(), dfit->second.first, bwit->second, nrFilesPerNode, sort_mode, preferred_nodes);
-					extra_locations = data.getStorageLocationOptions(dpit->first, (*taskit)->getScheduledStart(), (*taskit)->getScheduledEnd(), dfit->second.first, bwit->second, nrFilesPerNode, sort_mode, extra_nodes);
+                        storageLocationOptions preferred_locations, extra_locations, common_pref_locations, common_extra_locations;
+                        for (std::map<dataProductTypes, int>::const_iterator dpit = combinedMinimumNrNodes.begin(); dpit != combinedMinimumNrNodes.end(); ++dpit) { // 5
+                            for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) { //6
 
-                    if (preferred_locations.size() + extra_locations.size() >= minNrOfNodes) {
+                                if (!project_preferred_nodes) {
+                                    pnit = pdps.find(dpit->first); // are there preferred storage nodes for this data product type specified?
+                                    if (pnit != pdps.end()) {
+                                        if (pnit->second.empty()) {
+                                            preferred_nodes = usable_storage_nodes;
+                                        }
+                                        else {
+                                            preferred_nodes = pnit->second;
+                                            for (std::vector<int>::const_iterator asit = usable_storage_nodes.begin(); asit != usable_storage_nodes.end(); ++asit) {
+                                                if (std::find(preferred_nodes.begin(), preferred_nodes.end(), *asit) == preferred_nodes.end()) {
+                                                    extra_nodes.push_back(*asit);
+                                                }
+                                            }
+                                        }
+                                    }
+                                    else {
+                                        preferred_nodes = usable_storage_nodes;
+                                    }
+                                }
 
-					preferred_nodes.clear();
-					extra_nodes.clear();
-					for (storageLocationOptions::const_iterator cpit = preferred_locations.begin(); cpit != preferred_locations.end(); ++cpit) {
-						preferred_nodes.push_back(cpit->first);
-					}
-					for (storageLocationOptions::const_iterator cpit = extra_locations.begin(); cpit != extra_locations.end(); ++cpit) {
-						extra_nodes.push_back(cpit->first);
-					}
+                                // now search for storage locations using the combined storage requirements (only search once, not for all tasks separately;
+                                // the final check will be done when actually assigning the storage to each grouped task)
+                                minNrOfNodes = dpit->second; // the minimum number of storage nodes REQUIRED for this data product
+                                dfit = combinedOutput.find(dpit->first);
+                                bwit = maxSingleFileBW.find(dpit->first);
+                                if (dfit != combinedOutput.end()) {
+                                    nrFiles = dfit->second.second;
+                                    unsigned minNrFilesPerNode = static_cast<unsigned>(ceil((float)nrFiles / nrOfAvailableNodes)); // the minimum number of files the selected nodes should be able to hold
+                                    unsigned maxNrFilesPerNode = static_cast<unsigned>(floor((float)nrFiles / minNrOfNodes)); // the maximum number of files that can be written to one storage node according to bandwidth limitations
+                                    nrFilesPerNode = minNrFilesPerNode;
+
+                                    // calculate the minimum number of files that have to fit on one storage node
+                                    if (nrOfAvailableNodes >= minNrOfNodes) {
+                                        if (minNrFilesPerNode <= maxNrFilesPerNode) {
+                                            preferred_locations = data.getStorageLocationOptions(dpit->first, (*taskit)->getScheduledStart(), (*taskit)->getScheduledEnd(), dfit->second.first, bwit->second, nrFilesPerNode, sort_mode, preferred_nodes);
+                                            extra_locations = data.getStorageLocationOptions(dpit->first, (*taskit)->getScheduledStart(), (*taskit)->getScheduledEnd(), dfit->second.first, bwit->second, nrFilesPerNode, sort_mode, extra_nodes);
+
+                                            if (preferred_locations.size() + extra_locations.size() >= minNrOfNodes) {
+
+                                                preferred_nodes.clear();
+                                                extra_nodes.clear();
+                                                for (storageLocationOptions::const_iterator cpit = preferred_locations.begin(); cpit != preferred_locations.end(); ++cpit) {
+                                                    preferred_nodes.push_back(cpit->first);
+                                                }
+                                                for (storageLocationOptions::const_iterator cpit = extra_locations.begin(); cpit != extra_locations.end(); ++cpit) {
+                                                    extra_nodes.push_back(cpit->first);
+                                                }
 
-					if (preferred_nodes.empty() && extra_nodes.empty()) {
-						(*taskit)->setConflict(CONFLICT_STORAGE_NO_OPTIONS);
-						itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_NO_OPTIONS);
-						bResult = false;
-						break;
-					}
+                                                if (preferred_nodes.empty() && extra_nodes.empty()) {
+                                                    (*taskit)->setConflict(CONFLICT_STORAGE_NO_OPTIONS);
+                                                    itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_NO_OPTIONS);
+                                                    bResult = false;
+                                                    break;
+                                                }
 
-					if (nrFilesPerNode > maxNrFilesPerNode) { // nr files per node too high will exceed bandwidth to node
-						(*taskit)->setConflict(CONFLICT_STORAGE_EXCEEDS_BANDWIDTH);
-						itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_EXCEEDS_BANDWIDTH);
-						bResult = false;
-						break;
-					}
-                }
-                    else {
-                        (*taskit)->setConflict(CONFLICT_STORAGE_MINIMUM_NODES);
-                        itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_MINIMUM_NODES);
-                        bResult = false;
-                        break;
-                    }
-				}
-				else {
-					(*taskit)->setConflict(CONFLICT_STORAGE_EXCEEDS_BANDWIDTH);
-					itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_EXCEEDS_BANDWIDTH);
-					bResult = false;
-					break;
-				}
-			}
-			else {
-				(*taskit)->setConflict(CONFLICT_STORAGE_TOO_FEW_NODES);
-				itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_TOO_FEW_NODES);
-				bResult = false;
-				break;
-			}
-		}
+                                                if (nrFilesPerNode > maxNrFilesPerNode) { // nr files per node too high will exceed bandwidth to node
+                                                    if ((*taskit)->getOutputDataproductCluster() != "CEP4") {
+                                                        (*taskit)->setConflict(CONFLICT_STORAGE_EXCEEDS_BANDWIDTH);
+                                                        itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_EXCEEDS_BANDWIDTH);
+                                                        bResult = false;
+                                                        break;
+                                                    }
+                                                }
+                                            }
+                                            else {
+                                                if ((*taskit)->getOutputDataproductCluster() != "CEP4") {
+                                                    (*taskit)->setConflict(CONFLICT_STORAGE_MINIMUM_NODES);
+                                                    itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_MINIMUM_NODES);
+                                                    bResult = false;
+                                                    break;
+                                                }
+                                            }
+                                        }
+                                        else {
+                                            if ((*taskit)->getOutputDataproductCluster() != "CEP4") {
+                                                (*taskit)->setConflict(CONFLICT_STORAGE_EXCEEDS_BANDWIDTH);
+                                                itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_EXCEEDS_BANDWIDTH);
+                                                bResult = false;
+                                                break;
+                                            }
+                                        }
+                                    }
+                                    else {
+                                        if ((*taskit)->getOutputDataproductCluster() != "CEP4") {
+                                            (*taskit)->setConflict(CONFLICT_STORAGE_TOO_FEW_NODES);
+                                            itsConflictDialog->addStorageConflict((*taskit), dpit->first, CONFLICT_STORAGE_TOO_FEW_NODES);
+                                            bResult = false;
+                                            break;
+                                        }
+                                    }
+                                }
 
 
-	if (bResult) {
-		// check which nodes they have in common, assign common nodes with checking!
-		// ***********************************************************************
-		// ************* DISTRIBUTION OF DATA OVER STORAGE NODES *****************
-		// ***********************************************************************
-		// use the maximum number of suitable storage nodes (= suitable_locations.size())
-		// only keep the locations that are common to all tasks for this data product
-		if (taskit == groupIt->second.begin()) { // for first task search, keep all options
-			common_pref_locations = preferred_locations;
-			common_extra_locations = extra_locations;
-		}
-		else {
-			storageLocationOptions new_common_pref_locations, new_common_extra_locations;
-			for (storageLocationOptions::const_iterator sit = common_pref_locations.begin(); sit != common_pref_locations.end(); ++sit) {
-				for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) { // iterates over the raids
-					if (storageLocationsContains(preferred_locations, sit->first, nsit->raidID)) {
-						new_common_pref_locations.push_back(*sit);
-					}
-				}
-			}
-			common_pref_locations = new_common_pref_locations;
-			for (storageLocationOptions::const_iterator sit = common_extra_locations.begin(); sit != common_extra_locations.end(); ++sit) {
-				for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) { // iterates over the raids
-					if (storageLocationsContains(extra_locations, sit->first, nsit->raidID)) {
-						new_common_extra_locations.push_back(*sit);
-					}
-				}
-			}
-			common_extra_locations = new_common_extra_locations;
-		}
-		}
-	} // 6, end of search for all task in this group for the current data product
+                                if (bResult) {
+                                    // check which nodes they have in common, assign common nodes with checking!
+                                    // ***********************************************************************
+                                    // ************* DISTRIBUTION OF DATA OVER STORAGE NODES *****************
+                                    // ***********************************************************************
+                                    // use the maximum number of suitable storage nodes (= suitable_locations.size())
+                                    // only keep the locations that are common to all tasks for this data product
+                                    if (taskit == groupIt->second.begin()) { // for first task search, keep all options
+                                        common_pref_locations = preferred_locations;
+                                        common_extra_locations = extra_locations;
+                                    }
+                                    else {
+                                        storageLocationOptions new_common_pref_locations, new_common_extra_locations;
+                                        for (storageLocationOptions::const_iterator sit = common_pref_locations.begin(); sit != common_pref_locations.end(); ++sit) {
+                                            for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) { // iterates over the raids
+                                                if (storageLocationsContains(preferred_locations, sit->first, nsit->raidID)) {
+                                                    new_common_pref_locations.push_back(*sit);
+                                                }
+                                            }
+                                        }
+                                        common_pref_locations = new_common_pref_locations;
+                                        for (storageLocationOptions::const_iterator sit = common_extra_locations.begin(); sit != common_extra_locations.end(); ++sit) {
+                                            for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) { // iterates over the raids
+                                                if (storageLocationsContains(extra_locations, sit->first, nsit->raidID)) {
+                                                    new_common_extra_locations.push_back(*sit);
+                                                }
+                                            }
+                                        }
+                                        common_extra_locations = new_common_extra_locations;
+                                    }
+                                }
+                            } // 6, end of search for all tasks in this group for the current data product
 
 
-		unsigned maxFilesToNodes(MAX_UNSIGNED);
-		storageVector locations;
+                            unsigned maxFilesToNodes(MAX_UNSIGNED);
+                            storageVector locations;
 
-		bool sufficient_locations(false);
-		// select enough locations from the common location solutions
-		if ((mode == STORAGE_MODE_MAXIMUM_DATA_TYPE_PREFERRED) || (mode == STORAGE_MODE_MAXIMUM_PROJECT_PREFERRED)) {
-			for (storageLocationOptions::const_iterator sit = common_pref_locations.begin(); sit != common_pref_locations.end(); ++sit) { // iterate over available locations
-				for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) { // iterates over the raids
-					locations.push_back(storageVector::value_type(sit->first, nsit->raidID)); // use only the first raid option of each storage node available
-					// also determine the maximum number of files that can be written to a single suitable node, needed to determine the minimum number of extra (non-preferred) nodes needed in addition to the preferred nodes
-					maxFilesToNodes = std::min(maxFilesToNodes, nsit->nrUnits);
-					if (locations.size() * nrFilesPerNode >= nrFiles) {
-						sufficient_locations = true;
-//						break; // don't assign more storage nodes than the number of files written
-					}
+                            bool sufficient_locations(false);
+                            // select enough locations from the common location solutions
+                            if ((mode == STORAGE_MODE_MAXIMUM_DATA_TYPE_PREFERRED) || (mode == STORAGE_MODE_MAXIMUM_PROJECT_PREFERRED)) {
+                                for (storageLocationOptions::const_iterator sit = common_pref_locations.begin(); sit != common_pref_locations.end(); ++sit) { // iterate over available locations
+                                    for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) { // iterates over the raids
+                                        locations.push_back(storageVector::value_type(sit->first, nsit->raidID)); // use only the first raid option of each storage node available
+                                        // also determine the maximum number of files that can be written to a single suitable node, needed to determine the minimum number of extra (non-preferred) nodes needed in addition to the preferred nodes
+                                        maxFilesToNodes = std::min(maxFilesToNodes, nsit->nrUnits);
+                                        if (locations.size() * nrFilesPerNode >= nrFiles) {
+                                            sufficient_locations = true;
+                                            // break; // don't assign more storage nodes than the number of files written
+                                        }
 
-				}
-//				if (sufficient_locations) break;
-			}
-			if (!sufficient_locations && (locations.size() < minNrOfNodes)) { // do we need extra locations (non preferred nodes)? If so, use as few as possible of these
-				for (storageLocationOptions::const_iterator sit = common_extra_locations.begin(); sit != common_extra_locations.end(); ++sit) {
-					for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) {
-						locations.push_back(storageVector::value_type(sit->first, nsit->raidID));
-						maxFilesToNodes = std::min(maxFilesToNodes, nsit->nrUnits);
-						if ((locations.size() * maxFilesToNodes >= nrFiles) && (locations.size() >= minNrOfNodes)) {
-							sufficient_locations = true;
-							break; // don't assign more storage nodes than the number of files written
-						}
-					}
-					if (sufficient_locations) break;
-				}
-			}
-		}
-			else if ((mode == STORAGE_MODE_MINIMUM_DATA_TYPE_PREFERRED) || (mode == STORAGE_MODE_MINIMUM_PROJECT_PREFERRED)) {
-				bool inserted(false);
-				vector<std::pair<int, storageOption> > smallest_vec; // first = node ID
-				for (storageLocationOptions::const_iterator sit = common_pref_locations.begin(); sit != common_pref_locations.end(); ++sit) {
-					for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) {
-						maxFilesToNodes = std::min(maxFilesToNodes, nsit->nrUnits); // determine the maximum number of files that can be written to a single suitable node
-						for (vector<std::pair<int, storageOption> >::iterator ssit = smallest_vec.begin(); ssit != smallest_vec.end(); ++ssit) {
-							if (nsit->remainingSpacekB < ssit->second.remainingSpacekB) { // sort according to free space in smallest_vec
-								smallest_vec.insert(ssit, std::pair<int, storageOption>(sit->first, *nsit)); // insert the smallest free space raid arrays in smallest_vec
-								inserted = true;
-								break;
-							}
-						}
-						if (!inserted) {
-							smallest_vec.push_back(std::pair<int, storageOption>(sit->first, *nsit)); // put at the end (it's has the largest free space up to now)
-							break; // only one raid array per node here
-						}
-					}
-					if (smallest_vec.size() * nrFilesPerNode >= nrFiles) {
-						sufficient_locations = true;
-						break; // don't assign more storage nodes than the number of files written
-					}
-				}
+                                    }
+                                    // if (sufficient_locations) break;
+                                }
+                                if (!sufficient_locations && (locations.size() < minNrOfNodes)) { // do we need extra locations (non preferred nodes)? If so, use as few as possible of these
+                                    for (storageLocationOptions::const_iterator sit = common_extra_locations.begin(); sit != common_extra_locations.end(); ++sit) {
+                                        for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) {
+                                            locations.push_back(storageVector::value_type(sit->first, nsit->raidID));
+                                            maxFilesToNodes = std::min(maxFilesToNodes, nsit->nrUnits);
+                                            if ((locations.size() * maxFilesToNodes >= nrFiles) && (locations.size() >= minNrOfNodes)) {
+                                                sufficient_locations = true;
+                                                break; // don't assign more storage nodes than the number of files written
+                                            }
+                                        }
+                                        if (sufficient_locations) break;
+                                    }
+                                }
+                            }
+                            else if ((mode == STORAGE_MODE_MINIMUM_DATA_TYPE_PREFERRED) || (mode == STORAGE_MODE_MINIMUM_PROJECT_PREFERRED)) {
+                                bool inserted(false);
+                                vector<std::pair<int, storageOption> > smallest_vec; // first = node ID
+                                for (storageLocationOptions::const_iterator sit = common_pref_locations.begin(); sit != common_pref_locations.end(); ++sit) {
+                                    for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) {
+                                        maxFilesToNodes = std::min(maxFilesToNodes, nsit->nrUnits); // determine the maximum number of files that can be written to a single suitable node
+                                        for (vector<std::pair<int, storageOption> >::iterator ssit = smallest_vec.begin(); ssit != smallest_vec.end(); ++ssit) {
+                                            if (nsit->remainingSpacekB < ssit->second.remainingSpacekB) { // sort according to free space in smallest_vec
+                                                smallest_vec.insert(ssit, std::pair<int, storageOption>(sit->first, *nsit)); // insert the smallest free space raid arrays in smallest_vec
+                                                inserted = true;
+                                                break;
+                                            }
+                                        }
+                                        if (!inserted) {
+                                            smallest_vec.push_back(std::pair<int, storageOption>(sit->first, *nsit)); // put at the end (it has the largest free space so far)
+                                            break; // only one raid array per node here
+                                        }
+                                    }
+                                    if (smallest_vec.size() * nrFilesPerNode >= nrFiles) {
+                                        sufficient_locations = true;
+                                        break; // don't assign more storage nodes than the number of files written
+                                    }
+                                }
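+                                // smallest_vec stays sorted by ascending remaining free space, so taking its first
+                                // entries below selects the suitable arrays with the least space left, which appears
+                                // to be the intent of the minimum-space modes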
 
-				if (!sufficient_locations && (locations.size() < minNrOfNodes)) { // do we need extra locations (non preferred nodes)? If so, use as few as possible of these
-					for (storageLocationOptions::const_iterator sit = common_extra_locations.begin(); sit != common_extra_locations.end(); ++sit) {
-						for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) {
-							maxFilesToNodes = std::min(maxFilesToNodes, nsit->nrUnits);
-							for (vector<std::pair<int, storageOption> >::iterator ssit = smallest_vec.begin(); ssit != smallest_vec.end(); ++ssit) {
-								if (nsit->remainingSpacekB < ssit->second.remainingSpacekB) { // sort according to free space in smallest_vec
-									smallest_vec.insert(ssit, std::pair<int, storageOption>(sit->first, *nsit)); // insert the smallest free space raid arrays in smallest_vec
-									inserted = true;
-									break;
-								}
-							}
-							if (!inserted) {
-								smallest_vec.push_back(std::pair<int, storageOption>(sit->first, *nsit)); // put at the end (it has the largest free space up to now)
-								break; // only one raid array per node here
-							}
-						}
-						if ((smallest_vec.size() * maxFilesToNodes >= nrFiles) & (smallest_vec.size() >= minNrOfNodes)) {
-							break; // don't assign more storage nodes than the number of files written
-						}
-					}
-				}
-				for (unsigned i = 0; i < minNrOfNodes; ++i) {
-					locations.push_back(pair<int,int>(smallest_vec.at(i).first, smallest_vec.at(i).second.raidID));
-				}
-			}
+                                if (!sufficient_locations && (locations.size() < minNrOfNodes)) { // do we need extra locations (non preferred nodes)? If so, use as few as possible of these
+                                    for (storageLocationOptions::const_iterator sit = common_extra_locations.begin(); sit != common_extra_locations.end(); ++sit) {
+                                        for (nodeStorageOptions::const_iterator nsit = sit->second.begin(); nsit != sit->second.end(); ++nsit) {
+                                            maxFilesToNodes = std::min(maxFilesToNodes, nsit->nrUnits);
+                                            for (vector<std::pair<int, storageOption> >::iterator ssit = smallest_vec.begin(); ssit != smallest_vec.end(); ++ssit) {
+                                                if (nsit->remainingSpacekB < ssit->second.remainingSpacekB) { // sort according to free space in smallest_vec
+                                                    smallest_vec.insert(ssit, std::pair<int, storageOption>(sit->first, *nsit)); // insert the smallest free space raid arrays in smallest_vec
+                                                    inserted = true;
+                                                    break;
+                                                }
+                                            }
+                                            if (!inserted) {
+                                                smallest_vec.push_back(std::pair<int, storageOption>(sit->first, *nsit)); // put at the end (it has the largest free space up to now)
+                                                break; // only one raid array per node here
+                                            }
+                                        }
+                                        if ((smallest_vec.size() * maxFilesToNodes >= nrFiles) && (smallest_vec.size() >= minNrOfNodes)) {
+                                            break; // don't assign more storage nodes than the number of files written
+                                        }
+                                    }
+                                }
+                                for (unsigned i = 0; i < minNrOfNodes; ++i) {
+                                    locations.push_back(pair<int,int>(smallest_vec.at(i).first, smallest_vec.at(i).second.raidID));
+                                }
+                            }
 
-		// finally assign the common storage locations for this data product to all tasks in the group and check if the result is ok (i.e. no conflicts)
-		for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) { // 7
-            if ((*taskit)->storage()->isOutputDataProduktEnabled(dpit->first)) {
-				std::vector<storageResult> result = data.addStorageToTask(*taskit, dpit->first, locations, false);
-				if (!result.empty()) {
-					for (std::vector<storageResult>::const_iterator sit = result.begin(); sit != result.end(); ++sit) {
-						if (sit->conflict != CONFLICT_NO_CONFLICT) {
-							itsConflictDialog->addStorageConflict(*taskit, sit->dataProductType, sit->conflict);
-						}
-						(*taskit)->setConflict(sit->conflict);
-					}
-					bResult = false;
-				}
-			}
-		} // END 7
-	} // 5
-	for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) {
-		if ((*taskit)->isPipeline()) {
-            Pipeline *pipe(static_cast<Pipeline *>(*taskit));
-            setInputFilesForPipeline(pipe);
-			// we have to re-assign the storage for the pipeline because the predecessor (observation or pipeline) might have changed in the previous loop
-			for (dataProductTypes dp = _BEGIN_DATA_PRODUCTS_ENUM_; dp < _END_DATA_PRODUCTS_ENUM_; dp = dataProductTypes(dp + 1)) {
-                if (dp != DP_SKY_IMAGE && pipe->storage()->isOutputDataProduktEnabled(dp)) { // for SKY_IMAGE input nodes are not equal to output nodes
-                    const storageMap &inputStorageLocations(pipe->storage()->getInputStorageLocations());
-					storageMap::const_iterator inpcorit = inputStorageLocations.find(DP_CORRELATED_UV);
-					if (inpcorit != inputStorageLocations.end()) {
-                        data.addStorageToTask(*taskit, dp, inpcorit->second, false);
-					}
-				}
-			}
-		}
-        (*taskit)->storage()->generateFileList();
-	}
+                            // finally assign the common storage locations for this data product to all tasks in the group and check if the result is ok (i.e. no conflicts)
+                            for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) { // 7
+                                if ((*taskit)->storage()->isOutputDataProduktEnabled(dpit->first)) {
+                                    std::vector<storageResult> result = data.addStorageToTask(*taskit, dpit->first, locations, false);
+                                    if (!result.empty()) {
+                                        for (std::vector<storageResult>::const_iterator sit = result.begin(); sit != result.end(); ++sit) {
+                                            if (sit->conflict != CONFLICT_NO_CONFLICT) {
+                                                itsConflictDialog->addStorageConflict(*taskit, sit->dataProductType, sit->conflict);
+                                            }
+                                            (*taskit)->setConflict(sit->conflict);
+                                        }
+                                        bResult = false;
+                                    }
+                                }
+                            } // END 7
+                        } // 5
+                        for (std::vector<Task *>::const_iterator taskit = groupIt->second.begin(); taskit != groupIt->second.end(); ++taskit) {
+                            if ((*taskit)->isPipeline()) {
+                                Pipeline *pipe(static_cast<Pipeline *>(*taskit));
+                                setInputFilesForPipeline(pipe);
+                                // we have to re-assign the storage for the pipeline because the predecessor (observation or pipeline) might have changed in the previous loop
+                                for (dataProductTypes dp = _BEGIN_DATA_PRODUCTS_ENUM_; dp < _END_DATA_PRODUCTS_ENUM_; dp = dataProductTypes(dp + 1)) {
+                                    if (dp != DP_SKY_IMAGE && pipe->storage()->isOutputDataProduktEnabled(dp)) { // for SKY_IMAGE input nodes are not equal to output nodes
+                                        const storageMap &inputStorageLocations(pipe->storage()->getInputStorageLocations());
+                                        storageMap::const_iterator inpcorit = inputStorageLocations.find(DP_CORRELATED_UV);
+                                        if (inpcorit != inputStorageLocations.end()) {
+                                            data.addStorageToTask(*taskit, dp, inpcorit->second, false);
+                                        }
+                                    }
+                                }
+                            }
+                            (*taskit)->storage()->generateFileList();
+                        }
 
-	}
-	}
-	} // END for loop over individual groups
-	}
-	else return false; // no storage nodes available
-	}
+                    }
+                }
+            } // END for loop over individual groups
+        }
+        else return false; // no storage nodes available
+    }
 
-	return bResult;
+    return bResult;
 }
 
 
diff --git a/SAS/Scheduler/src/DataHandler.cpp b/SAS/Scheduler/src/DataHandler.cpp
index af731159153629499309867baea08a92b3219d7f..bbd0d5908b9f9141ce1e18e3d86fe354bb9f1df0 100644
--- a/SAS/Scheduler/src/DataHandler.cpp
+++ b/SAS/Scheduler/src/DataHandler.cpp
@@ -519,9 +519,11 @@ bool DataHandler::saveProgramPreferences(void) {
 		return false;
 }
 
-bool DataHandler::saveSettings(const QString &filename) const {
-	QFile file(filename);
-	if (file.open(QIODevice::WriteOnly)) {
+bool DataHandler::saveSettings(const QString &filename) const
+{
+    QFile file(QDir::currentPath() + "/" + filename); // currentPath() never ends with a separator, so add one
+    if (file.open(QIODevice::WriteOnly))
+    {
 		QDataStream out(&file);
 		out << (unsigned)FILE_WRITE_VERSION;
 		out << Controller::theSchedulerSettings;
diff --git a/SAS/Scheduler/src/GraphicResourceScene.cpp b/SAS/Scheduler/src/GraphicResourceScene.cpp
index 9f3ebad37dd35f9b9cfe531aa744705d17d7b3fa..bdbb1c34557f0c16bddfc7d005073e4e553dc35c 100644
--- a/SAS/Scheduler/src/GraphicResourceScene.cpp
+++ b/SAS/Scheduler/src/GraphicResourceScene.cpp
@@ -225,13 +225,13 @@ void GraphicResourceScene::updateStationTimeLines() {
         GraphicStationTaskLine *stationTimeLine = new GraphicStationTaskLine(this, it->second, StationLineYPos);
         addItem(stationTimeLine);
         // create station label
-        QGraphicsSimpleTextItem *stationName = new QGraphicsSimpleTextItem(it->first.c_str(), 0, this);
+        QGraphicsSimpleTextItem *stationName = new QGraphicsSimpleTextItem(it->first.c_str(), 0);
+        addItem(stationName); // the Qt5 constructor no longer takes a scene, so attach the item explicitly
         stationName->setPos(labelXpos, StationLineYPos-2);
         stationName->setFont(QFont("Liberation Sans", 9, QFont::Bold));
         stationName->setZValue(10);
         const QPointF &sp(stationName->pos());
         QRectF r(sp.x()-2, sp.y()-2, 9*(it->first.length()-1)+4, 13);
-        QGraphicsRectItem * rect = new QGraphicsRectItem(r, 0, this);
+        QGraphicsRectItem * rect = new QGraphicsRectItem(r, 0);
+        addItem(rect); // likewise, attach the rectangle to this scene explicitly
         rect->setZValue(9);
         rect->setPen(QPen(Qt::NoPen));
         rect->setBrush(QColor(255,255,255,160));
diff --git a/SAS/Scheduler/src/GraphicStationTaskLine.h b/SAS/Scheduler/src/GraphicStationTaskLine.h
index 1f21ffb5799fa3702589ddad11e56b8f097c4b60..9199381f0488b7b01001280ef114fa32aabcf130 100644
--- a/SAS/Scheduler/src/GraphicStationTaskLine.h
+++ b/SAS/Scheduler/src/GraphicStationTaskLine.h
@@ -33,7 +33,7 @@ public:
 
 private:
     GraphicResourceScene *itsScene;
-	unsigned int itsStationID, itsCurrentHighlightTask;
+    unsigned int itsStationID; //, itsCurrentHighlightTask;
 	int itsWidth, itsHeight;
 };
 
diff --git a/SAS/Scheduler/src/GraphicTask.h b/SAS/Scheduler/src/GraphicTask.h
index f5151eccf91ccdb0a1d28200025d87955f3503fc..42362e99659ed2fd6a2b914e491ae548095a1934 100644
--- a/SAS/Scheduler/src/GraphicTask.h
+++ b/SAS/Scheduler/src/GraphicTask.h
@@ -29,7 +29,7 @@ class QDragMoveEvent;
 class Task;
 class GraphicStationTaskLine;
 
-class GraphicTask : public QObject, public QGraphicsItem {
+class GraphicTask : /*public QObject,*/ public QGraphicsObject {
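+    // note: QGraphicsObject provides both the QObject and QGraphicsItem bases in a single class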
 
 	Q_OBJECT
 
diff --git a/SAS/Scheduler/src/LOFAR_libScheduler.pro b/SAS/Scheduler/src/LOFAR_libScheduler.pro
index 2d691fa8ce6a599e78c1e88fd50dc3e203a115ea..f2852070cb875615efc44b2d60879f8524524e03 100644
--- a/SAS/Scheduler/src/LOFAR_libScheduler.pro
+++ b/SAS/Scheduler/src/LOFAR_libScheduler.pro
@@ -1,225 +1,224 @@
-# #####################################################################
-# Automatically generated by qmake (2.01a) Tue Mar 15 12:58:58 2011
-# #####################################################################
-TEMPLATE = lib
-#TEMPLATE = app
-TARGET = libScheduler
-QT += core \
-    gui \
-    sql
-CONFIG += debug_and_release
-CONFIG += staticlib  # dynamic link: Problem with linking into app.
-# This needs to be fixed with help of Marcel / JD
-
-CONFIG(debug, debug|release) { 
-    message(Building Makefile.Debug)
-    DEFINES += _DEBUG_
-    DESTDIR = debug
-    OBJECTS_DIR = debug
-    MOC_DIR = debug
-    UI_DIR = debug
-    RCC_DIR = debug
-    INCLUDEPATH += . \
-        debug
-    DEPENDPATH += . \
-        debug
-}
-else { 
-    message(Building Makefile.Release)
-    DEFINES += _RELEASE_
-    DESTDIR = release
-    OBJECTS_DIR = release
-    MOC_DIR = release
-    UI_DIR = release
-    RCC_DIR = release
-    INCLUDEPATH += . \
-        release
-    DEPENDPATH += . \
-        release
-}
-
-# Input
-HEADERS += blocksize.h \
-    ListWidget.h \
-    redistributetasksdialog.h \
-    sasconnectdialog.h \
-    DataTreeWidgetItem.h \
-    shifttasksdialog.h \
-    tiedarraybeamdialog.h \
-    DigitalBeam.h \
-    TiedArrayBeam.h \
-    SpinBox.h \
-    parsettreeviewer.h \
-    FileUtils.h \
-    Angle.h \
-    astrodate.h \
-    astrodatetime.h \
-    astrotime.h \
-    ComboBox.h \
-    conflictdialog.h \
-    Controller.h \
-    DataHandler.h \
-    DataMonitorConnection.h \
-    dataslotdialog.h \
-    DateEdit.h \
-    DateTimeEdit.h \
-    digitalbeamdialog.h \
-    GraphicCurrentTimeLine.h \
-    GraphicResourceScene.h \
-    GraphicStationTaskLine.h \
-    graphicstoragescene.h \
-    GraphicStorageTimeLine.h \
-    GraphicTask.h \
-    GraphicTimeLine.h \
-    LineEdit.h \
-    longbaselinepipeline.h \
-    lofar_scheduler.h \
-    lofar_utils.h \
-    neighboursolution.h \
-    OTDBnode.h \
-    OTDBtree.h \
-    publishdialog.h \
-    qlofardatamodel.h \
-    SASConnection.h \
-    sasprogressdialog.h \
-    sasstatusdialog.h \
-    sasuploaddialog.h \
-    Scheduler.h \
-    schedulerdata.h \
-    schedulerdatablock.h \
-    schedulergui.h \
-    schedulersettings.h \
-    schedulesettingsdialog.h \
-    scheduletabledelegate.h \
-    statehistorydialog.h \
-    station.h \
-    stationlistwidget.h \
-    stationtreewidget.h \
-    Storage.h \
-    StorageNode.h \
-    tablecolumnselectdialog.h \
-    tableview.h \
-    task.h \
-    taskcopydialog.h \
-    taskdialog.h \
-    thrashbin.h \
-    TimeEdit.h \
-    doublespinbox.h \
-    pipeline.h \
-    pulsarpipeline.h \
-    imagingpipeline.h \
-    calibrationpipeline.h \
-    observation.h \
-    taskstorage.h \
-    stationtask.h \
-    storage_definitions.h \
-    demixingsettings.h \
-    CheckBox.h \
-    schedulerLib.h \
-    signalhandler.h
-FORMS += \
-    redistributetasksdialog.ui \
-    sasconnectdialog.ui \
-    shifttasksdialog.ui \
-    tiedarraybeamdialog.ui \
-    parsettreeviewer.ui \
-    conflictdialog.ui \
-    dataslotdialog.ui \
-    digitalbeamdialog.ui \
-    graphicstoragescene.ui \
-    publishdialog.ui \
-    sasprogressdialog.ui \
-    sasstatusdialog.ui \
-    sasuploaddialog.ui \
-    schedulergui.ui \
-    schedulesettingsdialog.ui \
-    statehistorydialog.ui \
-    stationlistwidget.ui \
-    stationtreewidget.ui \
-    tablecolumnselectdialog.ui \
-    taskcopydialog.ui \
-    taskdialog.ui \
-    thrashbin.ui \
-    storageresourceview.ui
-SOURCES += \
-    ListWidget.cpp \
-    redistributetasksdialog.cpp \
-    sasconnectdialog.cpp \
-    DataTreeWidgetItem.cpp \
-    shifttasksdialog.cpp \
-    tiedarraybeamdialog.cpp \
-    DigitalBeam.cpp \
-    TiedArrayBeam.cpp \
-    SpinBox.cpp \
-    parsettreeviewer.cpp \
-    FileUtils.cpp \
-    Angle.cpp \
-    astrodate.cpp \
-    astrodatetime.cpp \
-    astrotime.cpp \
-    ComboBox.cpp \
-    conflictdialog.cpp \
-    Controller.cpp \
-    DataHandler.cpp \
-    DataMonitorConnection.cpp \
-    dataslotdialog.cpp \
-    DateEdit.cpp \
-    DateTimeEdit.cpp \
-    debug_lofar.cpp \
-    digitalbeamdialog.cpp \
-    GraphicCurrentTimeLine.cpp \
-    GraphicResourceScene.cpp \
-    GraphicStationTaskLine.cpp \
-    graphicstoragescene.cpp \
-    GraphicStorageTimeLine.cpp \
-    GraphicTask.cpp \
-    GraphicTimeLine.cpp \
-    LineEdit.cpp \
-    longbaselinepipeline.cpp \
-    lofar_utils.cpp \
-    main.cpp \
-    neighboursolution.cpp \
-    OTDBnode.cpp \
-    OTDBtree.cpp \
-    publishdialog.cpp \
-    qlofardatamodel.cpp \
-    SASConnection.cpp \
-    sasprogressdialog.cpp \
-    sasstatusdialog.cpp \
-    sasuploaddialog.cpp \
-    Scheduler.cpp \
-    schedulerdata.cpp \
-    schedulerdatablock.cpp \
-    schedulergui.cpp \
-    schedulersettings.cpp \
-    schedulesettingsdialog.cpp \
-    scheduletabledelegate.cpp \
-    statehistorydialog.cpp \
-    station.cpp \
-    stationlistwidget.cpp \
-    stationtreewidget.cpp \
-    Storage.cpp \
-    StorageNode.cpp \
-    tablecolumnselectdialog.cpp \
-    tableview.cpp \
-    task.cpp \
-    taskcopydialog.cpp \
-    taskdialog.cpp \
-    thrashbin.cpp \
-    TimeEdit.cpp \
-    doublespinbox.cpp \
-    pipeline.cpp \
-    pulsarpipeline.cpp \
-    imagingpipeline.cpp \
-    calibrationpipeline.cpp \
-    observation.cpp \
-    taskstorage.cpp \
-    stationtask.cpp \
-    demixingsettings.cpp \
-    blocksize.cpp \
-    CheckBox.cpp \
-    schedulerLib.cpp \
-    signalhandler.cpp
-RESOURCES += scheduler_resources.qrc
-
-
+# #####################################################################
+# Automatically generated by qmake (2.01a) Tue Mar 15 12:58:58 2011
+# #####################################################################
+TEMPLATE = lib
+#TEMPLATE = app
+TARGET = libScheduler
+QT += core \
+    gui \
+    sql
+CONFIG += debug_and_release
+CONFIG += staticlib  # dynamic link: Problem with linking into app.
+# This needs to be fixed with help of Marcel / JD
+
+CONFIG(debug, debug|release) { 
+    message(Building Makefile.Debug)
+    DEFINES += _DEBUG_
+    DESTDIR = debug
+    OBJECTS_DIR = debug
+    MOC_DIR = debug
+    UI_DIR = debug
+    RCC_DIR = debug
+    INCLUDEPATH += . \
+        debug
+    DEPENDPATH += . \
+        debug
+}
+else { 
+    message(Building Makefile.Release)
+    DEFINES += _RELEASE_
+    DESTDIR = release
+    OBJECTS_DIR = release
+    MOC_DIR = release
+    UI_DIR = release
+    RCC_DIR = release
+    INCLUDEPATH += . \
+        release
+    DEPENDPATH += . \
+        release
+}
+
+# Input
+HEADERS += blocksize.h \
+    ListWidget.h \
+    redistributetasksdialog.h \
+    sasconnectdialog.h \
+    DataTreeWidgetItem.h \
+    shifttasksdialog.h \
+    tiedarraybeamdialog.h \
+    DigitalBeam.h \
+    TiedArrayBeam.h \
+    SpinBox.h \
+    parsettreeviewer.h \
+    FileUtils.h \
+    Angle.h \
+    astrodate.h \
+    astrodatetime.h \
+    astrotime.h \
+    ComboBox.h \
+    conflictdialog.h \
+    Controller.h \
+    DataHandler.h \
+    DataMonitorConnection.h \
+    dataslotdialog.h \
+    DateEdit.h \
+    DateTimeEdit.h \
+    digitalbeamdialog.h \
+    GraphicCurrentTimeLine.h \
+    GraphicResourceScene.h \
+    GraphicStationTaskLine.h \
+    graphicstoragescene.h \
+    GraphicStorageTimeLine.h \
+    GraphicTask.h \
+    GraphicTimeLine.h \
+    LineEdit.h \
+    longbaselinepipeline.h \
+    lofar_scheduler.h \
+    lofar_utils.h \
+    neighboursolution.h \
+    OTDBnode.h \
+    OTDBtree.h \
+    publishdialog.h \
+    qlofardatamodel.h \
+    SASConnection.h \
+    sasprogressdialog.h \
+    sasstatusdialog.h \
+    sasuploaddialog.h \
+    Scheduler.h \
+    schedulerdata.h \
+    schedulerdatablock.h \
+    schedulergui.h \
+    schedulersettings.h \
+    schedulesettingsdialog.h \
+    scheduletabledelegate.h \
+    statehistorydialog.h \
+    station.h \
+    stationlistwidget.h \
+    stationtreewidget.h \
+    Storage.h \
+    StorageNode.h \
+    tablecolumnselectdialog.h \
+    tableview.h \
+    task.h \
+    taskcopydialog.h \
+    taskdialog.h \
+    thrashbin.h \
+    TimeEdit.h \
+    doublespinbox.h \
+    pipeline.h \
+    pulsarpipeline.h \
+    imagingpipeline.h \
+    calibrationpipeline.h \
+    observation.h \
+    taskstorage.h \
+    stationtask.h \
+    storage_definitions.h \
+    demixingsettings.h \
+    CheckBox.h \
+    schedulerLib.h \
+    signalhandler.h
+FORMS += \
+    redistributetasksdialog.ui \
+    sasconnectdialog.ui \
+    shifttasksdialog.ui \
+    tiedarraybeamdialog.ui \
+    parsettreeviewer.ui \
+    conflictdialog.ui \
+    dataslotdialog.ui \
+    digitalbeamdialog.ui \
+    graphicstoragescene.ui \
+    publishdialog.ui \
+    sasprogressdialog.ui \
+    sasstatusdialog.ui \
+    sasuploaddialog.ui \
+    schedulergui.ui \
+    schedulesettingsdialog.ui \
+    statehistorydialog.ui \
+    stationlistwidget.ui \
+    stationtreewidget.ui \
+    tablecolumnselectdialog.ui \
+    taskcopydialog.ui \
+    taskdialog.ui \
+    thrashbin.ui
+SOURCES += \
+    ListWidget.cpp \
+    redistributetasksdialog.cpp \
+    sasconnectdialog.cpp \
+    DataTreeWidgetItem.cpp \
+    shifttasksdialog.cpp \
+    tiedarraybeamdialog.cpp \
+    DigitalBeam.cpp \
+    TiedArrayBeam.cpp \
+    SpinBox.cpp \
+    parsettreeviewer.cpp \
+    FileUtils.cpp \
+    Angle.cpp \
+    astrodate.cpp \
+    astrodatetime.cpp \
+    astrotime.cpp \
+    ComboBox.cpp \
+    conflictdialog.cpp \
+    Controller.cpp \
+    DataHandler.cpp \
+    DataMonitorConnection.cpp \
+    dataslotdialog.cpp \
+    DateEdit.cpp \
+    DateTimeEdit.cpp \
+    debug_lofar.cpp \
+    digitalbeamdialog.cpp \
+    GraphicCurrentTimeLine.cpp \
+    GraphicResourceScene.cpp \
+    GraphicStationTaskLine.cpp \
+    graphicstoragescene.cpp \
+    GraphicStorageTimeLine.cpp \
+    GraphicTask.cpp \
+    GraphicTimeLine.cpp \
+    LineEdit.cpp \
+    longbaselinepipeline.cpp \
+    lofar_utils.cpp \
+    main.cpp \
+    neighboursolution.cpp \
+    OTDBnode.cpp \
+    OTDBtree.cpp \
+    publishdialog.cpp \
+    qlofardatamodel.cpp \
+    SASConnection.cpp \
+    sasprogressdialog.cpp \
+    sasstatusdialog.cpp \
+    sasuploaddialog.cpp \
+    Scheduler.cpp \
+    schedulerdata.cpp \
+    schedulerdatablock.cpp \
+    schedulergui.cpp \
+    schedulersettings.cpp \
+    schedulesettingsdialog.cpp \
+    scheduletabledelegate.cpp \
+    statehistorydialog.cpp \
+    station.cpp \
+    stationlistwidget.cpp \
+    stationtreewidget.cpp \
+    Storage.cpp \
+    StorageNode.cpp \
+    tablecolumnselectdialog.cpp \
+    tableview.cpp \
+    task.cpp \
+    taskcopydialog.cpp \
+    taskdialog.cpp \
+    thrashbin.cpp \
+    TimeEdit.cpp \
+    doublespinbox.cpp \
+    pipeline.cpp \
+    pulsarpipeline.cpp \
+    imagingpipeline.cpp \
+    calibrationpipeline.cpp \
+    observation.cpp \
+    taskstorage.cpp \
+    stationtask.cpp \
+    demixingsettings.cpp \
+    blocksize.cpp \
+    CheckBox.cpp \
+    schedulerLib.cpp \
+    signalhandler.cpp
+RESOURCES += scheduler_resources.qrc
+
+
diff --git a/SAS/Scheduler/src/SASConnection.cpp b/SAS/Scheduler/src/SASConnection.cpp
index 8ad9b60b0f558bc9a37b8d5983bbe2d724078982..2684925db492fb1495f29a005ff52c71306162d7 100644
--- a/SAS/Scheduler/src/SASConnection.cpp
+++ b/SAS/Scheduler/src/SASConnection.cpp
@@ -1898,7 +1898,7 @@ bool SASConnection::getSchedulerInfo(int tree_id, Task &task) {
 	if (query.next()) {
 		int day = query.value(0).toInt();
 		if (day) task.setWindowFirstDay(day);
-		else task.setWindowFirstDay(std::max(QDate::currentDate().toJulianDay() - J2000_EPOCH, (int)Controller::theSchedulerSettings.getEarliestSchedulingDay().toJulian()));
+        else task.setWindowFirstDay(std::max(QDate::currentDate().toJulianDay() - J2000_EPOCH, (qint64)Controller::theSchedulerSettings.getEarliestSchedulingDay().toJulian()));
 	}
 	else { // serious error
 		itsProgressDialog.addError(QString("Error: Scheduler.firstPossibleDay node of SAS tree: ") + treeID + " could not be fetched");
@@ -2501,11 +2501,23 @@ bool SASConnection::saveStationSettings(int treeID, const StationTask &task, con
             if (task.getStationClock() == clock_160Mhz) {
 				//clock mode
                 if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.clockMode","<<Clock160")) return false;
+				// channelWidth
+				if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.channelWidth", STR_CLOCK160_CHANNELWIDTH)) return false;
+				// samplesPerSecond
+				if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.samplesPerSecond", STR_CLOCK160_SAMPLESPERSECOND)) return false;
+				// subbandWidth
+				if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.subbandWidth", STR_CLOCK160_SUBBANDWIDTH)) return false;
 				// systemClock
 				if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.sampleClock", STR_CLOCK160_SAMPLECLOCK)) return false;
 			}
 			else {
                 if (!setNodeValue(treeID,"LOFAR.ObsSW.Observation.clockMode","<<Clock200")) return false;
+				// channelWidth
+				if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.channelWidth", STR_CLOCK200_CHANNELWIDTH)) return false;
+				// samplesPerSecond
+				if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.samplesPerSecond", STR_CLOCK200_SAMPLESPERSECOND)) return false;
+				// subbandWidth
+				if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.subbandWidth", STR_CLOCK200_SUBBANDWIDTH)) return false;
 				// systemClock
 				if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.sampleClock", STR_CLOCK200_SAMPLECLOCK)) return false;
 			}
@@ -2546,11 +2558,23 @@ bool SASConnection::saveStationSettings(int treeID, const StationTask &task, con
         if (task.getStationClock() == clock_160Mhz) {
 			//clock mode
             if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.clockMode","<<Clock160")) return false;
+			// channelWidth
+			if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.channelWidth", STR_CLOCK160_CHANNELWIDTH)) return false;
+			// samplesPerSecond
+			if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.samplesPerSecond", STR_CLOCK160_SAMPLESPERSECOND)) return false;
+			// subbandWidth
+			if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.subbandWidth", STR_CLOCK160_SUBBANDWIDTH)) return false;
 			// systemClock
 			if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.sampleClock", STR_CLOCK160_SAMPLECLOCK)) return false;
 		}
 		else {
             if (!setNodeValue(treeID,"LOFAR.ObsSW.Observation.clockMode","<<Clock200")) return false;
+			// channelWidth
+			if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.channelWidth", STR_CLOCK200_CHANNELWIDTH)) return false;
+			// samplesPerSecond
+			if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.samplesPerSecond", STR_CLOCK200_SAMPLESPERSECOND)) return false;
+			// subbandWidth
+			if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.subbandWidth", STR_CLOCK200_SUBBANDWIDTH)) return false;
 			// systemClock
 			if (!setNodeValue(treeID, "LOFAR.ObsSW.Observation.sampleClock", STR_CLOCK200_SAMPLECLOCK)) return false;
 		}
@@ -2559,10 +2583,12 @@ bool SASConnection::saveStationSettings(int treeID, const StationTask &task, con
 		// TBB piggyback allowed?
         const Observation *obs = dynamic_cast<const Observation *>(&task);
         if (obs) {
+            if (diff->TBBPiggybackAllowed)
                 bResult &= setNodeValue(treeID, "LOFAR.ObsSW.Observation.ObservationControl.StationControl.tbbPiggybackAllowed",
                                         (obs->getTBBPiggybackAllowed() ? "true" : "false"));
-                bResult &= setNodeValue(treeID, "LOFAR.ObsSW.Observation.ObservationControl.StationControl.aartfaacPiggybackAllowed",
-                                        (obs->getAartfaacPiggybackAllowed() ? "true" : "false"));
+            if (diff->AartfaacPiggybackAllowed)
+                 bResult &= setNodeValue(treeID, "LOFAR.ObsSW.Observation.ObservationControl.StationControl.aartfaacPiggybackAllowed",
+                                         (obs->getAartfaacPiggybackAllowed() ? "true" : "false"));
         }
     }
 
@@ -2664,6 +2690,9 @@ bool SASConnection::saveInputStorageSettings(int treeID, const Task &task) {
 
 bool SASConnection::saveOutputStorageSettings(int treeID, const Task &task, const task_diff *diff) {
     bool bResult(true);
+    if (task.getOutputDataproductCluster() == "CEP4") { //For CEP4 we're skipping this. /AR
+        return bResult;
+    }
     const TaskStorage *task_storage(task.storage());
     if (task_storage) {
         QString trueStr("true"), falseStr("false");
@@ -2705,7 +2734,7 @@ bool SASConnection::saveOutputDataProducts(int treeID, const Task &task) {
         Task::task_type type(task.getType());
         std::map<dataProductTypes, TaskStorage::outputDataProduct>::const_iterator flit;
         for (dataProductTypes dp = _BEGIN_DATA_PRODUCTS_ENUM_; dp < _END_DATA_PRODUCTS_ENUM_-1; dp = dataProductTypes(dp + 1)) {
-            flit = outputDataProdukt.find(dp);
+            flit = outputDataProdukt.find(dp); //flit = file list iterator
             if (flit != outputDataProdukt.end()) {
                 if (task_storage->isOutputDataProduktAssigned(dp)) {
                     // compile the vector strings for SAS
@@ -2997,6 +3026,7 @@ void SASConnection::getOutputStorageSettings(int treeID, Task &task) {
     if (task_storage) {
         QStringList nodeList, raidList;
         QString storageLocationsKey, keyPrefix;
+        QString outputCluster; // Added to support CEP2/4 switch /AR
         QVariant enabledKey;
         bool enabledValue;
         task_storage->unAssignStorage(); // clear the tasks storage, we will be adding incrementally (Task::setStorage() doesn't delete existing storage locations
@@ -3123,6 +3153,29 @@ void SASConnection::getOutputStorageSettings(int treeID, Task &task) {
                 QStringList identificationsList = value.toString().remove('[').remove(']').split(',',QString::SkipEmptyParts);// string2VectorOfStrings(value.toString());
                 task_storage->addOutputDataProductID(*dpit, identificationsList);
             }
+
+            // get values for storage cluster /AR
+            value = getNodeValue(treeID, keyPrefix + "storageClusterName");
+            if (value.isValid()) {
+                QString cluster = value.toString();
+                if (!cluster.isEmpty()) {
+                    if (outputCluster.isEmpty()) {
+                        outputCluster = cluster;
+                    }
+                    else {
+                        if (cluster != outputCluster) {
+                            itsProgressDialog.addError(QString("tree:") + QString::number(treeID) + " output data product type:" + DATA_PRODUCTS[*dpit] + " different output clusters are not supported");
+                        }
+                    }
+                }
+                //We probably will not need to support this: task_storage->addOutputDataProductCluster(*dpit, ?); /AR
+            }
+        }
+        if (!outputCluster.isEmpty()) {
+            task.setOutputDataproductCluster(outputCluster);
+        }
+        else {
+            task.setOutputDataproductCluster("CEP2"); // CEP2 is default for backward compatibility /AR
         }
     }
 }
@@ -4444,7 +4497,7 @@ bool SASConnection::saveTaskToSAS(int treeID, Task &task, const task_diff *diff)
 	Task::task_status status = task.getStatus();
 	bResult &= saveSchedulerProperties(treeID, task, diff);
 
-    if (task.isStationTask()) {
+    if (task.isStationTask()) { //OBSERVATION, RESERVATION or MAINTENANCE
         bResult &= saveStationSettings(treeID, static_cast<StationTask &>(task), diff);
 
         if (task.isObservation()) {
@@ -4461,7 +4514,7 @@ bool SASConnection::saveTaskToSAS(int treeID, Task &task, const task_diff *diff)
             }
 
             // Cobalt Correlator BlockSize
-            if (status == Task::SCHEDULED) { // in SCHEDULED state always update BlockSize
+            if (status == Task::PRESCHEDULED || status == Task::SCHEDULED) { // in PRESCHEDULED or SCHEDULED state always update BlockSize //FIXME? Added PRESCHEDULED for CEP4 /AR
                 bResult &= saveCobaltBlockSize(treeID, obs);
             }
             else if (diff) {
@@ -4489,7 +4542,7 @@ bool SASConnection::saveTaskToSAS(int treeID, Task &task, const task_diff *diff)
         }
 	}
 
-	if (diff) {
+    if (diff) { //FIXME if diff is set then we do this, otherwise we do it anyway? This seems redundant. /AR
 		// all the following differences can potentially change the number of output files being written,
 	    // therefore, we update the storage keys in SAS when anyone of them has changed
 		if (diff->output_data_types || diff->output_storage_settings || diff->output_data_products ||
diff --git a/SAS/Scheduler/src/Storage.cpp b/SAS/Scheduler/src/Storage.cpp
index febe515321b31e0d7bfd9181bc5a0836ad1bfe3a..b4e3c887a85fb8d8a2d6715861c7f4c00f2cca37 100644
--- a/SAS/Scheduler/src/Storage.cpp
+++ b/SAS/Scheduler/src/Storage.cpp
@@ -123,61 +123,66 @@ std::vector<storageResult> Storage::addStorageToTask(Task *pTask, const storageM
             }
         }
         itsLastStorageCheckResult.clear();
-        // check if the total bandwidths for the nodes used do not exceed the nodes their available bandwidths
-        for (std::map<int, double>::const_iterator nit = totalBWPerNodeMap.begin(); nit != totalBWPerNodeMap.end(); ++nit) {
-            storageNodesMap::const_iterator nodeit = itsStorageNodes.find(nit->first);
-            if (nodeit != itsStorageNodes.end()) {
-                //			std::cout << "Total bandwidth required for node:" << nodeit->second.name() << " = " << nit->second << " kb/s" << std::endl;
-                res = nodeit->second.checkBandWidth(start, end, nit->second);
-                if (res != CONFLICT_NO_CONFLICT) {
-                    itsLastStorageCheckResult.push_back(storageResult(_END_DATA_PRODUCTS_ENUM_, nit->first, -1, res));
+        if (pTask->getOutputDataproductCluster() == "CEP4") { // Can we just skip this for CEP4? /AR
+            debugWarn("sis","Storage::addStorageToTask: Did not check storage for task:", pTask->getID(), " (CEP4 detected)");
+        }
+        else {
+            // check if the total bandwidths for the nodes used do not exceed the nodes their available bandwidths
+            for (std::map<int, double>::const_iterator nit = totalBWPerNodeMap.begin(); nit != totalBWPerNodeMap.end(); ++nit) {
+                storageNodesMap::const_iterator nodeit = itsStorageNodes.find(nit->first);
+                if (nodeit != itsStorageNodes.end()) {
+                    //			std::cout << "Total bandwidth required for node:" << nodeit->second.name() << " = " << nit->second << " kb/s" << std::endl;
+                    res = nodeit->second.checkBandWidth(start, end, nit->second);
+                    if (res != CONFLICT_NO_CONFLICT) {
+                        itsLastStorageCheckResult.push_back(storageResult(_END_DATA_PRODUCTS_ENUM_, nit->first, -1, res));
+                    }
                 }
             }
-        }
-        if (itsLastStorageCheckResult.empty()) { // if no total bandwidth error for any node then start the rest of the checks
-            for (dataFileMap::const_iterator dfit = dataFiles.begin(); dfit != dataFiles.end(); ++dfit) {
-                storageMap::const_iterator stit = storageLocations.find(dfit->first);
-                if (stit != storageLocations.end()) {
-                    if (!stit->second.empty()) {
-                        claimSize = (double) dfit->second.first * dfit->second.second / stit->second.size(); // size per file * nrFiles / nr of raid arrays assigned
-                        bandWidth = (double) claimSize / 1000 / durationSec; // MByte/sec, the required remaining disk write speed (or bandwidth) for this array
+            if (itsLastStorageCheckResult.empty()) { // if no total bandwidth error for any node then start the rest of the checks
+                for (dataFileMap::const_iterator dfit = dataFiles.begin(); dfit != dataFiles.end(); ++dfit) {
+                    storageMap::const_iterator stit = storageLocations.find(dfit->first);
+                    if (stit != storageLocations.end()) {
+                        if (!stit->second.empty()) {
+                            claimSize = (double) dfit->second.first * dfit->second.second / stit->second.size(); // size per file * nrFiles / nr of raid arrays assigned
+                            bandWidth = (double) claimSize / 1000 / durationSec; // MByte/sec, the required remaining disk write speed (or bandwidth) for this array
 
-                        // check requested resources
-                        for (storageVector::const_iterator it = stit->second.begin(); it != stit->second.end(); ++it) {
-                            sit = itsStorageNodes.find(it->first);
-                            if (sit != itsStorageNodes.end()) {
-                                // check size requirements
-                                res = sit->second.checkSpaceAndWriteSpeed(start, end, claimSize, bandWidth, it->second); // check space and write speed for every raid array
-                                if (res != CONFLICT_NO_CONFLICT) {
-                                    itsLastStorageCheckResult.push_back(storageResult(dfit->first, it->first, it->second, res));
-                                    //								itsLastStorageCheckResult[it->first].push_back(std::pair<int, task_conflict>(it->second, res)); // store the error result
-                                }
-                                else { // add the claim
-                                    sit->second.addClaim(pTask->getID(), start, end, dfit->first, claimSize, bandWidth, it->second);
-                                }
-                            }
-                        }
-                        // if there were conflicts then remove the claim again from the storage nodes
-                        if (!itsLastStorageCheckResult.empty()) {
-                            std::vector<int> snd;
+                            // check requested resources
                             for (storageVector::const_iterator it = stit->second.begin(); it != stit->second.end(); ++it) {
                                 sit = itsStorageNodes.find(it->first);
                                 if (sit != itsStorageNodes.end()) {
-                                    if (std::find(snd.begin(), snd.end(), stit->first) == snd.end()) {
-                                        sit->second.removeClaim(pTask->getID()); // only call removeClaim one time for every storage node (it removes all claims found for the task ID)
-                                        snd.push_back(stit->first);
+                                    // check size requirements
+                                    res = sit->second.checkSpaceAndWriteSpeed(start, end, claimSize, bandWidth, it->second); // check space and write speed for every raid array
+                                    if (res != CONFLICT_NO_CONFLICT) {
+                                        itsLastStorageCheckResult.push_back(storageResult(dfit->first, it->first, it->second, res));
+                                        //								itsLastStorageCheckResult[it->first].push_back(std::pair<int, task_conflict>(it->second, res)); // store the error result
+                                    }
+                                    else { // add the claim
+                                        sit->second.addClaim(pTask->getID(), start, end, dfit->first, claimSize, bandWidth, it->second);
                                     }
                                 }
                             }
+                            // if there were conflicts then remove the claim again from the storage nodes
+                            if (!itsLastStorageCheckResult.empty()) {
+                                std::vector<int> snd;
+                                for (storageVector::const_iterator it = stit->second.begin(); it != stit->second.end(); ++it) {
+                                    sit = itsStorageNodes.find(it->first);
+                                    if (sit != itsStorageNodes.end()) {
+                                        if (std::find(snd.begin(), snd.end(), stit->first) == snd.end()) {
+                                            sit->second.removeClaim(pTask->getID()); // only call removeClaim one time for every storage node (it removes all claims found for the task ID)
+                                            snd.push_back(stit->first);
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                        else { // no storage has been assigned to this data product type
+                            itsLastStorageCheckResult.push_back(storageResult(dfit->first, -1, -1, CONFLICT_NO_STORAGE_ASSIGNED));
                         }
                     }
                     else { // no storage has been assigned to this data product type
                         itsLastStorageCheckResult.push_back(storageResult(dfit->first, -1, -1, CONFLICT_NO_STORAGE_ASSIGNED));
                     }
                 }
-                else { // no storage has been assigned to this data product type
-                    itsLastStorageCheckResult.push_back(storageResult(dfit->first, -1, -1, CONFLICT_NO_STORAGE_ASSIGNED));
-                }
             }
         }
         if (itsLastStorageCheckResult.empty()) {
@@ -221,7 +226,7 @@ std::vector<storageResult> Storage::addStorageToTask(Task *pTask, dataProductTyp
                 sit = itsStorageNodes.find(it->first);
                 if (sit != itsStorageNodes.end()) {
                     // check size requirements
-                    if (!noCheck) {
+                    if (!noCheck && pTask->getOutputDataproductCluster() != "CEP4") {
                         res = sit->second.checkSpaceAndWriteSpeed(start, end, claimSize, bandWidth, it->second); // check space and bandwidth for every raid array
                     }
                     if (res == CONFLICT_NO_CONFLICT) {
diff --git a/SAS/Scheduler/src/StorageNode.cpp b/SAS/Scheduler/src/StorageNode.cpp
index d6db94dc196b3fae76a03d6f68b50d15b08bca6e..2eb6e29dbda65b46898759f24b718b5f5e7a3996 100644
--- a/SAS/Scheduler/src/StorageNode.cpp
+++ b/SAS/Scheduler/src/StorageNode.cpp
@@ -107,34 +107,34 @@ task_conflict StorageNode::checkBandWidth(const AstroDateTime &start, const Astr
 }
 
 task_conflict StorageNode::checkSpaceAndWriteSpeed(const AstroDateTime &startTime, const AstroDateTime &endTime, const double &claimSize, const double &writeSpeed, int raidID) const {
-	capacityTimeMap::const_iterator cit = itsRemainingSpace.find(raidID);
+    capacityTimeMap::const_iterator cit = itsRemainingSpace.find(raidID);
 //	std::cout << "checking storage node: " << itsName << std::endl << "partition: " << raidID << std::endl << "claim size for this node: "
 //	<< claimSize << std::endl << "writeSpeed for this node: " << writeSpeed << std::endl << "start time: " << startTime.toString() << std::endl
 //	<< "end time: " << endTime.toString() << std::endl;
-	if (cit != itsRemainingSpace.end()) {
+    if (cit != itsRemainingSpace.end()) {
 //		 std::cout << "first free space log: " << cit->second.front().time.toString() << ", space remaining" << cit->second.front().remainingDiskSpacekB << "kB, disk write speed remaining" <<  cit->second.front().remainingDiskWriteBW << "MByte/s";
-		if (startTime > cit->second.front().time) { // start time of observation needs to be after 'now' which is the first time in itsRemainingSpace
-			for (std::vector<capacityLogPoint>::const_iterator sit = cit->second.begin(); sit != cit->second.end(); ++sit) {
+        if (startTime > cit->second.front().time) { // start time of observation needs to be after 'now' which is the first time in itsRemainingSpace
+            for (std::vector<capacityLogPoint>::const_iterator sit = cit->second.begin(); sit != cit->second.end(); ++sit) {
 //				std::cout << sit->time.toString() << ", free space:" << sit->remainingDiskSpacekB << "kB, write speed remaining:" << sit->remainingDiskWriteBW << "MB/s" << std::endl;
-				if (startTime >= sit->time) { // found the last time that is earlier than the requested start time
-					while (sit < cit->second.end()) { // iterate over the following free space log-points to check if space stays sufficient during the task's duration
-						if (claimSize > sit->remainingDiskSpacekB) {
-							return CONFLICT_STORAGE_NODE_SPACE; // insufficient space
-						}
-						else if (writeSpeed > sit->remainingDiskWriteBW) {
+                if (startTime >= sit->time) { // found the last time that is earlier than the requested start time
+                    while (sit < cit->second.end()) { // iterate over the following free space log-points to check if space stays sufficient during the task's duration
+                        if (claimSize > sit->remainingDiskSpacekB) {
+                            return CONFLICT_STORAGE_NODE_SPACE; // insufficient space
+                        }
+                        else if (writeSpeed > sit->remainingDiskWriteBW) {
 //							std::cerr << "conflict write speed: " << "requested: " << writeSpeed << ", " << "node remaining write speed at " << sit->time.toString().c_str() << ": " << sit->remainingDiskWriteBW << std::endl;
-							return CONFLICT_STORAGE_WRITE_SPEED; // requested write speed to high
-						}
-						else if ((sit++)->time > endTime) return CONFLICT_NO_CONFLICT;
-					}
-					return CONFLICT_NO_CONFLICT; // if only the initial entry is logged in itsRemainingSpace we should arrive here.
-				}
-			}
-			return CONFLICT_NO_CONFLICT; // if only the initial entry is logged in itsRemainingSpace we should arrive here.
-		}
-		else return CONFLICT_STORAGE_TIME_TOO_EARLY; // Error: start time of observation before now
-	}
-	else return CONFLICT_RAID_ARRRAY_NOT_FOUND; // Error: partition not found
+                            return CONFLICT_STORAGE_WRITE_SPEED; // requested write speed too high
+                        }
+                        else if ((sit++)->time > endTime) return CONFLICT_NO_CONFLICT;
+                    }
+                    return CONFLICT_NO_CONFLICT; // if only the initial entry is logged in itsRemainingSpace we should arrive here.
+                }
+            }
+            return CONFLICT_NO_CONFLICT; // if only the initial entry is logged in itsRemainingSpace we should arrive here.
+        }
+        else return CONFLICT_STORAGE_TIME_TOO_EARLY; // Error: start time of observation before now
+    }
+    else return CONFLICT_RAID_ARRRAY_NOT_FOUND; // Error: partition not found
 }
 
 // return the ids of the raid arrays that meet the specified bandwidth (kbit/sec) and claimSize within the timespan defined by startTime and endTime
diff --git a/SAS/Scheduler/src/StorageNode.h b/SAS/Scheduler/src/StorageNode.h
index 979510bbc9f1860b6bd8ff05fc7408d41d1b22d4..06e052c8b60d8088d23bba425184ffbae2a3afc1 100644
--- a/SAS/Scheduler/src/StorageNode.h
+++ b/SAS/Scheduler/src/StorageNode.h
@@ -89,7 +89,7 @@ public:
 	// check bandwidth requirements don't exceed the nodes bandwidth in the specified (start,end) period
 	task_conflict checkBandWidth(const AstroDateTime &start, const AstroDateTime &end, const double &totalBW_kbs) const;
 	// check if space is available to add the requested task to the claims of this storage node using the specified raid array (claimSize units: kByte, bandWidth units kbit/sec)
-	task_conflict checkSpaceAndWriteSpeed(const AstroDateTime &start, const AstroDateTime &end, const double &claimSize, const double &writeSpeed, int raidID) const;
+    task_conflict checkSpaceAndWriteSpeed(const AstroDateTime &start, const AstroDateTime &end, const double &claimSize, const double &writeSpeed, int raidID) const;
 	// return the ids of the raid arrays that meet the specified bandwidth (kbit/sec) and fileSize (kByte) within the timespan defined by startTime and endTime
 	nodeStorageOptions getPossibleRaidArrays(const AstroDateTime &startTime, const AstroDateTime &endTime,
 			const double &fileSize, const double &bandWidth, unsigned minNrFiles, std::vector<std::pair<int, task_conflict> > &result) const;
diff --git a/SAS/Scheduler/src/conflictdialog.cpp b/SAS/Scheduler/src/conflictdialog.cpp
index 2cabb3d7226e066cd7eb9a00f61e77ee08b154be..78e12a7b5a626535dbb0a19168c2bb7a333e7a41 100644
--- a/SAS/Scheduler/src/conflictdialog.cpp
+++ b/SAS/Scheduler/src/conflictdialog.cpp
@@ -28,7 +28,11 @@ ConflictDialog::ConflictDialog(const Controller *controller)
 	ui.treeWidgetConflicts->header()->resizeSection(2, 55);
 	ui.treeWidgetConflicts->header()->resizeSection(3, 55);
 	ui.treeWidgetConflicts->header()->resizeSection(4, 55);
-	ui.treeWidgetConflicts->header()->setResizeMode(QHeaderView::ResizeToContents);
+#if QT_VERSION >= 0x050000
+    ui.treeWidgetConflicts->header()->setSectionResizeMode(QHeaderView::ResizeToContents);
+#else
+    ui.treeWidgetConflicts->header()->setResizeMode(QHeaderView::ResizeToContents);
+#endif
 }
 
 ConflictDialog::~ConflictDialog()
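The same QT_VERSION conditional recurs below in schedulergui.cpp, statehistorydialog.cpp, schedulesettingsdialog.cpp and taskdialog.cpp. If a shared header is acceptable, the check could live in one place; a sketch (the helper name is hypothetical):

#include <QHeaderView>

// Sketch, not part of the patch: wraps the Qt 4 / Qt 5 resize-mode API difference.
inline void setHeaderResizeMode(QHeaderView *header, QHeaderView::ResizeMode mode)
{
#if QT_VERSION >= 0x050000
    header->setSectionResizeMode(mode);   // Qt 5 name
#else
    header->setResizeMode(mode);          // Qt 4 name
#endif
}

// e.g. setHeaderResizeMode(ui.treeWidgetConflicts->header(), QHeaderView::ResizeToContents);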
diff --git a/SAS/Scheduler/src/conflictdialog.h b/SAS/Scheduler/src/conflictdialog.h
index d49c4b2641d74de2a14f2c25906d37d94f15415f..6ed701403c1ca89cd9da0166619670344b454b6f 100644
--- a/SAS/Scheduler/src/conflictdialog.h
+++ b/SAS/Scheduler/src/conflictdialog.h
@@ -14,7 +14,7 @@
 #ifndef CONFLICTDIALOG_H
 #define CONFLICTDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_conflictdialog.h"
 #include <map>
 #include "taskstorage.h"
diff --git a/SAS/Scheduler/src/dataslotdialog.h b/SAS/Scheduler/src/dataslotdialog.h
index 1ccc11256b63b0b714c4f0140ea045eabc20e20e..dac19546194fb46c110c9b6e5f285e0fe55114be 100644
--- a/SAS/Scheduler/src/dataslotdialog.h
+++ b/SAS/Scheduler/src/dataslotdialog.h
@@ -14,7 +14,7 @@
 #ifndef DATASLOTDIALOG_H
 #define DATASLOTDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_dataslotdialog.h"
 #include "task.h"
 #include "stationtask.h"
diff --git a/SAS/Scheduler/src/digitalbeamdialog.h b/SAS/Scheduler/src/digitalbeamdialog.h
index 0b58d0f80b012eec291da7dc364a11d25634258f..0a633b9b8598f60e955cd406bf0490080dd651fb 100644
--- a/SAS/Scheduler/src/digitalbeamdialog.h
+++ b/SAS/Scheduler/src/digitalbeamdialog.h
@@ -14,7 +14,7 @@
 #ifndef DIGITALBEAMDIALOG_H
 #define DIGITALBEAMDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_digitalbeamdialog.h"
 #include "task.h"
 #include "Angle.h"
diff --git a/SAS/Scheduler/src/graphicstoragescene.h b/SAS/Scheduler/src/graphicstoragescene.h
index 3ed6e436479cbb50c6cfc266b390b549bfc34856..9bbd3f9455e1cb71336d2c8ddc523db08aaf3415 100644
--- a/SAS/Scheduler/src/graphicstoragescene.h
+++ b/SAS/Scheduler/src/graphicstoragescene.h
@@ -14,7 +14,7 @@
 #ifndef GRAPHICSTORAGESCENE_H
 #define GRAPHICSTORAGESCENE_H
 
-#include <QtGui/QWidget>
+#include <QWidget>
 #include "ui_graphicstoragescene.h"
 #include <QGraphicsScene>
 #include "GraphicTimeLine.h"
diff --git a/SAS/Scheduler/src/lofar_scheduler.h b/SAS/Scheduler/src/lofar_scheduler.h
index 43797b2808681ca4c9ee9f02e69462450edea572..b96fbc85aa5e82a674c9bc6b3a2242182f0c0d0d 100644
--- a/SAS/Scheduler/src/lofar_scheduler.h
+++ b/SAS/Scheduler/src/lofar_scheduler.h
@@ -32,7 +32,7 @@
 
 #define FILE_WRITE_VERSION 10
 
-#define J2000_EPOCH 2451545
+#define J2000_EPOCH 2451545LL
 // pi
 #define PI	3.14159265358979323846264338327950288419716939937510
 #define PI_DIV2  1.57079632679489661923132169163975144209858469968755
@@ -185,8 +185,9 @@ enum data_headers {
 	TASK_DURATION,
 	TASK_TYPE,
 	TASK_STATUS,
-	UNSCHEDULED_REASON,
-	TASK_DESCRIPTION,
+    CLUSTER_NAME,
+    UNSCHEDULED_REASON,
+    TASK_DESCRIPTION,
 	STATION_ID,
 	RESERVATION_NAME,
 	PRIORITY,
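The LL suffix belongs with the (qint64) cast in task.cpp further down: under Qt 5, QDate::toJulianDay() returns qint64 instead of int, so the day arithmetic around J2000_EPOCH becomes 64-bit and both std::max arguments must have the same type. A sketch of the affected pattern (the function and its parameter are illustrative only):

#include <QDate>
#include <algorithm>
#include "lofar_scheduler.h"   // J2000_EPOCH

// Sketch, not part of the patch: mirrors the firstPossibleDay computation in task.cpp.
qint64 firstPossibleDayFor(qint64 earliestSchedulingJulianDay)
{
    return std::max(QDate::currentDate().toJulianDay() - J2000_EPOCH,
                    earliestSchedulingJulianDay);
}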
diff --git a/SAS/Scheduler/src/parsettreeviewer.h b/SAS/Scheduler/src/parsettreeviewer.h
index d3f942c01fd6af2456aaa26c25d5a1c712b32d78..3513c74294a28d17057dc23cd26205f9a8a01b97 100644
--- a/SAS/Scheduler/src/parsettreeviewer.h
+++ b/SAS/Scheduler/src/parsettreeviewer.h
@@ -15,7 +15,7 @@
 #ifndef PARSETTREEVIEWER_H
 #define PARSETTREEVIEWER_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_parsettreeviewer.h"
 #include "OTDBtree.h"
 
diff --git a/SAS/Scheduler/src/publishdialog.h b/SAS/Scheduler/src/publishdialog.h
index 8b2058ea3069f6e513b2df1ed8ac30797d28c2b3..be94450b8f8895032b5e2708c20980b496ab32e7 100755
--- a/SAS/Scheduler/src/publishdialog.h
+++ b/SAS/Scheduler/src/publishdialog.h
@@ -14,7 +14,7 @@
 #ifndef PUBLISHDIALOG_H
 #define PUBLISHDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include <QFileInfo>
 #include "ui_publishdialog.h"
 #include "schedulersettings.h"
diff --git a/SAS/Scheduler/src/qlofardatamodel.cpp b/SAS/Scheduler/src/qlofardatamodel.cpp
index 7dddb91bf6a1c5efa60289b6233d8246e6cb4ce7..f37a23841928bf950ed7cac08e783e22ab27e289 100644
--- a/SAS/Scheduler/src/qlofardatamodel.cpp
+++ b/SAS/Scheduler/src/qlofardatamodel.cpp
@@ -47,7 +47,7 @@ QVariant QLofarDataModel::data(const QModelIndex & index, int role) const
 
 	if (role == Qt::BackgroundRole) {
 		if (isErrorIndex(index)) // error
-			return Qt::red;
+            return QColor(Qt::red);
 		else {
 			int task_status = index.model()->data(index.model()->index(index.row(),TASK_STATUS),USERDATA_ROLE).toInt();
 			if (task_status == Task::FINISHED) {
@@ -115,18 +115,18 @@ QVariant QLofarDataModel::data(const QModelIndex & index, int role) const
 //					}
 				}
 				else { // regular tasks
-					return Qt::white;
+                    return QColor(Qt::white);
 				}
 			}
 		}
 	}
 	else if (role == Qt::ForegroundRole ) {
 		if (isErrorIndex(index)) // error
-			return Qt::white;
+            return QColor(Qt::white);
 		else {
 			int task_status = index.model()->data(index.model()->index(index.row(),TASK_STATUS),USERDATA_ROLE).toInt();
 			if ((task_status >= Task::COMPLETING) && (task_status <= Task::ABORTED)) {
-				return Qt::black;
+                return QColor(Qt::black);
 			}
 			else {
 				int task_type = index.model()->data(index.model()->index(index.row(),TASK_TYPE),USERDATA_ROLE).toInt();
@@ -148,10 +148,10 @@ QVariant QLofarDataModel::data(const QModelIndex & index, int role) const
 					case PLANNED_START:
 					case PLANNED_END:
 					case TASK_STATUS:
-						return Qt::black; // for editable cells
+                        return QColor(Qt::black); // for editable cells
 						break;
 					default:
-						return Qt::darkGray; // for non-editable cells
+                        return QColor(Qt::darkGray); // for non-editable cells
 						break;
 					}
 				}
@@ -170,15 +170,15 @@ QVariant QLofarDataModel::data(const QModelIndex & index, int role) const
 					case PLANNED_START:
 					case PLANNED_END:
 					case TASK_STATUS:
-						return Qt::black; // for editable cells
+                        return QColor(Qt::black); // for editable cells
 						break;
 					default:
-						return Qt::darkGray; // for non-editable cells
+                        return QColor(Qt::darkGray); // for non-editable cells
 						break;
 					}
 				}
 				else { // regular tasks
-					return Qt::black;
+                    return QColor(Qt::black);
 				}
 			}
 		}
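Wrapping the global colours in QColor matters because data() returns a QVariant: a bare Qt::GlobalColor ends up in the variant as an int, which the view cannot turn into a brush for the Background/Foreground roles. A minimal illustration (the function name is illustrative):

#include <QColor>
#include <QVariant>

// Sketch, not part of the patch.
QVariant errorBackground()
{
    return QColor(Qt::red);   // the variant holds a QColor the delegate can use
    // return Qt::red;        // would store the enum as an int, so no colour is applied
}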
diff --git a/SAS/Scheduler/src/redistributetasksdialog.h b/SAS/Scheduler/src/redistributetasksdialog.h
index ed8d15e19a4b14e5610e2176a7619547cb657950..f2650a2b7acd631a7da08a52e6c25bfe8d4e4cb3 100644
--- a/SAS/Scheduler/src/redistributetasksdialog.h
+++ b/SAS/Scheduler/src/redistributetasksdialog.h
@@ -14,7 +14,7 @@
 #ifndef REDISTRIBUTETASKSDIALOG_H
 #define REDISTRIBUTETASKSDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_redistributetasksdialog.h"
 
 class redistributeTasksDialog : public QDialog
diff --git a/SAS/Scheduler/src/sasconnectdialog.h b/SAS/Scheduler/src/sasconnectdialog.h
index 14c2c0d94d5601d7c132ea497e6c3b699a910dc1..bd9037d2397739820f4c941d39fe77bc3ca7bc4a 100644
--- a/SAS/Scheduler/src/sasconnectdialog.h
+++ b/SAS/Scheduler/src/sasconnectdialog.h
@@ -14,7 +14,7 @@
 #ifndef SASCONNECTDIALOG_H
 #define SASCONNECTDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_sasconnectdialog.h"
 #include <QString>
 #include <QStringList>
diff --git a/SAS/Scheduler/src/sasprogressdialog.h b/SAS/Scheduler/src/sasprogressdialog.h
index 3543fcfbd417f6f1b557d4430e05861667382dfa..652b9841c92ffe0b01cac6a27ed0beeeee9ae685 100644
--- a/SAS/Scheduler/src/sasprogressdialog.h
+++ b/SAS/Scheduler/src/sasprogressdialog.h
@@ -1,7 +1,7 @@
 #ifndef SASPROGRESSDIALOG_H
 #define SASPROGRESSDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_sasprogressdialog.h"
 
 class SASProgressDialog : public QDialog
diff --git a/SAS/Scheduler/src/sasstatusdialog.h b/SAS/Scheduler/src/sasstatusdialog.h
index 51250c9cd5ec0c2f3d49e80fb412b17a756c4c8a..bda129521459209f5e957d08460268918bcf2054 100644
--- a/SAS/Scheduler/src/sasstatusdialog.h
+++ b/SAS/Scheduler/src/sasstatusdialog.h
@@ -1,7 +1,7 @@
 #ifndef SASSTATUSDIALOG_H
 #define SASSTATUSDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_sasstatusdialog.h"
 
 class SASStatusDialog : public QDialog
diff --git a/SAS/Scheduler/src/sasuploaddialog.h b/SAS/Scheduler/src/sasuploaddialog.h
index 0597ee0c87fa46999a90f19eaa81124ec8247e16..8cb6e512f1f15933eadc9d1d5a7da12dd9a398ae 100644
--- a/SAS/Scheduler/src/sasuploaddialog.h
+++ b/SAS/Scheduler/src/sasuploaddialog.h
@@ -1,7 +1,7 @@
 #ifndef SASUPLOADDIALOG_H
 #define SASUPLOADDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_sasuploaddialog.h"
 #include "task.h"
 
diff --git a/SAS/Scheduler/src/schedulerLib.cpp b/SAS/Scheduler/src/schedulerLib.cpp
index a1bc4ee448cf7f0c54551eab00553bb3fce92453..448c180078996ecf7b1092e987f5dba725e4f7fc 100644
--- a/SAS/Scheduler/src/schedulerLib.cpp
+++ b/SAS/Scheduler/src/schedulerLib.cpp
@@ -13,6 +13,7 @@
 
 #include <QtGui>
 #include <QApplication>
+#include <QDir>
 #include "lofar_scheduler.h"
 #include "Controller.h"
 #include "schedulergui.h"
@@ -58,6 +59,17 @@ int main_function(int argc, char *argv[])
     // specific object
     handler = new SignalHandler(&app, &c);
 
+#ifdef Q_OS_MACX
+    if (QSysInfo::MacintoshVersion > QSysInfo::MV_10_8)
+    {   //OSX 10.9+, we find and set the directory of the .app, otherwise QDir::currentPath can be empty.
+        QDir dir = app.applicationDirPath();
+        dir.cdUp();
+        dir.cdUp();
+        dir.cdUp(); //To get from the executable in the bundle to the .app location
+        QDir::setCurrent(dir.absolutePath());
+    }
+#endif
+
     // c.start() does not return it does this after closing gui window.
     try {
     c.start(); // controller starts the GUI
diff --git a/SAS/Scheduler/src/schedulergui.cpp b/SAS/Scheduler/src/schedulergui.cpp
index 1052ce7873d0743f121b55484c46088ffa62773e..204200af9f1b19a49f2eca189850c312d1fff255 100644
--- a/SAS/Scheduler/src/schedulergui.cpp
+++ b/SAS/Scheduler/src/schedulergui.cpp
@@ -17,6 +17,7 @@
 #include <QTableView>
 #include <QDesktopWidget>
 #include <QLCDNumber>
+#include <QFileDialog>
 #include <sstream>
 #include <vector>
 #include <algorithm>
@@ -38,9 +39,9 @@ using std::string;
 using std::endl;
 
 const char * DATA_HEADERS[NR_DATA_HEADERS] = { "task ID", "SAS ID", "MoM ID", "group ID", "project ID", "task name", "planned start (UTC)", "planned end (UTC)", "duration",
-        "task type", "task status", "error reason", "task description", "stations", "reservation", "priority", "fix day", "fix time",
+        "task type", "task status" , "cluster", "error reason", "task description", "stations", "reservation", "priority", "fix day", "fix time",
 		"first possible date", "last possible date", "window min time", "window max time", "antenna mode", "clock", "filter", "# subbands",
-		"contact name", "phone", "e-mail", "predecessors", "pred. min time dif", "pred. max time dif", "night wf.", "data size" };
+        "contact name", "phone", "e-mail", "predecessors", "pred. min time dif", "pred. max time dif", "night wf.", "data size"};
 
 extern QString currentUser;
 
@@ -235,7 +236,7 @@ void SchedulerGUI::createMainToolbar(void) {
     itsLCDtimer->setSegmentStyle(QLCDNumber::Flat);
     itsLCDtimer->setFrameStyle(QLCDNumber::Sunken);
     itsLCDtimer->setFrameShape(QLCDNumber::WinPanel);
-    itsLCDtimer->setNumDigits(8);
+    itsLCDtimer->setDigitCount(8);
     itsLCDtimer->setToolTip("current UTC");
     itsLCDtimer->display("00:00:00");
     itsMainToolBar->addWidget(itsLCDtimer);
@@ -361,16 +362,21 @@ void SchedulerGUI::updateGraphicTasks(const scheduledTasksMap &scheduledTasks, c
 
 void SchedulerGUI::createTableDock(void) {
     // create table dock and its layout
-	itsTableDock = new QDockWidget(tr("Table schedule view"), this);
+    itsTableDock = new QDockWidget("Table schedule view", this);
 	itsTableDockWidgetContents = new QWidget();
     itsTableDockMainLayout = new QGridLayout(itsTableDockWidgetContents);
     itsTableDockMainLayout->setMargin(5);
     // create the table view
 	itsTableView = new TableView(itsTableDockWidgetContents);
 	itsTableView->setWordWrap(false);
-	itsTableView->verticalHeader()->setResizeMode(QHeaderView::Fixed);
+#if QT_VERSION >= 0x050000
+    itsTableView->verticalHeader()->setSectionResizeMode(QHeaderView::Fixed);
+    itsTableView->horizontalHeader()->setSectionsMovable(true);
+#else
+    itsTableView->verticalHeader()->setResizeMode(QHeaderView::Fixed);
+    itsTableView->horizontalHeader()->setMovable(true);
+#endif
 	itsTableView->setDragEnabled(false);
-	itsTableView->horizontalHeader()->setMovable(true);
 	itsTableView->setDropIndicatorShown(true);
 	itsTableView->setAcceptDrops(false);
 	itsTableView->setAlternatingRowColors(true);
@@ -721,7 +727,11 @@ void SchedulerGUI::newTable(SchedulerData const &data) {
 	itsTableView->setModel(itsModel);
 	itsTableView->setItemDelegate(&itsDelegate);
 	itsTableView->horizontalHeader()->setStretchLastSection(true);
-	itsTableView->horizontalHeader()->setClickable(true);
+#if QT_VERSION >= 0x050000
+    itsTableView->horizontalHeader()->setSectionsClickable(true);
+#else
+    itsTableView->horizontalHeader()->setClickable(true);
+#endif
 	itsTableView->horizontalHeader()->setSortIndicatorShown(true);
 	writeTableData(data);
 }
@@ -779,6 +789,7 @@ void SchedulerGUI::setDefaultColumnWidths(void) {
 	itsTableView->setColumnWidth(FIXED_DAY,50);
 	itsTableView->setColumnWidth(FIXED_TIME,50);
 	itsTableView->setColumnWidth(PRIORITY,50);
+    itsTableView->setColumnWidth(CLUSTER_NAME,50);
 }
 
 void SchedulerGUI::writeTableData(SchedulerData const &data) {
@@ -931,6 +942,8 @@ void SchedulerGUI::updateTableTask(const Task *pTask, int row) {
         itsModel->setData(itsModel->index(row, CONTACT_EMAIL), pTask->getContactEmail(), Qt::UserRole); // for sorting
         itsModel->setData(itsModel->index(row, TASK_TYPE), pTask->getTypeStr());
         itsModel->setData(itsModel->index(row, TASK_TYPE), pTask->getTypeStr(), Qt::UserRole); // for sorting
+        itsModel->setData(itsModel->index(row, CLUSTER_NAME), pTask->getOutputDataproductCluster());
+        itsModel->setData(itsModel->index(row, CLUSTER_NAME), pTask->getOutputDataproductCluster(), Qt::UserRole); // for sorting
 
         const StationTask *pStationTask = dynamic_cast<const StationTask *>(pTask);
         if (pStationTask) { // is this a stationTask?
@@ -2101,7 +2114,7 @@ QString SchedulerGUI::fileDialog(const QString &title, const QString &def_suffix
 	QFileDialog dialog;
 	QFileInfo fi;
 	QString path="";
-	dialog.setFilters(filter.split('\n'));
+    dialog.setNameFilters(filter.split('\n'));
 	dialog.setWindowTitle(title);
 
 
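QFileDialog::setFilters() and setFilter() are no longer available in Qt 5; the name-filter spellings used here and in schedulesettingsdialog.cpp exist in both Qt 4 and Qt 5, so no version check is needed. For reference (function and variable names are illustrative):

#include <QFileDialog>
#include <QStringList>

// Sketch, not part of the patch.
void applyFilters(QFileDialog &dialog, const QString &filter)
{
    dialog.setNameFilters(filter.split('\n'));             // replaces setFilters()
    // dialog.setNameFilter("private key files (*.ppk)");  // single-filter form, replaces setFilter()
}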
diff --git a/SAS/Scheduler/src/schedulergui.h b/SAS/Scheduler/src/schedulergui.h
index df748adc54312a5eef11bb91c2c4c14b0fc49d5e..ead8a6eb64c5f047e349ef65c4b7a05ec127c374 100644
--- a/SAS/Scheduler/src/schedulergui.h
+++ b/SAS/Scheduler/src/schedulergui.h
@@ -17,9 +17,11 @@
 #include <string>
 #include <vector>
 #include <map>
-#include <QtGui/QMainWindow>
+#include <QMainWindow>
 #include <QFileInfo>
 #include <QToolBar>
+#include <QGraphicsView>
+#include <QScrollBar>
 #include "lofar_scheduler.h"
 #include "ui_schedulergui.h"
 #include "taskdialog.h"
@@ -37,7 +39,7 @@ class SchedulerData;
 class QPushButton;
 class QCheckBox;
 class QTableWidgetItem;
-class QGraphicsView;
+//class QGraphicsView;
 class QWidget;
 class QDockWidget;
 class QStatusBar;
diff --git a/SAS/Scheduler/src/schedulersettings.h b/SAS/Scheduler/src/schedulersettings.h
index 60232ce0939868331276f83209701e0824afe65f..84c67fe4ac26bd36c857493f504c89392470e547 100644
--- a/SAS/Scheduler/src/schedulersettings.h
+++ b/SAS/Scheduler/src/schedulersettings.h
@@ -219,7 +219,7 @@ private:
 	AstroDate itsEarliestDay, itsLatestDay;
 	scheduleWeekVector itsScheduleWeeks;
 	AstroTime itsMinTimeBetweenTasks;
-	quint16 uniqueStationID, itsMinNrOfStorageNodes;
+    quint16 uniqueStationID; //, itsMinNrOfStorageNodes;
 	preferredDataProductStorageMap itsPreferredDataProductStorage;
 	preferredProjectStorageMap itsPreferredProjectStorage;
 	storageNodeDistribution itsDataDistributionScheme;
diff --git a/SAS/Scheduler/src/schedulesettingsdialog.cpp b/SAS/Scheduler/src/schedulesettingsdialog.cpp
index c90496777cfc853dd1094d0a45d5ce8ae05a4082..1d8b4d75a8d8c05255997a2cc6a9cda5aa4a1820 100644
--- a/SAS/Scheduler/src/schedulesettingsdialog.cpp
+++ b/SAS/Scheduler/src/schedulesettingsdialog.cpp
@@ -130,7 +130,11 @@ ScheduleSettingsDialog::ScheduleSettingsDialog(Controller * controller) :
 	header << "ID" << "Name" << "Process type" << "Process subtype" << "Strategy" << "Status" << "Description";
 	ui.tableWidgetDefaultTemplates->setHorizontalHeaderLabels(header);
 	ui.tableWidgetDefaultTemplates->horizontalHeader()->setStretchLastSection(true);
-	ui.tableWidgetDefaultTemplates->horizontalHeader()->setResizeMode(QHeaderView::Interactive);
+#if QT_VERSION >= 0x050000
+    ui.tableWidgetDefaultTemplates->horizontalHeader()->setSectionResizeMode(QHeaderView::Interactive);
+#else
+    ui.tableWidgetDefaultTemplates->horizontalHeader()->setResizeMode(QHeaderView::Interactive);
+#endif
 
     ui.checkBoxAutoPublish->setChecked(Controller::theSchedulerSettings.getAutoPublish());
 	itsPublishLocal = Controller::theSchedulerSettings.publishLocal();
@@ -741,7 +745,7 @@ void ScheduleSettingsDialog::privateKeyBrowseDialog(void) {
 			dialog.setDirectory(prevFile.absoluteDir());
 		}
 	}
-	dialog.setFilter("private key files (*.ppk)");
+    dialog.setNameFilter("private key files (*.ppk)");
 	dialog.exec();
 	if (dialog.result() == QDialog::Accepted) {
 		QStringList files = dialog.selectedFiles();
diff --git a/SAS/Scheduler/src/schedulesettingsdialog.h b/SAS/Scheduler/src/schedulesettingsdialog.h
index 47140bc969125d22eb67ae50b51b48c6e4e25d6f..f75bb572ada3ccfd07ca6fb73b38ff9266dd8763 100644
--- a/SAS/Scheduler/src/schedulesettingsdialog.h
+++ b/SAS/Scheduler/src/schedulesettingsdialog.h
@@ -14,7 +14,7 @@
 #ifndef SCHEDULESETTINGSDIALOG_H
 #define SCHEDULESETTINGSDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_schedulesettingsdialog.h"
 #include "lofar_scheduler.h"
 #include "astrodate.h"
diff --git a/SAS/Scheduler/src/shifttasksdialog.h b/SAS/Scheduler/src/shifttasksdialog.h
index aa613371af17570e8166ab9f1462b74e42461c24..cee4a0152ddf87dd4253342bd03bf4a8bb6b2a02 100644
--- a/SAS/Scheduler/src/shifttasksdialog.h
+++ b/SAS/Scheduler/src/shifttasksdialog.h
@@ -21,7 +21,7 @@ enum moveType {
 	MOVE_TO_START
 };
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_shifttasksdialog.h"
 #include "Controller.h"
 
diff --git a/SAS/Scheduler/src/statehistorydialog.cpp b/SAS/Scheduler/src/statehistorydialog.cpp
index c550b53a9f21ded23e50f4adeb5b55a33daddd49..72f2e002140461b1c6e8d15e7e21c1a2caf817ac 100644
--- a/SAS/Scheduler/src/statehistorydialog.cpp
+++ b/SAS/Scheduler/src/statehistorydialog.cpp
@@ -10,7 +10,11 @@ StateHistoryDialog::StateHistoryDialog(QWidget *parent)
 	ui.tableWidgetStateInfo->setHorizontalHeaderLabels(header);
 	ui.tableWidgetStateInfo->setEditTriggers(QAbstractItemView::NoEditTriggers);
 	ui.tableWidgetStateInfo->horizontalHeader()->setStretchLastSection(true);
-	ui.tableWidgetStateInfo->verticalHeader()->setResizeMode(QHeaderView::ResizeToContents);
+#if QT_VERSION >= 0x050000
+    ui.tableWidgetStateInfo->verticalHeader()->setSectionResizeMode(QHeaderView::ResizeToContents);
+#else
+    ui.tableWidgetStateInfo->verticalHeader()->setResizeMode(QHeaderView::ResizeToContents);
+#endif
 	this->setWindowTitle("Task state change history");
 }
 
@@ -34,5 +38,9 @@ void StateHistoryDialog::addStateInfo(const QString &treeID, const QString &momI
 	newItem = new QTableWidgetItem(modtime.toString("yyyy-MM-dd hh:mm:ss"));
 	ui.tableWidgetStateInfo->setItem(row, 4, newItem);
 
-	ui.tableWidgetStateInfo->horizontalHeader()->setResizeMode(QHeaderView::ResizeToContents);
+#if QT_VERSION >= 0x050000
+    ui.tableWidgetStateInfo->horizontalHeader()->setSectionResizeMode(QHeaderView::ResizeToContents);
+#else
+    ui.tableWidgetStateInfo->horizontalHeader()->setResizeMode(QHeaderView::ResizeToContents);
+#endif
 }
diff --git a/SAS/Scheduler/src/statehistorydialog.h b/SAS/Scheduler/src/statehistorydialog.h
index d45e199e8ac4d2424804579dfc3090811b44c2db..87d4915862136914875be489ac6996cc43797765 100644
--- a/SAS/Scheduler/src/statehistorydialog.h
+++ b/SAS/Scheduler/src/statehistorydialog.h
@@ -1,7 +1,7 @@
 #ifndef STATEHISTORYDIALOG_H
 #define STATEHISTORYDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_statehistorydialog.h"
 #include <QDateTime>
 
diff --git a/SAS/Scheduler/src/stationlistwidget.h b/SAS/Scheduler/src/stationlistwidget.h
index b75e9f63d9644cad1f26a62605b7849b54f299f9..b7dea39d5b923a9ca718e39fcad86662a3a9cb15 100644
--- a/SAS/Scheduler/src/stationlistwidget.h
+++ b/SAS/Scheduler/src/stationlistwidget.h
@@ -14,7 +14,7 @@
 #ifndef STATIONLISTWIDGET_H
 #define STATIONLISTWIDGET_H
 
-#include <QtGui/QListWidget>
+#include <QListWidget>
 #include "ui_stationlistwidget.h"
 
 class QMouseEvents;
diff --git a/SAS/Scheduler/src/stationtreewidget.h b/SAS/Scheduler/src/stationtreewidget.h
index 8b3df586b635488ce0ca97358901d3fca6fa4683..9d61b8fcd64f83b9ab28f3b006ed4ed7850f8c6b 100644
--- a/SAS/Scheduler/src/stationtreewidget.h
+++ b/SAS/Scheduler/src/stationtreewidget.h
@@ -14,7 +14,7 @@
 #ifndef STATIONTREEWIDGET_H
 #define STATIONTREEWIDGET_H
 
-#include <QtGui/QTreeWidget>
+#include <QTreeWidget>
 #include "ui_stationtreewidget.h"
 
 // class used as tree of used stations
diff --git a/SAS/Scheduler/src/tablecolumnselectdialog.h b/SAS/Scheduler/src/tablecolumnselectdialog.h
index 21e13ce603c99ffd4f6b53228cdf08de4f91954a..265d19e12a92401a5e2861808d39bb3b38e6cbcd 100644
--- a/SAS/Scheduler/src/tablecolumnselectdialog.h
+++ b/SAS/Scheduler/src/tablecolumnselectdialog.h
@@ -1,7 +1,7 @@
 #ifndef TABLECOLUMNSELECTDIALOG_H
 #define TABLECOLUMNSELECTDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_tablecolumnselectdialog.h"
 
 class tableColumnSelectDialog : public QDialog
diff --git a/SAS/Scheduler/src/tableview.h b/SAS/Scheduler/src/tableview.h
index 512e03eb18184445f0bce899d682af8939471afe..668579b8e44c1a24df0e157ac3feac06b9c4efe8 100644
--- a/SAS/Scheduler/src/tableview.h
+++ b/SAS/Scheduler/src/tableview.h
@@ -1,7 +1,7 @@
 #ifndef TABLEVIEW_H
 #define TABLEVIEW_H
 
-#include <QtGui/QTableView>
+#include <QTableView>
 #include <QSet>
 
 class TableView : public QTableView
diff --git a/SAS/Scheduler/src/task.cpp b/SAS/Scheduler/src/task.cpp
index fad5df164da55ec77d48a6c966cc0251b452a076..b25f8fcfa9bec902cda29da766d34260fdba66c6 100755
--- a/SAS/Scheduler/src/task.cpp
+++ b/SAS/Scheduler/src/task.cpp
@@ -240,7 +240,8 @@ Task::task_status convertSASstatus(SAS_task_status sas_state) {
 
 Task::Task()
 : taskID(0), itsPriority(0.0), itsStatus(DESCRIBED), itsTaskType(UNKNOWN),
-  fixed_day(false), fixed_time(false), itsPenalty(0), penaltyCalculationNeeded(true), itsShiftDirection(SHIFT_RIGHT)
+  fixed_day(false), fixed_time(false), itsPenalty(0), penaltyCalculationNeeded(true), itsShiftDirection(SHIFT_RIGHT),
+  itsOutputDataproductCluster("---")
 {
 	clearAllConflicts();
 	// set the time window equal to the schedule boundaries
@@ -252,7 +253,7 @@ Task::Task()
 Task::Task(unsigned task_id)
 : taskID(task_id), itsPriority(0.0), itsStatus(UNSCHEDULED), itsTaskType(UNKNOWN),
   fixed_day(false), fixed_time(false), itsPenalty(0), penaltyCalculationNeeded(true),
-  itsShiftDirection(SHIFT_RIGHT)
+  itsShiftDirection(SHIFT_RIGHT), itsOutputDataproductCluster("---")
 {
 	clearAllConflicts();
 	// set the time window equal to the schedule boundaries
@@ -265,7 +266,8 @@ Task::Task(unsigned task_id)
 Task::Task(unsigned task_id, const OTDBtree &SAS_tree)
 : itsProjectName(SAS_tree.campaign()), taskID(task_id), itsPriority(0.0),
     fixed_day(false), fixed_time(false), itsPenalty(0), penaltyCalculationNeeded(true),
-    itsShiftDirection(SHIFT_RIGHT), itsSASTree(SAS_tree)
+    itsShiftDirection(SHIFT_RIGHT), itsOutputDataproductCluster("---"), itsSASTree(SAS_tree)
+
 {
 	setType(SAS_tree.processType(), SAS_tree.processSubType(), SAS_tree.strategy());
 
@@ -280,7 +282,8 @@ Task::Task(unsigned task_id, const OTDBtree &SAS_tree)
 
 Task::Task(const QSqlQuery &query, const OTDBtree &SAS_tree)
 : itsProjectName(SAS_tree.campaign()),
-  itsPenalty(0), penaltyCalculationNeeded(true), itsShiftDirection(SHIFT_RIGHT), itsSASTree(SAS_tree)
+  itsPenalty(0), penaltyCalculationNeeded(true), itsShiftDirection(SHIFT_RIGHT), itsOutputDataproductCluster("---"), itsSASTree(SAS_tree)
+
 {
 	setType(SAS_tree.processType(), SAS_tree.processSubType(),SAS_tree.strategy());
 
@@ -300,7 +303,7 @@ Task::Task(const QSqlQuery &query, const OTDBtree &SAS_tree)
 		firstPossibleDay = day;
 	}
 	else { // first possible day not set use the schedule start day or the current date whichever is latest
-		firstPossibleDay = std::max(QDate::currentDate().toJulianDay() - J2000_EPOCH, (int)Controller::theSchedulerSettings.getEarliestSchedulingDay().toJulian());
+        firstPossibleDay = std::max(QDate::currentDate().toJulianDay() - J2000_EPOCH, (qint64)Controller::theSchedulerSettings.getEarliestSchedulingDay().toJulian());
 	}
 	QString time = query.value(query.record().indexOf("windowMaximumTime")).toString();
 	if (!time.isEmpty()) {
@@ -474,6 +477,7 @@ Task & Task::operator=(const Task &other) {
         firstPossibleDay = other.firstPossibleDay;
         lastPossibleDay = other.lastPossibleDay;
         itsSASTree = other.itsSASTree;
+        itsOutputDataproductCluster = other.itsOutputDataproductCluster;
     }
     return *this;
 }
@@ -754,13 +758,6 @@ bool Task::setReason(const std::string &reason) {
 	return false;
 }
 */
-/*
-void Task::addPredecessor(unsigned int pid, AstroTime min, AstroTime max) {
-	std::pair<AstroTime, AstroTime> minMaxTimes (min, max);
-	std::pair<unsigned int, std::pair<AstroTime, AstroTime> > predecessor (pid, minMaxTimes);
-	predecessors.push_back(predecessor);
-}
-*/
 
 QString Task::getPredecessorsString(const QChar &separater) const {
     QStringList predlist;
diff --git a/SAS/Scheduler/src/task.h b/SAS/Scheduler/src/task.h
index d44e502fe013e7793d453606104601dd7a8a73ce..d4f1d90b204d6565e2dfd004b623571af3b9e03c 100755
--- a/SAS/Scheduler/src/task.h
+++ b/SAS/Scheduler/src/task.h
@@ -319,6 +319,7 @@ public:
 	bool getPenaltyCalculationNeeded(void) const {return penaltyCalculationNeeded;}
 	bool getShiftDirection(void) const {return itsShiftDirection;}
     const task_conflicts &getConflicts(void) const {return itsConflicts;}
+    const QString &getOutputDataproductCluster(void) const {return itsOutputDataproductCluster;} // Added to support CEP4, maybe should be std::string /AR
 
 	// set methods
 	void setID(unsigned id) {taskID = id;}
@@ -378,6 +379,8 @@ public:
 	inline void setSASTreeID(int treeID) {itsSASTree.itsTreeID = treeID;}
 	inline void setGroupID(unsigned groupID) {itsSASTree.itsGroupID = groupID;}
 	inline void setMoMID(int momID) {itsSASTree.itsMomID = momID;}
+    inline void setOutputDataproductCluster(const QString &clusterName) {itsOutputDataproductCluster = clusterName;} // Added to support CEP4, maybe should be std::string /AR
+
 
 	void clearAllStorageConflicts(void);
 
@@ -409,6 +412,7 @@ protected:
 	bool itsShiftDirection;
 	IDvector itsPredecessors, itsSuccessors;
 	task_conflicts itsConflicts;
+    QString itsOutputDataproductCluster; // Added to support CEP4, maybe should be std::string /AR
 
 	// objects
 //	std::vector<unsigned int> successors;
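The schedulergui.cpp hunk above shows how the new field reaches the CLUSTER_NAME column; a compact sketch of the round trip, where the model pointer, row and the "CEP4" value are illustrative and only the accessors and the column come from this patch:

#include <QAbstractItemModel>
#include "task.h"
#include "lofar_scheduler.h"   // CLUSTER_NAME

// Sketch, not part of the patch.
void showCluster(QAbstractItemModel *model, int row, Task &task)
{
    if (task.getOutputDataproductCluster() == "---")   // constructor default
        task.setOutputDataproductCluster("CEP4");      // example value only
    model->setData(model->index(row, CLUSTER_NAME), task.getOutputDataproductCluster());
}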
diff --git a/SAS/Scheduler/src/taskcopydialog.h b/SAS/Scheduler/src/taskcopydialog.h
index 8b964e1f3a93b395b331cf3085609d7698b7da19..2a4417451237e5b6c933d098f792d3f8278018c3 100644
--- a/SAS/Scheduler/src/taskcopydialog.h
+++ b/SAS/Scheduler/src/taskcopydialog.h
@@ -1,7 +1,7 @@
 #ifndef TASKCOPYDIALOG_H
 #define TASKCOPYDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_taskcopydialog.h"
 #include "astrodatetime.h"
 #include "astrotime.h"
diff --git a/SAS/Scheduler/src/taskdialog.cpp b/SAS/Scheduler/src/taskdialog.cpp
index f62311d89ba903a29cc45c508e13b752463a9fbf..49b1ef01849dce5aa4a9f523eefb3f96eb4167db 100644
--- a/SAS/Scheduler/src/taskdialog.cpp
+++ b/SAS/Scheduler/src/taskdialog.cpp
@@ -162,7 +162,11 @@ TaskDialog::TaskDialog(QWidget *parentGUI, Controller *controller)
 	ui.tableWidgetTiedArrayBeams->setColumnWidth(1,150);
 	ui.tableWidgetTiedArrayBeams->setColumnWidth(2,130);
 	ui.tableWidgetTiedArrayBeams->horizontalHeader()->setStretchLastSection(true);
-	ui.tableWidgetTiedArrayBeams->horizontalHeader()->setResizeMode(QHeaderView::Interactive);
+#if QT_VERSION >= 0x050000
+    ui.tableWidgetTiedArrayBeams->horizontalHeader()->setSectionResizeMode(QHeaderView::Interactive);
+#else
+    ui.tableWidgetTiedArrayBeams->horizontalHeader()->setResizeMode(QHeaderView::Interactive);
+#endif
 
 	// enable default output data type
 	ui.checkBoxCorrelatedData->blockSignals(true);
diff --git a/SAS/Scheduler/src/taskdialog.h b/SAS/Scheduler/src/taskdialog.h
index 74c812e6e6987ce27b0fc58240556f31fa1c9d4c..860f1c80a03c8e6a7a61ca0a423721c5ea808e39 100644
--- a/SAS/Scheduler/src/taskdialog.h
+++ b/SAS/Scheduler/src/taskdialog.h
@@ -14,7 +14,7 @@
 #ifndef TASKDIALOG_H
 #define TASKDIALOG_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include <vector>
 #include <string>
 #include "lofar_scheduler.h"
diff --git a/SAS/Scheduler/src/taskstorage.cpp b/SAS/Scheduler/src/taskstorage.cpp
index 34cac6d7874b11582637415e151c5a4765020fca..a26e85e235f286b78418b03d8cbbb4b131b2443c 100644
--- a/SAS/Scheduler/src/taskstorage.cpp
+++ b/SAS/Scheduler/src/taskstorage.cpp
@@ -297,49 +297,44 @@ QDataStream& operator>> (QDataStream &in, TaskStorage &storage) {
     return in;
 }
 
-
+//WK code commented out
 // Returns True if the input and output node locations are equal for
 // all the input and output products
 // THis function should be moved to the pipeline class?
-bool TaskStorage::getEqualityInputOutputProducts()const
-{
-    // Check we have the same number of dataproduct types
-    //if (itsInputDataProducts.size() != itsOutputDataProducts.size())
-    //    return false;
-
-    //loop over the input and output data types
-    std::map<dataProductTypes, inputDataProduct >::const_iterator inputTypePair;
-    std::map<dataProductTypes, outputDataProduct >::const_iterator outputTypePair;
-    for (outputTypePair = itsOutputDataProducts.begin();
-         outputTypePair != itsOutputDataProducts.end();  // length is the same
-          ++outputTypePair )
-    {
-        if(itsInputDataProducts.find(outputTypePair->first) !=
-                itsInputDataProducts.end())
-            continue;
-        inputDataProduct input = itsInputDataProducts.at(outputTypePair->first);
-
-
-        // Check if we have the same number of input and output entries
-        if (input.locations.size() !=
-            outputTypePair->second.locations.size())
-            return false;
-
-        // Loop over all the input and output locations
-        QStringList::const_iterator inputLoc;
-        QStringList::const_iterator outputLoc;
-        for (inputLoc = input.locations.begin(),
-             outputLoc = outputTypePair->second.locations.begin();
-             inputLoc != input.locations.end();
-             ++inputLoc , ++outputLoc)
-        {
-            //return false if the nodes are not the same
-            if (inputLoc->split(":").at(0) != outputLoc->split(":").at(0))
-                return false;
-        }
-    }
-    return true;
-}
+//bool TaskStorage::getEqualityInputOutputProducts()const
+//{
+//    // Check we have the same number of dataproduct types
+//    if (itsInputDataProducts.size() != itsOutputDataProducts.size())
+//        return false;
+
+//    //loop over the input and output data types
+//    std::map<dataProductTypes, inputDataProduct >::const_iterator inputTypePair;
+//    std::map<dataProductTypes, outputDataProduct >::const_iterator outputTypePair;
+//    for (inputTypePair = itsInputDataProducts.begin(),
+//         outputTypePair = itsOutputDataProducts.begin();
+//         inputTypePair != itsInputDataProducts.end();  // length is the same
+//         ++inputTypePair, ++outputTypePair )
+//    {
+//        // Check if we have the same number of input and output entries
+//        if (inputTypePair->second.locations.size() !=
+//            outputTypePair->second.locations.size())
+//            return false;
+
+//        // Loop over all the input and output locations
+//        QStringList::const_iterator inputLoc;
+//        QStringList::const_iterator outputLoc;
+//        for (inputLoc = inputTypePair->second.locations.begin(),
+//             outputLoc = outputTypePair->second.locations.begin();
+//             inputLoc != inputTypePair->second.locations.end();
+//             ++inputLoc , ++outputLoc)
+//        {
+//            //return false if the nodes are not the same
+//            if (inputLoc->split(":").at(0) != outputLoc->split(":").at(0))
+//                return false;
+//        }
+//    }
+//    return true;
+//}
 
 void TaskStorage::setInputFileSizes(dataProductTypes dpType, const std::pair<double, unsigned> &inputFileSizes) {
     itsInputDataFiles[dpType] = inputFileSizes;
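The pre-patch implementation looked up the matching input per output type but inverted the find() test, so the comparison never ran; the commented-out WK variant instead walks the two maps in lockstep. If the check is ever re-enabled (which also means restoring the declaration in taskstorage.h), a corrected sketch of the lookup variant, using only names from the block above:

// Sketch, not part of the patch.
bool TaskStorage::getEqualityInputOutputProducts() const
{
    std::map<dataProductTypes, outputDataProduct>::const_iterator out;
    for (out = itsOutputDataProducts.begin(); out != itsOutputDataProducts.end(); ++out) {
        std::map<dataProductTypes, inputDataProduct>::const_iterator in =
                itsInputDataProducts.find(out->first);
        if (in == itsInputDataProducts.end()) continue;    // no matching input product type
        if (in->second.locations.size() != out->second.locations.size())
            return false;                                  // different number of locations
        QStringList::const_iterator il = in->second.locations.begin();
        QStringList::const_iterator ol = out->second.locations.begin();
        for (; il != in->second.locations.end(); ++il, ++ol) {
            if (il->split(":").at(0) != ol->split(":").at(0))
                return false;                              // node parts differ
        }
    }
    return true;
}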
diff --git a/SAS/Scheduler/src/taskstorage.h b/SAS/Scheduler/src/taskstorage.h
index e68df5549bef2076bf730a606ca1aa14ee431a01..202898a8f5ad3868fe5f516a6544362407a2dc27 100644
--- a/SAS/Scheduler/src/taskstorage.h
+++ b/SAS/Scheduler/src/taskstorage.h
@@ -169,7 +169,8 @@ public:
     bool diff(const TaskStorage *other, task_diff &dif) const;
     QString diffString(const task_diff &dif) const;
 
-    bool getEqualityInputOutputProducts()const;
+//WK code commented out
+//    bool getEqualityInputOutputProducts()const;
 
 private:
     const Task *itsOwner;
diff --git a/SAS/Scheduler/src/thrashbin.h b/SAS/Scheduler/src/thrashbin.h
index 6bab89d125e8ee9b15d885612549bd0b3fc57385..72e8ff88003c29d2fcca1bba6435c390b7340f14 100644
--- a/SAS/Scheduler/src/thrashbin.h
+++ b/SAS/Scheduler/src/thrashbin.h
@@ -14,7 +14,7 @@
 #ifndef THRASHBIN_H
 #define THRASHBIN_H
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_thrashbin.h"
 #include <map>
 #include <vector>
diff --git a/SAS/Scheduler/src/tiedarraybeamdialog.h b/SAS/Scheduler/src/tiedarraybeamdialog.h
index acfe58d2269845926b951caca7a0d29ea392a1f4..19bba5856d0f174f3075dfc9bbf67109978318af 100644
--- a/SAS/Scheduler/src/tiedarraybeamdialog.h
+++ b/SAS/Scheduler/src/tiedarraybeamdialog.h
@@ -18,7 +18,7 @@ struct tabProps {
 	bool angle1, angle2, dispersion_measure, coherent;
 };
 
-#include <QtGui/QDialog>
+#include <QDialog>
 #include "ui_tiedarraybeamdialog.h"
 #include "TiedArrayBeam.h"
 #include "taskdialog.h"