diff --git a/atdb/docs/ATDB Workflow Diagram - Imaging.png b/atdb/docs/ATDB Workflow Diagram - Imaging.png
deleted file mode 100644
index f61cb148fa3bff2020520a0c4b195e7e1bedcc98..0000000000000000000000000000000000000000
Binary files a/atdb/docs/ATDB Workflow Diagram - Imaging.png and /dev/null differ
diff --git a/atdb/docs/ATDB-LDV Workflow Diagram.png b/atdb/docs/ATDB-LDV Workflow Diagram.png
index 4eb27db75d80fa611901586bfadebbb729503805..6bc631ea2b782577efe6afbe77c2943a0be9dd49 100644
Binary files a/atdb/docs/ATDB-LDV Workflow Diagram.png and b/atdb/docs/ATDB-LDV Workflow Diagram.png differ
diff --git a/atdb/taskdatabase/services/jobs.py b/atdb/taskdatabase/services/jobs.py
deleted file mode 100644
index cc24dcf922714727169863b8c403c0832e004df6..0000000000000000000000000000000000000000
--- a/atdb/taskdatabase/services/jobs.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""
-Jobs contains the business logic for the different system jobs that have to be executed based on status changes
-for Observations or DataProducts in ATDB.
-"""
-
-import logging;
-
-logger = logging.getLogger(__name__)
-
-def dispatchJob(myTaskObject, new_status):
-    """
-    Adds a job to the jobs table (or executes it directly)
-    :param (in) myObject: Observation or Dataproduct that triggers the action
-    :param (in) status: The status that triggers the action
-    """
-    # logger.info("*** dispatchJob(" + str(myTaskObject) + "," + str(new_status) + ") ***")
-
diff --git a/atdb/taskdatabase/services/signals.py b/atdb/taskdatabase/services/signals.py
index a955fe09eff6b53fab2db47cd8b9cfdc2d4a842f..aab7e3ffbebd8f90250a17dcac576b2a35009a06 100644
--- a/atdb/taskdatabase/services/signals.py
+++ b/atdb/taskdatabase/services/signals.py
@@ -6,7 +6,6 @@ from django.contrib.auth.models import User
 from django.dispatch import receiver
 from django.contrib.contenttypes.models import ContentType
 from taskdatabase.models import Task, Workflow, LogEntry, Status
-from . import jobs
 
 """
 Signals sent from different parts of the backend are centrally defined and handled here.
@@ -15,17 +14,6 @@ Signals sent from different parts of the backend are centrally defined and handl
 logger = logging.getLogger(__name__)
 
 
-#--- HTTP REQUEST signals-------------
-
-@receiver(request_started)
-def request_started_handler(sender, **kwargs):
-    logger.debug("signal : request_started")
-
-
-@receiver(request_finished)
-def request_finished_handler(sender, **kwargs):
-    logger.debug("signal : request_finished")
-
 #--- Task signals-------------
 
 @receiver(pre_save, sender=Task)
@@ -33,50 +21,6 @@ def pre_save_task_handler(sender, **kwargs):
     logger.info("SIGNAL : pre_save Task(" + str(kwargs.get('instance')) + ")")
     handle_pre_save(sender, **kwargs)
 
-
-def add_workflow(myTaskObject):
-    new_workflow_id = myTaskObject.new_workflow_id
-    new_workflow_uri = myTaskObject.new_workflow_uri
-
-    # first try to find the workflow by desired workflow_id
-    try:
-        new_workflow = Workflow.objects.get(id=new_workflow_id)
-    except:
-        new_workflow = None
-
-    if (new_workflow == None):
-        # then try workflow_uri
-        try:
-            new_workflow = Workflow.objects.get(workflow_uri=new_workflow_uri)
-        except:
-            pass
-
-    # first check if works needs to be done at all
-    if (myTaskObject.workflow != new_workflow):
-        # set the new workflow
-        myTaskObject.workflow = new_workflow
-
-    return myTaskObject
-
-
-def add_predecessor_obsolete(myTaskObject):
-    # connect the task to a workflow after posting a (flat) task through the REST API
-
-    try:
-        new_predecessor_id = myTaskObject.new_predecessor_id
-
-        # first try to find the workflow by desired workflow_id
-        new_predecessor = Task.objects.get(id=new_predecessor_id)
-
-        if (myTaskObject.predecessor != new_predecessor):
-            # set the new status
-            myTaskObject.predecessor = new_predecessor
-    except:
-        pass
-
-    return myTaskObject
-
-
 def handle_pre_save(sender, **kwargs):
     """
     pre_save handler. Mainly to check status changes and dispatch jobs if needed.
@@ -99,7 +43,7 @@ def handle_pre_save(sender, **kwargs):
         # set the new status
         myTaskObject.status = new_status
 
-        # add the new to the status history by brewing a status object out of it
+        # add the new status to the status history
         myStatus = Status(name=new_status, task=myTaskObject)
         myStatus.save()
 
@@ -111,54 +55,11 @@ def handle_pre_save(sender, **kwargs):
     myTaskObject.save()
     connect_signals()
 
-    # dispatch a job if the status has changed.
-    if (new_status != None) and (status != new_status):
-        jobs.dispatchJob(myTaskObject, new_status)
-
-
-@receiver(post_save, sender=Task)
-def post_save_task_handler(sender, **kwargs):
-    #logger.info("SIGNAL : post_save Task(" + str(kwargs.get('instance')) + ")")
-    handle_post_save(sender, **kwargs)
-
-
-def handle_post_save(sender, **kwargs):
-    """
-     post_save handler for Task. To create and write its initial status
-    :param (in) sender: The model class that sends the trigger
-    :param (in) kwargs: The instance of the object that sends the trigger.
-    """
-    logger.info("handle_post_save("+str(kwargs.get('instance'))+")")
-    myTaskObject = kwargs.get('instance')
-
-    # Create new task
-    if kwargs['created']:
-        logger.info("save new "+str(myTaskObject.task_type))
-
-        # set status
-        myTaskObject.status = myTaskObject.new_status
-
-        # add the new to the status history by brewing a status object out of it
-        myStatus = Status(name=myTaskObject.new_status, task=myTaskObject)
-        myStatus.save()
-
-    # connect the task to a workflow after posting a (flat) task through the REST API
-    #myTaskObject = add_workflow(myTaskObject)
-    #myTaskObject = add_predecessor(myTaskObject)
-
-    # temporarily disconnect the post_save handler to save the dataproduct (again) and avoiding recursion.
-    # I don't use pre_save, because then the 'created' key is not available, which is the most handy way to
-    # determine if this dataproduct already exists. (I could also check the database, but this is easier).
-    disconnect_signals()
-    myTaskObject.save()
-    connect_signals()
 
 def connect_signals():
     #logger.info("connect_signals")
     pre_save.connect(pre_save_task_handler, sender=Task)
-    post_save.connect(post_save_task_handler, sender=Task)
 
 def disconnect_signals():
     #logger.info("disconnect_signals")
     pre_save.disconnect(pre_save_task_handler, sender=Task)
-    post_save.disconnect(post_save_task_handler, sender=Task)
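Note on the signal wiring that remains after this change: the pre_save handler applies a pending status, writes it to the status history, and temporarily disconnects itself around save() to avoid re-triggering the handler. A minimal sketch of that pattern, assuming Django's signal API and the Task/Status models referenced above (the handler names and the new_status attribute mirror signals.py; everything else is illustrative, not part of this change):

    import logging

    from django.db.models.signals import pre_save
    from django.dispatch import receiver

    from taskdatabase.models import Task, Status

    logger = logging.getLogger(__name__)


    @receiver(pre_save, sender=Task)
    def pre_save_task_handler(sender, **kwargs):
        handle_pre_save(sender, **kwargs)


    def handle_pre_save(sender, **kwargs):
        task = kwargs.get('instance')
        new_status = getattr(task, 'new_status', None)

        # only act when a new status was requested and it differs from the current one
        if new_status is not None and task.status != new_status:
            task.status = new_status
            # keep a history record of every status transition
            Status(name=new_status, task=task).save()

        # disconnect before saving so this handler does not fire recursively,
        # then reconnect so later saves are handled again
        pre_save.disconnect(pre_save_task_handler, sender=Task)
        task.save()
        pre_save.connect(pre_save_task_handler, sender=Task)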
diff --git a/atdb/taskdatabase/templates/taskdatabase/index.html b/atdb/taskdatabase/templates/taskdatabase/index.html
index 367d1b271bf796771727e317182226af83699aee..df1a701a646a6ac3861741d02ef0992c293a401f 100644
--- a/atdb/taskdatabase/templates/taskdatabase/index.html
+++ b/atdb/taskdatabase/templates/taskdatabase/index.html
@@ -45,7 +45,7 @@
     </div>
     {% include 'taskdatabase/pagination.html' %}
 </div>
-    <p class="footer"> Version 1.0.0 (26 feb 2021 - 7:00)
+    <p class="footer"> Version 1.0.0 (1 mar 2021 - 11:00)
     <script type="text/javascript">
         (function(seconds) {
             var refresh,