diff --git a/SAS/TMSS/backend/services/scheduling/lib/constraints/template_constraints_v1.py b/SAS/TMSS/backend/services/scheduling/lib/constraints/template_constraints_v1.py
index d02f9f43afab5beeb76cb1a505c2410401d3c588..18cf8bc94ede9b0f46bd2871f5c01e30f6b05f13 100644
--- a/SAS/TMSS/backend/services/scheduling/lib/constraints/template_constraints_v1.py
+++ b/SAS/TMSS/backend/services/scheduling/lib/constraints/template_constraints_v1.py
@@ -62,7 +62,7 @@ def can_run_within_timewindow(scheduling_unit: models.SchedulingUnitBlueprint, l
 def can_run_after(scheduling_unit: models.SchedulingUnitBlueprint, lower_bound: datetime) -> bool:
     '''Check if the given scheduling_unit can run somewhere after the given lowerbound timestamp depending on the sub's constrains-template/doc.'''
     constraints = scheduling_unit.scheduling_constraints_doc
-    if 'before' in constraints['time']:
+    if 'time' in constraints and 'before' in constraints['time']:
         before = parser.parse(constraints['time']['before'], ignoretz=True)
         return before > lower_bound
 
@@ -102,6 +102,10 @@ def can_run_anywhere_within_timewindow_with_daily_constraints(scheduling_unit: m
     """
     main_observation_task_name = get_longest_observation_task_name_from_requirements_doc(scheduling_unit)
     constraints = scheduling_unit.scheduling_constraints_doc
+
+    if not "daily" in constraints:
+        return True
+
     if constraints['daily']['require_day'] or constraints['daily']['require_night'] or constraints['daily']['avoid_twilight']:
 
         if (upper_bound - lower_bound).days >= 1:
@@ -161,7 +165,7 @@ def can_run_within_timewindow_with_time_constraints(scheduling_unit: models.Sche
     constraints = scheduling_unit.scheduling_constraints_doc
 
     # Check the 'at' constraint and then only check can_run_anywhere for the single possible time window
-    if 'at' in constraints['time']:
+    if 'time' in constraints and 'at' in constraints['time']:
         at = parser.parse(constraints['time']['at'], ignoretz=True)
         if (at >= lower_bound and at + scheduling_unit.duration <= upper_bound):    # todo: suggestion: use scheduling_unit.requirements_doc['tasks']['Observation']['specifications_doc']['duration']
             return can_run_anywhere_within_timewindow_with_time_constraints(scheduling_unit, lower_bound=at,
@@ -191,6 +195,9 @@ def can_run_anywhere_within_timewindow_with_time_constraints(scheduling_unit: mo
     can_run_not_between = True
     constraints = scheduling_unit.scheduling_constraints_doc
 
+    if not "time" in constraints:
+        return True
+
     # given time window needs to end before constraint
     if 'before' in constraints['time']:
         before = parser.parse(constraints['time']['before'], ignoretz=True)
@@ -352,8 +359,6 @@ def can_run_anywhere_within_timewindow_with_sky_constraints(scheduling_unit: mod
                                 average_transit_time = _reference_date + sum([date - _reference_date for date in sap_datetime_list], timedelta()) / len(sap_datetime_list)
                                 transit_times.get(station, []).append(average_transit_time)
 
-                        logger.warning('##### %s' % transit_times)
-
                         for station, times in transit_times.items():
                             for i in range(len(timestamps)):
                                 offset = (timestamps[i] - times[i]).total_seconds()
@@ -404,15 +409,15 @@ def get_earliest_possible_start_time(scheduling_unit: models.SchedulingUnitBluep
     main_observation_task_name = get_longest_observation_task_name_from_requirements_doc(scheduling_unit)
     duration = timedelta(seconds=scheduling_unit.requirements_doc['tasks'][main_observation_task_name]['specifications_doc']['duration'])
     try:
-        if 'at' in constraints['time']:
+        if 'time' in constraints and 'at' in constraints['time']:
             at = parser.parse(constraints['time']['at'], ignoretz=True)
             return max(lower_bound, at)
 
-        if 'after' in constraints['time']:
+        if 'time' in constraints and 'after' in constraints['time']:
             after = parser.parse(constraints['time']['after'], ignoretz=True)
             return max(lower_bound, after)
 
-        if constraints['daily']['require_day'] or constraints['daily']['require_night'] or constraints['daily']['avoid_twilight']:
+        if 'daily' in constraints and (constraints['daily']['require_day'] or constraints['daily']['require_night'] or constraints['daily']['avoid_twilight']):
             station_groups = scheduling_unit.requirements_doc['tasks'][main_observation_task_name]['specifications_doc']["station_groups"]
             stations = list(set(sum([group['stations'] for group in station_groups], [])))  # flatten all station_groups to single list
             all_sun_events = timestamps_and_stations_to_sun_rise_and_set(timestamps=(lower_bound,lower_bound+timedelta(days=1)), stations=tuple(stations))
@@ -480,7 +485,7 @@ def compute_scores(scheduling_unit: models.SchedulingUnitBlueprint, lower_bound:
 
     # for now (as a proof of concept and sort of example), just return 1's. Return 1000 (placeholder value, change later) if the 'at' constraint is in, so it gets prioritised.
     scores = {'daily': 1.0,
-              'time': 1000.0 if ('at' in constraints['time'] and constraints['time']['at'] is not None) else 1.0,
+              'time': 1000.0 if ('time' in constraints and 'at' in constraints['time'] and constraints['time']['at'] is not None) else 1.0,
               'sky': 1.0}
 
     # add "common" scores which do not depend on constraints, such as project rank and creation date
diff --git a/SAS/TMSS/backend/services/scheduling/lib/dynamic_scheduling.py b/SAS/TMSS/backend/services/scheduling/lib/dynamic_scheduling.py
index 5ff4971b7f719615583eaf50ad3aaf5b86d27f92..5769fa88fbda113e9cb37bc1b3ed98007abcbbc8 100644
--- a/SAS/TMSS/backend/services/scheduling/lib/dynamic_scheduling.py
+++ b/SAS/TMSS/backend/services/scheduling/lib/dynamic_scheduling.py
@@ -33,7 +33,7 @@ from datetime import datetime, timedelta, time
 
 from lofar.sas.tmss.tmss.tmssapp import models
 from lofar.sas.tmss.tmss.tmssapp.tasks import schedule_independent_subtasks_in_scheduling_unit_blueprint, unschedule_subtasks_in_scheduling_unit_blueprint
-from lofar.sas.tmss.tmss.tmssapp.subtasks import update_subtasks_start_times_for_scheduling_unit, clear_defined_subtasks_start_stop_times_for_scheduling_unit
+from lofar.sas.tmss.tmss.tmssapp.subtasks import update_subtasks_start_times_for_scheduling_unit, clear_defined_subtasks_start_stop_times_for_scheduling_unit, cancel_subtask
 from lofar.sas.tmss.client.tmssbuslistener import *
 from lofar.common.datetimeutils import round_to_second_precision
 from threading import Thread, Event
@@ -68,7 +68,14 @@ def find_best_next_schedulable_unit(scheduling_units:[models.SchedulingUnitBluep
     filtered_scheduling_units = filter_scheduling_units_using_constraints(scheduling_units, lower_bound_start_time, upper_bound_stop_time)
 
     if filtered_scheduling_units:
-        best_scored_scheduling_unit = get_best_scored_scheduling_unit_scored_by_constraints(filtered_scheduling_units, lower_bound_start_time, upper_bound_stop_time)
+        triggered_scheduling_units = [scheduling_unit for scheduling_unit in filtered_scheduling_units if scheduling_unit.is_triggered]
+        if triggered_scheduling_units:
+            highest_priority_triggered_scheduling_unit = max(triggered_scheduling_units, key=lambda su: su.project.trigger_priority)
+            best_scored_scheduling_unit = ScoredSchedulingUnit(scheduling_unit=highest_priority_triggered_scheduling_unit,
+                                                               start_time=get_earliest_possible_start_time(highest_priority_triggered_scheduling_unit, lower_bound_start_time),
+                                                               scores={}, weighted_score=None)  # we don't care about scores in case of a trigger
+        else:
+            best_scored_scheduling_unit = get_best_scored_scheduling_unit_scored_by_constraints(filtered_scheduling_units, lower_bound_start_time, upper_bound_stop_time)
         return best_scored_scheduling_unit
 
     # no filtered scheduling units found...
@@ -92,10 +99,14 @@ def schedule_next_scheduling_unit() -> models.SchedulingUnitBlueprint:
     lower_bound_start_time = get_min_earliest_possible_start_time(schedulable_units, datetime.utcnow()+DEFAULT_NEXT_STARTTIME_GAP)
 
     # estimate the upper_bound_stop_time, which may give us a small timewindow before any next scheduled unit, or a default window of a day
-    try:
-        upper_bound_stop_time = max(su.start_time for su in get_scheduled_scheduling_units(lower=lower_bound_start_time, upper=lower_bound_start_time + timedelta(days=1)))
-    except ValueError:
+    if any([su.is_triggered for su in schedulable_units]):
+        # ignore what's scheduled if we have triggers
         upper_bound_stop_time = lower_bound_start_time + timedelta(days=1)
+    else:
+        try:
+            upper_bound_stop_time = max(su.start_time for su in get_scheduled_scheduling_units(lower=lower_bound_start_time, upper=lower_bound_start_time + timedelta(days=1)))
+        except ValueError:
+            upper_bound_stop_time = lower_bound_start_time + timedelta(days=1)
 
     # no need to irritate user in log files with subsecond scheduling precision
     lower_bound_start_time = round_to_second_precision(lower_bound_start_time)
@@ -115,15 +126,18 @@ def schedule_next_scheduling_unit() -> models.SchedulingUnitBlueprint:
                 # make start_time "look nice" for us humans
                 best_start_time = round_to_second_precision(best_start_time)
 
-                logger.info("schedule_next_scheduling_unit: found best candidate id=%s '%s' weighted_score=%s start_time=%s",
-                            best_scheduling_unit.id, best_scheduling_unit.name, best_scheduling_unit_score, best_start_time)
+                logger.info("schedule_next_scheduling_unit: found best candidate id=%s '%s' weighted_score=%s start_time=%s is_triggered=%s",
+                            best_scheduling_unit.id, best_scheduling_unit.name, best_scheduling_unit_score, best_start_time, best_scheduling_unit.is_triggered)
+
+                if best_scheduling_unit.is_triggered:
+                    cancel_running_observation_if_needed_and_possible(best_scored_scheduling_unit)
 
                 if unschededule_blocking_scheduled_units_if_needed_and_possible(best_scored_scheduling_unit):
                     # no (old) scheduled scheduling_units in the way, so schedule our candidate!
                     scheduled_scheduling_unit = schedule_independent_subtasks_in_scheduling_unit_blueprint(best_scheduling_unit, start_time=best_start_time)
 
-                    logger.info("schedule_next_scheduling_unit: scheduled best candidate id=%s '%s' score=%s start_time=%s",
-                                best_scheduling_unit.id, best_scheduling_unit.name, best_scheduling_unit_score, best_start_time)
+                    logger.info("schedule_next_scheduling_unit: scheduled best candidate id=%s '%s' score=%s start_time=%s is_triggered=%s",
+                                best_scheduling_unit.id, best_scheduling_unit.name, best_scheduling_unit_score, best_start_time, best_scheduling_unit.is_triggered)
                     return scheduled_scheduling_unit
 
         except SubtaskSchedulingException as e:
@@ -253,7 +267,7 @@ class TMSSDynamicSchedulingMessageHandler(TMSSEventMessageHandler):
             # This way we are sure that the latest units are always taken into account while scheduling, but we do not waste cpu cylces.
             self._do_schedule_event.set()
 
-    def onSchedulingUnitDraftConstraintsUpdated(self, id: int, scheduling_constraints_doc: dict):
+    def onSchedulingUnitDraftConstraintsUpdated(self, id: int, scheduling_constraints_doc: dict):  # todo: Does this now have to be onSchedulingUnitBlueprintConstraintsUpdated (since we now have those on the blueprint as well)?
         affected_scheduling_units = models.SchedulingUnitBlueprint.objects.filter(draft__id=id).all()
         for scheduling_unit in affected_scheduling_units:
             if scheduling_unit.status == 'scheduled':
@@ -312,6 +326,15 @@ def get_scheduled_scheduling_units(lower:datetime=None, upper:datetime=None) ->
     return list(models.SchedulingUnitBlueprint.objects.filter(id__in=scheduled_subtasks.values('task_blueprints__scheduling_unit_blueprint_id').distinct()).all())
 
 
+def get_running_observation_subtasks(stopping_after:datetime=None) -> [models.Subtask]:
+    '''get a list of all starting/started subtasks, optionally filter for those finishing after the provided time'''
+    running_obs_subtasks = models.Subtask.objects.filter(state__value__in=[models.SubtaskState.Choices.STARTING.value, models.SubtaskState.Choices.STARTED.value],
+                                                         specifications_template__type__value=models.SubtaskType.Choices.OBSERVATION.value)
+    if stopping_after is not None:
+        running_obs_subtasks = running_obs_subtasks.filter(stop_time__gte=stopping_after)
+    return list(running_obs_subtasks.all())
+
+
 def unschededule_blocking_scheduled_units_if_needed_and_possible(candidate: ScoredSchedulingUnit) -> bool:
     '''check if there are any already scheduled units in the way, and unschedule them if allowed. Return True if nothing is blocking anymore.'''
     # check any previously scheduled units, and unschedule if needed/allowed
@@ -322,7 +345,16 @@ def unschededule_blocking_scheduled_units_if_needed_and_possible(candidate: Scor
     for scheduled_scheduling_unit in scheduled_scheduling_units:
         scheduled_score = compute_scores(scheduled_scheduling_unit, candidate.start_time, candidate.start_time + candidate.scheduling_unit.duration)
 
-        if candidate.weighted_score > scheduled_score.weighted_score:
+        # in case of a triggered candidate with higher trigger priority than the scheduled unit, we don't care about
+        # scores, but only check trigger priority.
+        if candidate.scheduling_unit.is_triggered:
+            if (not scheduled_scheduling_unit.is_triggered) or candidate.scheduling_unit.project.trigger_priority > scheduled_scheduling_unit.project.trigger_priority:
+                logger.info("unscheduling id=%s '%s' because it is in the way and has a lower trigger_priority=%s than the best candidate id=%s '%s' trigger_priority=%s start_time=%s",
+                    scheduled_scheduling_unit.id, scheduled_scheduling_unit.name, scheduled_scheduling_unit.project.trigger_priority,
+                    candidate.scheduling_unit.id, candidate.scheduling_unit.name, candidate.scheduling_unit.project.trigger_priority, candidate.scheduling_unit.start_time)
+                unschedule_subtasks_in_scheduling_unit_blueprint(scheduled_scheduling_unit)
+
+        elif candidate.weighted_score > scheduled_score.weighted_score:
             # ToDo: also check if the scheduled_scheduling_unit is manually/dynamically scheduled
             logger.info("unscheduling id=%s '%s' because it is in the way and has a lower score than the best candidate id=%s '%s' score=%s start_time=%s",
                 scheduled_scheduling_unit.id, scheduled_scheduling_unit.name,
@@ -345,4 +377,28 @@ def unschededule_blocking_scheduled_units_if_needed_and_possible(candidate: Scor
     return True
 
 
+def cancel_running_observation_if_needed_and_possible(candidate: ScoredSchedulingUnit) -> bool:
+    '''check if there are starting/started observation subtasks that block the candidate.
+     Only triggered scheduling_units can cancel running observations and only if they belong to a project with a higher
+     trigger_priority than the projects of the subtasks to cancel'''
+
+    # todo: is it sufficient to cancel the subtasks, or do we cancel the whole scheduling unit?
+    if candidate.scheduling_unit.is_triggered:
+        running_obs_subtasks = get_running_observation_subtasks(candidate.start_time)
+        for obs in running_obs_subtasks:
+            if obs.project is None:
+                logger.warning('cannot cancel running subtask pk=%s for triggered scheduling_unit pk=%s because it does not belong to a project and hence has unknown priority' %
+                               (obs.pk, candidate.scheduling_unit.pk))
+                continue
+            if candidate.scheduling_unit.project.trigger_priority > obs.project.trigger_priority:
+                logger.info('cancelling observation subtask pk=%s trigger_priority=%s because it blocks the triggered scheduling_unit pk=%s trigger_priority=%s' %
+                            (obs.pk, obs.project.trigger_priority, candidate.scheduling_unit.pk, candidate.scheduling_unit.project.trigger_priority))
+                # todo: check if cancellation is really necessary or the trigger can be scheduled afterwards
+                #  I guess we could just do can_run_after(candidate, obs.stop_time) here for that?
+                #  We could also only do this, if there is a 'before' constraint on each trigger.
+                #  -> Clarify and implemented with TMSS-704.
+                cancel_subtask(obs)
+            else:
+                logger.info('NOT cancelling subtask pk=%s trigger_priority=%s for triggered scheduling_unit pk=%s trigger_priority=%s because its priority is too low' %
+                            (obs.pk, obs.project.trigger_priority, candidate.scheduling_unit.pk, candidate.scheduling_unit.project.trigger_priority))
 
diff --git a/SAS/TMSS/backend/services/scheduling/test/t_dynamic_scheduling.py b/SAS/TMSS/backend/services/scheduling/test/t_dynamic_scheduling.py
index 002d6ed94053dcc868c3c1f93bb508ca589e2d65..5c4dde3fd2756219d9d95731c36677f988ea439f 100755
--- a/SAS/TMSS/backend/services/scheduling/test/t_dynamic_scheduling.py
+++ b/SAS/TMSS/backend/services/scheduling/test/t_dynamic_scheduling.py
@@ -119,7 +119,8 @@ class TestDynamicScheduling(TestCase):  # Note: we use django.test.TestCase inst
     @staticmethod
     def create_simple_observation_scheduling_unit(name:str=None, scheduling_set=None,
                                                   obs_duration:int=60,
-                                                  constraints=None):
+                                                  constraints=None,
+                                                  is_triggered=False):
         constraints_template = models.SchedulingConstraintsTemplate.objects.get(name="constraints")
         constraints = add_defaults_to_json_object_for_schema(constraints or {}, constraints_template.schema)
 
@@ -136,7 +137,8 @@ class TestDynamicScheduling(TestCase):  # Note: we use django.test.TestCase inst
                                                          requirements_doc=scheduling_unit_spec,
                                                          observation_strategy_template=strategy_template,
                                                          scheduling_constraints_doc=constraints,
-                                                         scheduling_constraints_template=constraints_template)
+                                                         scheduling_constraints_template=constraints_template,
+                                                         is_triggered=is_triggered)
 
     def test_simple_observation_with_at_constraint(self):
         """
@@ -833,7 +835,7 @@ class TestSkyConstraints(unittest.TestCase):
         self.distance_mock = self.distance_patcher.start()
         self.distance_mock.return_value = self.distance_data
         self.addCleanup(self.distance_patcher.stop)
-        
+
         self.target_rise_and_set_data = {"CS002": [{"rise": datetime(2020, 1, 1, 8, 0, 0), "set": datetime(2020, 1, 1, 12, 30, 0), "always_above_horizon": False, "always_below_horizon": False},
                                                    {"rise": datetime(2020, 1, 1, 8, 0, 0), "set": datetime(2020, 1, 1, 12, 30, 0), "always_above_horizon": False, "always_below_horizon": False}]}
         self.target_rise_and_set_data_always_above = {"CS002": [{"rise": None, "set": None, "always_above_horizon": True, "always_below_horizon": False}]}
@@ -1771,6 +1773,467 @@ class TestReservedStations(unittest.TestCase):
         self.assertTrue(can_run_within_station_reservations(self.scheduling_unit_blueprint))
 
 
+class TestTriggers(TestCase):
+    """
+    Tests for scheduling behavior of triggered observations
+    """
+    def setUp(self):
+
+        # wipe all radb entries (via cascading deletes) in between tests, so the tests don't influence each other
+        with PostgresDatabaseConnection(tmss_test_env.ra_test_environment.radb_test_instance.dbcreds) as radb:
+            radb.executeQuery('DELETE FROM resource_allocation.specification;')
+            radb.executeQuery('TRUNCATE resource_allocation.resource_usage;')
+            radb.commit()
+
+        # wipe all scheduling_unit_drafts in between tests, so the tests don't influence each other
+        for scheduling_set in models.SchedulingSet.objects.all():
+            for scheduling_unit_draft in scheduling_set.scheduling_unit_drafts.all():
+                for scheduling_unit_blueprint in scheduling_unit_draft.scheduling_unit_blueprints.all():
+                    for task_blueprint in scheduling_unit_blueprint.task_blueprints.all():
+                        for subtask in task_blueprint.subtasks.all():
+                            try:
+                                if subtask.state.value == models.SubtaskState.Choices.SCHEDULED.value:
+                                    unschedule_subtask(subtask)
+                            except Exception as e:
+                                logger.exception(e)
+                            for output in subtask.outputs.all():
+                                for dataproduct in output.dataproducts.all():
+                                    dataproduct.delete()
+                                for consumer in output.consumers.all():
+                                    consumer.delete()
+                                output.delete()
+                            for input in subtask.inputs.all():
+                                input.delete()
+                            subtask.delete()
+                        task_blueprint.delete()
+                    scheduling_unit_blueprint.delete()
+                scheduling_unit_draft.delete()
+
+        # create a scheduling set in a project that allows triggers
+        self.scheduling_set = models.SchedulingSet.objects.create(**SchedulingSet_test_data())
+        self.scheduling_set.project.can_trigger = True
+        self.scheduling_set.project.save()
+
+        # create a second scheduling set in a project that allows triggers and has higher trigger_priority
+        self.scheduling_set_high_trigger_priority = models.SchedulingSet.objects.create(**SchedulingSet_test_data())
+        self.scheduling_set_high_trigger_priority.project.can_trigger = True
+        self.scheduling_set_high_trigger_priority.project.trigger_priority = self.scheduling_set_high_trigger_priority.project.trigger_priority + 1
+        self.scheduling_set_high_trigger_priority.project.save()
+
+        self.rarpc_patcher = mock.patch('lofar.sas.resourceassignment.resourceassignmentservice.rpc.RADBRPC')
+        self.addCleanup(self.rarpc_patcher.stop)
+        self.rarpc_mock = self.rarpc_patcher.start()
+        self.rarpc_mock.getTasks.return_value = []
+
+    def test_simple_triggered_scheduling_unit_gets_scheduled(self):
+
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "triggered scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=True)
+        triggered_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert the scheduling_unit has been scheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, triggered_scheduling_unit_blueprint.id)
+        self.assertEqual(scheduled_scheduling_unit.status, 'scheduled')
+
+    def test_triggered_scheduling_unit_with_at_constraint_gets_scheduled_at_correct_time(self):
+
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    'scheduling_unit for at %s' % self._testMethodName,
+                                    scheduling_set=self.scheduling_set)
+        # Clear constraints
+        scheduling_unit_draft.scheduling_constraints_doc['sky'] = {}
+        scheduling_unit_draft.scheduling_constraints_doc['time']["between"] = []
+        scheduling_unit_draft.scheduling_constraints_doc['time']["not_between"] = []
+        scheduling_unit_draft.scheduling_constraints_doc['time'].pop('at', None)
+        scheduling_unit_draft.scheduling_constraints_doc['time'].pop("before", None)
+        scheduling_unit_draft.scheduling_constraints_doc['time'].pop('after', None)
+        # Set at constraint
+        at = round_to_second_precision(datetime.utcnow() + timedelta(minutes=17))
+        scheduling_unit_draft.scheduling_constraints_doc['time']['at'] = at.isoformat()
+        scheduling_unit_draft.save()
+        triggered_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert the scheduling_unit has been scheduled and assert is has been scheduled at "at" timestamp
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, triggered_scheduling_unit_blueprint.id)
+        self.assertEqual(scheduled_scheduling_unit.status, 'scheduled')
+        self.assertEqual(scheduled_scheduling_unit.start_time, at)
+
+    def test_triggered_scheduling_unit_has_priority_over_regular_observation(self):
+
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "triggered scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=True)
+        triggered_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert the triggered scheduling_unit has been scheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, triggered_scheduling_unit_blueprint.id)
+        self.assertEqual(regular_scheduling_unit_blueprint.status, 'schedulable')
+        self.assertEqual(triggered_scheduling_unit_blueprint.status, 'scheduled')
+
+    def test_triggered_scheduling_unit_unschedules_regular_observation(self):
+
+        # create a regular scheduling_unit
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+        regular_scheduling_unit_blueprint.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint.save()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert the scheduling_unit has been scheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, regular_scheduling_unit_blueprint.id)
+        self.assertEqual(regular_scheduling_unit_blueprint.status, 'scheduled')
+
+        # add a triggered scheduling_unit
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "triggered scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=True)
+        triggered_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+        triggered_scheduling_unit_blueprint.scheduling_constraints_doc = {}
+        triggered_scheduling_unit_blueprint.save()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert now the new triggered scheduling_unit has been scheduled, and the regular one has been unscheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, triggered_scheduling_unit_blueprint.id)
+        self.assertEqual(regular_scheduling_unit_blueprint.status, 'schedulable')
+        self.assertEqual(triggered_scheduling_unit_blueprint.status, 'scheduled')
+
+    @mock.patch("lofar.sas.tmss.services.scheduling.dynamic_scheduling.cancel_subtask")
+    def test_triggered_scheduling_unit_cancels_regular_observation(self, cancel_mock):
+
+        # create a regular scheduling_unit
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+        regular_scheduling_unit_blueprint.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint.save()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert the scheduling_unit has been scheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, regular_scheduling_unit_blueprint.id)
+        self.assertEqual(regular_scheduling_unit_blueprint.status, 'scheduled')
+
+        # put obs to started state
+        subtask = scheduled_scheduling_unit.task_blueprints.first().subtasks.first()
+        subtask.state = models.SubtaskState.objects.get(value='starting')
+        subtask.save()
+        subtask.state = models.SubtaskState.objects.get(value='started')
+        subtask.save()
+
+        # assert obs is detected as running
+        running_subtasks = get_running_observation_subtasks()
+        self.assertIn(subtask, running_subtasks)
+
+        # also assert cut-off date is considered
+        running_subtasks = get_running_observation_subtasks(subtask.stop_time + timedelta(minutes=5))
+        self.assertNotIn(subtask, running_subtasks)
+
+        # add a triggered scheduling_unit with higher priority
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "triggered scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set_high_trigger_priority,
+                                    is_triggered=True)
+        triggered_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+        triggered_scheduling_unit_blueprint.scheduling_constraints_doc = {}
+        triggered_scheduling_unit_blueprint.save()
+
+        # wipe all radb entries (via cascading deletes) so that we don't get a conflict later because we only mock
+        # the cancellation.
+        # todo: TMSS-704: if we don't do this, the triggered SU goes in error state (conflict due to the cancel being
+        #  mocked?) - confirm that's ok.
+        with PostgresDatabaseConnection(tmss_test_env.ra_test_environment.radb_test_instance.dbcreds) as radb:
+            radb.executeQuery('DELETE FROM resource_allocation.specification;')
+            radb.executeQuery('TRUNCATE resource_allocation.resource_usage;')
+            radb.commit()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # assert the subtask has been cancelled
+        cancel_mock.assert_called_with(subtask)
+
+        # Note that we cannot check that the regular_scheduling_unit_blueprint or the subtask has been cancelled since
+        # we only mocked the cancellation.
+
+        # Assert now the new triggered scheduling_unit has been scheduled
+        # todo: TMSS-704: We should only cancel if the trigger cannot run afterwards due to constraints.
+        #  Add such constraints once the scheduler considers that, since that will break this test.
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, triggered_scheduling_unit_blueprint.id)
+        self.assertEqual(triggered_scheduling_unit_blueprint.status, 'scheduled')
+
+    @mock.patch("lofar.sas.tmss.services.scheduling.dynamic_scheduling.cancel_subtask")
+    def test_triggered_scheduling_unit_does_not_cancel_regular_observation_with_same_trigger_priority(self, cancel_mock):
+
+        # create a regular scheduling_unit
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+        regular_scheduling_unit_blueprint.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint.save()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert the scheduling_unit has been scheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, regular_scheduling_unit_blueprint.id)
+        self.assertEqual(regular_scheduling_unit_blueprint.status, 'scheduled')
+
+        # put obs to started state
+        subtask = scheduled_scheduling_unit.task_blueprints.first().subtasks.first()
+        subtask.state = models.SubtaskState.objects.get(value='starting')
+        subtask.save()
+        subtask.state = models.SubtaskState.objects.get(value='started')
+        subtask.save()
+
+        # assert obs is detected as running
+        running_subtasks = get_running_observation_subtasks()
+        self.assertIn(subtask, running_subtasks)
+
+        # also assert cut-off date is considered
+        running_subtasks = get_running_observation_subtasks(subtask.stop_time + timedelta(minutes=5))
+        self.assertNotIn(subtask, running_subtasks)
+
+        # add a triggered scheduling_unit with same trigger priority
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "triggered scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=True)
+        triggered_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+        triggered_scheduling_unit_blueprint.scheduling_constraints_doc = {}
+        triggered_scheduling_unit_blueprint.save()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # assert that the subtask has NOT been cancelled and is still in state 'started'
+        cancel_mock.assert_not_called()
+        self.assertEqual(subtask.state.value, 'started')
+
+        # Assert that the new triggered scheduling_unit has NOT been scheduled, and the regular one is still observing
+        self.assertIsNone(scheduled_scheduling_unit)
+        self.assertEqual(regular_scheduling_unit_blueprint.status, 'observing')
+        #self.assertEqual(triggered_scheduling_unit_blueprint.status, 'schedulable')  # todo: TMSS-704: Make this pass. Currently goes to error state
+
+    @mock.patch("lofar.sas.tmss.services.scheduling.dynamic_scheduling.cancel_subtask")
+    def test_triggered_scheduling_unit_does_not_cancel_regular_observation_if_it_cannot_run_anyway(self, cancel_mock):
+
+        # create a regular scheduling_unit
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+        regular_scheduling_unit_blueprint.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint.save()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert the scheduling_unit has been scheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, regular_scheduling_unit_blueprint.id)
+        self.assertEqual(regular_scheduling_unit_blueprint.status, 'scheduled')
+
+        # put obs to started state
+        subtask = scheduled_scheduling_unit.task_blueprints.first().subtasks.first()
+        subtask.state = models.SubtaskState.objects.get(value='starting')
+        subtask.save()
+        subtask.state = models.SubtaskState.objects.get(value='started')
+        subtask.save()
+
+        # assert obs is detected as running
+        running_subtasks = get_running_observation_subtasks()
+        self.assertIn(subtask, running_subtasks)
+
+        # also assert cut-off date is considered
+        running_subtasks = get_running_observation_subtasks(subtask.stop_time + timedelta(minutes=5))
+        self.assertNotIn(subtask, running_subtasks)
+
+        # add a triggered scheduling_unit with higher priority, but a between constraint that can never be met
+        scheduling_unit_draft = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "triggered scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set_high_trigger_priority,
+                                    is_triggered=True)
+        triggered_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft)
+        triggered_scheduling_unit_blueprint.scheduling_constraints_doc = {'time': {'between': [{"from": datetime.utcnow().isoformat(), "to": (datetime.utcnow()+timedelta(minutes=10)).isoformat()},]}}
+        triggered_scheduling_unit_blueprint.save()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # assert that the subtask has NOT been cancelled and is still in state 'started'
+        #cancel_mock.assert_not_called()
+        self.assertEqual(subtask.state.value, 'started')
+
+        # Assert that the new triggered scheduling_unit has NOT been scheduled, and the regular one is still observing
+        self.assertIsNone(scheduled_scheduling_unit)
+        self.assertEqual(regular_scheduling_unit_blueprint.status, 'observing')
+        #self.assertEqual(triggered_scheduling_unit_blueprint.status, 'schedulable')  # todo: TMSS-704: Make this pass. Currently goes to error state.
+
+    @mock.patch("lofar.sas.tmss.services.scheduling.dynamic_scheduling.cancel_subtask")
+    def test_triggered_scheduling_unit_gets_scheduled_in_correct_trigger_priority_order(self, cancel_mock):
+
+        # create three regular scheduling_units, two with high trigger priority, one with lower
+        scheduling_unit_draft_high1 = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set_high_trigger_priority,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint_high1 = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft_high1)
+        regular_scheduling_unit_blueprint_high1.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint_high1.save()
+
+        scheduling_unit_draft_high2 = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set_high_trigger_priority,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint_high2 = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft_high2)
+        regular_scheduling_unit_blueprint_high2.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint_high2.save()
+
+        scheduling_unit_draft_low = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint_low = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft_low)
+        regular_scheduling_unit_blueprint_low.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint_low.save()
+
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert the scheduling_unit has been scheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, regular_scheduling_unit_blueprint_high1.id)
+        self.assertEqual(regular_scheduling_unit_blueprint_high1.status, 'scheduled')
+
+        # put first obs to started state
+        subtask = scheduled_scheduling_unit.task_blueprints.first().subtasks.first()
+        subtask.state = models.SubtaskState.objects.get(value='starting')
+        subtask.save()
+        subtask.state = models.SubtaskState.objects.get(value='started')
+        subtask.save()
+
+        # add a triggered scheduling_unit with same priority
+        scheduling_unit_draft_trigger = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "triggered scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set_high_trigger_priority,
+                                    is_triggered=True)
+        triggered_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft_trigger)
+        triggered_scheduling_unit_blueprint.scheduling_constraints_doc = {}
+        triggered_scheduling_unit_blueprint.save()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # assert that the subtask has NOT been cancelled and is still in state 'started', and its SU is observing
+        cancel_mock.assert_not_called()
+        self.assertEqual(subtask.state.value, 'started')
+        self.assertEqual(regular_scheduling_unit_blueprint_high1.status, 'observing')
+
+        # Assert that the new triggered scheduling_unit has been scheduled, and starts in between the same and lower
+        # priority units
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        #self.assertEqual(triggered_scheduling_unit_blueprint.status, 'scheduled')  # todo: TMSS-704: Make this pass. Currently goes to error state
+        self.assertEqual(regular_scheduling_unit_blueprint_high2.status, 'scheduled')
+        self.assertEqual(regular_scheduling_unit_blueprint_low.status, 'schedulable')  # todo: why high2 gets scheduled, but this is only schedulable?
+        self.assertGreater(regular_scheduling_unit_blueprint_low.start_time, triggered_scheduling_unit_blueprint.stop_time)
+        #self.assertGreater(triggered_scheduling_unit_blueprint.start_time, regular_scheduling_unit_blueprint_high2.stop_time)  # todo: TMSS-704: Make this pass. Currently starts after high1, but unexpectedly before high2
+        self.assertGreater(regular_scheduling_unit_blueprint_high2.start_time, regular_scheduling_unit_blueprint_high1.stop_time)
+
+    @mock.patch("lofar.sas.tmss.services.scheduling.dynamic_scheduling.cancel_subtask")
+    def test_triggered_scheduling_unit_goes_to_unschedulable_if_it_cannot_cancel_and_does_not_fit(self, cancel_mock):
+
+        # create three regular scheduling_units, two with high trigger priority, one with lower
+        scheduling_unit_draft_high1 = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set_high_trigger_priority,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint_high1 = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft_high1)
+        regular_scheduling_unit_blueprint_high1.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint_high1.save()
+
+        scheduling_unit_draft_high2 = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set_high_trigger_priority,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint_high2 = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft_high2)
+        regular_scheduling_unit_blueprint_high2.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint_high2.save()
+
+        scheduling_unit_draft_low = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set,
+                                    is_triggered=False)
+        regular_scheduling_unit_blueprint_low = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft_low)
+        regular_scheduling_unit_blueprint_low.scheduling_constraints_doc = {}
+        regular_scheduling_unit_blueprint_low.save()
+
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # Assert the scheduling_unit has been scheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        self.assertEqual(scheduled_scheduling_unit.id, regular_scheduling_unit_blueprint_high1.id)
+        self.assertEqual(regular_scheduling_unit_blueprint_high1.status, 'scheduled')
+
+        # put first obs to started state
+        subtask = scheduled_scheduling_unit.task_blueprints.first().subtasks.first()
+        subtask.state = models.SubtaskState.objects.get(value='starting')
+        subtask.save()
+        subtask.state = models.SubtaskState.objects.get(value='started')
+        subtask.save()
+
+        # add a triggered scheduling_unit with same trigger priority, and a between constraint that can only be met
+        # when the regular obs would be cancelled (which is not allowed because it requires higher trigger priority).
+        scheduling_unit_draft_trigger = TestDynamicScheduling.create_simple_observation_scheduling_unit(
+                                    "triggered scheduling unit for %s" % self._testMethodName,
+                                    scheduling_set=self.scheduling_set_high_trigger_priority,
+                                    is_triggered=True)
+        triggered_scheduling_unit_blueprint = create_task_blueprints_and_subtasks_from_scheduling_unit_draft(scheduling_unit_draft_trigger)
+        triggered_scheduling_unit_blueprint.scheduling_constraints_doc = {'time': {'between': [{"from": datetime.utcnow().isoformat(), "to": (datetime.utcnow()+timedelta(hours=3)).isoformat()},]}}
+        triggered_scheduling_unit_blueprint.save()
+
+        scheduled_scheduling_unit = do_dynamic_schedule()
+
+        # assert that the subtask has NOT been cancelled and is still in state 'started', and its SU is observing
+        cancel_mock.assert_not_called()
+        self.assertEqual(subtask.state.value, 'started')
+        self.assertEqual(regular_scheduling_unit_blueprint_high1.status, 'observing')
+
+        # Assert that the new triggered scheduling_unit has NOT been scheduled and regular observations remain scheduled
+        self.assertIsNotNone(scheduled_scheduling_unit)
+        #self.assertEqual(triggered_scheduling_unit_blueprint.status, 'unschedulable')  # todo: TMSS-704: Make this pass. Currently goes to error state
+        self.assertEqual(regular_scheduling_unit_blueprint_high2.status, 'scheduled')
+        self.assertEqual(regular_scheduling_unit_blueprint_low.status, 'schedulable')  # todo: why high2 gets scheduled, but this is only schedulable?
+
+
 logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
 
 if __name__ == '__main__':
diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/migrations/0001_initial.py b/SAS/TMSS/backend/src/tmss/tmssapp/migrations/0001_initial.py
index 0110ee6adaa831b503f1b8b42ea28f0ed7a6d0d2..664d10fa25687a243cfc038c2e16dc18f531701c 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/migrations/0001_initial.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/migrations/0001_initial.py
@@ -1,4 +1,4 @@
-# Generated by Django 3.0.9 on 2021-04-28 21:14
+# Generated by Django 3.0.9 on 2021-05-04 15:32
 
 from django.conf import settings
 import django.contrib.postgres.fields
@@ -7,7 +7,6 @@ import django.contrib.postgres.indexes
 from django.db import migrations, models
 import django.db.models.deletion
 import lofar.sas.tmss.tmss.tmssapp.models.common
-import lofar.sas.tmss.tmss.tmssapp.models.specification
 
 
 class Migration(migrations.Migration):
@@ -621,6 +620,7 @@ class Migration(migrations.Migration):
                 ('piggyback_allowed_aartfaac', models.BooleanField(help_text='Piggyback key for AARTFAAC.', null=True)),
                 ('priority_rank', models.FloatField(default=0.0, help_text='Priority of this scheduling unit w.r.t. other scheduling units within the same queue and project.')),
                 ('scheduling_constraints_doc', django.contrib.postgres.fields.jsonb.JSONField(help_text='Scheduling Constraints for this run.', null=True)),
+                ('is_triggered', models.BooleanField(default=False, help_text='boolean (default FALSE), which indicates whether this observation was triggered (responsive telescope)')),
             ],
             options={
                 'abstract': False,
@@ -643,6 +643,7 @@ class Migration(migrations.Migration):
                 ('piggyback_allowed_tbb', models.BooleanField(help_text='Piggyback key for TBB.', null=True)),
                 ('piggyback_allowed_aartfaac', models.BooleanField(help_text='Piggyback key for AARTFAAC.', null=True)),
                 ('priority_rank', models.FloatField(default=0.0, help_text='Priority of this scheduling unit w.r.t. other scheduling units within the same queue and project.')),
+                ('is_triggered', models.BooleanField(default=False, help_text='boolean (default FALSE), which indicates whether this observation was triggered (responsive telescope)')),
             ],
             options={
                 'abstract': False,
@@ -725,7 +726,7 @@ class Migration(migrations.Migration):
             options={
                 'abstract': False,
             },
-            bases=(models.Model, lofar.sas.tmss.tmss.tmssapp.models.common.TemplateSchemaMixin),
+            bases=(models.Model, lofar.sas.tmss.tmss.tmssapp.models.common.ProjectPropertyMixin, lofar.sas.tmss.tmss.tmssapp.models.common.TemplateSchemaMixin),
         ),
         migrations.CreateModel(
             name='SubtaskAllowedStateTransitions',
@@ -864,7 +865,7 @@ class Migration(migrations.Migration):
                 ('specifications_doc', django.contrib.postgres.fields.jsonb.JSONField(help_text='Specifications for this task.')),
                 ('output_pinned', models.BooleanField(default=False, help_text='True if the output of this task is pinned to disk, that is, forbidden to be removed.')),
             ],
-            bases=(models.Model, lofar.sas.tmss.tmss.tmssapp.models.specification.ProjectPropertyMixin, lofar.sas.tmss.tmss.tmssapp.models.common.TemplateSchemaMixin),
+            bases=(models.Model, lofar.sas.tmss.tmss.tmssapp.models.common.ProjectPropertyMixin, lofar.sas.tmss.tmss.tmssapp.models.common.TemplateSchemaMixin),
         ),
         migrations.CreateModel(
             name='TaskRelationBlueprint',
diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/models/common.py b/SAS/TMSS/backend/src/tmss/tmssapp/models/common.py
index b13765d5a07282c1e9b24e71c1f3a1946cd7dd02..b36141b60469b4271d08d33dc2ee41780adcd335 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/models/common.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/models/common.py
@@ -14,6 +14,12 @@ from django.urls import reverse as reverse_url
 import json
 import jsonschema
 from datetime import timedelta
+from django.utils.functional import cached_property
+from lofar.sas.tmss.tmss.exceptions import TMSSException
+
+#
+# Mixins
+#
 
 class RefreshFromDbInvalidatesCachedPropertiesMixin():
     """Helper Mixin class which invalidates all 'cached_property' attributes on a model upon refreshing from the db"""
@@ -22,11 +28,30 @@ class RefreshFromDbInvalidatesCachedPropertiesMixin():
         return super().refresh_from_db(*args, **kwargs)
 
     def invalidate_cached_properties(self):
-        from django.utils.functional import cached_property
         for key, value in self.__class__.__dict__.items():
             if isinstance(value, cached_property):
                 self.__dict__.pop(key, None)
 
+
+class ProjectPropertyMixin(RefreshFromDbInvalidatesCachedPropertiesMixin):
+    @cached_property
+    def project(self): # -> Project:
+        '''return the related project of this task
+        '''
+        if not hasattr(self, 'path_to_project'):
+            return TMSSException("Please define a 'path_to_project' attribute on the %s object for the ProjectPropertyMixin to function." % type(self))
+        obj = self
+        for attr in self.path_to_project.split('__'):
+            obj = getattr(obj, attr)
+            if attr == 'project':
+                return obj
+            if obj and not isinstance(obj, Model):  # ManyToMany fields
+                obj = obj.first()
+            if obj is None:
+                logger.warning("The element '%s' in the path_to_project of the %s object returned None for pk=%s" % (attr, type(self), self.pk))
+                return None
+
+
 # abstract models
 
 class BasicCommon(Model):
diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/models/scheduling.py b/SAS/TMSS/backend/src/tmss/tmssapp/models/scheduling.py
index 3fa4cc2134aa7b636f5a8809f0483fc749c2c229..82179139a3f1d7bc8b07889744474ed0a5a2596f 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/models/scheduling.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/models/scheduling.py
@@ -12,7 +12,7 @@ from django.db.models import Model, ForeignKey, OneToOneField, CharField, DateTi
     ManyToManyField, CASCADE, SET_NULL, PROTECT, QuerySet, BigAutoField, UniqueConstraint
 from django.contrib.postgres.fields import ArrayField, JSONField
 from django.contrib.auth.models import User
-from .common import AbstractChoice, BasicCommon, Template, NamedCommon, TemplateSchemaMixin
+from .common import AbstractChoice, BasicCommon, Template, NamedCommon, TemplateSchemaMixin, ProjectPropertyMixin
 from enum import Enum
 from django.db.models.expressions import RawSQL
 from django.core.exceptions import ValidationError
@@ -138,7 +138,7 @@ class SIPidentifier(Model):
 #
 # Instance Objects
 #
-class Subtask(BasicCommon, TemplateSchemaMixin):
+class Subtask(BasicCommon, ProjectPropertyMixin, TemplateSchemaMixin):
     """
     Represents a low-level task, which is an atomic unit of execution, such as running an observation, running
     inspection plots on the observed data, etc. Each task has a specific configuration, will have resources allocated
@@ -156,6 +156,7 @@ class Subtask(BasicCommon, TemplateSchemaMixin):
     created_or_updated_by_user = ForeignKey(User, null=True, editable=False, on_delete=PROTECT, help_text='The user who created / updated the subtask.')
     raw_feedback = CharField(null=True, max_length=1048576, help_text='The raw feedback for this Subtask')
     global_identifier = OneToOneField('SIPidentifier', null=False, editable=False, on_delete=PROTECT, help_text='The global unique identifier for LTA SIP.')
+    path_to_project = 'task_blueprints__scheduling_unit_blueprint__draft__scheduling_set__project'
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/models/specification.py b/SAS/TMSS/backend/src/tmss/tmssapp/models/specification.py
index 25326994619527ce4e4278fa35ce9c04448fb1b2..09b39e157d1fea726601d3e16299be9e8aaf08a9 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/models/specification.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/models/specification.py
@@ -10,7 +10,7 @@ from django.contrib.postgres.fields import JSONField
 from enum import Enum
 from django.db.models.expressions import RawSQL
 from django.db.models.deletion import ProtectedError
-from .common import AbstractChoice, BasicCommon, Template, NamedCommon, TemplateSchemaMixin, NamedCommonPK, RefreshFromDbInvalidatesCachedPropertiesMixin
+from .common import AbstractChoice, BasicCommon, Template, NamedCommon, TemplateSchemaMixin, NamedCommonPK, RefreshFromDbInvalidatesCachedPropertiesMixin, ProjectPropertyMixin
 from lofar.common.json_utils import validate_json_against_schema, validate_json_against_its_schema, add_defaults_to_json_object_for_schema
 from lofar.sas.tmss.tmss.exceptions import *
 from django.core.exceptions import ValidationError
@@ -19,24 +19,7 @@ from collections import Counter
 from django.utils.functional import cached_property
 from pprint import pformat
 from lofar.sas.tmss.tmss.exceptions import TMSSException
-
-#
-# Mixins
-#
-
-class ProjectPropertyMixin(RefreshFromDbInvalidatesCachedPropertiesMixin):
-    @cached_property
-    def project(self): # -> Project:
-        '''return the related project of this task
-        '''
-        if not hasattr(self, 'path_to_project'):
-            return TMSSException("Please define a 'path_to_project' attribute on the object for the ProjectPropertyMixin to function.")
-        obj = self
-        for attr in self.path_to_project.split('__'):
-            obj = getattr(obj, attr)
-            if attr == 'project':
-                return obj
-
+from lofar.sas.tmss.tmss.exceptions import BlueprintCreationException, TMSSException
 
 #
 # I/O
@@ -414,6 +397,7 @@ class SchedulingUnitDraft(NamedCommon, TemplateSchemaMixin):
     piggyback_allowed_aartfaac = BooleanField(help_text='Piggyback key for AARTFAAC.', null=True)
     priority_rank = FloatField(null=False, default=0.0, help_text='Priority of this scheduling unit w.r.t. other scheduling units within the same queue and project.')
     priority_queue = ForeignKey('PriorityQueueType', null=False, on_delete=PROTECT, default="A", help_text='Priority queue of this scheduling unit. Queues provide a strict ordering between scheduling units.')
+    is_triggered = BooleanField(default=False, null=False, help_text='boolean (default FALSE), which indicates whether this observation was triggered (responsive telescope)')
 
     def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
         if self.requirements_doc is not None and self.requirements_template_id and self.requirements_template.schema is not None:
@@ -503,6 +487,7 @@ class SchedulingUnitBlueprint(RefreshFromDbInvalidatesCachedPropertiesMixin, Tem
     priority_queue = ForeignKey('PriorityQueueType', null=False, on_delete=PROTECT, default="A", help_text='Priority queue of this scheduling unit. Queues provide a strict ordering between scheduling units.')
     scheduling_constraints_doc = JSONField(help_text='Scheduling Constraints for this run.', null=True)
     scheduling_constraints_template = ForeignKey('SchedulingConstraintsTemplate', on_delete=CASCADE, null=True, help_text='Schema used for scheduling_constraints_doc.')
+    is_triggered = BooleanField(default=False, null=False, help_text='boolean (default FALSE), which indicates whether this observation was triggered (responsive telescope)')
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
@@ -521,7 +506,7 @@ class SchedulingUnitBlueprint(RefreshFromDbInvalidatesCachedPropertiesMixin, Tem
         if self._state.adding:
             # On creation, propagate the following scheduling_unit_draft attributes as default for the new scheduling_unit_blueprint
             for copy_field in ['ingest_permission_required', 'piggyback_allowed_tbb', 'piggyback_allowed_aartfaac',
-                               'scheduling_constraints_doc', 'scheduling_constraints_template']:
+                               'scheduling_constraints_doc', 'scheduling_constraints_template', 'is_triggered']:
                 if hasattr(self, 'draft'):
                     setattr(self, copy_field, getattr(self.draft, copy_field))
         else:
@@ -536,6 +521,20 @@ class SchedulingUnitBlueprint(RefreshFromDbInvalidatesCachedPropertiesMixin, Tem
         self.__original_scheduling_constraints_doc = self.scheduling_constraints_doc
         self.__original_scheduling_constraints_template_id = self.scheduling_constraints_template_id
 
+        if self._state.adding and self.is_triggered:
+            if self.project.can_trigger:
+                from lofar.sas.tmss.services.scheduling.constraints import can_run_after
+                start_time = datetime.datetime.utcnow() + datetime.timedelta(minutes=3)
+                if self.scheduling_constraints_template is None or can_run_after(self, start_time):
+                    logger.info('Triggered obs name=%s can run after start_time=%s. The scheduler will pick this up and cancel ongoing observations if necessary.' % (self.name, start_time))
+                else:
+                    logger.info('Triggered obs name=%s cannot run after start_time=%s. Adding it for book-keeping, but it will be unschedulable.' % (self.name, start_time))
+                    # todo: set to unschedulable? This is a derived state and we do not have subtasks at this point. We could check this in 'status' of course, but this check is quite costly...
+            else:
+                msg = 'Triggered obs name=%s is rejected because its project name=%s does not allow triggering.' % (self.name, self.project.name)
+                logger.warning(msg)
+                raise BlueprintCreationException(msg)
+
         super().save(force_insert, force_update, using, update_fields)
 
     @cached_property
@@ -757,20 +756,6 @@ class SchedulingUnitBlueprint(RefreshFromDbInvalidatesCachedPropertiesMixin, Tem
         return fields_found
 
 
-class ProjectPropertyMixin():
-    @cached_property
-    def project(self) -> Project:
-        '''return the related project of this task
-        '''
-        if not hasattr(self, 'path_to_project'):
-            return TMSSException("Please define a 'path_to_project' attribute on the object for the ProjectPropertyMixin to function.")
-        obj = self
-        for attr in self.path_to_project.split('__'):
-            obj = getattr(obj, attr)
-            if attr == 'project':
-                return obj
-
-
 class TaskDraft(NamedCommon, ProjectPropertyMixin, TemplateSchemaMixin):
     specifications_doc = JSONField(help_text='Specifications for this task.')
     copies = ForeignKey('TaskDraft', related_name="copied_from", on_delete=SET_NULL, null=True, help_text='Source reference, if we are a copy (NULLable).')
diff --git a/SAS/TMSS/backend/test/t_permissions_system_roles.py b/SAS/TMSS/backend/test/t_permissions_system_roles.py
index 74c3d6c24088a38cf214a7038f22ccd9152241ff..e00d9db0c658780f892747d107e1db14b3285aa3 100755
--- a/SAS/TMSS/backend/test/t_permissions_system_roles.py
+++ b/SAS/TMSS/backend/test/t_permissions_system_roles.py
@@ -589,7 +589,7 @@ class SystemPermissionTestCase(unittest.TestCase):
 
         # Try to task_log subtask and assert Paulus can do it within the TO observer group permissions.
         response = GET_and_assert_equal_expected_code(self, BASE_URL + '/subtask/%s/task_log/' % self.obs_subtask_id,
-                                                      200,
+                                                      302,
                                                       auth=self.test_data_creator.auth)
 
 
diff --git a/SAS/TMSS/backend/test/t_tmssapp_specification_django_API.py b/SAS/TMSS/backend/test/t_tmssapp_specification_django_API.py
index f1218237f3ff7b8ee8a7d70e24c3486d081cf500..3639b0fbe6efa756c15c156a925248310e62d834 100755
--- a/SAS/TMSS/backend/test/t_tmssapp_specification_django_API.py
+++ b/SAS/TMSS/backend/test/t_tmssapp_specification_django_API.py
@@ -44,8 +44,7 @@ from lofar.sas.tmss.test.tmss_test_data_django_models import *
 from django.db.utils import IntegrityError
 from django.core.exceptions import ValidationError
 from django.db.models.deletion import ProtectedError
-from lofar.sas.tmss.tmss.exceptions import SchemaValidationException
-from lofar.sas.tmss.tmss.exceptions import TMSSException
+from lofar.sas.tmss.tmss.exceptions import SchemaValidationException, BlueprintCreationException, TMSSException
 
 class GeneratorTemplateTest(unittest.TestCase):
     def test_GeneratorTemplate_gets_created_with_correct_creation_timestamp(self):
@@ -850,239 +849,264 @@ class SchedulingUnitBlueprintTest(unittest.TestCase):
         scheduling_unit_blueprint.scheduling_constraints_doc = {'foo': 'matic'}
         scheduling_unit_blueprint.save()
 
+    def test_SchedulingUnitBlueprint_gets_created_with_correct_is_triggered_flag(self):
 
-# class TaskBlueprintTest(unittest.TestCase):
-#     @classmethod
-#     def setUpClass(cls) -> None:
-#         cls.task_draft = models.TaskDraft.objects.create(**TaskDraft_test_data())
-#         cls.scheduling_unit_blueprint = models.SchedulingUnitBlueprint.objects.create(**SchedulingUnitBlueprint_test_data())
-#
-#     def test_TaskBlueprint_gets_created_with_correct_creation_timestamp(self):
-#
-#         # setup
-#         before = datetime.utcnow()
-#         entry = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
-#
-#         after = datetime.utcnow()
-#
-#         # assert
-#         self.assertLess(before, entry.created_at)
-#         self.assertGreater(after, entry.created_at)
-#
-#     def test_TaskBlueprint_update_timestamp_gets_changed_correctly(self):
-#
-#         # setup
-#         entry = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
-#         before = datetime.utcnow()
-#         entry.save()
-#         after = datetime.utcnow()
-#
-#         # assert
-#         self.assertLess(before, entry.updated_at)
-#         self.assertGreater(after, entry.updated_at)
-#
-#     def test_TaskBlueprint_prevents_missing_template(self):
-#
-#         # setup
-#         test_data = dict(TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
-#         test_data['specifications_template'] = None
-#
-#         # assert
-#         with self.assertRaises(IntegrityError):
-#             models.TaskBlueprint.objects.create(**test_data)
-#
-#     def test_TaskBlueprint_prevents_missing_draft(self):
-#
-#         # setup
-#         test_data = dict(TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
-#         test_data['draft'] = None
-#
-#         # assert
-#         with self.assertRaises(IntegrityError):
-#             models.TaskBlueprint.objects.create(**test_data)
-#
-#     def test_TaskBlueprint_prevents_draft_deletion(self):
-#         # setup
-#         test_data = dict(TaskBlueprint_test_data())
-#         blueprint = models.TaskBlueprint.objects.create(**test_data)
-#         draft = blueprint.draft
-#         with self.assertRaises(ProtectedError):
-#             draft.delete()
-#
-#     def test_TaskBlueprint_prevents_missing_scheduling_unit_blueprint(self):
-#
-#         # setup
-#         test_data = dict(TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
-#         test_data['scheduling_unit_blueprint'] = None
-#
-#         # assert
-#         with self.assertRaises(IntegrityError):
-#             models.TaskBlueprint.objects.create(**test_data)
-#
-#     def test_TaskBlueprint_predecessors_and_successors_none(self):
-#         task_blueprint_1: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
-#         task_blueprint_2: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
-#
-#         self.assertEqual(set(), set(task_blueprint_1.predecessors.all()))
-#         self.assertEqual(set(), set(task_blueprint_2.predecessors.all()))
-#         self.assertEqual(set(), set(task_blueprint_1.successors.all()))
-#         self.assertEqual(set(), set(task_blueprint_2.successors.all()))
-#
-#     def test_TaskBlueprint_predecessors_and_successors_simple(self):
-#         task_blueprint_1: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
-#         task_blueprint_2: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
-#
-#         models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_1, consumer=task_blueprint_2))
-#
-#         self.assertEqual(task_blueprint_1, task_blueprint_2.predecessors.all()[0])
-#         self.assertEqual(task_blueprint_2, task_blueprint_1.successors.all()[0])
-#
-#     def test_TaskBlueprint_predecessors_and_successors_complex(self):
-#         task_blueprint_1: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4())))
-#         task_blueprint_2: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
-#         task_blueprint_3: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
-#         task_blueprint_4: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
-#         task_blueprint_5: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
-#         task_blueprint_6: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
-#
-#         # ST1 ---> ST3 ---> ST4
-#         #      |        |
-#         # ST2 -          -> ST5 ---> ST6
-#
-#         models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_1, consumer=task_blueprint_3))
-#         trb1 = models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_2, consumer=task_blueprint_3))
-#         models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_3, consumer=task_blueprint_4))
-#         models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_3, consumer=task_blueprint_5))
-#         models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_5, consumer=task_blueprint_6))
-#
-#         self.assertEqual(set((task_blueprint_1, task_blueprint_2)), set(task_blueprint_3.predecessors.all()))
-#         self.assertEqual(set((task_blueprint_4, task_blueprint_5)), set(task_blueprint_3.successors.all()))
-#         self.assertEqual(set((task_blueprint_3,)), set(task_blueprint_4.predecessors.all()))
-#         self.assertEqual(set((task_blueprint_3,)), set(task_blueprint_5.predecessors.all()))
-#         self.assertEqual(set((task_blueprint_3,)), set(task_blueprint_1.successors.all()))
-#         self.assertEqual(set((task_blueprint_3,)), set(task_blueprint_2.successors.all()))
-#         self.assertEqual(set(), set(task_blueprint_1.predecessors.all()))
-#         self.assertEqual(set(), set(task_blueprint_2.predecessors.all()))
-#         self.assertEqual(set(), set(task_blueprint_4.successors.all()))
-#         self.assertEqual(set((task_blueprint_6,)), set(task_blueprint_5.successors.all()))
-#
-#
-# class TaskRelationBlueprintTest(unittest.TestCase):
-#     @classmethod
-#     def setUpClass(cls) -> None:
-#         cls.producer = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())
-#         cls.consumer = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())
-#
-#     def test_TaskRelationBlueprint_gets_created_with_correct_creation_timestamp(self):
-#         # setup
-#         before = datetime.utcnow()
-#         entry = models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
-#
-#         after = datetime.utcnow()
-#
-#         # assert
-#         self.assertLess(before, entry.created_at)
-#         self.assertGreater(after, entry.created_at)
-#
-#     def test_TaskRelationBlueprint_update_timestamp_gets_changed_correctly(self):
-#         # setup
-#         entry = models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
-#         before = datetime.utcnow()
-#         entry.save()
-#         after = datetime.utcnow()
-#
-#         # assert
-#         self.assertLess(before, entry.updated_at)
-#         self.assertGreater(after, entry.updated_at)
-#
-#     def test_TaskRelationBlueprint_prevents_missing_selection_template(self):
-#         # setup
-#         test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
-#         test_data['selection_template'] = None
-#
-#         # assert
-#         with self.assertRaises(IntegrityError):
-#             models.TaskRelationBlueprint.objects.create(**test_data)
-#
-#     def test_TaskRelationBlueprint_prevents_missing_draft(self):
-#         # setup
-#         test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
-#         test_data['draft'] = None
-#
-#         # assert
-#         with self.assertRaises(IntegrityError):
-#             models.TaskRelationBlueprint.objects.create(**test_data)
-#
-#     def test_TaskRelationBlueprint_prevents_missing_producer(self):
-#         # setup
-#         test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
-#         test_data['producer'] = None
-#
-#         # assert
-#         with self.assertRaises(IntegrityError):
-#             models.TaskRelationBlueprint.objects.create(**test_data)
-#
-#     def test_TaskRelationBlueprint_prevents_missing_consumer(self):
-#         # setup
-#         test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
-#         test_data['consumer'] = None
-#
-#         # assert
-#         with self.assertRaises(IntegrityError):
-#             models.TaskRelationBlueprint.objects.create(**test_data)
-#
-#     def test_TaskRelationBlueprint_prevents_missing_input(self):
-#         # setup
-#         test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
-#         test_data['input_role'] = None
-#
-#         # assert
-#         with self.assertRaises(IntegrityError):
-#             models.TaskRelationBlueprint.objects.create(**test_data)
-#
-#     def test_TaskRelationBlueprint_prevents_missing_output(self):
-#         # setup
-#         test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
-#         test_data['output_role'] = None
-#
-#         # assert
-#         with self.assertRaises(IntegrityError):
-#             models.TaskRelationBlueprint.objects.create(**test_data)
-#
-#
-#
-#
-# class TestStationTimeLine(unittest.TestCase):
-#     """
-#     Actually this simple testcase should be in a separate module (t_tmssapp_calculations_django_API.py)
-#     but I was just lazy and spare some overhead and I just 'piggyback' with this module
-#     """
-#
-#     def test_StationTimeline_raises_Error_on_duplicate_station_timeline(self):
-#         """
-#         Test if adding a duplicate station-timestamp combination leads to an Error and so data is not inserted
-#         """
-#         import datetime
-#
-#         test_data = {"station_name": "CS001",
-#                      "timestamp": datetime.date(2021, 4, 1),
-#                      "sunrise_start": datetime.datetime(year=2021, month=4, day=1, hour=6, minute=1, second=0),
-#                      "sunrise_end": datetime.datetime(year=2021, month=4, day=1, hour=7, minute=2, second=0),
-#                      "sunset_start": datetime.datetime(year=2021, month=4, day=1, hour=20, minute=31, second=0),
-#                      "sunset_end": datetime.datetime(year=2021, month=4, day=1, hour=21, minute=33, second=0) }
-#
-#         models.StationTimeline.objects.create(**test_data)
-#         with self.assertRaises(IntegrityError) as context:
-#             models.StationTimeline.objects.create(**test_data)
-#             self.assertIn('unique_station_time_line', str(context.exception))
-#
-#         self.assertEqual(len(models.StationTimeline.objects.filter(timestamp=datetime.date(2021, 4, 1))), 1)
-#         self.assertEqual(len(models.StationTimeline.objects.all()), 1)
-#         # Add a non-duplicate
-#         test_data["station_name"] = "CS002"
-#         models.StationTimeline.objects.create(**test_data)
-#         self.assertEqual(len(models.StationTimeline.objects.filter(timestamp=datetime.date(2021, 4, 1))), 2)
-#         self.assertEqual(len(models.StationTimeline.objects.all()), 2)
+        # setup
+        project = models.Project.objects.create(**Project_test_data(can_trigger=True))
+        scheduling_set = models.SchedulingSet.objects.create(**SchedulingSet_test_data(project=project))
+        constraints_template = models.SchedulingConstraintsTemplate.objects.get(name="constraints")
+        scheduling_unit_draft = models.SchedulingUnitDraft.objects.create(**SchedulingUnitDraft_test_data(scheduling_set=scheduling_set, is_triggered=True, scheduling_constraints_template=constraints_template))
+        scheduling_unit_blueprint = models.SchedulingUnitBlueprint.objects.create(**SchedulingUnitBlueprint_test_data(draft=scheduling_unit_draft))
+
+        # assert
+        self.assertEqual(scheduling_unit_draft.is_triggered, scheduling_unit_blueprint.is_triggered)
+
+    def test_SchedulingUnitBlueprint_prevents_triggers_if_project_does_not_allow_triggers(self):
+
+        # setup
+        project = models.Project.objects.create(**Project_test_data(can_trigger=False))
+        scheduling_set = models.SchedulingSet.objects.create(**SchedulingSet_test_data(project=project))
+        scheduling_unit_draft = models.SchedulingUnitDraft.objects.create(**SchedulingUnitDraft_test_data(scheduling_set=scheduling_set, is_triggered=True))
+
+        # assert
+        with self.assertRaises(BlueprintCreationException) as context:
+            models.SchedulingUnitBlueprint.objects.create(**SchedulingUnitBlueprint_test_data(draft=scheduling_unit_draft))
+
+        self.assertIn('does not allow triggering', str(context.exception))
+
+
+class TaskBlueprintTest(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls) -> None:
+        cls.task_draft = models.TaskDraft.objects.create(**TaskDraft_test_data())
+        cls.scheduling_unit_blueprint = models.SchedulingUnitBlueprint.objects.create(**SchedulingUnitBlueprint_test_data())
+
+    def test_TaskBlueprint_gets_created_with_correct_creation_timestamp(self):
+
+        # setup
+        before = datetime.utcnow()
+        entry = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
+
+        after = datetime.utcnow()
+
+        # assert
+        self.assertLess(before, entry.created_at)
+        self.assertGreater(after, entry.created_at)
+
+    def test_TaskBlueprint_update_timestamp_gets_changed_correctly(self):
+
+        # setup
+        entry = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
+        before = datetime.utcnow()
+        entry.save()
+        after = datetime.utcnow()
+
+        # assert
+        self.assertLess(before, entry.updated_at)
+        self.assertGreater(after, entry.updated_at)
+
+    def test_TaskBlueprint_prevents_missing_template(self):
+
+        # setup
+        test_data = dict(TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
+        test_data['specifications_template'] = None
+
+        # assert
+        with self.assertRaises(IntegrityError):
+            models.TaskBlueprint.objects.create(**test_data)
+
+    def test_TaskBlueprint_prevents_missing_draft(self):
+
+        # setup
+        test_data = dict(TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
+        test_data['draft'] = None
+
+        # assert
+        with self.assertRaises(IntegrityError):
+            models.TaskBlueprint.objects.create(**test_data)
+
+    def test_TaskBlueprint_prevents_draft_deletion(self):
+        # setup
+        test_data = dict(TaskBlueprint_test_data())
+        blueprint = models.TaskBlueprint.objects.create(**test_data)
+        draft = blueprint.draft
+        with self.assertRaises(ProtectedError):
+            draft.delete()
+
+    def test_TaskBlueprint_prevents_missing_scheduling_unit_blueprint(self):
+
+        # setup
+        test_data = dict(TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
+        test_data['scheduling_unit_blueprint'] = None
+
+        # assert
+        with self.assertRaises(IntegrityError):
+            models.TaskBlueprint.objects.create(**test_data)
+
+    def test_TaskBlueprint_predecessors_and_successors_none(self):
+        task_blueprint_1: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
+        task_blueprint_2: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
+
+        self.assertEqual(set(), set(task_blueprint_1.predecessors.all()))
+        self.assertEqual(set(), set(task_blueprint_2.predecessors.all()))
+        self.assertEqual(set(), set(task_blueprint_1.successors.all()))
+        self.assertEqual(set(), set(task_blueprint_2.successors.all()))
+
+    def test_TaskBlueprint_predecessors_and_successors_simple(self):
+        task_blueprint_1: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
+        task_blueprint_2: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=self.task_draft, scheduling_unit_blueprint=self.scheduling_unit_blueprint))
+
+        models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_1, consumer=task_blueprint_2))
+
+        self.assertEqual(task_blueprint_1, task_blueprint_2.predecessors.all()[0])
+        self.assertEqual(task_blueprint_2, task_blueprint_1.successors.all()[0])
+
+    def test_TaskBlueprint_predecessors_and_successors_complex(self):
+        task_blueprint_1: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4())))
+        task_blueprint_2: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
+        task_blueprint_3: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
+        task_blueprint_4: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
+        task_blueprint_5: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
+        task_blueprint_6: models.TaskBlueprint = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(name=str(uuid.uuid4()), task_draft=task_blueprint_1.draft, scheduling_unit_blueprint=task_blueprint_1.scheduling_unit_blueprint))
+
+        # ST1 ---> ST3 ---> ST4
+        #      |        |
+        # ST2 -          -> ST5 ---> ST6
+
+        models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_1, consumer=task_blueprint_3))
+        trb1 = models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_2, consumer=task_blueprint_3))
+        models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_3, consumer=task_blueprint_4))
+        models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_3, consumer=task_blueprint_5))
+        models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=task_blueprint_5, consumer=task_blueprint_6))
+
+        self.assertEqual(set((task_blueprint_1, task_blueprint_2)), set(task_blueprint_3.predecessors.all()))
+        self.assertEqual(set((task_blueprint_4, task_blueprint_5)), set(task_blueprint_3.successors.all()))
+        self.assertEqual(set((task_blueprint_3,)), set(task_blueprint_4.predecessors.all()))
+        self.assertEqual(set((task_blueprint_3,)), set(task_blueprint_5.predecessors.all()))
+        self.assertEqual(set((task_blueprint_3,)), set(task_blueprint_1.successors.all()))
+        self.assertEqual(set((task_blueprint_3,)), set(task_blueprint_2.successors.all()))
+        self.assertEqual(set(), set(task_blueprint_1.predecessors.all()))
+        self.assertEqual(set(), set(task_blueprint_2.predecessors.all()))
+        self.assertEqual(set(), set(task_blueprint_4.successors.all()))
+        self.assertEqual(set((task_blueprint_6,)), set(task_blueprint_5.successors.all()))
+
+
+class TaskRelationBlueprintTest(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls) -> None:
+        cls.producer = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())
+        cls.consumer = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())
+
+    def test_TaskRelationBlueprint_gets_created_with_correct_creation_timestamp(self):
+        # setup
+        before = datetime.utcnow()
+        entry = models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
+
+        after = datetime.utcnow()
+
+        # assert
+        self.assertLess(before, entry.created_at)
+        self.assertGreater(after, entry.created_at)
+
+    def test_TaskRelationBlueprint_update_timestamp_gets_changed_correctly(self):
+        # setup
+        entry = models.TaskRelationBlueprint.objects.create(**TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
+        before = datetime.utcnow()
+        entry.save()
+        after = datetime.utcnow()
+
+        # assert
+        self.assertLess(before, entry.updated_at)
+        self.assertGreater(after, entry.updated_at)
+
+    def test_TaskRelationBlueprint_prevents_missing_selection_template(self):
+        # setup
+        test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
+        test_data['selection_template'] = None
+
+        # assert
+        with self.assertRaises(IntegrityError):
+            models.TaskRelationBlueprint.objects.create(**test_data)
+
+    def test_TaskRelationBlueprint_prevents_missing_draft(self):
+        # setup
+        test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
+        test_data['draft'] = None
+
+        # assert
+        with self.assertRaises(IntegrityError):
+            models.TaskRelationBlueprint.objects.create(**test_data)
+
+    def test_TaskRelationBlueprint_prevents_missing_producer(self):
+        # setup
+        test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
+        test_data['producer'] = None
+
+        # assert
+        with self.assertRaises(IntegrityError):
+            models.TaskRelationBlueprint.objects.create(**test_data)
+
+    def test_TaskRelationBlueprint_prevents_missing_consumer(self):
+        # setup
+        test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
+        test_data['consumer'] = None
+
+        # assert
+        with self.assertRaises(IntegrityError):
+            models.TaskRelationBlueprint.objects.create(**test_data)
+
+    def test_TaskRelationBlueprint_prevents_missing_input(self):
+        # setup
+        test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
+        test_data['input_role'] = None
+
+        # assert
+        with self.assertRaises(IntegrityError):
+            models.TaskRelationBlueprint.objects.create(**test_data)
+
+    def test_TaskRelationBlueprint_prevents_missing_output(self):
+        # setup
+        test_data = dict(TaskRelationBlueprint_test_data(producer=self.producer, consumer=self.consumer))
+        test_data['output_role'] = None
+
+        # assert
+        with self.assertRaises(IntegrityError):
+            models.TaskRelationBlueprint.objects.create(**test_data)
+
+
+
+
+class TestStationTimeLine(unittest.TestCase):
+    """
+    Actually this simple testcase should be in a separate module (t_tmssapp_calculations_django_API.py),
+    but to spare some overhead it simply 'piggybacks' on this module.
+    """
+
+    def test_StationTimeline_raises_Error_on_duplicate_station_timeline(self):
+        """
+        Test if adding a duplicate station-timestamp combination leads to an Error and so data is not inserted
+        """
+        import datetime
+
+        test_data = {"station_name": "CS001",
+                     "timestamp": datetime.date(2021, 4, 1),
+                     "sunrise_start": datetime.datetime(year=2021, month=4, day=1, hour=6, minute=1, second=0),
+                     "sunrise_end": datetime.datetime(year=2021, month=4, day=1, hour=7, minute=2, second=0),
+                     "sunset_start": datetime.datetime(year=2021, month=4, day=1, hour=20, minute=31, second=0),
+                     "sunset_end": datetime.datetime(year=2021, month=4, day=1, hour=21, minute=33, second=0) }
+
+        models.StationTimeline.objects.create(**test_data)
+        with self.assertRaises(IntegrityError) as context:
+            models.StationTimeline.objects.create(**test_data)
+            self.assertIn('unique_station_time_line', str(context.exception))
+
+        self.assertEqual(len(models.StationTimeline.objects.filter(timestamp=datetime.date(2021, 4, 1))), 1)
+        self.assertEqual(len(models.StationTimeline.objects.all()), 1)
+        # Add a non-duplicate
+        test_data["station_name"] = "CS002"
+        models.StationTimeline.objects.create(**test_data)
+        self.assertEqual(len(models.StationTimeline.objects.filter(timestamp=datetime.date(2021, 4, 1))), 2)
+        self.assertEqual(len(models.StationTimeline.objects.all()), 2)
 
 
 if __name__ == "__main__":
diff --git a/SAS/TMSS/backend/test/tmss_test_data_django_models.py b/SAS/TMSS/backend/test/tmss_test_data_django_models.py
index 538d5d7480920cc1ca4c924914f675e91e4c8892..bed85447893a64d92816fc75ac3a828df3359e55 100644
--- a/SAS/TMSS/backend/test/tmss_test_data_django_models.py
+++ b/SAS/TMSS/backend/test/tmss_test_data_django_models.py
@@ -118,7 +118,7 @@ def Cycle_test_data() -> dict:
             "start": datetime.utcnow().isoformat(),
             "stop": datetime.utcnow().isoformat()}
 
-def Project_test_data(name: str=None, priority_rank: int = 1, auto_pin=False) -> dict:
+def Project_test_data(name: str=None, priority_rank: int = 1, auto_pin=False, can_trigger=False) -> dict:
     if name is None:
         name = 'my_project_' + str(uuid.uuid4())
 
@@ -129,7 +129,7 @@ def Project_test_data(name: str=None, priority_rank: int = 1, auto_pin=False) ->
                "auto_ingest": False,
                "priority_rank": priority_rank,
                "trigger_priority": 1000,
-               "can_trigger": False,
+               "can_trigger": can_trigger,
                "private_data": True,
                "expert": True,
                "filler": False,
@@ -193,7 +193,8 @@ def SchedulingUnitDraft_test_data(name="my_scheduling_unit_draft", scheduling_se
                                   template: models.SchedulingUnitTemplate=None, requirements_doc: dict=None,
                                   observation_strategy_template: models.SchedulingUnitObservingStrategyTemplate=None,
                                   scheduling_constraints_doc: dict=None,
-                                  scheduling_constraints_template: models.SchedulingConstraintsTemplate=None) -> dict:
+                                  scheduling_constraints_template: models.SchedulingConstraintsTemplate=None,
+                                  is_triggered=False) -> dict:
     if scheduling_set is None:
         scheduling_set = models.SchedulingSet.objects.create(**SchedulingSet_test_data())
 
@@ -220,7 +221,8 @@ def SchedulingUnitDraft_test_data(name="my_scheduling_unit_draft", scheduling_se
             "requirements_template": template,
             "observation_strategy_template": observation_strategy_template,
             "scheduling_constraints_template": scheduling_constraints_template,
-            "scheduling_constraints_doc": scheduling_constraints_doc}
+            "scheduling_constraints_doc": scheduling_constraints_doc,
+            "is_triggered": is_triggered}
 
 def TaskDraft_test_data(name: str=None, specifications_template: models.TaskTemplate=None, specifications_doc: dict=None, scheduling_unit_draft: models.SchedulingUnitDraft=None, output_pinned=False) -> dict:
     if name is None:
diff --git a/SAS/TMSS/backend/test/tmss_test_environment_unittest_setup.py b/SAS/TMSS/backend/test/tmss_test_environment_unittest_setup.py
index 2c3dd34f8f81bd2a256eaa7ffe5164408eb8de34..acbc4384ad8402735abf504ee44d2ba66e662fa8 100644
--- a/SAS/TMSS/backend/test/tmss_test_environment_unittest_setup.py
+++ b/SAS/TMSS/backend/test/tmss_test_environment_unittest_setup.py
@@ -65,7 +65,7 @@ def _call_API_and_assert_expected_response(test_instance, url, call, data, expec
     elif call == 'POST':
         response = requests.post(url, json=data, auth=auth)
     elif call == 'GET':
-        response = requests.get(url, auth=auth)
+        response = requests.get(url, auth=auth, allow_redirects=False)
     elif call == 'PATCH':
         response = requests.patch(url, json=data, auth=auth)
     elif call == 'DELETE':