diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py b/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py
index 334008e611918f6959bcdd166e50f0da12b6cf99..ea374250236b38524742631147c1c98879f7867b 100644
--- a/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py
+++ b/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py
@@ -552,6 +552,10 @@ class PriorityScheduler(StationScheduler):
         logger.debug("my_task_priority, messing around with MoM QS")
 
         my_momid = self.task["mom_id"]
+        if my_momid is None:
+            logger.debug("PriorityScheduler: returning default priority of -1 for non-MoM task %s", self.task.get('otdb_id', self.task.get('tmss_id')))
+            return -1
+
         priority_dict = self.momqueryservice.get_project_priorities_for_objects([my_momid])
         my_priority = priority_dict[my_momid]
 
@@ -676,10 +680,12 @@ class PriorityScheduler(StationScheduler):
         logger.debug("PriorityScheduler: conflicting tasks are %s", conflicting_tasks)
 
         # check which tasks we can kill
-        task_priorities = self.momqueryservice.get_project_priorities_for_objects(conflicting_task_momids)
-        logger.debug("PriorityScheduler: conflicting task priorities are %s", task_priorities)
-        # We can't kill tasks without a mom_id (reservations and such) !
-        kill_task_list = [t for t in conflicting_tasks if t["mom_id"] is not None and task_priorities[t["mom_id"]] < self._my_task_priority()]
+        kill_task_list = []
+        if conflicting_task_momids:
+            task_priorities = self.momqueryservice.get_project_priorities_for_objects(conflicting_task_momids)
+            logger.debug("PriorityScheduler: conflicting task priorities are %s", task_priorities)
+            # We can't kill tasks without a mom_id (reservations and such)!
+            kill_task_list = [t for t in conflicting_tasks if t["mom_id"] is not None and task_priorities[t["mom_id"]] < self._my_task_priority()]
         logger.debug("PriorityScheduler: task kill list is %s", kill_task_list)
 
         # update if we're blocked by an earlier task than we know so far
diff --git a/SAS/TMSS/test/t_scheduling.py b/SAS/TMSS/test/t_scheduling.py
index aef63e1bae5b272060f3e34bb67390a92f39da34..8d0b68214e60bc2500212d1a286f7acde34d3d5c 100755
--- a/SAS/TMSS/test/t_scheduling.py
+++ b/SAS/TMSS/test/t_scheduling.py
@@ -57,7 +57,8 @@ from lofar.sas.tmss.test.tmss_test_data_django_models import *
 # import and setup rest test data creator
 from lofar.sas.tmss.test.tmss_test_data_rest import TMSSRESTTestDataCreator
 test_data_creator = TMSSRESTTestDataCreator(tmss_test_env.django_server.url, (tmss_test_env.ldap_server.dbcreds.user, tmss_test_env.ldap_server.dbcreds.password))
-
+from datetime import datetime, timedelta
+from lofar.sas.resourceassignment.resourceassigner.rarpc import RARPC
 
 class SchedulingTest(unittest.TestCase):
     def setUp(self):
@@ -85,6 +86,46 @@ class SchedulingTest(unittest.TestCase):
             self.assertEqual('scheduled', subtask['state_value'])
             self.assertEqual('scheduled', ra_test_env.radb.getTask(tmss_id=subtask_id)['status'])
 
+    def test_schedule_observation_subtask_with_blocking_reservations(self):
+
+        # create a reservation on station CS001
+        with RARPC.create() as rarpc:
+            ra_spec = { 'task_type': 'reservation',
+                        'task_subtype': 'maintenance',
+                        'status': 'prescheduled',
+                        'starttime': datetime.utcnow()-timedelta(hours=1),
+                        'endtime': datetime.utcnow() + timedelta(hours=1),
+                        'cluster': None,
+                        'specification': {} }
+            inner_spec = { 'Observation.VirtualInstrument.stationList': ['CS001'],
+                           'Observation.startTime': '2020-01-08 06:30:00',
+                           'Observation.endTime': '2021-07-08 06:30:00' }
+            ra_spec['specification'] = inner_spec
+            assigned = rarpc.do_assignment(ra_spec)
+            self.assertTrue(assigned)
+
+        with tmss_test_env.create_tmss_client() as client:
+            subtask_template = client.get_subtask_template("observationcontrol schema")
+            spec = get_default_json_object_for_schema(subtask_template['schema'])
+            spec['stations']['digital_pointings'][0]['subbands'] = [0]
+            cluster_url = client.get_path_as_json_object('/cluster/1')['url']
+
+            subtask_data = test_data_creator.Subtask(specifications_template_url=subtask_template['url'],
+                                                     specifications_doc=spec,
+                                                     cluster_url=cluster_url)
+            subtask = test_data_creator.post_data_and_get_response_as_json_object(subtask_data, '/subtask/')
+            subtask_id = subtask['id']
+            test_data_creator.post_data_and_get_url(test_data_creator.SubtaskOutput(subtask_url=subtask['url']), '/subtask_output/')
+
+            client.set_subtask_status(subtask_id, 'defined')
+
+            with self.assertRaises(Exception):
+                client.schedule_subtask(subtask_id)
+
+            subtask = client.get_subtask(subtask_id)
+            self.assertEqual('error', subtask['state_value'])
+            self.assertEqual('conflict', ra_test_env.radb.getTask(tmss_id=subtask_id)['status'])
+
     def test_schedule_pipeline_subtask_with_enough_resources_available(self):
         with tmss_test_env.create_tmss_client() as client:
             cluster_url = client.get_path_as_json_object('/cluster/1')['url']