diff --git a/.gitattributes b/.gitattributes
index ff7a4e0019ed9cfd4bd565b4c0bb9dce133addd8..46e18f6784d2c516af5418c52452ccfbf0edea21 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -5086,6 +5086,7 @@ SAS/TriggerServices/bin/triggercancellationservice.ini -text
 SAS/TriggerServices/bin/triggerrestinterface -text
 SAS/TriggerServices/bin/triggerservice -text
 SAS/TriggerServices/bin/triggerservice.ini -text
+SAS/TriggerServices/config/dbcredentials_trigger_restinterface.ini -text
 SAS/TriggerServices/django_rest/CMakeLists.txt -text
 SAS/TriggerServices/django_rest/db.sqlite3 -text
 SAS/TriggerServices/django_rest/manage.py -text
diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/resource_availability_checker.py b/SAS/ResourceAssignment/ResourceAssigner/lib/resource_availability_checker.py
index 9182b0cf7a3a78f05b99e43b3737be2bf863c15d..bb2ff9118223e7202cb3834bbeb75458ab1ce6eb 100644
--- a/SAS/ResourceAssignment/ResourceAssigner/lib/resource_availability_checker.py
+++ b/SAS/ResourceAssignment/ResourceAssigner/lib/resource_availability_checker.py
@@ -361,6 +361,10 @@ class ResourceAvailabilityChecker(object):
         """ Returns list of available resources of type id in needed_resources_by_type_id.keys()
             starting at group id root_gid in the format [{type_id: {<resource_dict>}, ...}, ...].
         """
+        # Replace the list of dicts with a dict of dicts, because rid is not guaranteed to be
+        # the correct index into the list.
+        available_recources = {r["id"]:r for r in db_resource_list}
+
         # Search breadth-first starting at root_gid.
         gids = [root_gid]
         resources_list = []
@@ -372,11 +376,14 @@ class ResourceAvailabilityChecker(object):
 
             res_group = self.resource_group_relations[gids[i]]
             for rid in res_group['resource_ids']:
-                type_id = db_resource_list[rid]['type_id']
-                if type_id in needed_resources_by_type_id and db_resource_list[rid]['active'] and \
-                                db_resource_list[rid]['available_capacity'] > 0:
-                    resources[type_id] = db_resource_list[rid]
-                    type_ids_seen.add(type_id)
+                if rid in available_recources:
+                    type_id = available_recources[rid]['type_id']
+                    if type_id in needed_resources_by_type_id and available_recources[rid]['active'] and \
+                                    available_recources[rid]['available_capacity'] > 0:
+                        resources[type_id] = available_recources[rid]
+                        type_ids_seen.add(type_id)
+                else:
+                    logger.debug("requested resource id %s is not available/claimable", rid)
 
             # Only add resource IDs if all needed types are present in this resource group
             if type_ids_seen == set(needed_resources_by_type_id):
diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py b/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py
index 9974e7e6ce30890cc430587f1f31a48f1727d6da..79fbe86715533d1d27506e5cbddd6e06ad21ae64 100644
--- a/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py
+++ b/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py
@@ -126,11 +126,9 @@ class BasicScheduler(object):
 
     def _pre_process_allocation(self):
         """
-        Placeholder for derived classes, available to be able perform for any processing prior to the actual allocation
-        of resources done by allocate_resources().
-
-        Does nothing in this base class.
+        Clears unusable resources so that on a next try they will not block based on previous usage.
         """
+        self.unusable_resources = []
 
         logger.debug("BasicScheduler: _pre_process_allocation for task %s", self.task_id)
 
diff --git a/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.py b/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.py
index 5170f5275a3721ac4a0f366ddb3405d9c8fe4fdb..0e57b4fc59890615fa9e172f0de521ff4449bbf9 100755
--- a/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.py
+++ b/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.py
@@ -1379,7 +1379,7 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase):
         expected_claims = [resource_type_3_dict, resource_type_5_dict, resource_type_3_dict, resource_type_5_dict,
                            resource_type_3_dict, resource_type_5_dict, resource_type_3_dict, resource_type_5_dict]
 
-        self.logger_mock.debug.assert_any_call('fit_multiple_resources: created claim: %s', expected_claims)
+        self.logger_mock.info.assert_any_call('fit_multiple_resources: created claims: %s', expected_claims)
 
     def test_get_is_claimable_invalid_resource_group(self):
         """ If we try to find claims with a non-existing root_resource_group, get_is_claimable should fail. """
@@ -1391,15 +1391,14 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase):
                 'storage': 100
             }
         }]
-        claimable_resources_list = {
-            self.cep4storage_resource_id: {
+        claimable_resources_list = [{
                 'id': self.cep4storage_resource_id,
                 'type_id': 5,
                 'claimable_capacity': 400,
                 'available_capacity': 400,
                 'active': True
             }
-        }
+        ]
 
         with self.assertRaises(ValueError):
             _, _ = self.uut.get_is_claimable(estimates, claimable_resources_list)
@@ -1418,21 +1417,20 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase):
                 'storage': 100
             }
         }]
-        claimable_resources_list = {
-            self.cep4bandwidth_resource_id: {
+        claimable_resources_list = [{
                 'id': self.cep4bandwidth_resource_id,
                 'type_id': 3,
                 'claimable_capacity': 4000,
                 'available_capacity': 4000,
                 'active': True
             },
-            self.cep4storage_resource_id: {
+            {
                 'id': self.cep4storage_resource_id,
                 'type_id': 5,
                 'claimable_capacity': 400,
                 'available_capacity': 400,
                 'active': True
-            }}
+            }]
 
         claimable_resources = self.uut.get_is_claimable(estimates, claimable_resources_list)
 
@@ -1450,21 +1448,20 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase):
                 'storage': 100
             }
         }]
-        claimable_resources_list = {
-            self.cep4bandwidth_resource_id: {
+        claimable_resources_list = [{
                 'id': self.cep4bandwidth_resource_id,
                 'type_id': 3,
                 'claimable_capacity': 4000,
                 'available_capacity': 4000, 'active': True
             },
-            self.cep4storage_resource_id: {
+            {
                 'id': self.cep4storage_resource_id,
                 'type_id': 5,
                 'claimable_capacity': 300,
                 'available_capacity': 300,
                 'active': True
             }
-        }
+        ]
 
         with self.assertRaises(CouldNotFindClaimException):
             self.uut.get_is_claimable(estimates, claimable_resources_list)
@@ -1486,21 +1483,20 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase):
                 'bandwidth': 1000,
                 'storage': 100
             }}]
-        claimable_resources_list = {
-            self.cep4bandwidth_resource_id: {
+        claimable_resources_list = [{
                 'id': self.cep4bandwidth_resource_id,
                 'type_id': 3,
                 'claimable_capacity': 5000,
                 'available_capacity': 5000,
                 'active': True
             },
-            self.cep4storage_resource_id: {
+            {
                 'id': self.cep4storage_resource_id,
                 'type_id': 5,
                 'claimable_capacity': 500,
                 'available_capacity': 500,
                 'active': True
-            }}
+            }]
 
         # TODO: verify with Jan David whether this test case (returning a partial fit) should still succeed or whether
         # an exception is expected to be raised
diff --git a/SAS/ResourceAssignment/ResourceAssigner/test/t_schedulers.py b/SAS/ResourceAssignment/ResourceAssigner/test/t_schedulers.py
index 8838598149a81f9a7d5035ed03d46eb3f6645001..94a43b5c872a56fec7f6466aa01b92b79baad94c 100755
--- a/SAS/ResourceAssignment/ResourceAssigner/test/t_schedulers.py
+++ b/SAS/ResourceAssignment/ResourceAssigner/test/t_schedulers.py
@@ -52,6 +52,8 @@ class FakeRADatabase(object):
         # maximum capacity of our resource
         self.resource_capacity = resource_capacity
 
+        self.resources = [{"id": x} for x in xrange(6)]
+
     def addTask(self, id, task):
         self.tasks[id] = task
         self.tasks[id]["id"] = id
@@ -100,7 +102,7 @@ class FakeRADatabase(object):
 
     def getResources(self, *args, **kwargs):
         # we model six resources, can expand if needed
-        return [ { "id": x } for x in xrange(6) ]
+        return self.resources
 
     def getResourceGroupMemberships(self):
         # We model 4 stations: 2 remote, and 2 core
@@ -247,32 +249,39 @@ class FakeRADatabase(object):
         self.claims = deepcopy(self.committed_claims)
         self.tasks = deepcopy(self.committed_tasks)
 
+
 class FakeResourceAvailabilityChecker(object):
-  resource_types = {
-    "storage": 0,
-    "bandwidth": 1,
-  }
-
-  def get_is_claimable(self, requested_resources, available_resources):
-    if not available_resources:
-        raise CouldNotFindClaimException
-
-    # fullfil one request at a time to keep the code simple. We map it on
-    # the first available resource
-    r = requested_resources[0]
-
-    # use resource 0, or resource #stationnr
-    rid = int(r["station"][2:]) if "station" in r else available_resources[0]["id"]
-    if rid not in [x["id"] for x in available_resources]:
-      raise CouldNotFindClaimException
-
-    rtype = r["resource_types"].keys()[0]
-    return [{
-      'requested_resources': [r],
-      'claim_size': r["resource_types"][rtype],
-      'resource_id': rid,
-      'resource_type_id': self.resource_types[rtype]
-    }]
+    resource_types = {
+        "storage": 0,
+        "bandwidth": 1}
+
+    requested_resources = []
+    available_resources = []
+
+    def get_is_claimable(self, requested_resources, available_resources):
+        self.requested_resources = requested_resources
+        self.available_resources = available_resources
+
+        if not available_resources:
+            raise CouldNotFindClaimException
+
+        # fulfil one request at a time to keep the code simple. We map it onto
+        # the first available resource
+        r = requested_resources[0]
+
+        # use resource 0, or resource #stationnr
+        rid = int(r["station"][2:]) if "station" in r else available_resources[0]["id"]
+        if rid not in [x["id"] for x in available_resources]:
+            raise CouldNotFindClaimException
+
+        rtype = r["resource_types"].keys()[0]
+        return [{
+          'requested_resources': [r],
+          'claim_size': r["resource_types"][rtype],
+          'resource_id': rid,
+          'resource_type_id': self.resource_types[rtype]
+        }]
+
 
 class SchedulerTest(unittest.TestCase):
     """ Setup mechanics to use a FakeRADatabase and FakeResourceAvailabilityChecker to simulate a system with
@@ -293,6 +302,7 @@ class SchedulerTest(unittest.TestCase):
         self.mock_ra_database()
         self.mock_resource_availability_checker()
 
+
 class BasicSchedulerTest(SchedulerTest):
     def new_task(self, task_id):
         self.fake_ra_database.addTask(task_id, {
@@ -337,7 +347,6 @@ class BasicSchedulerTest(SchedulerTest):
         self.assertEqual(claim["claim_size"],       512)
         self.assertEqual(claim["resource_type_id"], FakeResourceAvailabilityChecker.resource_types["bandwidth"])
 
-
     def test_multiple_resources(self):
         """ Whether a task (that fits) can be scheduled. """
 
@@ -358,10 +367,16 @@ class BasicSchedulerTest(SchedulerTest):
         estimates = [{ 'resource_types': {'bandwidth': 2048} }]
         allocation_succesful = self.new_scheduler(0, lambda _: estimates).allocate_resources()
 
-        # Allocation must fail, and rollback() called
-        self.assertFalse(allocation_succesful)
-        self.assertFalse(self.fake_ra_database.committed)
-        self.assertTrue(self.fake_ra_database.rolled_back)
+        if self.__class__ == BasicSchedulerTest: # This inheritance of tests is not ideal
+            # Allocation must fail, and commit called so we get a conflicted state
+            self.assertFalse(allocation_succesful)
+            self.assertTrue(self.fake_ra_database.committed)
+            self.assertFalse(self.fake_ra_database.rolled_back)
+        else:
+            # Allocation must fail, and rollback called
+            self.assertFalse(allocation_succesful)
+            self.assertFalse(self.fake_ra_database.committed)
+            self.assertTrue(self.fake_ra_database.rolled_back)
 
     def test_schedule_two_tasks_too_large_task(self):
         """ Whether two tasks that fit individually but not together will be rejected by the scheduler. """
@@ -860,6 +875,33 @@ class DwellSchedulerTest(PrioritySchedulerTest):
         self.assertEqual(self.fake_ra_database.tasks[1]["starttime"], datetime.datetime(2017, 1, 1, 3, 1, 0))
         self.assertEqual(self.fake_ra_database.tasks[1]["endtime"],   datetime.datetime(2017, 1, 1, 4, 1, 0))
 
+    def test_dwellScheduler_should_give_all_available_resources_on_second_pass(self):
+        """
+        This tests bug LSRT-60 where the second observation of template two does not get scheduled
+        when dwelling is active. The basic scheduler keeps track of resources that can't be killed.
+        The guess is that its used for optimization purposes. The cause of the bug is that this list
+        does not get cleared and on dwelling to the next part it should fit. But the resources in
+        that list get subtracted from the list handed to the resource_availability checker.
+        This test verifies that the complete list should be provided on the second try.
+        """
+
+        # First task must succeed
+        self.new_task(0)
+        estimates = [{'resource_types': {'bandwidth': 512}}]
+        allocation_succesful = self.new_dwell_scheduler(0, lambda _: estimates).allocate_resources()
+        self.assertTrue(allocation_succesful)
+
+        # Second task must also succeed
+        self.new_task(1)
+        estimates = [{ 'resource_types': {'bandwidth': 513} }]
+        allocation_succesful = self.new_dwell_scheduler(1, lambda _: estimates).allocate_resources()
+        self.assertTrue(allocation_succesful)
+
+        # available resources can be limited by tracking unkillable resources. They should be
+        # cleared on the second try like in this test.
+        self.assertEqual(self.fake_resource_availability_checker.available_resources,
+                         self.fake_ra_database.resources)
+
 if __name__ == '__main__':
     logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
 
diff --git a/SAS/TriggerServices/config/dbcredentials_trigger_restinterface.ini b/SAS/TriggerServices/config/dbcredentials_trigger_restinterface.ini
new file mode 100644
index 0000000000000000000000000000000000000000..7db6c53863cbcd248656e8804a7179588a8043c8
--- /dev/null
+++ b/SAS/TriggerServices/config/dbcredentials_trigger_restinterface.ini
@@ -0,0 +1,7 @@
+[database:trigger_restinterface]
+type = mysql
+host = mysql1.control.lofar
+user = <redacted>
+password = <redacted>
+database = lofar_trigger_rest
+port = 3306
diff --git a/SAS/TriggerServices/django_rest/restinterface/settings.py b/SAS/TriggerServices/django_rest/restinterface/settings.py
index abd72b9b1d740a12d79e058ff558f5e4f6edf1b9..428cd551c6facb85e003e88eb19262fe60c1ae46 100644
--- a/SAS/TriggerServices/django_rest/restinterface/settings.py
+++ b/SAS/TriggerServices/django_rest/restinterface/settings.py
@@ -23,7 +23,7 @@ logger.setLevel(logging.INFO)
 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
 dbc = dbcredentials.DBCredentials()
-mom_credentials = dbc.get("MoM")
+restinterface_credentials = dbc.get("trigger_restinterface")
 
 # Quick-start development settings - unsuitable for production
 # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
@@ -87,11 +87,11 @@ WSGI_APPLICATION = 'lofar.triggerservices.restinterface.wsgi.application'
 DATABASES = {
     'default': {
         'ENGINE': 'django.db.backends.mysql', 
-        'NAME': 'lofar_trigger_rest',
-        'USER': mom_credentials.user,
-        'PASSWORD': mom_credentials.password,
-        'HOST': mom_credentials.host,
-        'PORT': mom_credentials.port,
+        'NAME': restinterface_credentials.database,
+        'USER': restinterface_credentials.user,
+        'PASSWORD': restinterface_credentials.password,
+        'HOST': restinterface_credentials.host,
+        'PORT': restinterface_credentials.port,
     }
 }