diff --git a/SAS/DataManagement/StorageQueryService/cache.py b/SAS/DataManagement/StorageQueryService/cache.py
index b06e9cc78bf8f3f9c0783824cd213e83997c5c39..bec4c7ce4881ba2f5df07004e75fb41b84f8f5a6 100644
--- a/SAS/DataManagement/StorageQueryService/cache.py
+++ b/SAS/DataManagement/StorageQueryService/cache.py
@@ -30,7 +30,7 @@ from lofar.mom.momqueryservice.config import DEFAULT_MOMQUERY_BUSNAME, DEFAULT_M
 
 logger = logging.getLogger(__name__)
 
-MAX_CACHE_ENTRY_AGE = datetime.timedelta(days=1)
+MAX_CACHE_ENTRY_AGE = datetime.timedelta(hours=8)
 
 class CacheManager:
     def __init__(self,
@@ -115,7 +115,7 @@ class CacheManager:
             with open(tmp_path, 'w') as file:
                 with self._cacheLock:
                     file.write(pformat(self._cache) + '\n')
-            os.replace(tmp_path, self._cache_path)
+            os.rename(tmp_path, self._cache_path)
         except Exception as e:
             logger.error("Error while writing du cache: %s", e)
 
@@ -156,9 +156,6 @@ class CacheManager:
 
         self._sendDiskUsageChangedNotification(path, du_result['disk_usage'], otdb_id)
 
-        if du_result.get('path') == self.disk_usage.path_resolver.projects_path:
-            self._updateProjectsDiskUsageInRADB()
-
     def _invalidateCacheEntryForPath(self, path):
         with self._cacheLock:
             path_cache = self._cache['path_du_results']
@@ -173,7 +170,7 @@ class CacheManager:
 
         result = {}
         for otdb_id in otdb_ids:
-            result[otdb_id] = self.getDiskUsageForOTDBId(otdb_id, force_update)
+            result[otdb_id] = self.getDiskUsageForOTDBId(otdb_id, force_update=force_update)
 
         return result
 
@@ -256,7 +253,7 @@ class CacheManager:
                         try:
                             path = cache_entry.get('path')
                             if path:
-                                logger.info('_updateOldEntriesInCache: examining entry %s/%s. timestamp:%s age:%s needs_update:%s path: %s',
+                                logger.info('_updateOldEntriesInCache: examining entry %s/%s. timestamp:%s age:%s needs_update:%s path: \'%s\'',
                                             i,
                                             len(updateable_entries),
                                             cache_entry['cache_timestamp'],
@@ -278,6 +275,9 @@ class CacheManager:
                             logger.info('skipping remaining %s old cache entries updates, they will be updated next time', len(updateable_entries)-i)
                             break
 
+                #update the CEP4 capacities in the RADB on each pass of this loop (roughly once per minute)
+                self._updateCEP4CapacitiesInRADB()
+
                 #sleep for a minute, (or stop if requested)
                 for i in range(60):
                     sleep(1)
@@ -287,28 +287,31 @@ class CacheManager:
             except Exception as e:
                 logger.error(str(e))
 
-    def _updateProjectsDiskUsageInRADB(self):
+    def _updateCEP4CapacitiesInRADB(self):
         try:
-            projects_du_result = self.getDiskUsageForPath(self.disk_usage.path_resolver.projects_path)
-            if projects_du_result['found']:
+            df_result = self.disk_usage.getDiskFreeSpace()
+            if df_result['found']:
                 #get the total used space, and update the resource availability in the radb
                 radbrpc = self.disk_usage.path_resolver.radbrpc
                 storage_resources = radbrpc.getResources(resource_types='storage', include_availability=True)
                 cep4_storage_resource = next(x for x in storage_resources if 'CEP4' in x['name'])
 
-                total_capacity = cep4_storage_resource.get('total_capacity')
-                used_capacity = projects_du_result.get('disk_usage')
-                if total_capacity != None and used_capacity != None:
-                    available_capacity = total_capacity - used_capacity
+                total_capacity = df_result.get('disk_size')
+                used_capacity = df_result.get('disk_usage')
+                available_capacity = df_result.get('disk_free')
 
-                    logger.info('updating availability capacity for %s (id=%s) to %s in the RADB',
-                                cep4_storage_resource['name'],
-                                cep4_storage_resource['id'],
-                                humanreadablesize(available_capacity))
+                logger.info('updating capacities for resource \'%s\' (id=%s) in the RADB: total=%s, used=%s, available=%s',
+                            cep4_storage_resource['name'],
+                            cep4_storage_resource['id'],
+                            humanreadablesize(total_capacity),
+                            humanreadablesize(used_capacity),
+                            humanreadablesize(available_capacity))
 
-                    radbrpc.updateResourceAvailability(cep4_storage_resource['id'], available_capacity=available_capacity)
+                radbrpc.updateResourceAvailability(cep4_storage_resource['id'],
+                                                   available_capacity=available_capacity,
+                                                   total_capacity=total_capacity)
         except Exception as e:
-            logger.error(e)
+            logger.error('_updateCEP4CapacitiesInRADB: %s', e)
 
     def open(self):
         self.disk_usage.open()
@@ -438,7 +441,7 @@ class CacheManager:
         return result
 
     def getDiskUsageForPath(self, path, force_update=False):
-        logger.info("cache.getDiskUsageForPath(%s, force_update=%s)", path, force_update)
+        logger.info("cache.getDiskUsageForPath('%s', force_update=%s)", path, force_update)
         needs_cache_update = False
         with self._cacheLock:
             needs_cache_update |= path not in self._cache['path_du_results']
@@ -457,7 +460,7 @@ class CacheManager:
                     result['message'] = 'No such path: %s' % path
 
         result['disk_usage_readable'] = humanreadablesize(result.get('disk_usage', 0))
-        logger.info('cache.getDiskUsageForPath result: %s' % result)
+        logger.info('cache.getDiskUsageForPath(\'%s\') result: %s', path, result)
         return result
 
     def getDiskUsageForTaskAndSubDirectories(self, radb_id=None, mom_id=None, otdb_id=None, force_update=False):
diff --git a/SAS/DataManagement/StorageQueryService/diskusage.py b/SAS/DataManagement/StorageQueryService/diskusage.py
index d5a0063f588dacfe73d7d6695a26b27d30581694..67a96da340ce7dc21ccf8ff24d68f5cbf7526e49 100644
--- a/SAS/DataManagement/StorageQueryService/diskusage.py
+++ b/SAS/DataManagement/StorageQueryService/diskusage.py
@@ -17,7 +17,10 @@ from lofar.mom.momqueryservice.config import DEFAULT_MOMQUERY_BUSNAME, DEFAULT_M
 logger = logging.getLogger(__name__)
 
 def getDiskUsageForPath(path):
-    logger.info('getDiskUsageForPath(%s)', path)
+    logger.info('getDiskUsageForPath(\'%s\')', path)
+
+    result = {'found': False, 'path': path, 'disk_usage': None, 'name': path.split('/')[-1] }
+
     cmd = ['rbh-du', '-bd', path]
     hostname = socket.gethostname()
     if not 'mgmt0' in hostname:
@@ -27,51 +30,91 @@ def getDiskUsageForPath(path):
     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     out, err = proc.communicate()
 
+    if proc.returncode == 0:
+        # example of out
+        # Using config file '/etc/robinhood.d/tmpfs/tmp_fs_mgr_basic.conf'.
+        # /data/projects/2016LOFAROBS/L522380
+        # dir count:3906, size:16048128, spc_used:16052224
+        # file count:17568, size:42274164368, spc_used:42327519232
+
+        #parse out
+        lines = [l.strip() for l in out.split('\n')]
+        file_lines = [l for l in lines if 'file count' in l]
+        if file_lines:
+            parts = [p.strip() for p in file_lines[0].split(',')]
+            partsDict = {p.split(':')[0].strip():p.split(':')[1].strip() for p in parts}
+
+            result['found'] = True
+
+            if 'size' in partsDict:
+                result['disk_usage'] = int(partsDict['size'])
+
+            if 'file count' in partsDict:
+                result['nr_of_files'] = int(partsDict['file count'])
+        else:
+            dir_lines = [l for l in lines if 'dir count' in l]
+            if dir_lines:
+                result['found'] = True
+                result['disk_usage'] = 0
+                result['nr_of_files'] = 0
+
+        try:
+            path_items = path.rstrip('/').split('/')
+            if len(path_items) >=3 and path_items[-1].startswith('L') and path_items[-1][1:].isdigit() and 'projects' in path_items[-3]:
+                logger.info('found path for otdb_id %s %s', path_items[-1][1:], path)
+                result['otdb_id'] = int(path_items[-1][1:])
+        except Exception as e:
+            logger.error('Could not parse otdb_id from path %s %s', path, e)
+    else:
+        logger.error(out + err)
+        result['message'] = out
+
+    result['disk_usage_readable'] = humanreadablesize(result['disk_usage'] or 0)
+
+    logger.info('getDiskUsageForPath(\'%s\') returning: %s', path, result)
+    return result
+
+def getDiskFreeSpaceForMountpoint(mountpoint=CEP4_DATA_MOUNTPOINT):
+    logger.info('getDiskFreeSpaceForMountpoint(\'%s\')', mountpoint)
+
+    result = {'found': False, 'mountpoint': mountpoint }
+
+    cmd = ['df', mountpoint]
+    hostname = socket.gethostname()
+    if not 'mgmt0' in hostname:
+        cmd = ['ssh', 'lofarsys@mgmt01.cep4.control.lofar'] + cmd
+    logger.info(' '.join(cmd) + ' ...waiting for result...')
+
+    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    out, err = proc.communicate()
+
     if proc.returncode != 0:
         logger.error(out + err)
-        return {'found': False, 'path': path, 'message': out}
+        result['message'] = out
+        return result
 
     # example of out
-    # Using config file '/etc/robinhood.d/tmpfs/tmp_fs_mgr_basic.conf'.
-    # /data/projects/2016LOFAROBS/L522380
-    # dir count:3906, size:16048128, spc_used:16052224
-    # file count:17568, size:42274164368, spc_used:42327519232
+    # Filesystem                                         1K-blocks          Used     Available Use% Mounted on
+    # 10.134.233.65@o2ib:10.134.233.66@o2ib:/cep4-fs 3369564904320 1460036416928 1737591103048  46% /data
 
     #parse out
     lines = [l.strip() for l in out.split('\n')]
-    file_lines = [l for l in lines if 'file count' in l]
-    if file_lines:
-        parts = [p.strip() for p in file_lines[0].split(',')]
-        partsDict = {p.split(':')[0].strip():p.split(':')[1].strip() for p in parts}
+    data_line = next((l for l in lines if mountpoint in l), None)
+    if data_line:
+        parts = data_line.split()
 
-        result = {'found': True, 'disk_usage': None, 'path': path, 'name': path.split('/')[-1]}
-
-        if 'size' in partsDict:
-            result['disk_usage'] = int(partsDict['size'])
-
-        if 'file count' in partsDict:
-            result['nr_of_files'] = int(partsDict['file count'])
+        result['found'] = True
+        result['disk_size'] = 1024*int(parts[1])
+        result['disk_usage'] = 1024*int(parts[2])
+        result['disk_free'] = 1024*int(parts[3])
 
+        result['disk_size_readable'] = humanreadablesize(result['disk_size'])
         result['disk_usage_readable'] = humanreadablesize(result['disk_usage'])
-    else:
-        dir_lines = [l for l in lines if 'dir count' in l]
-        if dir_lines:
-            result = {'found': True, 'disk_usage': 0, 'nr_of_files': 0, 'path': path, 'name': path.split('/')[-1]}
-        else:
-            result = {'found': False, 'path': path }
+        result['disk_free_readable'] = humanreadablesize(result['disk_free'])
 
-    try:
-        path_items = path.rstrip('/').split('/')
-        if len(path_items) >=3 and path_items[-1].startswith('L') and path_items[-1][1:].isdigit() and 'projects' in path_items[-3]:
-            logger.info('found path for otdb_id %s %s', path_items[-1][1:], path)
-            result['otdb_id'] = int(path_items[-1][1:])
-    except Exception as e:
-        logger.error('Could not parse otdb_id from path %s %s', path, e)
-
-    logger.info('returning: %s' % result)
+    logger.info('getDiskFreeSpaceForMountpoint(\'%s\') returning: %s', mountpoint, result)
     return result
 
-
 class DiskUsage:
     def __init__(self,
                  mountpoint=CEP4_DATA_MOUNTPOINT,
@@ -149,6 +192,9 @@ class DiskUsage:
 
         return path_result
 
+    def getDiskFreeSpace(self):
+        return getDiskFreeSpaceForMountpoint(self.path_resolver.mountpoint)
+
 def main():
     # Check the invocation arguments
     parser = OptionParser("%prog [options] <path>",
diff --git a/SAS/MoM/MoMQueryService/MoMQueryServiceClient/momqueryrpc.py b/SAS/MoM/MoMQueryService/MoMQueryServiceClient/momqueryrpc.py
index 98ba9594d53337acd4f4e99913c57ab632b3efe0..6f09ea5ee587d12618b900fff0ed728261c8f2af 100644
--- a/SAS/MoM/MoMQueryService/MoMQueryServiceClient/momqueryrpc.py
+++ b/SAS/MoM/MoMQueryService/MoMQueryServiceClient/momqueryrpc.py
@@ -153,7 +153,7 @@ class MoMQueryRPC(RPCWrapper):
         ids = [str(x) for x in ids]
         ids_string = ', '.join(ids)
 
-        logger.info("Requesting details for mom objects: %s", (str(ids_string)))
+        logger.info("Requesting details for %s mom objects", len(ids))
         result = self.rpc('GetObjectDetails', mom_ids=ids_string)
         result = convertStringDigitKeysToInt(result)
         logger.info("Received details for %s mom objects" % (len(result)))
diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/assignment.py b/SAS/ResourceAssignment/ResourceAssigner/lib/assignment.py
index 80475ecd80b6da65e85cab232579439e76df18fa..c8a7f6c205f97f43ea44bb40c2dadddb6ac6189a 100755
--- a/SAS/ResourceAssignment/ResourceAssigner/lib/assignment.py
+++ b/SAS/ResourceAssignment/ResourceAssigner/lib/assignment.py
@@ -354,8 +354,43 @@ class ResourceAssigner():
                 conflictingClaims = self.radbrpc.getResourceClaims(task_ids=taskId, status='conflict')
                 if conflictingClaims:
                     # Will do _announceStateChange(task, 'conflict'), and radb sets task status to conflict automatically
-                    logger.error('doAssignment: Task cannot be scheduled, because of %d conflicting claims: %s' %
-                                 (len(conflictingClaims), conflictingClaims))
+                    logger.error('doAssignment: Task cannot be scheduled, because of %d conflicting claims: %s',
+                                 len(conflictingClaims), conflictingClaims)
+
+                    try:
+                        for conflictingClaim in conflictingClaims:
+                            #gather some info on this resource, and log the reasons why this claim has a conflict with others
+                            resource_id = conflictingClaim['resource_id']
+                            resource = self.radbrpc.getResources(resource_ids=[resource_id],
+                                                                include_availability=True)[0]
+                            max_resource_usage = self.radbrpc.get_max_resource_usage_between(resource_id,
+                                                                                            conflictingClaim['starttime'],
+                                                                                            conflictingClaim['endtime'])
+                            claimable_capacity = self.radbrpc.get_resource_claimable_capacity(resource_id,
+                                                                                            conflictingClaim['starttime'],
+                                                                                            conflictingClaim['endtime'])['claimable_capacity']
+                            conflictingCausingClaims = self.radbrpc.get_conflicting_overlapping_claims(conflictingClaim['id'])
+                            conflictingCausingTasks = self.radbrpc.getTasks(task_ids=list(set(c['task_id'] for c in conflictingCausingClaims)))
+
+                            logger.info("doAssignment: Task cannot be scheduled: claim with id=%s, size=%s, start='%s', end='%s' on resource id=%s, name='%s', total_cap=%s, used(claimed)_cap=%s, misc_used_cap=%s, available_cap=%s, claimable_cap=%s has a conflict with %s other claims with max_resource_usage=%s at '%s' on the same resource for otdb_ids=%s",
+                                        conflictingClaim['id'],
+                                        conflictingClaim['claim_size'],
+                                        conflictingClaim['starttime'],
+                                        conflictingClaim['endtime'],
+                                        resource['id'],
+                                        resource['name'],
+                                        resource['total_capacity'],
+                                        resource['used_capacity'],
+                                        resource['misc_used_capacity'],
+                                        resource['available_capacity'],
+                                        claimable_capacity,
+                                        len(conflictingCausingClaims),
+                                        max_resource_usage['usage'],
+                                        max_resource_usage['as_of_timestamp'],
+                                        ','.join(str(t['otdb_id']) for t in conflictingCausingTasks))
+                    except Exception as e:
+                        logger.error(e)
+
                     self._announceStateChange(task, 'conflict')
                     return
 
diff --git a/SAS/ResourceAssignment/ResourceAssignmentDatabase/radb.py b/SAS/ResourceAssignment/ResourceAssignmentDatabase/radb.py
index 827ed6879081928da8f83adc13ad45db7a8e46bb..509411a84ba1da8cddf3dc1629e6405c1dbcc3f1 100644
--- a/SAS/ResourceAssignment/ResourceAssignmentDatabase/radb.py
+++ b/SAS/ResourceAssignment/ResourceAssignmentDatabase/radb.py
@@ -1441,13 +1441,13 @@ class RADatabase:
         return self.getTasks(task_ids=task_ids)
 
     def get_max_resource_usage_between(self, resource_id, lower_bound, upper_bound, claim_status='claimed'):
-        result = {'usage': 0, 'status_id': claim_status_id, 'as_of_timestamp': lower_bound, 'resource_id': resource_id}
-
         if isinstance(claim_status, basestring):
             claim_status_id = self.getResourceClaimStatusId(claim_status)
         else:
             claim_status_id = claim_status
 
+        result = {'usage': 0, 'status_id': claim_status_id, 'as_of_timestamp': lower_bound, 'resource_id': resource_id}
+
         query = '''SELECT * from resource_allocation.get_max_resource_usage_between(%s, %s, %s, %s)'''
         qresult = self._executeQuery(query, (resource_id, claim_status_id, lower_bound, upper_bound), fetch=_FETCH_ONE)
 
diff --git a/SAS/ResourceAssignment/ResourceAssignmentDatabase/radb/sql/add_functions_and_triggers.sql b/SAS/ResourceAssignment/ResourceAssignmentDatabase/radb/sql/add_functions_and_triggers.sql
index e8389aeea5d35fe4362529e576e6c3dacf847f90..84897459e07aa280cd746276e0100782159bf37a 100644
--- a/SAS/ResourceAssignment/ResourceAssignmentDatabase/radb/sql/add_functions_and_triggers.sql
+++ b/SAS/ResourceAssignment/ResourceAssignmentDatabase/radb/sql/add_functions_and_triggers.sql
@@ -335,13 +335,25 @@ CREATE OR REPLACE FUNCTION resource_allocation.rebuild_resource_usages_table_fro
 $$
 DECLARE
     claim resource_allocation.resource_claim;
+    cntr bigint;
+    num_rows bigint;
+    step bigint;
 BEGIN
     LOCK TABLE resource_allocation.resource_claim IN EXCLUSIVE MODE;
     LOCK TABLE resource_allocation.resource_usage IN EXCLUSIVE MODE;
     TRUNCATE TABLE resource_allocation.resource_usage RESTART IDENTITY CASCADE;
 
-    FOR claim IN SELECT * FROM resource_allocation.resource_claim LOOP
+    SELECT COUNT(id) FROM resource_allocation.resource_claim INTO num_rows;
+    cntr := 0;
+    step := GREATEST(num_rows/100, 1); -- avoid modulo-by-zero when there are fewer than 100 claims
+
+    FOR claim IN (SELECT * FROM resource_allocation.resource_claim ORDER BY resource_id, starttime, endtime) LOOP
         PERFORM resource_allocation.process_new_claim_into_resource_usages(claim);
+
+        cntr := cntr+1;
+        IF cntr%step=0 THEN
+            RAISE NOTICE 'rebuild_resource_usages_table_from_claims... %%%', ROUND(100.0*cntr/num_rows,1);
+        END IF;
     END LOOP;
 END;
 $$ LANGUAGE plpgsql;
diff --git a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/cleanupcontroller.js b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/cleanupcontroller.js
index 5ccd61567093a89b30b436d434a4f1c524be721c..e973db2bd649e778de1b06d5b526ebe0ad28e213 100644
--- a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/cleanupcontroller.js
+++ b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/cleanupcontroller.js
@@ -382,14 +382,6 @@ cleanupControllerMod.controller('CleanupController', ['$scope', '$uibModal', '$m
                     loading: false
                 }
 
-                $scope.totalDiskUsageChartSeries = [];
-
-                var cep4storage_resource = dataService.resources.find(function(r) { return r.name == 'CEP4_storage:/data'; });
-                if(cep4storage_resource) {
-                    $scope.totalDiskUsageChartSeries = [{name:'Free', data:[100.0*cep4storage_resource.available_capacity/cep4storage_resource.total_capacity], color:'#a3f75c'},
-                                                        {name:'Used', data:[100.0*cep4storage_resource.used_capacity/cep4storage_resource.total_capacity], color:'#f45b5b'}];
-                }
-
                 $scope.totalDiskUsageChartConfig = {
                     options: {
                         chart: {
@@ -437,7 +429,7 @@ cleanupControllerMod.controller('CleanupController', ['$scope', '$uibModal', '$m
                             pointFormat: '{point.name}: <b>{point.percentage:.1f}%</b>'
                         },
                     },
-                    series: $scope.totalDiskUsageChartSeries,
+                    series: [],
                     title: {
                         text: 'CEP4 total disk usage'
                     },
@@ -447,6 +439,27 @@ cleanupControllerMod.controller('CleanupController', ['$scope', '$uibModal', '$m
                     loading: false
                 }
 
+                var cep4storage_resource = dataService.resources.find(function(r) { return r.name == 'CEP4_storage:/data'; });
+                if(cep4storage_resource) {
+                    dataService.getProjectsDiskUsage().then(function(result) {
+                        if(result.found) {
+                            var projects_du = result.projectdir.disk_usage;
+                            var misc_du = cep4storage_resource.used_capacity - projects_du;
+                            var total_cap = cep4storage_resource.total_capacity;
+
+                            $scope.totalDiskUsageChartConfig.series = [{name:'Free ' + dataService.humanreadablesize(cep4storage_resource.available_capacity, 1) + 'B',
+                                                                        data:[100.0*cep4storage_resource.available_capacity/total_cap],
+                                                                        color:'#66ff66'},
+                                                                       {name:'Misc ' + dataService.humanreadablesize(misc_du, 1) + 'B',
+                                                                        data:[100.0*misc_du/total_cap],
+                                                                        color:'#aaaaff'},
+                                                                       {name:'Projects ' + dataService.humanreadablesize(projects_du, 1) + 'B',
+                                                                        data:[100.0*projects_du/total_cap],
+                                                                        color:'#ff6666'}];
+                        }
+                    });
+                }
+
                 var loadTaskDiskUsage = function(otdb_id, force) {
                     $scope.current_otdb_id = otdb_id;
                     $scope.current_project_name = undefined;
diff --git a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/datacontroller.js b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/datacontroller.js
index 672853acfe81bdb301ae382df6038ebd56218fb9..88fee8beb924f412664c11aff40efb4e27068ae6 100644
--- a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/datacontroller.js
+++ b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/datacontroller.js
@@ -25,6 +25,7 @@ angular.module('raeApp').factory("dataService", ['$http', '$q', function($http,
     self.resourcesWithClaims = [];
 
     self.filteredTasks = [];
+    self.filteredTasksDict = {};
 
     self.taskTimes = {};
     self.resourceClaimTimes = {};
@@ -60,15 +61,15 @@ angular.module('raeApp').factory("dataService", ['$http', '$q', function($http,
     //loadResourceClaims is enabled when any controller using resourceclaims is enabled
     self.loadResourceClaims = false;
 
-    self.humanreadablesize = function(num) {
+    self.humanreadablesize = function(num, num_digits=3) {
         var units = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'];
         for(unit of units) {
             if(Math.abs(num) < 1000.0) {
-                return num.toPrecision(4).toString() + unit;
+                return num.toFixed(num_digits).toString() + unit;
             }
             num /= 1000.0;
         }
-        return num.toPrecision(5).toString() + 'Y';
+        return num.toFixed(num_digits).toString() + 'Y';
     }
 
     self.isTaskIdSelected = function(task_id) {
@@ -258,6 +259,11 @@ angular.module('raeApp').factory("dataService", ['$http', '$q', function($http,
         for(var i = 0; i < Math.min(3, self.loadingChunksQueue.length); i++) {
             setTimeout(self.loadNextChunk, i*250);
         }
+
+        //load the usages as well, if needed.
+        if (self.loadResourceClaims) {
+            self.getUsagesForSelectedResource();
+        }
     };
 
     self.loadNextChunk = function() {
@@ -636,7 +642,10 @@ angular.module('raeApp').factory("dataService", ['$http', '$q', function($http,
     self.getResources = function() {
         var defer = $q.defer();
         $http.get('/rest/resources').success(function(result) {
-            self.resources = result.resources;
+            //at this moment, we have way too many resources to show in a gantt-tree.
+            //this make the webscheduler way too slow.
+            //so, only show the relevant resources, 116 and 117 which are CEP4 bandwith and storage.
+            self.resources = result.resources.filter(function(r) { return r.id==116 ||  r.id==117;});
             self.resourceDict = self.toIdBasedDict(self.resources);
 
             defer.resolve();
@@ -757,7 +766,10 @@ angular.module('raeApp').factory("dataService", ['$http', '$q', function($http,
     self.getResourceGroups = function() {
         var defer = $q.defer();
         $http.get('/rest/resourcegroups').success(function(result) {
-            self.resourceGroups = result.resourcegroups;
+            //at this moment, we have way too many resources to show in a gantt-tree.
+            //this make the webscheduler way too slow.
+            //so, only show the relevant resource groups, 1, which is the CEP4 group.
+            self.resourceGroups = result.resourcegroups.filter(function(r) { return r.id==1;});
             self.resourceGroupsDict = self.toIdBasedDict(self.resourceGroups);
 
             defer.resolve();
@@ -1329,7 +1341,10 @@ dataControllerMod.controller('DataController',
         });
     }, true);
 
-    $scope.$watch('dataService.filteredTaskChangeCntr', dataService.computeMinMaxTaskTimes);
+    $scope.$watch('dataService.filteredTaskChangeCntr', function() {
+        dataService.computeMinMaxTaskTimes();
+        dataService.filteredTasksDict = dataService.toIdBasedDict(dataService.filteredTasks);
+    });
 
     $scope.$watch('dataService.lofarTime', function() {
         if(dataService.autoFollowNow && (Math.round(dataService.lofarTime.getTime()/1000))%5==0) {
diff --git a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/ganttresourcecontroller.js b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/ganttresourcecontroller.js
index 6df74997511eed35e938da801069f054ce7281fe..37006404dcb1c7441c09ff1df3eb99d629afd3de 100644
--- a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/ganttresourcecontroller.js
+++ b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/ganttresourcecontroller.js
@@ -153,7 +153,7 @@ ganttResourceControllerMod.controller('GanttResourceController', ['$scope', 'dat
 
         var resourceGroupMemberships = $scope.dataService.resourceGroupMemberships;
 
-        var taskDict = $scope.dataService.taskDict;
+        var tasksDict = $scope.dataService.filteredTasksDict;
         var numTasks = $scope.dataService.filteredTasks.length;
 
         var resourceClaimDict = $scope.dataService.resourceClaimDict;
@@ -316,7 +316,7 @@ ganttResourceControllerMod.controller('GanttResourceController', ['$scope', 'dat
             //and finally assign each resourceclaim to its resource in each group
             for(var claim of resourceClaims) {
                 var resourceId = claim.resource_id;
-                var task = taskDict[claim.task_id];
+                var task = tasksDict[claim.task_id];
 
                 if(!task) {
                     continue;
@@ -368,24 +368,26 @@ ganttResourceControllerMod.controller('GanttResourceController', ['$scope', 'dat
                     if(claims) {
                         for(var claim of claims) {
                             var taskId = claim.task_id;
-                            var task = taskDict[taskId];
-                            if(taskId in aggregatedClaims) {
-                                if(claim.starttime < aggregatedClaims[taskId].starttime) {
-                                    aggregatedClaims[taskId].starttime = claim.starttime.getTime() > task.starttime.getTime() ? claim.starttime : task.starttime;
-                                }
-                                if(claim.endtime > aggregatedClaims[taskId].endtime) {
-                                    aggregatedClaims[taskId].endtime = claim.endtime.getTime() < task.endtime.getTime() ? claim.endtime: task.endtime;
-                                }
-                                if(claim.status == 'conflict') {
-                                    aggregatedClaims[taskId].status = 'conflict';
-                                } else if(claim.status != aggregatedClaims[taskId].status && aggregatedClaims[taskId].status != 'conflict') {
-                                    aggregatedClaims[taskId].status = 'mixed';
+                            var task = tasksDict[taskId];
+                            if(task) {
+                                if(taskId in aggregatedClaims) {
+                                    if(claim.starttime < aggregatedClaims[taskId].starttime) {
+                                        aggregatedClaims[taskId].starttime = claim.starttime.getTime() > task.starttime.getTime() ? claim.starttime : task.starttime;
+                                    }
+                                    if(claim.endtime > aggregatedClaims[taskId].endtime) {
+                                        aggregatedClaims[taskId].endtime = claim.endtime.getTime() < task.endtime.getTime() ? claim.endtime: task.endtime;
+                                    }
+                                    if(claim.status == 'conflict') {
+                                        aggregatedClaims[taskId].status = 'conflict';
+                                    } else if(claim.status != aggregatedClaims[taskId].status && aggregatedClaims[taskId].status != 'conflict') {
+                                        aggregatedClaims[taskId].status = 'mixed';
+                                    }
+                                } else {
+                                    aggregatedClaims[taskId] = { starttime: claim.starttime,
+                                                                endtime: claim.endtime,
+                                                                status: claim.status
+                                    };
                                 }
-                            } else {
-                                aggregatedClaims[taskId] = { starttime: claim.starttime,
-                                                             endtime: claim.endtime,
-                                                             status: claim.status
-                                };
                             }
                         }
                     }
@@ -424,7 +426,7 @@ ganttResourceControllerMod.controller('GanttResourceController', ['$scope', 'dat
                     for(var ganttRow of ganttRows) {
                         for(var taskId in aggregatedClaims) {
                             var aggClaimForTask = aggregatedClaims[taskId];
-                            var task = taskDict[taskId];
+                            var task = tasksDict[taskId];
                             if(task) {
                                 var claimTask = {
                                     id: 'aggregatedClaimForTask_' + taskId + '_' + ganttRow.id,
diff --git a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/gridcontroller.js b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/gridcontroller.js
index 9661a7db76a5dfd3fc8abeca9688ebd176722d38..e0c916cbdfd29ee745e1e7295950f20e40c567e5 100644
--- a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/gridcontroller.js
+++ b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/static/app/controllers/gridcontroller.js
@@ -872,30 +872,30 @@ gridControllerMod.directive('contextMenu', ['$document', '$window', function($do
                 });
             }
 
-            var schedulable_cep4_tasks = selected_cep4_tasks.filter(function(t) { return t.status == 'approved' || t.status == 'conflict' || t.status == 'error'; });
+            var schedulable_tasks = selected_tasks.filter(function(t) { return t.status == 'approved' || t.status == 'conflict' || t.status == 'error'; });
 
-            if(schedulable_cep4_tasks.length > 0) {
-                var liContent = '<li><a href="#">Schedule CEP4 task(s)</a></li>'
+            if(schedulable_tasks.length > 0) {
+                var liContent = '<li><a href="#">(Re)schedule task(s)</a></li>'
                 var liElement = angular.element(liContent);
                 ulElement.append(liElement);
                 liElement.on('click', function() {
                     closeContextMenu();
-                    for(var pl of schedulable_cep4_tasks) {
+                    for(var pl of schedulable_tasks) {
                         var newTask = { id: pl.id, status: 'prescheduled' };
                         dataService.putTask(newTask);
                     }
                 });
             }
 
-            var unschedulable_selected_cep4_tasks = selected_cep4_tasks.filter(function(t) { return t.status == 'prescheduled' || t.status == 'scheduled' || t.status == 'queued' || t.status == 'error'; });
+            var unschedulable_selected_tasks = selected_tasks.filter(function(t) { return t.status == 'prescheduled' || t.status == 'scheduled' || t.status == 'queued' || t.status == 'error' || t.status == 'conflict'; });
 
-            if(unschedulable_selected_cep4_tasks.length > 0) {
-                var liContent = '<li><a href="#">Unschedule (pre)scheduled/queued/error tasks</a></li>'
+            if(unschedulable_selected_tasks.length > 0) {
+                var liContent = '<li><a href="#">Unschedule (pre)scheduled/queued/error/conflict tasks</a></li>'
                 var liElement = angular.element(liContent);
                 ulElement.append(liElement);
                 liElement.on('click', function() {
                     closeContextMenu();
-                    for(var pl of unschedulable_selected_cep4_tasks) {
+                    for(var pl of unschedulable_selected_tasks) {
                         if(pl.status == 'queued') {
                             var newTask = { id: pl.id, status: 'aborted' };
                             dataService.putTask(newTask);
diff --git a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/webservice.py b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/webservice.py
index 672bacc4bb82d61850775b2200e9677f423f8c3e..4dfb8c8341834face23c1b729560696a2ab47535 100755
--- a/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/webservice.py
+++ b/SAS/ResourceAssignment/ResourceAssignmentEditor/lib/webservice.py
@@ -215,6 +215,32 @@ def resource(resource_id):
         return jsonify(result[0])
     return jsonify({})
 
+@app.route('/rest/resources/<int:resource_id>/resourceclaims')
+@gzipped
+def resourceclaimsForResource(resource_id):
+    return resourceclaimsForResourceFromUntil(resource_id, None, None)
+
+@app.route('/rest/resources/<int:resource_id>/resourceclaims/<string:fromTimestamp>')
+@gzipped
+def resourceclaimsForResourceFrom(resource_id, fromTimestamp=None):
+    return resourceclaimsForResourceFromUntil(resource_id, fromTimestamp, None)
+
+@app.route('/rest/resources/<int:resource_id>/resourceclaims/<string:fromTimestamp>/<string:untilTimestamp>')
+@gzipped
+def resourceclaimsForResourceFromUntil(resource_id, fromTimestamp=None, untilTimestamp=None):
+    if fromTimestamp and isinstance(fromTimestamp, basestring):
+        fromTimestamp = asDatetime(fromTimestamp)
+
+    if untilTimestamp and isinstance(untilTimestamp, basestring):
+        untilTimestamp = asDatetime(untilTimestamp)
+
+    claims = radb().getResourceClaims(lower_bound=fromTimestamp,
+                                      upper_bound=untilTimestamp,
+                                      resource_ids=[resource_id],
+                                      extended=False,
+                                      include_properties=True)
+    return jsonify({'resourceclaims': claims})
+
 @app.route('/rest/resourcegroups')
 @gzipped
 def resourcegroups():
@@ -626,7 +652,7 @@ def getParsetByOTDBId(otdb_id):
 @app.route('/rest/tasks/<int:task_id>/resourceclaims')
 @gzipped
 def taskResourceClaims(task_id):
-    return jsonify({'taskResourceClaims': radb().getResourceClaims(task_id=task_id, include_properties=True)})
+    return jsonify({'taskResourceClaims': radb().getResourceClaims(task_ids=[task_id], include_properties=True)})
 
 @app.route('/rest/tasktypes')
 @gzipped
diff --git a/SAS/ResourceAssignment/ResourceAssignmentService/rpc.py b/SAS/ResourceAssignment/ResourceAssignmentService/rpc.py
index 7e53e6011166bb3a3f87d6f1f0442f440c4ad1af..a4ac1ff7b780a6660109f30c42090f80abade043 100644
--- a/SAS/ResourceAssignment/ResourceAssignmentService/rpc.py
+++ b/SAS/ResourceAssignment/ResourceAssignmentService/rpc.py
@@ -304,17 +304,19 @@ class RARPC(RPCWrapper):
         return self.rpc('get_conflicting_overlapping_claims',
                         claim_id=claim_id)
 
-    def get_conflicting_overlapping_tasks(self, claim_ids):
+    def get_conflicting_overlapping_tasks(self, claim_id):
         '''returns a list of tasks which overlap with given claim(s) and which prevent the given claim(s) to be claimed (cause it to be in conflict)'''
         return self.rpc('get_conflicting_overlapping_tasks',
-                        claim_ids=claim_ids)
+                        claim_id=claim_id)
 
     def get_max_resource_usage_between(self, resource_id, lower_bound, upper_bound, claim_status='claimed'):
-        return self.rpc('get_max_resource_usage_between',
-                        resource_id=resource_id,
-                        lower_bound=lower_bound,
-                        upper_bound=upper_bound,
-                        claim_status=claim_status)
+        result = self.rpc('get_max_resource_usage_between',
+                          resource_id=resource_id,
+                          lower_bound=lower_bound,
+                          upper_bound=upper_bound,
+                          claim_status=claim_status)
+        result['as_of_timestamp'] = result['as_of_timestamp'].datetime()
+        return result
 
     def get_resource_claimable_capacity(self, resource_id, lower_bound, upper_bound):
         '''get the claimable capacity for the given resource within the timewindow given by lower_bound and upper_bound.
diff --git a/SAS/ResourceAssignment/ResourceAssignmentService/service.py b/SAS/ResourceAssignment/ResourceAssignmentService/service.py
index 94bef62b4137bb6fadee6506f6fe99a4dd5cff8a..3209e9bf39edaed028b76b31ffb7d17ba9a19e18 100644
--- a/SAS/ResourceAssignment/ResourceAssignmentService/service.py
+++ b/SAS/ResourceAssignment/ResourceAssignmentService/service.py
@@ -388,7 +388,7 @@ class RADBHandler(MessageHandlerInterface):
         return self.radb.get_conflicting_overlapping_claims(claim_id=kwargs.get('claim_id'))
 
     def _get_conflicting_overlapping_tasks(self, **kwargs):
-        return self.radb.get_conflicting_overlapping_tasks(claim_ids=kwargs.get('claim_ids'))
+        return self.radb.get_conflicting_overlapping_tasks(claim_id=kwargs.get('claim_id'))
 
     def _get_max_resource_usage_between(self, **kwargs):
         logger.info('get_max_resource_usage_between: %s' % kwargs)