diff --git a/devices/statistics_writer/README.md b/devices/statistics_writer/README.md
index ea722c6cf552443364b196264034d768690955be..62e940bacb6512eb702cc4fdd816b8ba61153958 100644
--- a/devices/statistics_writer/README.md
+++ b/devices/statistics_writer/README.md
@@ -12,10 +12,11 @@ and writing those matrices (as well as a bunch of metadata) to hdf5.
 The TCP statistics writer can be called with the `tcp_hdf5_writer.py` script.
 This script can be called with the following arguments:
   ```
-  --address     the address to connect to
+  --host        the address to connect to
   --port        the port to use
-  --interval    The time between creating new files in hours
+  --file        file to read from (as opposed to host and port)
+  --interval    The time between creating new files in seconds
-  --location    specifies the folder to write all the files
+  --output_dir  specifies the folder to write all the files
   --mode        sets the statistics type to be decoded options: "SST", "XST", "BST"
   --debug       takes no arguments, when used prints a lot of extra data to help with debugging
   ```
diff --git a/devices/statistics_writer/hdf5_writer.py b/devices/statistics_writer/hdf5_writer.py
index 3a59566219ef4b1a32ce20e0baedf0e3fa8128d3..8d89258fe59795fc1f47d11127c93229a7fa1690 100644
--- a/devices/statistics_writer/hdf5_writer.py
+++ b/devices/statistics_writer/hdf5_writer.py
@@ -6,7 +6,6 @@ import pytz
 import h5py
 
 import numpy
-import json
 import logging
 
 # import statistics classes with workaround
@@ -23,10 +22,14 @@ __all__ = ["hdf5_writer"]
 
 class hdf5_writer:
 
+    SST_MODE = "SST"
+    XST_MODE = "XST"
+    BST_MODE = "BST"
+
 
     def __init__(self, new_file_time_interval, file_location, statistics_mode):
 
-        # all variables that deal with the SST matrix that's currently being decoded
+        # all variables that deal with the matrix that's currently being decoded
         self.current_matrix = None
         self.current_timestamp = datetime.min.replace(tzinfo=pytz.UTC)
 
@@ -36,13 +39,14 @@ class hdf5_writer:
 
         # file handing
         self.file_location = file_location
-        self.new_file_time_interval = timedelta(hours=new_file_time_interval)
+        self.new_file_time_interval = timedelta(seconds=new_file_time_interval)
         self.last_file_time = datetime.min.replace(tzinfo=pytz.UTC)
         self.file = None
 
-        # config the writer for the correct statistics type
-        self.collector = None
+        # parameters that are configured depending on the mode the statistics writer is in (SST, XST, BST)
         self.decoder = None
+        self.collector = None
+        self.store_function = None
         self.mode = statistics_mode.upper()
         self.config_mode()
 
@@ -117,8 +121,8 @@ class hdf5_writer:
         # create the new hdf5 group based on the timestamp of packets
         current_group = self.file.create_group("{}_{}".format(self.mode, self.current_timestamp.strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3]))
 
-        # store the statistics values
-        current_group.create_dataset(name=f"{self.mode}_values", data=self.current_matrix.parameters["sst_values"])
+        # store the statistics values for the current group
+        self.store_function(current_group)
 
         # might be optional, but they're easy to add.
         current_group.create_dataset(name="nof_payload_errors", data=self.current_matrix.parameters["nof_payload_errors"])
@@ -138,6 +142,17 @@ class hdf5_writer:
             else:
                 current_group.attrs[k] = v
 
+    def write_sst_matrix(self, current_group):
+        # store the SST values
+        current_group.create_dataset(name="sst_values", data=self.current_matrix.parameters["sst_values"])
+
+    def write_xst_matrix(self, current_group):
+        # requires a function call to transform the xst_blocks into the right structure
+        current_group.create_dataset(name="xst_values", data=self.current_matrix.xst_values())
+
+    def write_bst_matrix(self, current_group):
+        raise NotImplementedError("BST values not implemented")
+
 
     def process_packet(self, packet):
         logger.debug(f"Processing packet")
@@ -170,19 +185,26 @@ class hdf5_writer:
 
         """
         Configures the object for the correct statistics type to be used.
+        decoder:            the class to decode a single packet
+        collector:          the class to collect statistics packets
+        store_function:     the function to write the mode specific data to file
         """
 
-        if self.mode == 'SST':
+        if self.mode == self.SST_MODE:
             self.decoder = SSTPacket
             self.collector = statistics_collector.SSTCollector
-        elif self.mode == 'BST':
-            # self.decoder = XSTPacket
-            raise NotImplementedError("BST collector has not yet been implemented")
-        elif self.mode == 'XST':
-            # self.decoder = XSTPacket
+            self.store_function = self.write_sst_matrix
+
+        elif self.mode == self.XST_MODE:
+            self.decoder = XSTPacket
+            self.collector = statistics_collector.XSTCollector
+            self.store_function = self.write_xst_matrix
+
+        elif self.mode == self.BST_MODE:
+            self.store_function = self.write_bst_matrix
             raise NotImplementedError("BST collector has not yet been implemented")
+
         else:
-            # make sure the mode is valid
             raise ValueError("invalid statistics mode specified '{}', please use 'SST', 'XST' or 'BST' ".format(self.mode))
 
     def close_writer(self):
diff --git a/devices/statistics_writer/statistics_writer.py b/devices/statistics_writer/statistics_writer.py
index 1bf9618df5714c8af710168637ddce3d82146859..444ee2323e950a0428513cb4506d8b2b2376fc27 100644
--- a/devices/statistics_writer/statistics_writer.py
+++ b/devices/statistics_writer/statistics_writer.py
@@ -11,7 +11,7 @@ logger = logging.getLogger("statistics_writer")
 
 parser = argparse.ArgumentParser(description='Converts a stream of statistics packets into HDF5 files.')
 parser.add_argument('--host', type=str, help='the host to connect to')
-parser.add_argument('--port', type=int, default=5101, help='the port to connect to (default: %(default)s)')
+parser.add_argument('--port', type=int, default=0, help='the port to connect to, or 0 to use default port for the selected mode (default: %(default)s)')
 parser.add_argument('--file', type=str, help='the file to read from')
 
 parser.add_argument('--mode', type=str, choices=['SST', 'XST', 'BST'], default='SST', help='sets the statistics type to be decoded options (default: %(default)s)')
@@ -33,6 +33,10 @@ if __name__ == "__main__":
     mode = args.mode
     debug = args.debug
 
+    if port == 0:
+        default_ports = { "SST": 5101, "XST": 5102, "BST": 5103 }
+        port = default_ports[mode]
+
     if debug:
         logger.setLevel(logging.DEBUG)
         logger.debug("Setting loglevel to DEBUG")
diff --git a/devices/statistics_writer/test/hdf5_explorer.py b/devices/statistics_writer/test/hdf5_explorer.py
index 29cc88049086f5bea22c441d1ca12f91769c7135..102c36b79f7beeb6a34ffba9b95a495a85a76f6e 100644
--- a/devices/statistics_writer/test/hdf5_explorer.py
+++ b/devices/statistics_writer/test/hdf5_explorer.py
@@ -19,80 +19,43 @@ class statistics_data:
     the datasets in them.
     """
 
-
-    NOF_PAYLOAD_ERRORS = "nof_payload_errors"
-    NOF_VALID_PAYLOADS = "nof_valid_payloads"
-    FIRST_PACKET_HEADER = "first_packet_header"
-    STATISTICS_VALUES = "statistics_values"
-
-    def __init__(self, file, statistics_name):
-        self.nof_valid_payloads = file.get(f"{statistics_name}/{statistics_data.NOF_VALID_PAYLOADS}")
-        self.nof_payload_errors = file.get(f"{statistics_name}/{statistics_data.NOF_PAYLOAD_ERRORS}")
-        self.first_packet_header = file.get(f"{statistics_name}/{statistics_data.FIRST_PACKET_HEADER}")
-        self.statistics_values = file.get(f"{statistics_name}/{statistics_data.STATISTICS_VALUES}")
-
-
 class explorer:
     """
     This class serves both as a tool to test and verify the content of HDF5 files as well as provide an example
     of how you can go through HDF5 files.
-
-
-    The first 2 functions, print_high_level and print_full both call the hdf5 file.visititems function. this function
-    takes another function as argument and then calls that function for each and every group and dataset in the file.
-
-    The last 2 functions do this without this file.visititems function and instead have knowledge of how we structure the
-    statistics data.
     """
 
 
     def __init__(self, filename):
         self.file = h5py.File(filename, 'r')
 
-    def print_high_level(self):
-        """Calls a function that will go through all groups and datasets in the file and pass data along to another specified function"""
-        self.file.visititems(self._high_level_explorer)
-
-    def print_full(self):
-        """Calls a function that will go through all groups and datasets in the file and pass data along to another specified function"""
-        self.file.visititems(self._full_explorer)
-
-    def _full_explorer(self, name, obj):
-        """
-        Called by the file.visititems(func) function. Gets called for each and every group and dataset.
-        Prints all groups and datasets including their content.
-        """
-
-        shift = name.count('/') * '    '
-        data = self.file.get(name)
-        logger.debug(f"{shift}{name}: {data}")
-        logger.debug(numpy.array(data))
-
-    def _high_level_explorer(self, name, obj):
-        """
-        Called by the file.visititems(func) function. Gets called for each and every group and dataset.
-        Only lists the groups and datasets without the actual content.
-        """
-        shift = name.count('/') * '    '
-        data = self.file.get(name)
-        logger.debug(f"{shift}{name}: {data}")
-
     def print_all_statistics_full(self):
         """
         Explores the file with knowledge of the file structure. assumes all top level groups are statistics
         and that all statistics groups are made up of datasets.
         Prints the groups, the datasets and the content of the datasets.
-        """
 
-        # List all groups
-        logger.debug("Keys: %s" % self.file.keys())
+        Can easily be modified to store the data in whatever structure is needed, instead of just logging it.
+        """
 
         for group_key in self.file.keys():
             dataset = list(self.file[group_key])
+
+            # print the group name
+            logger.debug(f" \n\ngroup: {group_key}")
+
+            # Go through all the datasets
             for i in dataset:
                 data = self.file.get(f"{group_key}/{i}")
-                logger.debug(group_key)
-                logger.debug(numpy.array(data))
+                logger.debug(f" dataset: {i}")
+                logger.debug(f" Data: {numpy.array(data)}")
+
+            # go through all the attributes in the group (This is the header info)
+            attr_keys = self.file[group_key].attrs.keys()
+            for i in attr_keys:
+                attr = self.file[group_key].attrs[i]
+
+                logger.debug(f" {i}: {attr}")
 
     def print_all_statistics_top_level(self):
         """
@@ -108,7 +71,6 @@ class explorer:
             logger.debug(group_key)
 
 
-
 # create a data dumper that creates a new file every 10s (for testing)
 if __name__ == "__main__":
     args = parser.parse_args()
@@ -122,6 +84,7 @@ if __name__ == "__main__":
     """
     Print only the names of all the statistics in this file
     """
+    logger.debug("--------------Top level groups--------------")
     Explorer.print_all_statistics_top_level()