diff --git a/SAS/TMSS/test/CMakeLists.txt b/SAS/TMSS/test/CMakeLists.txt
index 0df3c7d58b658d2358bc94157165a40a77e8ef9f..b19ddcd546e283f0e176ecaf57711bb3b8b8f03c 100644
--- a/SAS/TMSS/test/CMakeLists.txt
+++ b/SAS/TMSS/test/CMakeLists.txt
@@ -29,7 +29,7 @@ if(BUILD_TESTING)
     lofar_add_test(t_tmssapp_specification_permissions)
     lofar_add_test(t_tmss_session_auth)
     lofar_add_test(t_subtasks)
-    lofar_add_test(t_parset_adapter)
+    lofar_add_test(t_adapter)
     lofar_add_test(t_tasks)
     lofar_add_test(t_scheduling)
 
diff --git a/SAS/TMSS/test/t_parset_adapter.py b/SAS/TMSS/test/t_adapter.py
similarity index 71%
rename from SAS/TMSS/test/t_parset_adapter.py
rename to SAS/TMSS/test/t_adapter.py
index 5d1c905240233b742f8cfab22fe7488c9e09d750..43c8b010e4a3ef0f17003b3c0e0665a016e60b4e 100755
--- a/SAS/TMSS/test/t_parset_adapter.py
+++ b/SAS/TMSS/test/t_adapter.py
@@ -41,6 +41,8 @@ rest_data_creator = TMSSRESTTestDataCreator(BASE_URL, AUTH)
 from lofar.sas.tmss.tmss.tmssapp import models
 from lofar.sas.tmss.tmss.tmssapp.adapters.parset import convert_to_parset
 from lofar.common.json_utils import get_default_json_object_for_schema
+from lofar.sas.tmss.tmss.tmssapp.adapters.sip import generate_sip_for_dataproduct
+
 
 class ParsetAdapterTest(unittest.TestCase):
     def test_01(self):
@@ -55,6 +57,23 @@ class ParsetAdapterTest(unittest.TestCase):
 
         parset = convert_to_parset(subtask)
 
+
+class SIPAdapterTest(unittest.TestCase):
+    def test_simple_sip_generate_from_dataproduct(self):
+        subtask_template = models.SubtaskTemplate.objects.get(name='observationcontrol schema')
+        specifications_doc = get_default_json_object_for_schema(subtask_template.schema)
+        for dp in specifications_doc['stations']['digital_pointings']:
+            dp['subbands'] = list(range(8))
+        subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
+        subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
+        subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
+        dataproduct:models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
+
+        sip = generate_sip_for_dataproduct(dataproduct)
+        # TODO: Although validation succeeds at this step, it would be interesting to check some XML values
+        # print(sip.get_prettyxml())
+
+
 if __name__ == "__main__":
     os.environ['TZ'] = 'UTC'
     unittest.main()
diff --git a/SAS/TMSS/test/t_parset_adapter.run b/SAS/TMSS/test/t_adapter.run
similarity index 54%
rename from SAS/TMSS/test/t_parset_adapter.run
rename to SAS/TMSS/test/t_adapter.run
index 0ec442af1b13271e84299c07fc7966bc7560ac82..f5d8bcd080af64a1006f5aff4846f4be6c40c101 100755
--- a/SAS/TMSS/test/t_parset_adapter.run
+++ b/SAS/TMSS/test/t_adapter.run
@@ -2,5 +2,5 @@
 
 # Run the unit test
 source python-coverage.sh
-python_coverage_test "*tmss*" t_parset_adapter.py
+python_coverage_test "*tmss*" t_adapter.py
 
diff --git a/SAS/TMSS/test/t_adapter.sh b/SAS/TMSS/test/t_adapter.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6ac64453a78317185c89c01ab422cb52077227c5
--- /dev/null
+++ b/SAS/TMSS/test/t_adapter.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+./runctest.sh t_adapter
\ No newline at end of file
diff --git a/SAS/TMSS/test/t_parset_adapter.sh b/SAS/TMSS/test/t_parset_adapter.sh
deleted file mode 100755
index d75b13ea9fd228e65ff103625be8bda753a84b6a..0000000000000000000000000000000000000000
--- a/SAS/TMSS/test/t_parset_adapter.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-
-./runctest.sh t_parset_adapter
\ No newline at end of file
diff --git a/SAS/TMSS/test/tmss_test_data_django_models.py b/SAS/TMSS/test/tmss_test_data_django_models.py
index 21ee23b0d2e0330a9a7660f91a2e8e3c72b9f66a..f0de0f4042b50353cc90d762e1c13f33263aba4e 100644
--- a/SAS/TMSS/test/tmss_test_data_django_models.py
+++ b/SAS/TMSS/test/tmss_test_data_django_models.py
@@ -326,11 +326,12 @@ def Subtask_test_data(task_blueprint: models.TaskBlueprint=None, subtask_templat
     if specifications_doc is None:
         specifications_doc = get_default_json_object_for_schema(subtask_template.schema)
 
+    # The type needs to be a datetime object, not a str, so do not add .isoformat()
     if start_time is None:
-        start_time = datetime.utcnow().isoformat()
+        start_time = datetime.utcnow()
 
     if stop_time is None:
-        stop_time = datetime.utcnow().isoformat()
+        stop_time = datetime.utcnow()
 
     if cluster is None:
         cluster = models.Cluster.objects.create(name="dummy cluster", location="downstairs", tags=[])
@@ -345,7 +346,7 @@ def Subtask_test_data(task_blueprint: models.TaskBlueprint=None, subtask_templat
              "task_blueprint": task_blueprint,
              "specifications_template": subtask_template,
              "tags": ["TMSS", "TESTING"],
-             "do_cancel": datetime.utcnow().isoformat(),
+             "do_cancel": datetime.utcnow(),
              "priority": 1,
              "schedule_method": models.ScheduleMethod.objects.get(value='manual'),
              "cluster": cluster}