diff --git a/applications/arts/doc/python/arts_sc1.py b/applications/arts/doc/python/arts_sc1.py
index 86df064dbb55ba1e43091dff5381141518b6185b..4d1172f74b5dabffa610c08c776369dc64786ab7 100644
--- a/applications/arts/doc/python/arts_sc1.py
+++ b/applications/arts/doc/python/arts_sc1.py
@@ -59,16 +59,54 @@ serial_definition = (('interval', nof_intervals, T_INT_X),('timesample', N_INT_X
 
 CB444 = StreamArray(parallel_definition, serial_definition, data_width, block_size=1024, nof_blocks=1)
 
+print CB444.shape
+
 # Print dish 0, pol 0, band (front node) 0:
-for i in CB444[0][0][0]:
-    print i['slot']
+#for dish in CB444:
+#    for pol in dish:
+#        for band in pol:
+#            for i in band:
+#                print i[['dish','band']]
 
 
 ###############################################################################
-# Equation 2: transpose the band and dish (physical) dimensions of CB444: flip dimensions 0 and 2
+# Equation 2: transpose the band and dish (physical) dimensions of CB444
+#             . flip dimensions 0 and 2
 ###############################################################################
 CB444_T = CB444.transpose((2,1,0))
 
-for i in CB444_T[0][0][0]:
-    print i['dish']
+print CB444_T.shape
+
+#for band in CB444_T:
+#    for pol in band:
+#        for dish in pol:
+#            for i in dish:
+#                print i[['dish','band']]
+
+###############################################################################
+# Equation 7: resize dimensions polarizations*dishes = 2*12 to 
+#             processing_nodes
+###############################################################################
+N_PROCESSING_NODES=8
+N_10G_RX=3
 
+CB444_TR = CB444_T.reshape((N_BAND, N_PROCESSING_NODES, N_10G_RX))
+
+print CB444_TR.shape
+
+###############################################################################
+# Equation 9: Select 240/256 beamlets
+# . FIXME In the doc, here we have 4 substreams/10G stream all of a sudden.
+#         That implies a serial->parallel transpose 1x1024->4x256 which 
+#         is not the case. These 4 substreams are already present before the
+#         transpose, so should have been in CB444 already.
+# . FIXME If we use a Component here (which we should as this operation affects
+#         serial data), we should provide it with properly defined data. Since
+#         we don't have that at this point, we need two Components:
+#         1) transpose 1024 to 4x256
+#         2) Forward 240/256 beamlets.
+#         . This introduces a non-existent component (1) and places an
+#           existing component (2) at the wrong place in the stream.
+#         Therefore, we will not continue here but redo this in arts_sc1_v1.py.
+###############################################################################
+#NOTE Redone in arts_sc1_v1.py due to above issues.
diff --git a/applications/arts/doc/python/stream.py b/applications/arts/doc/python/stream.py
index e60759c49561cf43f1f23a3e63555ca8bb4c850e..e7979ec81b66192b046072c85131689d3fd372b1 100644
--- a/applications/arts/doc/python/stream.py
+++ b/applications/arts/doc/python/stream.py
@@ -113,7 +113,7 @@ class StreamArray(np.ndarray):
     User can limit the generated output using nof_intervals (highest dimension = still large amount of output) 
     or nof_blocks (lowest dimension = small amount of output). 0=unlimited.
     """
-    def __new__(cls, parallel_definition, serial_definition, data_width, block_size, nof_blocks):
+    def __new__(cls, parallel_definition, serial_definition, data_width, block_size, nof_blocks, streams=None):
         # We need the parallel dimensions here, but we'll pass the parallel tags to the serial Stream instances.
         parallel_tags       = [pair[0] for pair in parallel_definition] 
         parallel_dimensions = [pair[1] for pair in parallel_definition] 
@@ -121,15 +121,17 @@ class StreamArray(np.ndarray):
         # Needed to subclass Numpy array
         nof_parallel_streams = cm.multiply_list_elements(parallel_dimensions)
 
-        streams = []
-        for index in np.ndindex(tuple(parallel_dimensions)):
-            parallel_definition = zip(parallel_tags, index)
-              
-            # Convert array indices / coordinates to flat global index
-            stream_index = np.ravel_multi_index(index, parallel_dimensions)
-
-            # Replace the dimension size in the parallel_definition with the actual stream index
-            streams.append(Stream(stream_index, parallel_definition, serial_definition, data_width, block_size, nof_blocks))
+        if streams==None:
+            streams = []
+            for index in np.ndindex(tuple(parallel_dimensions)):
+                parallel_definition = zip(parallel_tags, index)
+                  
+                # Convert array indices / coordinates to flat global index
+                stream_index = np.ravel_multi_index(index, parallel_dimensions)
+    
+                # Replace the dimension size in the parallel_definition with the actual stream index
+                streams.append(Stream(stream_index, parallel_definition, serial_definition, data_width, block_size, nof_blocks))
+
         input_array = np.array(streams)
 
         input_array.resize(parallel_dimensions)
@@ -157,3 +159,64 @@ class StreamArray(np.ndarray):
         return self.get_nof_parallel_streams()*stream_0.data_rate
         
 
+
+
+
+
+
+
+
+
+
+
+#def dp_split(source_array, split_size):
+#    # Single stream function
+#    def func(source_):
+#        for i in 
+#
+#    # The dimensions etc of this StreamArray could also be very different. But dp_split outputs almost the same but throws away data.
+#    serial_definition= (('interval', nof_intervals, T_INT_X),('timesample', N_INT_X), ('slot', split_size))
+#    return StreamArray(parallel_definition, serial_definition, data_width, block_size=split_size, nof_blocks=1, source_stream=source_stream, source_function=func)
+
+
+class dp_split_stream:
+    """
+    Single dp_split
+    """
+    def __init__(self, output_size, inputs):
+        self.output_size=output_size
+        self.data_rate=3686.4*240/256/1536
+        self.inputs = inputs
+        self.nof_blocks = 1
+        self.block_count = 0
+
+    def next(self):
+        for block in self.inputs:
+            if self.block_count<=self.nof_blocks-1:   
+                self.block_count+=1
+                return block[0:self.output_size]
+            else:
+                raise StopIteration
+
+    def __iter__(self):
+        return self
+
+    def get_data_rate(self):
+        return self.data_rate
+
+
+class dp_split(StreamArray):
+    def __new__(self, input_array, output_len):
+
+        parallel_definition = (('dish', 12), ('polarization', 2), ('band', 16), ('BU', 4))
+        serial_definition = (('interval', 0, 1.024),('timesample', 800000), ('slot', output_len))
+
+        streams = []
+        for index in np.ndindex(input_array.shape):
+            streams.append(dp_split_stream(output_len, input_array[index]))
+
+        return StreamArray.__new__(self, parallel_definition, serial_definition, 6, output_len, 1, streams)
+
+
+
+