diff --git a/applications/arts/doc/python/arts_sc1.py b/applications/arts/doc/python/arts_sc1.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ddc34a4b9ee50174151aad4c72d3c5379d0b445
--- /dev/null
+++ b/applications/arts/doc/python/arts_sc1.py
@@ -0,0 +1,81 @@
+###############################################################################
+#
+# Copyright (C) 2016
+# ASTRON (Netherlands Institute for Radio Astronomy) <http://www.astron.nl/>
+# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+###############################################################################
+
+# Author
+# . Daniel van der Schuur
+# Purpose
+# . Use stream.py to model the ARTS SC1 data path
+# Description
+# . This stream model matches ASTRON-SP-062, chapter 5.
+
+###############################################################################
+# Import our StreamArray class
+###############################################################################
+from stream import StreamArray
+
+###############################################################################
+# Constants: definitions from SP-062, page 8
+###############################################################################
+# Streaming array update interval
+T_INT_X = 1.024 # Correlator integration period in seconds
+# Parallel (physical) dimensions
+N_DISH = 12 # Number of dishes
+N_POL = 2   # Number of polarizations
+N_BAND = 16 # Number of bands
+# Serial (time) dimensions
+N_INT_X = 800000 # Number of time samples per correlator integration period
+N_SLOT = 1024 # FIXME: we need global indices here, don't we?
+W_BEAMLET = 6 # Complex beamlet data width
+
+nof_intervals = 0 # Unlimited runtime
+
+###############################################################################
+# Equation 1 
+###############################################################################
+parallel_dimensions = (N_DISH, N_POL, N_BAND)
+data_width = N_POL*W_BEAMLET
+
+# StreamArray definition
+parallel_definition = (('dish', N_DISH), ('polarization', N_POL), ('band', N_BAND))
+serial_definition = (('interval', nof_intervals, T_INT_X), ('timesample', N_INT_X), ('slot', N_SLOT))
+
+CB444 = StreamArray(parallel_definition, serial_definition, data_width, block_size=1024)
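+
+# Illustrative check (not part of SP-062): the StreamArray methods defined in stream.py
+# report the properties of the indexed array. With the constants above, the full array
+# amounts to N_DISH*N_POL*N_BAND = 384 parallel streams of 9.6 Gbps each.
+print CB444.get_nof_parallel_streams() # 384
+print CB444.get_data_rate()            # 384 * 9.6 = 3686.4 Gbps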
+
+# Print the 'dish' field of the first data block of dish 0, pol 0, band (front node) 0 and of band 1:
+for i in CB444[0][0][0]:
+    print i['dish']
+    break
+for i in CB444[0][0][1]:
+    print i['dish']
+    break
+
+###############################################################################
+# Equation 2: transpose the band and dish (physical) dimensions of CB444: flip dimensions 0 and 2
+###############################################################################
+CB444_T = CB444.transpose((2,1,0)) # NOTE: transpose works. How to print this nicely?
+
+# After the transpose the fastest-varying physical index selects the dish, so the second
+# loop below prints dish 1 where the corresponding loop on CB444 printed dish 0:
+for i in CB444_T[0][0][0]:
+    print i['dish']
+    break
+for i in CB444_T[0][0][1]:
+    print i['dish']
+    break
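+
+# Illustrative: indexing a subset yields the properties of just that slice, e.g. one band
+# (all dishes, both polarizations) of the transposed array:
+print CB444_T[0].get_nof_parallel_streams() # N_POL*N_DISH = 24
+print CB444_T[0].get_data_rate()            # 24 * 9.6 = 230.4 Gbps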
+
diff --git a/applications/arts/doc/python/stream.py b/applications/arts/doc/python/stream.py
new file mode 100644
index 0000000000000000000000000000000000000000..6929ed5a8158517b2b116ef35ecb66fa0a23d006
--- /dev/null
+++ b/applications/arts/doc/python/stream.py
@@ -0,0 +1,114 @@
+import common as cm
+import numpy as np
+np.core.arrayprint._line_width = 200 # Let Numpy print more than 75 chars per line
+import traceback
+
+class Stream:
+    """
+    Single serial stream generator 
+    """
+    def __init__(self, parallel_definition, serial_definition, data_width, block_size):
+
+        # Parallel definition: physical stream tags and indices for this serial stream
+        self.parallel_definition = parallel_definition
+        self.parallel_tags       = [pair[0] for pair in parallel_definition]
+        self.parallel_indices    = [pair[1] for pair in parallel_definition] 
+
+        # Interval definition
+        self.interval_tag = serial_definition[0][0] # Tag (name) for the interval dimension
+        self.interval_nof = serial_definition[0][1] # Modeled number of intervals (0=unlimited)
+        self.interval_sec = serial_definition[0][2] # Functional Interval in seconds
+
+        # Dimensions of serial array
+        self.serial_tags       = [pair[0] for pair in serial_definition]
+        self.serial_dimensions = [pair[1] for pair in serial_definition[1:]] # Don't include the interval dimension as it is not finite
+        self.nof_serial_dimensions = len(self.serial_dimensions)
+
+        # Data rate in Gbps
+        self.nof_serial_data_per_interval = cm.multiply_list_elements(self.serial_dimensions)
+        self.data_rate = (self.nof_serial_data_per_interval*data_width)/self.interval_sec/1000/1000/1000
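+        # E.g. with the constants from arts_sc1.py (illustration only): 800000*1024 data
+        # words of 12 bit per 1.024 s interval -> 800000*1024*12/1.024/1e9 = 9.6 Gbps.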
+
+        # Create an array of serial data out index counters; initialize all to maximum so next() starts at 0
+        self.serial_data_out = []
+        for dimension in self.serial_dimensions:
+            self.serial_data_out.append(dimension-1)
+        # Create an interval out counter. Initialize to -1 as there is no maximum
+        self.interval_out = -1
+        self.block_size = block_size
+        self.last_block_lo = 0
+        self.last_block_hi = block_size
+
+    def next(self):
+        block = []
+        for i in range(self.block_size):
+            # Start with the fastest changing dimension (index -1). When we have e.g. 2 dimensions, don't go beyond index -2.
+            for dimension_index in range(-1, -(self.nof_serial_dimensions+1), -1):
+                if self.serial_data_out[dimension_index]==self.serial_dimensions[dimension_index]-1:
+                    # Max of this dimension reached; reset to 0
+                    self.serial_data_out[dimension_index]=0
+                    # If this is the highest dimension, all serial counters have wrapped, so a new interval starts.
+                    if dimension_index==-(self.nof_serial_dimensions):
+                        self.interval_out+=1
+                else:
+                    # Max not reached; increment index
+                    self.serial_data_out[dimension_index]+=1
+                    break
+            block.append(tuple(self.parallel_indices)+tuple([self.interval_out]+(self.serial_data_out)))
+
+        # Zip the tags with datatype 'int' (fixed for now) to pass to np.array. This makes the individual
+        # fields addressable by name, which is essential.
+        np_dtype = zip(self.parallel_tags+self.serial_tags, (len(self.parallel_tags)+self.nof_serial_dimensions+1)*['int']) # Do add the interval dimension here
+        np_block = np.array(block, np_dtype)
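+        # e.g. np_block['dish'] yields the dish index of every item in this block.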
+        return np_block
+
+    def __iter__(self):
+        return self
+
+
+class StreamArray(np.ndarray):
+    """
+    NOTE: Use get_*() methods instead of fixed attributes, as indexing a subset of a StreamArray should yield
+          e.g. a different data rate than the full array.
+    NOTE: An ESSENTIAL property of subclassing numpy.ndarray is that method return values *DEPEND ON THE INDEXED
+          SUBSTREAMS*, e.g. a substream can be passed to a Component without the difficulties of component_class.py!
+    """
+    def __new__(cls, parallel_definition, serial_definition, data_width, block_size):
+        # We need the parallel dimensions here, but we'll pass the parallel tags to the serial Stream instances.
+        parallel_tags       = [pair[0] for pair in parallel_definition] 
+        parallel_dimensions = [pair[1] for pair in parallel_definition] 
+
+        # Needed to subclass Numpy array
+        nof_parallel_streams = cm.multiply_list_elements(parallel_dimensions)
+
+        streams = []
+        for index in np.ndindex(tuple(parallel_dimensions)):
+            # Replace the dimension size in the parallel_definition with the actual stream index
+            parallel_definition = zip(parallel_tags, index)
+            streams.append(Stream(parallel_definition, serial_definition, data_width, block_size))
+        input_array = np.array(streams)
+
+        input_array.resize(parallel_dimensions)
+        obj = np.asarray(input_array).view(cls)
+        return obj
+
+    def __array_finalize__(self, obj):
+        if obj is None: return
+
+    def get_nof_parallel_streams(self):
+        """
+        Return the total number of parallel streams of this StreamArray instance (which may itself be a slice of a larger array)
+        """
+        parallel_dimensions = np.shape(self)
+        nof_parallel_streams = cm.multiply_list_elements(parallel_dimensions)
+        return nof_parallel_streams
+
+    def get_data_rate(self):
+        """
+        Return the data rate in Gbps
+        """
+        # Extract a Stream instance by flattening the nd array and indexing stream [0]
+        stream_0 = self.flatten()[0]
+        # Multiply the number of parallel streams by the data rate of a single stream
+        return self.get_nof_parallel_streams()*stream_0.data_rate
+        
+