diff --git a/libraries/io/ddr/src/vhdl/io_ddr_driver.vhd b/libraries/io/ddr/src/vhdl/io_ddr_driver.vhd
index 32ad80ecf3458b0b0e92b43baa3d19edb425eba4..d6942f5eac12b0ce9a7d7289c9c5d1e69bb081b0 100644
--- a/libraries/io/ddr/src/vhdl/io_ddr_driver.vhd
+++ b/libraries/io/ddr/src/vhdl/io_ddr_driver.vhd
@@ -35,6 +35,25 @@
 --   g_tech_ddr.maxburstsize and eg. 64 ctlr data words. The maximum burst size
 --   of this driver is as large as the entire ctlr address span. The burst size
 --   of driver depends on the block size of the application.
+-- . The DDR IP has a command queue depth of e.g. 4 for DDR3 and 8 for DDR4.
+--   - In the simulation for DDR3 it seems that the queue is only 1 deep for
+--     both wr and rd. The p_state state machine does not support multiple
+--     write bursts, because it uses burst_wr_cnt. For read the p_state
+--     state machine could generate multiple rd bursts, but then the DDR3 IP
+--     keeps waitrequest_n low until the end of each rd burst.
+--   - For DDR4 IP the waitrequest_n remains active during most of the write
+--     access so apparently it could accept multiple wr bursts, but these are
+--     not issued due to burst_wr_cnt mechanism in wr_burst state.
+--     For DDR4 IP the waitrequest_n does allow 3 rd bursts, so then care must
+--     be taken that the external read FIFO can indeed handle these bursts, as
+--     set by the almost full margin that drives rd_src_in.ready. Alternatively
+--     it could become necessary to also add a rd_burst state with a
+--     burst_rd_cnt, to ensure that a new rd burst is only issued when the
+--     previous has finished so that the read FIFO can not run full. In that
+--     case the burst_wr_cnt and burst_rd_cnt could be combined in a single
+--     counter, because they are not used at the same time. The read FIFO can
+--     run full if the rd data width is smaller than the ctlr data width and/or
+--     if the rd side can not read on every rd_clk cycle.
 
 
 LIBRARY IEEE, tech_ddr_lib, common_lib, dp_lib;
@@ -146,13 +165,12 @@ BEGIN
                     burst_size, burst_wr_cnt, cur_address, address_cnt, address_cnt_is_0)
   BEGIN  
     nxt_state              <= state;
-    
-    ctlr_mosi.address      <= RESIZE_MEM_CTLR_ADDRESS(cur_address);
+    ctlr_mosi.address      <= RESIZE_MEM_CTLR_ADDRESS(cur_address);  -- no need to hold during burst, because the Avalon constantBurstBehaviour=FALSE (default) of the DDR IP slave
     ctlr_mosi.wrdata       <= RESIZE_MEM_CTLR_DATA(wr_snk_in.data);
     ctlr_mosi.wr           <= '0';
     ctlr_mosi.rd           <= '0';
-    ctlr_mosi.burstbegin   <= '0'; 
-    ctlr_mosi.burstsize    <= (OTHERS => '0');
+    ctlr_mosi.burstbegin   <= '0';                                   -- only used for legacy DDR controllers, because the controller can derive it internally by counting wr and rd accesses
+    ctlr_mosi.burstsize    <= (OTHERS => '0');                       -- no need to hold during burst, because the Avalon constantBurstBehaviour=FALSE (default) of the DDR IP slave
     
     wr_snk_out.ready       <= '0';
     nxt_dvr_done           <= '0';
@@ -169,7 +187,7 @@ BEGIN
             ctlr_mosi.wr     <= '1';
             nxt_burst_wr_cnt <= burst_wr_cnt-1;
             IF burst_wr_cnt = 1 THEN            -- check for the last cycle of this burst sequence
-              nxt_state <= s_wr_request;        -- initiate a new wr burst or goto idle via the wr_request state, simulation shows this does not cost a cycle
+              nxt_state <= s_wr_request;        -- initiate a new wr burst or goto idle via the wr_request state, simulation shows going directly idle by checking address_cnt_is_0 here does not save a cycle
             END IF;
           END IF;
         END IF;