From 39b51a3835093ddb391659b7e116f06e5f7d4c5d Mon Sep 17 00:00:00 2001 From: Pepping <pepping> Date: Fri, 16 Jan 2015 11:05:22 +0000 Subject: [PATCH] Initial --- .../reorder/tb/python/tc_reorder_transpose.py | 306 ++++++++++++++++++ 1 file changed, 306 insertions(+) create mode 100644 libraries/base/reorder/tb/python/tc_reorder_transpose.py diff --git a/libraries/base/reorder/tb/python/tc_reorder_transpose.py b/libraries/base/reorder/tb/python/tc_reorder_transpose.py new file mode 100644 index 0000000000..9ce74f593f --- /dev/null +++ b/libraries/base/reorder/tb/python/tc_reorder_transpose.py @@ -0,0 +1,306 @@ +#! /usr/bin/env python +############################################################################### +# +# Copyright (C) 2012 +# ASTRON (Netherlands Institute for Radio Astronomy) <http://www.astron.nl/> +# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# +############################################################################### + +"""Test case for the ddr3_transpose entity. 
+
+
+  Description:
+
+    A block generator stimulates the design with c_bg_nof_streams streams of
+    complex samples (real part = sample index, imaginary part = stream index).
+    The transposed output is captured in the 'REAL' and 'IMAG' data buffers
+    and compared against a Python reference model of the page-based
+    write/read transpose implemented in this script.
+
+  Usage:
+
+  > python tc_reorder_transpose.py --unb 0 --fn 0 --sim
+
+"""
+
+###############################################################################
+# System imports
+# NOTE: this is a Python 2 script (print statements, integer '/' division).
+import test_case
+import node_io
+import unb_apertif as apr
+import pi_diag_block_gen
+import pi_diag_data_buffer
+import pi_ss_ss_wide
+import dsp_test
+
+import sys, os
+import subprocess
+import time
+import pylab as pl
+import numpy as np
+import scipy as sp
+import random
+from tools import *
+from common import *
+import mem_init_file
+
+###############################################################################
+
+# Create a test case object
+tc = test_case.Testcase('TB - ', '')
+
+# Constants/Generics that are shared between VHDL and Python
+# Name Value Default Description
+# START_VHDL_GENERICS
+g_wr_chunksize = 64
+g_wr_nof_chunks = 1
+g_rd_chunksize = 16
+g_rd_nof_chunks = 4
+g_gapsize = 0
+g_nof_blocks = 4
+g_nof_blk_per_sync = 64
+# END_VHDL_GENERICS
+
+# Overwrite generics with argumented generics from autoscript or command line. 
+if tc.generics != None:
+    g_wr_chunksize = tc.generics['g_wr_chunksize']
+    g_wr_nof_chunks = tc.generics['g_wr_nof_chunks']
+    g_rd_chunksize = tc.generics['g_rd_chunksize']
+    g_rd_nof_chunks = tc.generics['g_rd_nof_chunks']
+    g_gapsize = tc.generics['g_gapsize']
+    g_nof_blocks = tc.generics['g_nof_blocks']
+    g_nof_blk_per_sync = tc.generics['g_nof_blk_per_sync']
+
+# Constants derived from the generics above.
+c_blocksize = (g_wr_chunksize + g_gapsize) * g_wr_nof_chunks  # nof words occupied per written block, incl. gap
+c_pagesize = c_blocksize * g_nof_blocks                       # nof words per memory page (used by the reference model)
+c_bg_nof_streams = 4
+c_bg_ram_size = g_wr_chunksize * g_wr_nof_chunks * g_rd_chunksize  # nof words in the BG waveform RAM per stream
+c_in_dat_w = 8                                                # width in bits of the real part and of the imaginary part
+c_db_nof_streams = c_bg_nof_streams
+c_db_ram_size = c_bg_ram_size #g_rd_chunksize * g_rd_nof_chunks * g_nof_blocks
+c_frame_size = g_wr_chunksize                                 # nof samples per BG packet
+c_nof_int_streams = 1
+c_ena_pre_transpose = False                                   # when True, apply the subband-select pre-transpose in the reference model
+c_gap_size = 0 #g_rd_chunksize
+
+tc.append_log(3, '>>>')
+tc.append_log(1, '>>> Title : Test bench for ddr3_transpose' )
+tc.append_log(3, '>>>')
+tc.append_log(3, '')
+tc.set_result('PASSED')
+
+# Create access object for nodes
+io = node_io.NodeIO(tc.nodeImages, tc.base_ip)
+
+# Create block generator instance
+bg = pi_diag_block_gen.PiDiagBlockGen(tc, io, c_bg_nof_streams, c_bg_ram_size)
+
+# Create databuffer instances
+# Two data buffers capture the output: one for the real parts, one for the
+# imaginary parts.
+db_re = pi_diag_data_buffer.PiDiagDataBuffer(tc, io, instanceName = 'REAL', nofStreams=c_db_nof_streams, ramSizePerStream=c_db_ram_size)
+db_im = pi_diag_data_buffer.PiDiagDataBuffer(tc, io, instanceName = 'IMAG', nofStreams=c_db_nof_streams, ramSizePerStream=c_db_ram_size)
+
+# Create subandselect instance for pre-transpose.
+ss = pi_ss_ss_wide.PiSsSsWide (tc, io, c_frame_size*g_rd_chunksize, c_nof_int_streams)
+
+# Create dsp_test instance for helpful methods
+dsp_test_bg = dsp_test.DspTest(inDatW=c_in_dat_w)
+
+# Function for generating stimuli and generating hex files. 
+def gen_bg_hex_files(c_nof_values = 1024, c_nof_streams = 4):
+    """Generate BG stimuli and write them to hex memory-init files.
+
+    For every stream a list of complex samples is created with the sample
+    index as real part and the stream index as imaginary part.  Both parts
+    are concatenated into one word of 2*c_in_dat_w bits, which is written
+    to ../../src/hex/tb_bg_dat_<stream>.hex.
+
+    Returns a list holding, per stream, the list of concatenated words.
+
+    NOTE(review): c_nof_complex and ceil_log2 are expected to come from the
+    'common'/'tools' star imports -- verify against those modules.
+    """
+    data = []
+    for i in range(c_nof_streams):
+        stream_re = []
+        stream_im = []
+        for j in range(c_nof_values):
+            stream_re.append(j)
+            stream_im.append(i)
+        data_concat = dsp_test_bg.concatenate_two_lists(stream_re, stream_im, c_in_dat_w)
+        data.append(data_concat)
+        filename = "../../src/hex/tb_bg_dat_" + str(i) + ".hex"
+        mem_init_file.list_to_hex(list_in=data_concat, filename=filename, mem_width=c_nof_complex*c_in_dat_w, mem_depth=2**(ceil_log2(c_bg_ram_size)))
+    return data
+
+if __name__ == "__main__":
+    ###############################################################################
+    #
+    # Create setting for the pre-transpose (subbandselect)
+    #
+    ###############################################################################
+    # Select order: sample i of every read chunk j, i.e. index i + j*c_frame_size.
+    ss_list = []
+    for i in range(c_frame_size):
+        for j in range(g_rd_chunksize):
+            ss_list.append(i + j*c_frame_size)
+
+
+    for i in ss_list:
+        print i
+    ss.write_selects(ss_list)
+
+    ###############################################################################
+    #
+    # Create stimuli for the BG
+    #
+    ###############################################################################
+    # Prepare x stimuli for block generator
+    bg_data = gen_bg_hex_files(c_bg_ram_size, c_bg_nof_streams)
+
+    ################################################################################
+    ##
+    ## Write data and settings to block generator
+    ##
+    ################################################################################
+    # Write setting for the block generator:
+    bg.write_block_gen_settings(samplesPerPacket=c_frame_size, blocksPerSync=g_nof_blk_per_sync, gapSize=c_gap_size, memLowAddr=0, memHighAddr=c_bg_ram_size-1, BSNInit=10)
+
+    # Write the stimuli to the block generator and enable the block generator
+    for i in range(c_bg_nof_streams):
+        bg.write_waveform_ram(data=bg_data[i], channelNr= i)
+
+    # Concatenate all channels
+    # Pairwise concatenate the per-stream lists until one list of full-width
+    # words remains; the word width doubles every iteration (tracked via t).
+    t=2
+    while len(bg_data) > 1:
+        concat_data = []
+        for i in range(len(bg_data)/2):
+            concat_data.append(dsp_test_bg.concatenate_two_lists(bg_data[2*i], bg_data[2*i+1], c_in_dat_w*t))
+        bg_data = concat_data
+        t=t*2
+
+    bg_data = flatten(bg_data)
+
+# for i in range(len(bg_data)):
+#     print ">%X<" % bg_data[i]
+
+    # Wait until the DDR3 model is initialized.
+    do_until_gt(io.simIO.getSimTime, ms_retry=1000, val=110000, s_timeout=13600) # 110000
+
+    # Enable the blockgenerator
+    bg.write_enable()
+    #do_until_gt(io.simIO.getSimTime, ms_retry=1000, val=50000, s_timeout=13600) # 110000
+    #bg.write_disable()
+    #do_until_gt(io.simIO.getSimTime, ms_retry=1000, val=60000, s_timeout=13600) # 110000
+    #bg.write_enable()
+    #
+    #do_until_gt(io.simIO.getSimTime, ms_retry=1000, val=120000, s_timeout=13600) # 110000
+    #bg.write_disable()
+    #do_until_gt(io.simIO.getSimTime, ms_retry=1000, val=140000, s_timeout=13600) # 110000
+    #bg.write_enable()
+
+    ###############################################################################
+    #
+    # Calculate reference data
+    #
+    ###############################################################################
+    # Subband Select pre-transpose
+    print "len(ss_list)"
+    print len(ss_list)
+    if c_ena_pre_transpose:
+        bg_data = ss.subband_select(bg_data, ss_list)
+
+    ref_data_total = []
+    # Check how many data there is and how many pages will be used:
+    for t in range(len(bg_data)/c_pagesize):
+        bg_data_single_page = bg_data[t*c_pagesize:(t+1)*c_pagesize]
+        # Write to memory
+        # Reference model: write the blocks chunk-wise into one memory page.
+        mem_page = [0] * c_pagesize
+        print "len(mem_page)"
+        print len(mem_page)
+        print "len(bg_data_single_page)"
+        print len(bg_data_single_page)
+
+        for i in range(g_nof_blocks):
+            for j in range(g_wr_nof_chunks):
+                for k in range(g_wr_chunksize):
+                    # NOTE(review): c_blocksize already contains the factor
+                    # g_wr_nof_chunks, so the extra '*g_wr_nof_chunks' in the
+                    # write index looks doubled for g_wr_nof_chunks > 1; it is
+                    # harmless for the default g_wr_nof_chunks = 1 -- verify.
+                    mem_page[i*c_blocksize*g_wr_nof_chunks + j*c_blocksize + k] = bg_data_single_page[i*g_wr_chunksize*g_wr_nof_chunks + j*g_wr_chunksize + k]
+
+        # Read from memory
+        # Reference model: read g_rd_chunksize words per chunk; chunk_cnt
+        # cycles over the blocks and chunk_offset advances after each cycle.
+        ref_data = [0] * g_nof_blocks * g_rd_nof_chunks * g_rd_chunksize
+        chunk_cnt = 0
+        chunk_offset = 0
+        for i in range(g_nof_blocks):
+            for j in range(g_rd_nof_chunks):
+                if chunk_cnt == g_nof_blocks:
+                    chunk_cnt = 0
+                    chunk_offset = chunk_offset + 1
+                for k in range(g_rd_chunksize):
+                    #ref_data[chunk_cnt*(g_rd_chunksize*g_rd_nof_chunks)+ chunk_offset*g_rd_chunksize + k] = mem_page[chunk_cnt*(g_rd_chunksize*g_rd_nof_chunks+g_gapsize)+ chunk_offset*g_rd_chunksize + k]
+                    ref_data[i*(g_rd_chunksize*g_rd_nof_chunks)+j*g_rd_chunksize + k] = mem_page[chunk_cnt*(g_rd_chunksize*g_rd_nof_chunks+g_gapsize)+ chunk_offset*g_rd_chunksize + k]
+                chunk_cnt = chunk_cnt + 1
+        ref_data_total.append(ref_data)
+
+    ref_data_total=flatten(ref_data_total)
+
+    # Split the data again in individual channels
+    # Inverse of the concatenation above: repeatedly halve the word width
+    # until there is one list per BG stream again.
+    ref_data_split = []
+    ref_data_split.append(ref_data_total)
+    t = c_bg_nof_streams
+    while len(ref_data_split) < c_bg_nof_streams:
+        ref_data_temp = []
+        for i in range(len(ref_data_split)):
+            [data_a, data_b] = dsp_test_bg.split_in_two_lists(ref_data_split[i], c_in_dat_w*t)
+            ref_data_temp.append(data_a)
+            ref_data_temp.append(data_b)
+        ref_data_split = ref_data_temp
+        t = t/2
+
+    # Split the data in real and imaginary
+    ref_data_re = []
+    ref_data_im = []
+
+    for i in range(c_bg_nof_streams):
+        [data_re, data_im] = dsp_test_bg.split_in_two_lists(ref_data_split[i], c_in_dat_w)
+        ref_data_re.append(data_re)
+        ref_data_im.append(data_im)
+
+# print "real + imag"
+# for i in range(len(ref_data_re)):
+#     for j in range(len(ref_data_re[i])):
+#         print "concat: >%X< real: >%X< imag: >%X< " % (ref_data_split[i][j], ref_data_re[i][j], ref_data_im[i][j])
+#     print
+#
+    # Poll the databuffer to check if the response is there.
+    # Retry after 3 seconds so we don't issue too many MM reads in case of simulation. 
+    do_until_ge(db_re.read_nof_words, ms_retry=3000, val=c_db_ram_size, s_timeout=3600)
+
+    ###############################################################################
+    #
+    # Read transposed data from data buffer
+    #
+    ###############################################################################
+    # Read the captured real and imaginary samples back, per stream.
+    db_out_re = []
+    db_out_im = []
+    for i in range(c_bg_nof_streams):
+        db_out_re.append(flatten(db_re.read_data_buffer(streamNr=i, n=c_db_ram_size, radix='uns', width=c_in_dat_w, nofColumns=8)))
+        db_out_im.append(flatten(db_im.read_data_buffer(streamNr=i, n=c_db_ram_size, radix='uns', width=c_in_dat_w, nofColumns=8)))
+
+    ###############################################################################
+    #
+    # Verify output data
+    #
+    ###############################################################################
+    # Compare every captured word with the reference model; any mismatch
+    # fails the test case (the result was initialized to 'PASSED').
+    for i in range(c_bg_nof_streams):
+        for j in range(c_db_ram_size):
+            if db_out_re[i][j] != ref_data_re[i][j]:
+                tc.append_log(2, 'Error in real output data. Expected data: %d Data read: %d Iteration nr: %d %d' % (ref_data_re[i][j], db_out_re[i][j], i, j))
+                tc.set_result('FAILED')
+            if db_out_im[i][j] != ref_data_im[i][j]:
+                tc.append_log(2, 'Error in imag output data. Expected data: %d Data read: %d Iteration nr: %d %d' % (ref_data_im[i][j], db_out_im[i][j], i, j))
+                tc.set_result('FAILED')
+
+    ###############################################################################
+    # End
+    tc.set_section_id('')
+    tc.append_log(3, '')
+    tc.append_log(3, '>>>')
+    tc.append_log(0, '>>> Test bench result: %s' % tc.get_result())
+    tc.append_log(3, '>>>')
+
+    sys.exit(tc.get_result())
+
-- 
GitLab