Skip to content
Snippets Groups Projects
Commit 37940f66 authored by Pepping's avatar Pepping
Browse files

- Added g_rd_interval

- removed g_wr_nof_chunks
- Changed the data generation
- Updated the creation of reference data.
parent 5abb1ed5
No related branches found
No related tags found
No related merge requests found
......@@ -60,29 +60,31 @@ tc = test_case.Testcase('TB - ', '')
# Constants/Generics that are shared between VHDL and Python
# Name Value Default Description
# START_VHDL_GENERICS
g_wr_chunksize = 16      # samples per write chunk
g_rd_chunksize = 4       # samples per read chunk
g_rd_nof_chunks = 4      # read chunks per block
g_rd_interval = 4        # block interval between consecutive reads
g_gapsize = 0            # gap (in samples) between chunks in memory
g_nof_blocks = 32        # blocks per memory page
g_nof_blk_per_sync = 32  # blocks per sync interval
# END_VHDL_GENERICS

# Overwrite generics with argumented generics from autoscript or command line.
if tc.generics is not None:
    g_wr_chunksize = tc.generics['g_wr_chunksize']
    g_rd_chunksize = tc.generics['g_rd_chunksize']
    g_rd_nof_chunks = tc.generics['g_rd_nof_chunks']
    g_rd_interval = tc.generics['g_rd_interval']
    g_gapsize = tc.generics['g_gapsize']
    g_nof_blocks = tc.generics['g_nof_blocks']
    g_nof_blk_per_sync = tc.generics['g_nof_blk_per_sync']

# Derived constants
c_blocksize = (g_wr_chunksize + g_gapsize)      # one write block (chunk + gap) in samples
c_pagesize = c_blocksize * g_nof_blocks         # one memory page in samples
c_rd_increment = g_rd_interval * c_blocksize    # address stride between consecutive reads
c_bg_nof_streams = 4                            # number of block generator streams
c_bg_ram_size = g_wr_chunksize * g_nof_blocks   # BG RAM depth per stream
c_in_dat_w = 8                                  # input data width in bits
c_db_nof_streams = c_bg_nof_streams             # databuffer streams mirror BG streams
c_db_ram_size = c_bg_ram_size                   # g_rd_chunksize * g_rd_nof_chunks * g_nof_blocks
......@@ -117,20 +119,36 @@ ss = pi_ss_ss_wide.PiSsSsWide (tc, io, c_frame_size*g_rd_chunksize, c_nof_int_st
dsp_test_bg = dsp_test.DspTest(inDatW=c_in_dat_w)

# Function for generating stimuli and generating hex files.
def gen_bg_hex_files(c_framesize=64, c_nof_frames=32, c_nof_streams=4):
    """Generate block-generator stimuli and write them to .hex init files.

    For every stream, the real part ramps 0..c_framesize-1 inside each frame
    and the imaginary part holds the frame index, so every (frame, sample)
    position carries a unique tag. Each stream's concatenated complex data is
    written to ../../src/hex/tb_bg_dat_<stream>.hex and also returned.

    :param c_framesize:   number of samples per frame
    :param c_nof_frames:  number of frames per stream
    :param c_nof_streams: number of block generator streams
    :return: list with one concatenated data list per stream
    """
    data = []
    for i in range(c_nof_streams):
        stream_re = []
        stream_im = []
        for j in range(c_nof_frames):
            for k in range(c_framesize):
                stream_re.append(k)  # sample index within the frame
                stream_im.append(j)  # frame index
        data_concat = dsp_test_bg.concatenate_two_lists(stream_re, stream_im, c_in_dat_w)
        data.append(data_concat)
        filename = "../../src/hex/tb_bg_dat_" + str(i) + ".hex"
        mem_init_file.list_to_hex(list_in=data_concat, filename=filename, mem_width=c_nof_complex*c_in_dat_w, mem_depth=2**(ceil_log2(c_bg_ram_size)))
    return data
if __name__ == "__main__":
###############################################################################
#
......@@ -153,7 +171,7 @@ if __name__ == "__main__":
#
###############################################################################
# Prepare x stimuli for block generator
bg_data = gen_bg_hex_files(c_frame_size, g_nof_blocks, c_bg_nof_streams)
################################################################################
##
......@@ -191,14 +209,6 @@ if __name__ == "__main__":
# Wait until simulation time has advanced far enough, then configure the
# block generator for the test run.
do_until_gt(io.simIO.getSimTime, ms_retry=1000, val=180000, s_timeout=13600)  # 110000
bg.write_block_gen_settings(samplesPerPacket=c_frame_size, blocksPerSync=g_nof_blk_per_sync-1, gapSize=c_gap_size, memLowAddr=0, memHighAddr=c_bg_ram_size-1, BSNInit=10)
###############################################################################
#
# Calculate reference data
......@@ -206,7 +216,11 @@ if __name__ == "__main__":
###############################################################################
# Subband Select pre-transpose
if c_ena_pre_transpose:
    # Apply the subband selection per group of len(ss_list) streams,
    # then flatten the grouped result back into one list of streams.
    # '//' keeps the loop bound an int (same result as Python-2 '/').
    bg_data_ss = []
    for i in range(len(bg_data)//len(ss_list)):
        bg_data_ss.append(ss.subband_select(bg_data[i*len(ss_list):(i+1)*len(ss_list)], ss_list))
    bg_data = bg_data_ss
    bg_data = flatten(bg_data)
ref_data_total = []
# Check how many data there is and how many pages will be used:
......@@ -215,25 +229,22 @@ if __name__ == "__main__":
# Write to memory: place each write chunk at its block offset in the page.
mem_page = [0] * c_pagesize
for i in range(g_nof_blocks):
    for j in range(g_wr_chunksize):
        mem_page[i*c_blocksize + j] = bg_data_single_page[i*g_wr_chunksize + j]

# Read from memory: fetch g_rd_chunksize samples every c_rd_increment
# addresses; after wrapping past the page, shift by one chunk so each pass
# reads the next chunk of every block.
ref_data = [0] * g_nof_blocks * g_rd_nof_chunks * g_rd_chunksize
rd_block_offset = 0  # start address of the current read
rd_chunk_offset = 0  # extra chunk offset accumulated on each page wrap
for i in range(g_nof_blocks*g_rd_nof_chunks):
    rd_offset = rd_block_offset + rd_chunk_offset
    for k in range(g_rd_chunksize):
        ref_data[i*g_rd_chunksize + k] = mem_page[rd_offset + k]
    rd_block_offset = rd_block_offset + c_rd_increment
    if rd_block_offset >= c_pagesize:
        rd_chunk_offset = rd_chunk_offset + g_rd_chunksize
        rd_block_offset = rd_block_offset - c_pagesize
ref_data_total.append(ref_data)
ref_data_total=flatten(ref_data_total)
# Split the data again in individual channels
......@@ -303,3 +314,5 @@ if __name__ == "__main__":
sys.exit(tc.get_result())
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment