Commit 5bf39435 authored by Pepping

Updated the code. Added checks for writing to file and to hardware.

parent aa0b0051
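The new c_write_* constants act as switches: one flag per data set controls whether the generated lists are written to the hardware over MM, and a matching *_to_file flag controls whether the same lists are dumped to .hex files with mem_init_file.list_to_hex. A minimal, self-contained sketch of that gating pattern follows; the flag names mirror the ones in the diff, while write_to_hw() and write_to_hex_file() are hypothetical stand-ins for the script's MM write methods and hex dump call.

# Sketch of the write-gating pattern introduced by this commit.
# write_to_hw() and write_to_hex_file() are hypothetical stand-ins.
c_write_data         = True    # write generated data to the hardware (MM write)
c_write_data_to_file = False   # dump the same data to a .hex file

def write_to_hw(data, stream_nr):
    print('MM write: stream %d, %d words' % (stream_nr, len(data)))

def write_to_hex_file(data, filename):
    print('hex dump: %d words to %s' % (len(data), filename))

for i in range(4):                  # one pass per stream
    data = list(range(16))          # placeholder for the generated stimuli
    if c_write_data == True:
        write_to_hw(data, i)
    if c_write_data_to_file == True:
        write_to_hex_file(data, "data_" + str(i) + ".hex")

With both flags False (the defaults added in this commit) the script only builds the Python reference lists and touches neither the hardware nor the hex files.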
@@ -41,6 +41,7 @@ import pi_diag_data_buffer
 import pi_ss_ss_wide
 import pi_bf_bf
 import pi_io_ddr
+import pi_bsn_monitor
 import dsp_test
 import sys, os
@@ -70,7 +71,7 @@ g_nof_bf_units = 4 # 4
 g_in_dat_w = 16 # 16
 g_in_weight_w = 16 # 16
-g_blocks_per_sync = 32 # 781250
+g_blocks_per_sync = 781250 #32 # 781250
 g_wr_chunksize = 240
 g_rd_chunksize = 16
 g_rd_nof_chunks = 15
@@ -109,7 +110,14 @@ c_bf_in_dat_w = 16
 c_tp_in_dat_w = 8
 
+c_write_bg_data = False
+c_write_bg_data_to_file = False
+c_write_weights = False
+c_write_weights_to_file = False
+c_write_bf_ss_wide = False
+c_write_bf_ss_wide_to_file = False
+
 tc.append_log(3, '>>>')
 tc.append_log(1, '>>> Title : Test script for apertif_unb1_fn_beamformer_tp_bg' )
 tc.append_log(3, '>>>')
 tc.append_log(3, '')
@@ -127,6 +135,8 @@ for i in range(tc.nofFnNodes):
     for j in xrange(g_nof_bf_units):
         bf.append(pi_bf_bf.PiBfBf(tc, io, g_nof_weights, g_nof_signal_paths, g_nof_input_streams, xstEnable=True, instanceNr=j, nodeNr=tc.nodeFnNrs[i]))
 
+# BSN monitor
+bsn_out = pi_bsn_monitor.PiBsnMonitor(tc, io, instanceName='OUTPUT', nofStreams=2)
+
 # Create subandselect instance for pre-transpose.
 ss = pi_ss_ss_wide.PiSsSsWide (tc, io, g_wr_chunksize*g_rd_chunksize, c_nof_int_streams)
@@ -137,23 +147,22 @@ ddr = pi_io_ddr.PiIoDdr(tc, io, nof_inst = 1)
 # Create dsp_test instance for helpful methods
 dsp_test_bg = dsp_test.DspTest(inDatW=c_bf_in_dat_w)
 
-# Function for generating stimuli and generating hex files.
-def gen_bg_hex_files(c_framesize = 64, c_nof_frames = 32, c_nof_streams = 4):
-    data = []
-    for i in range(c_nof_streams):
-        stream_re = []
-        stream_im = []
-        for j in range(c_nof_frames):
-            for k in range(c_framesize):
-                stream_re.append(k)
-                stream_im.append(j)
-        data_concat = dsp_test_bg.concatenate_two_lists(stream_re, stream_im, c_tp_in_dat_w)
-        data.append(data_concat)
-        filename = "../../src/hex/tb_bg_dat_" + str(i) + ".hex"
-        mem_init_file.list_to_hex(list_in=data_concat, filename=filename, mem_width=c_nof_complex*c_tp_in_dat_w, mem_depth=2**(ceil_log2(c_bg_ram_size)))
-    return data
-
 if __name__ == "__main__":
+    # Wait until the DDR3 model is initialized.
+    if tc.sim == True:
+        do_until_eq(ddr.read_init_done, ms_retry=1000, val=1, s_timeout=13600)
+    ddr.read_usedw_wr_fifo()
+    ddr.read_usedw_rd_fifo()
+    ddr.read_wait_request_n()
+    ddr.read_cal_success()
+    ddr.read_cal_fail()
+    bsn_out.read_bsn_monitor(0)
+    bsn_out.read_bsn_monitor(1)
 
     ################################################################################
     ##
     ## Initialize the blockgenerators
@@ -169,15 +178,17 @@ if __name__ == "__main__":
     tc.append_log(3, '>>>')
     tc.append_log(3, '>>> Write data to the waveform RAM of all channels')
     tc.append_log(3, '>>>')
 
     inputData = []
     for i in xrange(g_nof_input_streams):
         dataList = bg.generate_data_list(c_nof_sp_per_input_stream, g_nof_subbands, 2048*i*4, i, c_bf_in_dat_w)
-        # bg.write_waveform_ram(dataList, i)
-        filename = "../../src/hex/bg_in_data_" + str(i) + ".hex"
-        mem_init_file.list_to_hex(list_in=dataList, filename=filename, mem_width=c_nof_complex*c_bf_in_dat_w, mem_depth=2**(ceil_log2(c_bg_ram_size)))
+        if c_write_bg_data == True:
+            bg.write_waveform_ram(dataList, i)
+        if c_write_bg_data_to_file == True:
+            filename = "../../src/hex/bg_in_data_" + str(i) + ".hex"
+            mem_init_file.list_to_hex(list_in=dataList, filename=filename, mem_width=c_nof_complex*c_bf_in_dat_w, mem_depth=2**(ceil_log2(c_bg_ram_size)))
         dataListComplex = bg.convert_concatenated_to_complex(dataList, c_bf_in_dat_w)
         inputData.append(dataListComplex)
 
     ################################################################################
     ##
     ## Create and Write the weight factors
@@ -193,13 +204,12 @@ if __name__ == "__main__":
     for i in range(g_nof_bf_units):
         weightsBfUnit=[]
         for j in range(g_nof_signal_paths):
             weightsSignalPath = bf[k*g_nof_bf_units+i].generate_weights(g_nof_weights, i+j, i, g_in_weight_w)
-            filename = "../../src/hex/bf_weights_" + str(i) + "_" + str(j) + ".hex"
-            mem_init_file.list_to_hex(list_in=weightsSignalPath, filename=filename, mem_width=c_nof_complex*g_in_weight_w, mem_depth=g_nof_weights)
-            # bf[k*g_nof_bf_units+i].write_weights(weightsSignalPath, j)
+            if c_write_weights_to_file == True:
+                filename = "../../src/hex/bf_weights_" + str(i) + "_" + str(j) + ".hex"
+                mem_init_file.list_to_hex(list_in=weightsSignalPath, filename=filename, mem_width=c_nof_complex*g_in_weight_w, mem_depth=g_nof_weights)
+            if c_write_weights == True:
+                bf[k*g_nof_bf_units+i].write_weights(weightsSignalPath, j)
             weightsSignalPathComplex = bg.convert_concatenated_to_complex(weightsSignalPath, g_in_weight_w)
             weightsBfUnit.append(weightsSignalPathComplex)
         weightsBf.append(weightsBfUnit)
@@ -210,6 +220,10 @@ if __name__ == "__main__":
     ## Create and Write the selection buffers
     ##
     ################################################################################
+    tc.append_log(3, '>>>')
+    tc.append_log(3, '>>> Create and write selection settings for ss_wides in all bf_units ')
+    tc.append_log(3, '>>>')
+
     select_buf = []
     for b in xrange(g_nof_bf_units):
         for i in range(c_nof_sp_per_input_stream):
@@ -217,31 +231,29 @@ if __name__ == "__main__":
             for j in range(4):
                 for k in range(g_nof_weights/4):
                     select_buf_line.append(i*g_nof_subbands + j)
             select_buf.append(select_buf_line)
-            filename = "../../src/hex/bf_ss_wide_" + str(b) + "_" + str(i) + ".hex"
-            mem_init_file.list_to_hex(list_in=flatten(select_buf_line), filename=filename, mem_width=ceil_log2(c_nof_subbands_per_stream), mem_depth=g_nof_weights)
-
-    print len(flatten(select_buf))
-
-    # for i in range(tc.nofFnNodes):
-    #     for j in xrange(g_nof_bf_units):
-    #         for k in range(g_nof_input_streams):
-    ##             bf[i*g_nof_bf_units + j].ss_wide[k].write_selects(flatten(select_buf));
-
-    # - Enable the block generator
-    tc.append_log(3, '>>>')
-    tc.append_log(3, '>>> Enable the block generator')
-    tc.append_log(3, '>>>')
-    tc.append_log(3, '')
-    bg.write_enable()
+            if c_write_bf_ss_wide_to_file == True:
+                filename = "../../src/hex/bf_ss_wide_" + str(b) + "_" + str(i) + ".hex"
+                mem_init_file.list_to_hex(list_in=flatten(select_buf_line), filename=filename, mem_width=ceil_log2(c_nof_subbands_per_stream), mem_depth=g_nof_weights)
+
+    if c_write_bf_ss_wide == True:
+        for i in range(tc.nofFnNodes):
+            for j in xrange(g_nof_bf_units):
+                for k in range(g_nof_input_streams):
+                    bf[i*g_nof_bf_units + j].ss_wide[k].write_selects(flatten(select_buf));
 
     ###############################################################################
     #
     # Create setting for the pre-transpose (subbandselect)
     #
     ###############################################################################
+    tc.append_log(3, '>>>')
+    tc.append_log(3, '>>> Create settings for pre-transpose')
+    tc.append_log(3, '>>>')
+    tc.append_log(3, '')
+
     ss_list = []
     for i in range(g_wr_chunksize):
         for j in range(g_rd_chunksize):
@@ -249,141 +261,18 @@ if __name__ == "__main__":
     if c_ena_pre_transpose:
         ss.write_selects(ss_list)
 
-    ###############################################################################
-    #
-    # Create stimuli for the BG
-    #
-    ###############################################################################
-    # Prepare x stimuli for block generator
-    # bg_data = gen_bg_hex_files(g_frame_size_in, g_nof_blocks, c_bg_nof_streams)
-
-    ################################################################################
-    ##
-    ## Write data and settings to block generator
-    ##
-    ################################################################################
-    # Write setting for the block generator:
-    # bg.write_block_gen_settings(samplesPerPacket=g_frame_size_in, blocksPerSync=g_nof_blocks, gapSize=c_gap_size, memLowAddr=0, memHighAddr=c_bg_ram_size-1, BSNInit=10)
-
-    # Write the stimuli to the block generator and enable the block generator
-    # for i in range(c_bg_nof_streams):
-    #     bg.write_waveform_ram(data=bg_data[i], channelNr= i)
-
-    # Concatenate all channels
-    # t=2
-    # while len(bg_data) > 1:
-    #     concat_data = []
-    #     for i in range(len(bg_data)/2):
-    #         concat_data.append(dsp_test_bg.concatenate_two_lists(bg_data[2*i], bg_data[2*i+1], c_in_dat_w*t))
-    #     bg_data = concat_data
-    #     t=t*2
-    #
-    # bg_data = flatten(bg_data)
-
-    # Wait until the DDR3 model is initialized.
-    if tc.sim == True:
-        do_until_eq(ddr.read_init_done, ms_retry=1000, val=1, s_timeout=13600) # 110000
-
-    # Enable the blockgenerator
-    bg.write_enable()
-
-    if(c_force_late_sync == 1):
-        do_until_gt(io.simIO.getSimTime, ms_retry=1000, val=180000, s_timeout=13600) # 110000
-        bg.write_block_gen_settings(samplesPerPacket=g_frame_size_in, blocksPerSync=g_nof_blocks+1, gapSize=c_gap_size, memLowAddr=0, memHighAddr=c_bg_ram_size-1, BSNInit=10)
-    elif(c_force_early_sync == 1):
-        do_until_gt(io.simIO.getSimTime, ms_retry=1000, val=180000, s_timeout=13600) # 110000
-        bg.write_block_gen_settings(samplesPerPacket=g_frame_size_in, blocksPerSync=g_nof_blocks-1, gapSize=c_gap_size, memLowAddr=0, memHighAddr=c_bg_ram_size-1, BSNInit=10)
-
-    ###############################################################################
-    #
-    # Calculate reference data
-    #
-    ###############################################################################
-    # Subband Select pre-transpose
-    if c_ena_pre_transpose:
-        bg_data_ss =[]
-        for i in range(len(bg_data)/c_ss_pagesize): # len(ss_list)):
-            bg_data_ss.append(ss.subband_select(bg_data[i*c_ss_pagesize:(i+1)*c_ss_pagesize], ss_list))
-        bg_data = bg_data_ss
-        bg_data = flatten(bg_data)
-
-    ref_data_total = []
-    # Check how many data there is and how many pages will be used:
-    for t in range(len(bg_data)/c_pagesize):
-        bg_data_single_page = bg_data[t*c_pagesize:(t+1)*c_pagesize]
-        # Write to memory
-        mem_page = [0] * c_pagesize
-        for i in range(g_nof_blocks):
-            for j in range(g_wr_chunksize):
-                mem_page[i*c_blocksize + j] = bg_data_single_page[i*g_wr_chunksize + j]
-        # Read from memory
-        ref_data = [0] * g_nof_blocks * g_rd_nof_chunks * g_rd_chunksize
-        rd_block_offset = 0
-        rd_chunk_offset = 0
-        for i in range(g_nof_blocks*g_rd_nof_chunks):
-            rd_offset = rd_block_offset + rd_chunk_offset
-            for k in range(g_rd_chunksize):
-                ref_data[i*g_rd_chunksize + k] = mem_page[rd_offset + k]
-            rd_block_offset = rd_block_offset + c_rd_increment
-            if(rd_block_offset >= c_pagesize):
-                rd_chunk_offset = rd_chunk_offset + g_rd_chunksize
-                rd_block_offset = rd_block_offset - c_pagesize
-        ref_data_total.append(ref_data)
-    ref_data_total=flatten(ref_data_total)
-
-    # Split the data again in individual channels
-    ref_data_split = []
-    ref_data_split.append(ref_data_total)
-    t = c_bg_nof_streams
-    while len(ref_data_split) < c_bg_nof_streams:
-        ref_data_temp = []
-        for i in range(len(ref_data_split)):
-            [data_a, data_b] = dsp_test_bg.split_in_two_lists(ref_data_split[i], c_in_dat_w*t)
-            ref_data_temp.append(data_a)
-            ref_data_temp.append(data_b)
-        ref_data_split = ref_data_temp
-        t = t/2
-
-    # Split the data in real and imaginary
-    ref_data_re = []
-    ref_data_im = []
-    for i in range(c_bg_nof_streams):
-        [data_re, data_im] = dsp_test_bg.split_in_two_lists(ref_data_split[i], c_in_dat_w)
-        ref_data_re.append(data_re)
-        ref_data_im.append(data_im)
-
-    # Poll the databuffer to check if the response is there.
-    # Retry after 3 seconds so we don't issue too many MM reads in case of simulation.
-    do_until_ge(db_re.read_nof_words, ms_retry=3000, val=c_db_ram_size, s_timeout=3600)
-
-    ###############################################################################
-    #
-    # Read transposed data from data buffer
-    #
-    ###############################################################################
-    db_out_re = []
-    db_out_im = []
-    for i in range(c_bg_nof_streams):
-        db_out_re.append(flatten(db_re.read_data_buffer(streamNr=i, n=c_db_ram_size, radix='uns', width=c_in_dat_w, nofColumns=8)))
-        db_out_im.append(flatten(db_im.read_data_buffer(streamNr=i, n=c_db_ram_size, radix='uns', width=c_in_dat_w, nofColumns=8)))
-
-    ###############################################################################
-    #
-    # Verify output data
-    #
-    ###############################################################################
-    for i in range(c_bg_nof_streams):
-        for j in range(len(ref_data_re[0])):
-            if db_out_re[i][j] != ref_data_re[i][j]:
-                tc.append_log(2, 'Error in real output data. Expected data: %d Data read: %d Iteration nr: %d %d' % (ref_data_re[i][j], db_out_re[i][j], i, j))
-                tc.set_result('FAILED')
-            if db_out_im[i][j] != ref_data_im[i][j]:
-                tc.append_log(2, 'Error in imag output data. Expected data: %d Data read: %d Iteration nr: %d %d' % (ref_data_im[i][j], db_out_im[i][j], i, j))
-                tc.set_result('FAILED')
+    # - Enable the block generator
+    tc.append_log(3, '>>>')
+    tc.append_log(3, '>>> Enable the block generator')
+    tc.append_log(3, '>>>')
+    tc.append_log(3, '')
+    bg.write_enable()
+
+    bsn_out.read_bsn_monitor(0)
+    bsn_out.read_bsn_monitor(1)
 
     ###############################################################################
     # End
     tc.set_section_id('')
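The reference-data calculation removed by this commit models the DDR3 pre-transpose as a page that is written in blocks of g_wr_chunksize words and read back in g_rd_chunksize-word bursts, i.e. a corner turn. Below is a self-contained, toy-sized sketch of that addressing; the choices blocksize == wr_chunksize and rd_increment == blocksize are illustrative assumptions in place of the script's c_blocksize and c_rd_increment.

# Toy-sized model of the corner-turn addressing in the removed reference-data
# calculation. blocksize == wr_chunksize and rd_increment == blocksize are
# assumptions made for illustration only.
nof_blocks    = 4                              # blocks written per page
wr_chunksize  = 6                              # words written per block
rd_chunksize  = 2                              # words per read burst
rd_nof_chunks = wr_chunksize // rd_chunksize   # read bursts per block
blocksize     = wr_chunksize
pagesize      = nof_blocks * blocksize
rd_increment  = blocksize

# Write phase: block i lands at offset i*blocksize in the page.
bg_data  = list(range(nof_blocks * wr_chunksize))
mem_page = [0] * pagesize
for i in range(nof_blocks):
    for j in range(wr_chunksize):
        mem_page[i*blocksize + j] = bg_data[i*wr_chunksize + j]

# Read phase: step through the page in rd_chunksize bursts, advancing by
# rd_increment per burst and moving to the next chunk column after each wrap.
ref_data = [0] * (nof_blocks * rd_nof_chunks * rd_chunksize)
rd_block_offset = 0
rd_chunk_offset = 0
for i in range(nof_blocks * rd_nof_chunks):
    rd_offset = rd_block_offset + rd_chunk_offset
    for k in range(rd_chunksize):
        ref_data[i*rd_chunksize + k] = mem_page[rd_offset + k]
    rd_block_offset += rd_increment
    if rd_block_offset >= pagesize:
        rd_chunk_offset += rd_chunksize
        rd_block_offset -= pagesize

print(ref_data)   # [0, 1, 6, 7, 12, 13, 18, 19, 2, 3, ...]: the same chunk of every block, then the next chunk

In the full test this reference was then split back into streams and into real/imaginary parts and compared word-for-word against the db_re/db_im data buffers; that comparison is the part dropped in this commit.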