--------------------------------------------------------------------------------
--
-- Copyright (C) 2015
-- ASTRON (Netherlands Institute for Radio Astronomy) <http://www.astron.nl/>
-- JIVE (Joint Institute for VLBI in Europe) <http://www.jive.nl/>
-- P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
--
--------------------------------------------------------------------------------
-- Purpose: DDR4 memory access component for Arria10.
-- Description:
-- Remarks:
-- . The local_init_done goes high some time after power up. It could have been
--   AND-ed with ctlr_miso.waitrequest_n. However, the timing closure for
--   ctlr_miso.waitrequest_n can be critical, so it is better not to
--   combinatorially AND it with local_init_done. Instead a ctlr_miso.done field
--   was added and used to pass on local_init_done. In fact, for normal operation
--   it is sufficient to only wait for ctlr_miso.waitrequest_n; ctlr_miso.done is
--   then only used for DDR interface monitoring purposes.
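-- . For reference, a minimal user-side sketch of this handshake (hypothetical signals
--   wr_start, wr_pending and init_done_mon, declared outside this entity): it only waits
--   for ctlr_miso.waitrequest_n and treats ctlr_miso.done as a monitor input.
--
--     -- Issue a write and hold the request until the controller accepts it
--     -- (ctlr_mosi.address and ctlr_mosi.wrdata must be held stable as well).
--     p_user_wr : PROCESS(ctlr_gen_clk)
--     BEGIN
--       IF rising_edge(ctlr_gen_clk) THEN
--         IF wr_pending = '1' AND ctlr_miso.waitrequest_n = '1' THEN
--           wr_pending <= '0';                -- request accepted, free to issue the next one
--         ELSIF wr_start = '1' THEN
--           wr_pending <= '1';                -- new request
--         END IF;
--         init_done_mon <= ctlr_miso.done;    -- monitoring only, not used for flow control
--       END IF;
--     END PROCESS;
--
--     ctlr_mosi.wr <= wr_pending;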
-- Declare IP libraries to ensure default binding in simulation. The IP library clause is ignored by synthesis.
LIBRARY ip_arria10_e1sg_ddr4_4g_1600_altera_emif_180;
LIBRARY ip_arria10_e1sg_ddr4_8g_1600_altera_emif_180;
LIBRARY ip_arria10_e1sg_ddr4_4g_2000_altera_emif_180;
LIBRARY ip_arria10_e1sg_ddr4_8g_2400_altera_emif_180;
LIBRARY IEEE, technology_lib, common_lib;
USE IEEE.STD_LOGIC_1164.ALL;
USE common_lib.common_pkg.ALL;
USE common_lib.common_mem_pkg.ALL;
USE technology_lib.technology_pkg.ALL;
USE work.tech_ddr_pkg.ALL;
USE work.tech_ddr_component_pkg.ALL;
ENTITY tech_ddr_arria10_e1sg IS
GENERIC (
g_tech_ddr : t_c_tech_ddr
);
PORT (
-- PLL reference clock
ref_clk : IN STD_LOGIC;
ref_rst : IN STD_LOGIC;
-- Controller user interface
ctlr_gen_clk : OUT STD_LOGIC;
ctlr_gen_rst : OUT STD_LOGIC;
ctlr_mosi : IN t_mem_ctlr_mosi;
ctlr_miso : OUT t_mem_ctlr_miso;
-- PHY interface
phy_in : IN t_tech_ddr4_phy_in;
phy_io : INOUT t_tech_ddr4_phy_io;
phy_ou : OUT t_tech_ddr4_phy_ou
);
END tech_ddr_arria10_e1sg;
ARCHITECTURE str OF tech_ddr_arria10_e1sg IS
CONSTANT c_gigabytes : NATURAL := func_tech_ddr_module_size(g_tech_ddr);
CONSTANT c_ctlr_address_w : NATURAL := func_tech_ddr_ctlr_address_w(g_tech_ddr);
CONSTANT c_ctlr_data_w : NATURAL := 576;  -- was: func_tech_ddr_ctlr_data_w(g_tech_ddr)
SIGNAL i_ctlr_gen_clk : STD_LOGIC;
SIGNAL ref_rst_n : STD_LOGIC;
SIGNAL ctlr_gen_rst_n : STD_LOGIC := '0';
SIGNAL local_cal_success : STD_LOGIC;
SIGNAL local_cal_fail : STD_LOGIC;
BEGIN
ctlr_gen_clk <= i_ctlr_gen_clk;
ref_rst_n <= NOT ref_rst;
ctlr_gen_rst <= NOT ctlr_gen_rst_n;
gen_ip_arria10_e1sg_ddr4_4g_2000 : IF g_tech_ddr.name="DDR4" AND c_gigabytes=4 AND g_tech_ddr.mts=2000 GENERATE
phy_ou.cs_n(1) <= '1';
phy_ou.cke(1) <= '0';
phy_ou.odt(1) <= '0';
u_ip_arria10_e1sg_ddr4_4g_2000 : ip_arria10_e1sg_ddr4_4g_2000
PORT MAP (
amm_ready_0 => ctlr_miso.waitrequest_n, -- ctrl_amm_avalon_slave_0.waitrequest_n
amm_read_0 => ctlr_mosi.rd, -- .read
amm_write_0 => ctlr_mosi.wr, -- .write
amm_address_0 => ctlr_mosi.address(c_ctlr_address_w-1 DOWNTO 0), -- .address
amm_readdata_0 => ctlr_miso.rddata(c_ctlr_data_w-1 DOWNTO 0), -- .readdata
amm_writedata_0 => ctlr_mosi.wrdata(c_ctlr_data_w-1 DOWNTO 0), -- .writedata
amm_burstcount_0 => ctlr_mosi.burstsize(g_tech_ddr.maxburstsize_w-1 DOWNTO 0), -- .burstcount
amm_byteenable_0 => (OTHERS=>'1'), -- .byteenable
amm_readdatavalid_0 => ctlr_miso.rdval, -- .readdatavalid
emif_usr_clk => i_ctlr_gen_clk, -- emif_usr_clk_clock_source.clk
emif_usr_reset_n => ctlr_gen_rst_n, -- emif_usr_reset_reset_source.reset_n
global_reset_n => ref_rst_n, -- global_reset_reset_sink.reset_n
mem_ck => phy_ou.ck(g_tech_ddr.ck_w-1 DOWNTO 0), -- mem_conduit_end.mem_ck
mem_ck_n => phy_ou.ck_n(g_tech_ddr.ck_w-1 DOWNTO 0), -- .mem_ck_n
mem_a => phy_ou.a(g_tech_ddr.a_w-1 DOWNTO 0), -- .mem_a
sl(mem_act_n) => phy_ou.act_n, -- .mem_act_n
mem_ba => phy_ou.ba(g_tech_ddr.ba_w-1 DOWNTO 0), -- .mem_ba
mem_bg => phy_ou.bg(g_tech_ddr.bg_w-1 DOWNTO 0), -- .mem_bg
mem_cke => phy_ou.cke(g_tech_ddr.cke_w-1 DOWNTO 0), -- .mem_cke
mem_cs_n => phy_ou.cs_n(g_tech_ddr.cs_w-1 DOWNTO 0), -- .mem_cs_n
mem_odt => phy_ou.odt(g_tech_ddr.odt_w-1 DOWNTO 0), -- .mem_odt
sl(mem_reset_n) => phy_ou.reset_n, -- .mem_reset_n
sl(mem_par) => phy_ou.par, -- .mem_par
mem_alert_n => slv(phy_in.alert_n), -- .mem_alert_n
mem_dqs => phy_io.dqs(g_tech_ddr.dqs_w-1 DOWNTO 0), -- .mem_dqs
mem_dqs_n => phy_io.dqs_n(g_tech_ddr.dqs_w-1 DOWNTO 0), -- .mem_dqs_n
mem_dq => phy_io.dq(g_tech_ddr.dq_w-1 DOWNTO 0), -- .mem_dq
mem_dbi_n => phy_io.dbi_n(g_tech_ddr.dbi_w-1 DOWNTO 0), -- .mem_dbi_n
oct_rzqin => phy_in.oct_rzqin, -- oct_conduit_end.oct_rzqin
pll_ref_clk => ref_clk, -- pll_ref_clk_clock_sink.clk
local_cal_success => local_cal_success, -- status_conduit_end.local_cal_success
local_cal_fail => local_cal_fail -- .local_cal_fail
);
-- Signals in DDR3 that are not available with DDR4:
--
--avl_burstbegin => ctlr_mosi.burstbegin, -- .beginbursttransfer
-- beginbursttransfer is obsolete for new Avalon designs, because the slave can count valid data itself to know when a new burst starts
--
--local_init_done => ctlr_miso.done, -- status.local_init_done
-- Originally local_init_done (= ctlr_init_done) was mapped to ctlr_miso.done for the DDR3 IP. For the DDR4 IP,
-- local_cal_success AND NOT local_cal_fail seem to serve as local_init_done.
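-- Pass the calibration result on via ctlr_miso.done, registered in the i_ctlr_gen_clk domain.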
ctlr_miso.done <= local_cal_success AND NOT local_cal_fail WHEN rising_edge(i_ctlr_gen_clk);
ctlr_miso.cal_ok <= local_cal_success;
ctlr_miso.cal_fail <= local_cal_fail;
END GENERATE;
gen_ip_arria10_e1sg_ddr4_4g_1600 : IF g_tech_ddr.name="DDR4" AND c_gigabytes=4 AND g_tech_ddr.mts=1600 GENERATE
phy_ou.cs_n(1) <= '1';
phy_ou.cke(1) <= '0';
phy_ou.odt(1) <= '0';
u_ip_arria10_e1sg_ddr4_4g_1600 : ip_arria10_e1sg_ddr4_4g_1600
PORT MAP (
amm_ready_0 => ctlr_miso.waitrequest_n, -- ctrl_amm_avalon_slave_0.waitrequest_n
amm_read_0 => ctlr_mosi.rd, -- .read
amm_write_0 => ctlr_mosi.wr, -- .write
amm_address_0 => ctlr_mosi.address(c_ctlr_address_w-1 DOWNTO 0), -- .address
amm_readdata_0 => ctlr_miso.rddata(c_ctlr_data_w-1 DOWNTO 0), -- .readdata
amm_writedata_0 => ctlr_mosi.wrdata(c_ctlr_data_w-1 DOWNTO 0), -- .writedata
amm_burstcount_0 => ctlr_mosi.burstsize(g_tech_ddr.maxburstsize_w-1 DOWNTO 0), -- .burstcount
amm_byteenable_0 => (OTHERS=>'1'), -- .byteenable
amm_readdatavalid_0 => ctlr_miso.rdval, -- .readdatavalid
emif_usr_clk => i_ctlr_gen_clk, -- emif_usr_clk_clock_source.clk
emif_usr_reset_n => ctlr_gen_rst_n, -- emif_usr_reset_reset_source.reset_n
global_reset_n => ref_rst_n, -- global_reset_reset_sink.reset_n
mem_ck => phy_ou.ck(g_tech_ddr.ck_w-1 DOWNTO 0), -- mem_conduit_end.mem_ck
mem_ck_n => phy_ou.ck_n(g_tech_ddr.ck_w-1 DOWNTO 0), -- .mem_ck_n
mem_a => phy_ou.a(g_tech_ddr.a_w-1 DOWNTO 0), -- .mem_a
sl(mem_act_n) => phy_ou.act_n, -- .mem_act_n
mem_ba => phy_ou.ba(g_tech_ddr.ba_w-1 DOWNTO 0), -- .mem_ba
mem_bg => phy_ou.bg(g_tech_ddr.bg_w-1 DOWNTO 0), -- .mem_bg
mem_cke => phy_ou.cke(g_tech_ddr.cke_w-1 DOWNTO 0), -- .mem_cke
mem_cs_n => phy_ou.cs_n(g_tech_ddr.cs_w-1 DOWNTO 0), -- .mem_cs_n
mem_odt => phy_ou.odt(g_tech_ddr.odt_w-1 DOWNTO 0), -- .mem_odt
sl(mem_reset_n) => phy_ou.reset_n, -- .mem_reset_n
sl(mem_par) => phy_ou.par, -- .mem_par
mem_alert_n => slv(phy_in.alert_n), -- .mem_alert_n
mem_dqs => phy_io.dqs(g_tech_ddr.dqs_w-1 DOWNTO 0), -- .mem_dqs
mem_dqs_n => phy_io.dqs_n(g_tech_ddr.dqs_w-1 DOWNTO 0), -- .mem_dqs_n
mem_dq => phy_io.dq(g_tech_ddr.dq_w-1 DOWNTO 0), -- .mem_dq
mem_dbi_n => phy_io.dbi_n(g_tech_ddr.dbi_w-1 DOWNTO 0), -- .mem_dbi_n
oct_rzqin => phy_in.oct_rzqin, -- oct_conduit_end.oct_rzqin
pll_ref_clk => ref_clk, -- pll_ref_clk_clock_sink.clk
local_cal_success => local_cal_success, -- status_conduit_end.local_cal_success
local_cal_fail => local_cal_fail -- .local_cal_fail
);
-- Signals in DDR3 that are not available with DDR4:
--
--avl_burstbegin => ctlr_mosi.burstbegin, -- .beginbursttransfer
-- beginbursttransfer is obsolete for new Avalon designs, because the slave can count valid data itself to know when a new burst starts
--
--local_init_done => ctlr_miso.done, -- status.local_init_done
-- Originally local_init_done (= ctlr_init_done) was mapped to ctlr_miso.done for the DDR3 IP. For the DDR4 IP,
-- local_cal_success AND NOT local_cal_fail seem to serve as local_init_done.
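-- Pass the calibration result on via ctlr_miso.done, registered in the i_ctlr_gen_clk domain.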
ctlr_miso.done <= local_cal_success AND NOT local_cal_fail WHEN rising_edge(i_ctlr_gen_clk);
ctlr_miso.cal_ok <= local_cal_success;
ctlr_miso.cal_fail <= local_cal_fail;
END GENERATE;
gen_ip_arria10_e1sg_ddr4_8g_1600 : IF g_tech_ddr.name="DDR4" AND c_gigabytes=8 AND g_tech_ddr.mts=1600 GENERATE
u_ip_arria10_e1sg_ddr4_8g_1600 : ip_arria10_e1sg_ddr4_8g_1600
PORT MAP (
amm_ready_0 => ctlr_miso.waitrequest_n, -- ctrl_amm_avalon_slave_0.waitrequest_n
amm_read_0 => ctlr_mosi.rd, -- .read
amm_write_0 => ctlr_mosi.wr, -- .write
amm_address_0 => ctlr_mosi.address(c_ctlr_address_w-1 DOWNTO 0), -- .address
amm_readdata_0 => ctlr_miso.rddata(c_ctlr_data_w-1 DOWNTO 0), -- .readdata
amm_writedata_0 => ctlr_mosi.wrdata(c_ctlr_data_w-1 DOWNTO 0), -- .writedata
amm_burstcount_0 => ctlr_mosi.burstsize(g_tech_ddr.maxburstsize_w-1 DOWNTO 0), -- .burstcount
amm_byteenable_0 => (OTHERS=>'1'), -- .byteenable
amm_readdatavalid_0 => ctlr_miso.rdval, -- .readdatavalid
emif_usr_clk => i_ctlr_gen_clk, -- emif_usr_clk_clock_source.clk
emif_usr_reset_n => ctlr_gen_rst_n, -- emif_usr_reset_reset_source.reset_n
global_reset_n => ref_rst_n, -- global_reset_reset_sink.reset_n
mem_ck => phy_ou.ck(g_tech_ddr.ck_w-1 DOWNTO 0), -- mem_conduit_end.mem_ck
mem_ck_n => phy_ou.ck_n(g_tech_ddr.ck_w-1 DOWNTO 0), -- .mem_ck_n
mem_a => phy_ou.a(g_tech_ddr.a_w-1 DOWNTO 0), -- .mem_a
sl(mem_act_n) => phy_ou.act_n, -- .mem_act_n
mem_ba => phy_ou.ba(g_tech_ddr.ba_w-1 DOWNTO 0), -- .mem_ba
mem_bg => phy_ou.bg(g_tech_ddr.bg_w-1 DOWNTO 0), -- .mem_bg
mem_cke => phy_ou.cke(g_tech_ddr.cke_w-1 DOWNTO 0), -- .mem_cke
mem_cs_n => phy_ou.cs_n(g_tech_ddr.cs_w-1 DOWNTO 0), -- .mem_cs_n
mem_odt => phy_ou.odt(g_tech_ddr.odt_w-1 DOWNTO 0), -- .mem_odt
sl(mem_reset_n) => phy_ou.reset_n, -- .mem_reset_n
sl(mem_par) => phy_ou.par, -- .mem_par
mem_alert_n => slv(phy_in.alert_n), -- .mem_alert_n
mem_dqs => phy_io.dqs(g_tech_ddr.dqs_w-1 DOWNTO 0), -- .mem_dqs
mem_dqs_n => phy_io.dqs_n(g_tech_ddr.dqs_w-1 DOWNTO 0), -- .mem_dqs_n
mem_dq => phy_io.dq(g_tech_ddr.dq_w-1 DOWNTO 0), -- .mem_dq
mem_dbi_n => phy_io.dbi_n(g_tech_ddr.dbi_w-1 DOWNTO 0), -- .mem_dbi_n
oct_rzqin => phy_in.oct_rzqin, -- oct_conduit_end.oct_rzqin
pll_ref_clk => ref_clk, -- pll_ref_clk_clock_sink.clk
local_cal_success => local_cal_success, -- status_conduit_end.local_cal_success
local_cal_fail => local_cal_fail -- .local_cal_fail
);
-- Signals in DDR3 that are not available with DDR4:
--
--avl_burstbegin => ctlr_mosi.burstbegin, -- .beginbursttransfer
-- beginbursttransfer is obsolete for new Avalon designs, because the slave can count valid data itself to know when a new burst starts
--
--local_init_done => ctlr_miso.done, -- status.local_init_done
-- Originally local_init_done (= ctlr_init_done) was mapped to ctlr_miso.done for the DDR3 IP. For the DDR4 IP,
-- local_cal_success AND NOT local_cal_fail seem to serve as local_init_done.
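-- Pass the calibration result on via ctlr_miso.done, registered in the i_ctlr_gen_clk domain.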
ctlr_miso.done <= local_cal_success AND NOT local_cal_fail WHEN rising_edge(i_ctlr_gen_clk);
ctlr_miso.cal_ok <= local_cal_success;
ctlr_miso.cal_fail <= local_cal_fail;
END GENERATE;
END str;