--------------------------------------------------------------------------------
--
-- Copyright (C) 2014
-- ASTRON (Netherlands Institute for Radio Astronomy) <http://www.astron.nl/>
-- JIVE (Joint Institute for VLBI in Europe) <http://www.jive.nl/>
-- P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
--
--------------------------------------------------------------------------------
-- Purpose: DDR3 or DDR4 memory model for simulation.
-- Description:
--   Depending on g_tech_ddr.name either a DDR3 or a DDR4 memory model gets
--   instantiated:
--   . If DDR3 is selected then the mem4 ports remain unused.
--   . If DDR4 is selected then the mem3 ports remain unused.
--   The DDR3 memory model is not FPGA specific, but it was created by the
--   example design for ip_stratixiv_ddr3_uphy_4g_800_master. Therefore the
--   ip_stratixiv_ddr3_uphy_4g_800_master_lib is needed when the model is used.
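--
-- Example (illustrative sketch, not part of the original design): a testbench
-- could instantiate this model next to a DDR3 controller and connect it to the
-- controller PHY pins as shown below. The constant c_tech_ddr3 and the tb_*
-- signal names are assumptions for illustration; use the matching t_c_tech_ddr
-- constant from tech_ddr_pkg and the signals of the actual design under test.
--
--   SIGNAL tb_phy3_ou : t_tech_ddr3_phy_ou;  -- driven by the DDR3 controller
--   SIGNAL tb_phy3_io : t_tech_ddr3_phy_io;  -- bidirectional dq/dqs pins
--   SIGNAL tb_phy3_in : t_tech_ddr3_phy_in;  -- driven by this memory model
--
--   u_mem_model : ENTITY work.tech_ddr_memory_model
--   GENERIC MAP (
--     g_tech_ddr => c_tech_ddr3          -- assumed DDR3 configuration constant
--   )
--   PORT MAP (
--     mem3_in => tb_phy3_ou,
--     mem3_io => tb_phy3_io,
--     mem3_ou => tb_phy3_in              -- the mem4_* ports are left open for DDR3
--   );
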
-- Declare IP libraries to ensure default binding in simulation.
LIBRARY ip_stratixiv_ddr3_mem_model_lib;
LIBRARY ed_sim_altera_emif_mem_model_core_ddr4_141;
LIBRARY IEEE, common_lib;
USE IEEE.STD_LOGIC_1164.ALL;
USE common_lib.common_pkg.ALL;
USE work.tech_ddr_pkg.ALL;
USE work.tech_ddr_mem_model_component_pkg.ALL;

ENTITY tech_ddr_memory_model IS
  GENERIC (
    g_tech_ddr : t_c_tech_ddr
  );
  PORT (
    -- DDR3 PHY interface
    mem3_in : IN    t_tech_ddr3_phy_ou := c_tech_ddr3_phy_ou_x;
    mem3_io : INOUT t_tech_ddr3_phy_io;
    mem3_ou : OUT   t_tech_ddr3_phy_in;

    -- DDR4 PHY interface
    mem4_in : IN    t_tech_ddr4_phy_ou := c_tech_ddr4_phy_ou_x;
    mem4_io : INOUT t_tech_ddr4_phy_io;
    mem4_ou : OUT   t_tech_ddr4_phy_in
  );
END tech_ddr_memory_model;

ARCHITECTURE str OF tech_ddr_memory_model IS

  CONSTANT c_gigabytes : NATURAL := func_tech_ddr_module_size(g_tech_ddr);

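  -- Copies of the generic and the derived module size as signals, e.g. for
  -- viewing their values in the simulation wave window.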
  SIGNAL dbg_g_tech_ddr  : t_c_tech_ddr := g_tech_ddr;
  SIGNAL dbg_c_gigabytes : NATURAL      := c_gigabytes;

BEGIN

  gen_ip_stratixiv_ddr_memory_model : IF g_tech_ddr.name="DDR3" GENERATE
    u_ip_stratixiv_ddr_memory_model : alt_mem_if_ddr3_mem_model_top_ddr3_mem_if_dm_pins_en_mem_if_dqsn_en
    GENERIC MAP (
      MEM_IF_CLK_EN_WIDTH          => g_tech_ddr.cke_w,
      MEM_IF_CK_WIDTH              => g_tech_ddr.ck_w,
      MEM_IF_BANKADDR_WIDTH        => g_tech_ddr.ba_w,
      MEM_IF_ADDR_WIDTH            => g_tech_ddr.a_w,
      MEM_IF_ROW_ADDR_WIDTH        => g_tech_ddr.a_row_w,
      MEM_IF_COL_ADDR_WIDTH        => g_tech_ddr.a_col_w,
      MEM_IF_CS_WIDTH              => g_tech_ddr.cs_w,
      MEM_IF_CONTROL_WIDTH         => 1,  -- cas_n, ras_n, we_n
      MEM_IF_ODT_WIDTH             => g_tech_ddr.odt_w,
      DEVICE_DEPTH                 => 1,
      DEVICE_WIDTH                 => 1,
      MEM_IF_CS_PER_RANK           => 1,
      MEM_IF_DQS_WIDTH             => g_tech_ddr.dqs_w,
      MEM_IF_DQ_WIDTH              => g_tech_ddr.dq_w,
      MEM_MIRROR_ADDRESSING_DEC    => 0,
      MEM_TRTP                     => 8,
      MEM_TRCD                     => 6,
      MEM_DQS_TO_CLK_CAPTURE_DELAY => 100,
      MEM_CLK_TO_DQS_CAPTURE_DELAY => 100000,
      MEM_REGDIMM_ENABLED          => 0,
      MEM_INIT_EN                  => 0,
      MEM_INIT_FILE                => "",
      MEM_GUARANTEED_WRITE_INIT    => 0,
      DAT_DATA_WIDTH               => 32,
      MEM_VERBOSE                  => 1
    )
    PORT MAP (
      mem_a       => mem3_in.a(g_tech_ddr.a_w-1 DOWNTO 0),        -- MEM_IF_ADDR_WIDTH
      mem_ba      => mem3_in.ba(g_tech_ddr.ba_w-1 DOWNTO 0),      -- MEM_IF_BANKADDR_WIDTH
      mem_ck      => mem3_in.ck(g_tech_ddr.ck_w-1 DOWNTO 0),      -- MEM_IF_CK_WIDTH
      mem_ck_n    => mem3_in.ck_n(g_tech_ddr.ck_w-1 DOWNTO 0),    -- MEM_IF_CK_WIDTH
      mem_cke     => mem3_in.cke(g_tech_ddr.cke_w-1 DOWNTO 0),    -- MEM_IF_CLK_EN_WIDTH
      mem_cs_n    => mem3_in.cs_n(g_tech_ddr.cs_w-1 DOWNTO 0),    -- MEM_IF_CS_WIDTH
      mem_ras_n   => slv(mem3_in.ras_n),                          -- MEM_IF_CONTROL_WIDTH
      mem_cas_n   => slv(mem3_in.cas_n),                          -- MEM_IF_CONTROL_WIDTH
      mem_we_n    => slv(mem3_in.we_n),                           -- MEM_IF_CONTROL_WIDTH
      mem_reset_n => mem3_in.reset_n,
      mem_dm      => mem3_in.dm(g_tech_ddr.dqs_w-1 DOWNTO 0),     -- MEM_IF_DQS_WIDTH
      mem_dq      => mem3_io.dq(g_tech_ddr.dq_w-1 DOWNTO 0),      -- MEM_IF_DQ_WIDTH
      mem_dqs     => mem3_io.dqs(g_tech_ddr.dqs_w-1 DOWNTO 0),    -- MEM_IF_DQS_WIDTH
      mem_dqs_n   => mem3_io.dqs_n(g_tech_ddr.dqs_w-1 DOWNTO 0),  -- MEM_IF_DQS_WIDTH
      mem_odt     => mem3_in.odt(g_tech_ddr.odt_w-1 DOWNTO 0)     -- MEM_IF_ODT_WIDTH
    );
  END GENERATE;
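
  -- The DDR4 model below is only generated for a 4 GByte module (c_gigabytes=4),
  -- presumably because the ed_sim_* simulation model was generated by the EMIF
  -- example design for that specific configuration.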
  gen_ip_arria10_ddr_memory_model : IF g_tech_ddr.name="DDR4" AND c_gigabytes=4 GENERATE
    u_ip_arria10_ddr_memory_model : ed_sim_altera_emif_mem_model_141_z3tvrmq
    PORT MAP (
      mem_ck          => mem4_in.ck(g_tech_ddr.ck_w-1 DOWNTO 0),      -- mem_conduit_end.mem_ck
      mem_ck_n        => mem4_in.ck_n(g_tech_ddr.ck_w-1 DOWNTO 0),    --                .mem_ck_n
      mem_a           => mem4_in.a(g_tech_ddr.a_w-1 DOWNTO 0),        --                .mem_a
      mem_act_n       => slv(mem4_in.act_n),                          --                .mem_act_n
      mem_ba          => mem4_in.ba(g_tech_ddr.ba_w-1 DOWNTO 0),      --                .mem_ba
      mem_bg          => mem4_in.bg(g_tech_ddr.bg_w-1 DOWNTO 0),      --                .mem_bg
      mem_cke         => mem4_in.cke(g_tech_ddr.cke_w-1 DOWNTO 0),    --                .mem_cke
      mem_cs_n        => mem4_in.cs_n(g_tech_ddr.cs_w-1 DOWNTO 0),    --                .mem_cs_n
      mem_odt         => mem4_in.odt(g_tech_ddr.odt_w-1 DOWNTO 0),    --                .mem_odt
      mem_reset_n     => slv(mem4_in.reset_n),                        --                .mem_reset_n
      mem_par         => slv(mem4_in.par),                            --                .mem_par
      sl(mem_alert_n) => mem4_ou.alert_n,                             --                .mem_alert_n
      mem_dqs         => mem4_io.dqs(g_tech_ddr.dqs_w-1 DOWNTO 0),    --                .mem_dqs
      mem_dqs_n       => mem4_io.dqs_n(g_tech_ddr.dqs_w-1 DOWNTO 0),  --                .mem_dqs_n
      mem_dq          => mem4_io.dq(g_tech_ddr.dq_w-1 DOWNTO 0),      --                .mem_dq
      mem_dbi_n       => mem4_io.dbi_n(g_tech_ddr.dbi_w-1 DOWNTO 0)   --                .mem_dbi_n
    );
  END GENERATE;

END str;