Massive update containing several months of changes from the now-defunct private chip repo.
* Adds support for a L2 cache with directory bits for tracking L1 coherence (DefaultL2Config), and new metadata-based coherence API. * Additional tests. * New virtual memory implementation, privileged architecture (1.7), custom CSRs, FDivSqrt unit * Updated TileLink protocol, NASTI protocol SHIMs. * Lays groundwork for multiple top-level memory channels, superscalar fetch. * Bump all submodules.
This commit is contained in:
parent
12d8d8c5e3
commit
d3ccec1044
255
Makefrag
255
Makefrag
@ -11,12 +11,13 @@ CXX := g++
|
||||
CXXFLAGS := -O1
|
||||
|
||||
SBT := java -Xmx2048M -Xss8M -XX:MaxPermSize=128M -jar sbt-launch.jar
|
||||
SHELL := /bin/bash
|
||||
|
||||
src_path = src/main/scala
|
||||
chisel_srcs = $(base_dir)/$(src_path)/*.scala $(base_dir)/rocket/$(src_path)/*.scala $(base_dir)/uncore/$(src_path)/*.scala $(SRC_EXTENSION)
|
||||
|
||||
disasm := 2>
|
||||
which_disasm := $(shell which riscv-dis)
|
||||
which_disasm := $(shell which spike-dasm)
|
||||
ifneq ($(which_disasm),)
|
||||
disasm := 3>&1 1>&2 2>&3 | $(which_disasm) $(DISASM_EXTENSION) >
|
||||
endif
|
||||
@ -28,7 +29,7 @@ timeout_cycles = 100000000
|
||||
#--------------------------------------------------------------------
|
||||
|
||||
$(generated_dir)/$(MODEL).$(CONFIG).v: $(chisel_srcs)
|
||||
cd $(base_dir) && mkdir -p $(generated_dir) && $(SBT) "project rocketchip" "elaborate $(MODEL) --backend $(BACKEND) --targetDir $(generated_dir) --noInlineMem --configInstance rocketchip.$(CONFIG) --configDump"
|
||||
cd $(base_dir) && mkdir -p $(generated_dir) && $(SBT) "project rocketchip" "elaborate $(MODEL) --backend $(BACKEND) --targetDir $(generated_dir) --configDump --noInlineMem --configInstance rocketchip.$(CONFIG)"
|
||||
cd $(generated_dir) && \
|
||||
if [ -a $(MODEL).$(CONFIG).conf ]; then \
|
||||
$(mem_gen) $(generated_dir)/$(MODEL).$(CONFIG).conf >> $(generated_dir)/$(MODEL).$(CONFIG).v; \
|
||||
@ -151,11 +152,123 @@ asm_p_tests = \
|
||||
rv64uf-p-fadd \
|
||||
rv64uf-p-fmin \
|
||||
rv64uf-p-fmadd \
|
||||
rv64uf-p-fdiv \
|
||||
rv64uf-p-structural \
|
||||
rv64si-p-coreid \
|
||||
rv64si-p-csr \
|
||||
rv64si-pm-ipi \
|
||||
rv64si-p-wfi \
|
||||
rv64si-p-illegal \
|
||||
rv64si-p-ma_fetch \
|
||||
rv64si-p-ma_addr \
|
||||
rv64si-p-scall \
|
||||
rv64si-p-sbreak \
|
||||
rv64si-p-timer \
|
||||
rv64ui-pm-lrsc \
|
||||
rv64mi-p-csr \
|
||||
rv64mi-p-mcsr \
|
||||
rv64mi-p-wfi \
|
||||
rv64mi-p-dirty \
|
||||
rv64mi-p-illegal \
|
||||
rv64mi-p-ma_addr \
|
||||
rv64mi-p-ma_fetch \
|
||||
rv64mi-pm-ipi \
|
||||
rv64mi-p-sbreak \
|
||||
rv64mi-p-scall \
|
||||
rv64mi-p-timer \
|
||||
|
||||
asm_pt_tests = \
|
||||
rv64ui-pt-add \
|
||||
rv64ui-pt-addi \
|
||||
rv64ui-pt-amoadd_d \
|
||||
rv64ui-pt-amoadd_w \
|
||||
rv64ui-pt-amoand_d \
|
||||
rv64ui-pt-amoand_w \
|
||||
rv64ui-pt-amoor_d \
|
||||
rv64ui-pt-amoor_w \
|
||||
rv64ui-pt-amoxor_d \
|
||||
rv64ui-pt-amoxor_w \
|
||||
rv64ui-pt-amoswap_d \
|
||||
rv64ui-pt-amoswap_w \
|
||||
rv64ui-pt-amomax_d \
|
||||
rv64ui-pt-amomax_w \
|
||||
rv64ui-pt-amomaxu_d \
|
||||
rv64ui-pt-amomaxu_w \
|
||||
rv64ui-pt-amomin_d \
|
||||
rv64ui-pt-amomin_w \
|
||||
rv64ui-pt-amominu_d \
|
||||
rv64ui-pt-amominu_w \
|
||||
rv64ui-pt-auipc \
|
||||
rv64ui-pt-fence_i \
|
||||
rv64ui-pt-sb \
|
||||
rv64ui-pt-sd \
|
||||
rv64ui-pt-sh \
|
||||
rv64ui-pt-sw \
|
||||
rv64ui-pt-addiw \
|
||||
rv64ui-pt-addw \
|
||||
rv64ui-pt-and \
|
||||
rv64ui-pt-andi \
|
||||
rv64ui-pt-beq \
|
||||
rv64ui-pt-bge \
|
||||
rv64ui-pt-bgeu \
|
||||
rv64ui-pt-blt \
|
||||
rv64ui-pt-bltu \
|
||||
rv64ui-pt-bne \
|
||||
rv64ui-pt-div \
|
||||
rv64ui-pt-divu \
|
||||
rv64ui-pt-divuw \
|
||||
rv64ui-pt-divw \
|
||||
rv64ui-pt-j \
|
||||
rv64ui-pt-jal \
|
||||
rv64ui-pt-jalr \
|
||||
rv64ui-pt-lb \
|
||||
rv64ui-pt-lbu \
|
||||
rv64ui-pt-ld \
|
||||
rv64ui-pt-lh \
|
||||
rv64ui-pt-lhu \
|
||||
rv64ui-pt-lui \
|
||||
rv64ui-pt-lw \
|
||||
rv64ui-pt-lwu \
|
||||
rv64ui-pt-mul \
|
||||
rv64ui-pt-mulh \
|
||||
rv64ui-pt-mulhsu \
|
||||
rv64ui-pt-mulhu \
|
||||
rv64ui-pt-mulw \
|
||||
rv64ui-pt-or \
|
||||
rv64ui-pt-ori \
|
||||
rv64ui-pt-rem \
|
||||
rv64ui-pt-remu \
|
||||
rv64ui-pt-remuw \
|
||||
rv64ui-pt-remw \
|
||||
rv64ui-pt-simple \
|
||||
rv64ui-pt-sll \
|
||||
rv64ui-pt-slli \
|
||||
rv64ui-pt-slliw \
|
||||
rv64ui-pt-sllw \
|
||||
rv64ui-pt-slt \
|
||||
rv64ui-pt-slti \
|
||||
rv64ui-pt-sltiu \
|
||||
rv64ui-pt-sltu \
|
||||
rv64ui-pt-sra \
|
||||
rv64ui-pt-srai \
|
||||
rv64ui-pt-sraiw \
|
||||
rv64ui-pt-sraw \
|
||||
rv64ui-pt-srliw \
|
||||
rv64ui-pt-srlw \
|
||||
rv64ui-pt-sub \
|
||||
rv64ui-pt-subw \
|
||||
rv64ui-pt-xor \
|
||||
rv64ui-pt-xori \
|
||||
rv64uf-pt-ldst \
|
||||
rv64uf-pt-move \
|
||||
rv64uf-pt-fsgnj \
|
||||
rv64uf-pt-fcmp \
|
||||
rv64uf-pt-fcvt \
|
||||
rv64uf-pt-fcvt_w \
|
||||
rv64uf-pt-fclass \
|
||||
rv64uf-pt-fadd \
|
||||
rv64uf-pt-fmin \
|
||||
rv64uf-pt-fmadd \
|
||||
rv64uf-pt-fdiv \
|
||||
rv64uf-pt-structural \
|
||||
|
||||
asm_v_tests = \
|
||||
rv64ui-v-add \
|
||||
@ -248,6 +361,7 @@ asm_v_tests = \
|
||||
rv64uf-v-fadd \
|
||||
rv64uf-v-fmin \
|
||||
rv64uf-v-fmadd \
|
||||
rv64uf-v-fdiv \
|
||||
rv64uf-v-structural \
|
||||
|
||||
#--------------------------------------------------------------------
|
||||
@ -264,11 +378,11 @@ bmarks = \
|
||||
mm.riscv \
|
||||
dhrystone.riscv \
|
||||
spmv.riscv \
|
||||
mt-vvadd.riscv \
|
||||
mt-matmul.riscv \
|
||||
#vec-vvadd.riscv \
|
||||
#vec-cmplxmult.riscv \
|
||||
#vec-matmul.riscv \
|
||||
#mt-vvadd.riscv \
|
||||
#mt-matmul.riscv \
|
||||
|
||||
#--------------------------------------------------------------------
|
||||
# Multi-threaded Benchmark Tests
|
||||
@ -276,72 +390,55 @@ bmarks = \
|
||||
|
||||
test_mt_bmark_dir = $(base_dir)/riscv-tools/riscv-tests/mt
|
||||
mt_bmarks = \
|
||||
ab_matmul.riscv \
|
||||
ab_vvadd.riscv \
|
||||
ad_matmul.riscv \
|
||||
ad_vvadd.riscv \
|
||||
ae_matmul.riscv \
|
||||
ae_vvadd.riscv \
|
||||
af_matmul.riscv \
|
||||
af_vvadd.riscv \
|
||||
ag_matmul.riscv \
|
||||
ag_vvadd.riscv \
|
||||
ai_matmul.riscv \
|
||||
ai_vvadd.riscv \
|
||||
aj_vvadd.riscv \
|
||||
ak_matmul.riscv \
|
||||
ak_vvadd.riscv \
|
||||
al_matmul.riscv \
|
||||
al_vvadd.riscv \
|
||||
am_matmul.riscv \
|
||||
am_vvadd.riscv \
|
||||
an_matmul.riscv \
|
||||
ap_matmul.riscv \
|
||||
ap_vvadd.riscv \
|
||||
aq_matmul.riscv \
|
||||
aq_vvadd.riscv \
|
||||
ar_matmul.riscv \
|
||||
ar_vvadd.riscv \
|
||||
as_matmul.riscv \
|
||||
as_vvadd.riscv \
|
||||
at_matmul.riscv \
|
||||
at_vvadd.riscv \
|
||||
av_matmul.riscv \
|
||||
av_vvadd.riscv \
|
||||
ay_matmul.riscv \
|
||||
ay_vvadd.riscv \
|
||||
az_matmul.riscv \
|
||||
az_vvadd.riscv \
|
||||
ba_matmul.riscv \
|
||||
ba_vvadd.riscv \
|
||||
bb_matmul.riscv \
|
||||
bb_vvadd.riscv \
|
||||
bc_matmul.riscv \
|
||||
bc_vvadd.riscv \
|
||||
be_matmul.riscv \
|
||||
be_vvadd.riscv \
|
||||
bf_matmul.riscv \
|
||||
bf_vvadd.riscv \
|
||||
bh_matmul.riscv \
|
||||
bh_vvadd.riscv \
|
||||
bj_matmul.riscv \
|
||||
bj_vvadd.riscv \
|
||||
bk_matmul.riscv \
|
||||
bk_vvadd.riscv \
|
||||
bm_matmul.riscv \
|
||||
bm_vvadd.riscv \
|
||||
bn_matmul.riscv \
|
||||
bn_vvadd.riscv \
|
||||
bo_matmul.riscv \
|
||||
bo_vvadd.riscv \
|
||||
bp_matmul.riscv \
|
||||
bp_vvadd.riscv \
|
||||
br_matmul.riscv \
|
||||
br_vvadd.riscv \
|
||||
bs_matmul.riscv \
|
||||
bs_vvadd.riscv \
|
||||
bt_matmul.riscv \
|
||||
bt_vvadd.riscv \
|
||||
vvadd0.riscv \
|
||||
vvadd1.riscv \
|
||||
vvadd2.riscv \
|
||||
vvadd3.riscv \
|
||||
vvadd4.riscv \
|
||||
ad_matmul.riscv \
|
||||
ae_matmul.riscv \
|
||||
af_matmul.riscv \
|
||||
ag_matmul.riscv \
|
||||
ai_matmul.riscv \
|
||||
ak_matmul.riscv \
|
||||
al_matmul.riscv \
|
||||
am_matmul.riscv \
|
||||
an_matmul.riscv \
|
||||
ap_matmul.riscv \
|
||||
aq_matmul.riscv \
|
||||
ar_matmul.riscv \
|
||||
at_matmul.riscv \
|
||||
av_matmul.riscv \
|
||||
ay_matmul.riscv \
|
||||
az_matmul.riscv \
|
||||
bb_matmul.riscv \
|
||||
bc_matmul.riscv \
|
||||
bf_matmul.riscv \
|
||||
bh_matmul.riscv \
|
||||
bj_matmul.riscv \
|
||||
bk_matmul.riscv \
|
||||
bm_matmul.riscv \
|
||||
bo_matmul.riscv \
|
||||
br_matmul.riscv \
|
||||
bs_matmul.riscv \
|
||||
ce_matmul.riscv \
|
||||
cf_matmul.riscv \
|
||||
cg_matmul.riscv \
|
||||
ci_matmul.riscv \
|
||||
ck_matmul.riscv \
|
||||
cl_matmul.riscv \
|
||||
cm_matmul.riscv \
|
||||
cs_matmul.riscv \
|
||||
cv_matmul.riscv \
|
||||
cy_matmul.riscv \
|
||||
dc_matmul.riscv \
|
||||
df_matmul.riscv \
|
||||
dm_matmul.riscv \
|
||||
do_matmul.riscv \
|
||||
dr_matmul.riscv \
|
||||
ds_matmul.riscv \
|
||||
du_matmul.riscv \
|
||||
dv_matmul.riscv \
|
||||
|
||||
#--------------------------------------------------------------------
|
||||
# Build Tests
|
||||
@ -353,7 +450,7 @@ mt_bmarks = \
|
||||
%.riscv.hex: %
|
||||
$(MAKE) -C $(dir $@) $(notdir $@)
|
||||
|
||||
$(addprefix $(output_dir)/, $(addsuffix .hex, $(asm_p_tests) $(asm_v_tests))): $(output_dir)/%.hex: $(tests_isa_dir)/%.hex
|
||||
$(addprefix $(output_dir)/, $(addsuffix .hex, $(asm_p_tests) $(asm_pt_tests) $(asm_v_tests))): $(output_dir)/%.hex: $(tests_isa_dir)/%.hex
|
||||
mkdir -p $(output_dir)
|
||||
ln -fs $< $@
|
||||
|
||||
@ -364,3 +461,15 @@ $(addprefix $(output_dir)/, $(addsuffix .hex, $(bmarks))): $(output_dir)/%.hex:
|
||||
$(addprefix $(output_dir)/, $(addsuffix .hex, $(mt_bmarks))): $(output_dir)/%.hex: $(test_mt_bmark_dir)/%.hex
|
||||
mkdir -p $(output_dir)
|
||||
ln -fs $< $@
|
||||
|
||||
$(addprefix $(output_dir)/, $(asm_p_tests) $(asm_pt_tests) $(asm_v_tests)): $(output_dir)/%: $(tests_isa_dir)/%
|
||||
mkdir -p $(output_dir)
|
||||
ln -fs $< $@
|
||||
|
||||
$(addprefix $(output_dir)/, $(bmarks)): $(output_dir)/%: $(tests_bmark_dir)/%
|
||||
mkdir -p $(output_dir)
|
||||
ln -fs $< $@
|
||||
|
||||
$(addprefix $(output_dir)/, $(mt_bmarks)): $(output_dir)/%: $(test_mt_bmark_dir)/%
|
||||
mkdir -p $(output_dir)
|
||||
ln -fs $< $@
|
||||
|
2
chisel
2
chisel
@ -1 +1 @@
|
||||
Subproject commit 208ffc62bcefda287152873f3d496cb0091b7a25
|
||||
Subproject commit cc51775dda66db587b01fee4d29d22c49121fdb5
|
@ -61,7 +61,7 @@ int main(int argc, char** argv)
|
||||
// The chisel generated code
|
||||
Top_t tile;
|
||||
srand(random_seed);
|
||||
tile.init(random_seed != 0);
|
||||
tile.init(random_seed);
|
||||
|
||||
// Instantiate and initialize main memory
|
||||
mm_t* mm = dramsim2 ? (mm_t*)(new mm_dramsim2_t) : (mm_t*)(new mm_magic_t);
|
||||
@ -76,17 +76,17 @@ int main(int argc, char** argv)
|
||||
|
||||
signal(SIGTERM, handle_sigterm);
|
||||
|
||||
// reset for a few cycles to support pipelined reset
|
||||
// reset for one host_clk cycle to handle pipelined reset
|
||||
tile.Top__io_host_in_valid = LIT<1>(0);
|
||||
tile.Top__io_host_out_ready = LIT<1>(0);
|
||||
tile.Top__io_mem_backup_en = LIT<1>(0);
|
||||
for (int i = 0; i < 10; i++)
|
||||
tile.Top__io_mem_backup_ctrl_en = LIT<1>(0);
|
||||
for (int i = 0; i < 3; i += tile.Top__io_host_clk_edge.to_bool())
|
||||
{
|
||||
tile.clock_lo(LIT<1>(1));
|
||||
tile.clock_hi(LIT<1>(1));
|
||||
}
|
||||
|
||||
while (!htif->done() && trace_count < max_cycles)
|
||||
while (!htif->done() && trace_count < max_cycles && ret == 0)
|
||||
{
|
||||
tile.Top__io_mem_req_cmd_ready = LIT<1>(mm->req_cmd_ready());
|
||||
tile.Top__io_mem_req_data_ready = LIT<1>(mm->req_data_ready());
|
||||
@ -94,7 +94,13 @@ int main(int argc, char** argv)
|
||||
tile.Top__io_mem_resp_bits_tag = LIT<64>(mm->resp_tag());
|
||||
memcpy(tile.Top__io_mem_resp_bits_data.values, mm->resp_data(), tile.Top__io_mem_resp_bits_data.width()/8);
|
||||
|
||||
tile.clock_lo(LIT<1>(0));
|
||||
try {
|
||||
tile.clock_lo(LIT<1>(0));
|
||||
} catch (std::runtime_error& e) {
|
||||
max_cycles = trace_count; // terminate cleanly after this cycle
|
||||
ret = 1;
|
||||
std::cerr << e.what() << std::endl;
|
||||
}
|
||||
|
||||
mm->tick(
|
||||
tile.Top__io_mem_req_cmd_valid.lo_word(),
|
||||
@ -142,7 +148,7 @@ int main(int argc, char** argv)
|
||||
}
|
||||
else if (trace_count == max_cycles)
|
||||
{
|
||||
fprintf(stderr, "*** FAILED *** (timeout) after %lld cycles\n", (long long)trace_count);
|
||||
fprintf(stderr, "*** FAILED *** (timeout, seed %d) after %lld cycles\n", random_seed, (long long)trace_count);
|
||||
ret = 2;
|
||||
}
|
||||
|
||||
|
@ -21,7 +21,7 @@ LDFLAGS := $(LDFLAGS) -L$(RISCV)/lib -Wl,-rpath,$(RISCV)/lib -L. -ldramsim -lfes
|
||||
OBJS := $(addsuffix .o,$(CXXSRCS) $(MODEL).$(CONFIG))
|
||||
DEBUG_OBJS := $(addsuffix .debug.o,$(CXXSRCS) $(MODEL).$(CONFIG))
|
||||
|
||||
CHISEL_ARGS := $(MODEL) --noIoDebug --backend c --configInstance $(PROJECT).$(CONFIG) --targetDir emulator/generated-src
|
||||
CHISEL_ARGS := $(MODEL) --W0W --noIoDebug --backend c --configInstance $(PROJECT).$(CONFIG) --compileInitializationUnoptimized --targetDir emulator/generated-src
|
||||
CHISEL_ARGS_DEBUG := $(CHISEL_ARGS)-debug --debug --vcd --ioDebug
|
||||
|
||||
generated-src/$(MODEL).$(CONFIG).h: $(chisel_srcs)
|
||||
@ -38,10 +38,10 @@ $(MODEL).$(CONFIG).debug.o: %.debug.o: generated-src-debug/%.h
|
||||
$(MAKE) -j $(patsubst %.cpp,%.o,$(shell ls generated-src-debug/$(MODEL).$(CONFIG)-*.cpp))
|
||||
$(LD) -r $(patsubst %.cpp,%.o,$(shell ls generated-src-debug/$(MODEL).$(CONFIG)-*.cpp)) -o $@
|
||||
|
||||
$(wildcard generated-src/*.o): %.o: %.cpp generated-src/$(MODEL).$(CONFIG).h
|
||||
generated-src/%.o: generated-src/%.cpp generated-src/$(MODEL).$(CONFIG).h
|
||||
$(CXX) $(CXXFLAGS) -Igenerated-src -c -o $@ $<
|
||||
|
||||
$(wildcard generated-src-debug/*.o): %.o: %.cpp generated-src-debug/$(MODEL).$(CONFIG).h
|
||||
generated-src-debug/%.o: generated-src-debug/%.cpp generated-src-debug/$(MODEL).$(CONFIG).h
|
||||
$(CXX) $(CXXFLAGS) -Igenerated-src-debug -c -o $@ $<
|
||||
|
||||
$(addsuffix .o,$(CXXSRCS)): %.o: $(base_dir)/csrc/%.cc $(base_dir)/csrc/*.h generated-src/$(MODEL).$(CONFIG).h
|
||||
@ -80,14 +80,14 @@ $(output_dir)/%.vpd: $(output_dir)/%.hex emulator-$(CONFIG)-debug
|
||||
vcd2vpd $@.vcd $@ > /dev/null &
|
||||
./emulator-$(CONFIG)-debug +dramsim +max-cycles=$(timeout_cycles) +verbose -v$@.vcd +loadmem=$< none $(disasm) $(patsubst %.vpd,%.out,$@) && [ $$PIPESTATUS -eq 0 ]
|
||||
|
||||
run-asm-tests: $(addprefix $(output_dir)/, $(addsuffix .out, $(asm_p_tests) $(asm_v_tests)))
|
||||
run-asm-tests: $(addprefix $(output_dir)/, $(addsuffix .out, $(asm_p_tests) $(asm_pt_tests) $(asm_v_tests)))
|
||||
@echo; perl -ne 'print " [$$1] $$ARGV \t$$2\n" if /\*{3}(.{8})\*{3}(.*)/' $^; echo;
|
||||
run-bmarks-test: $(addprefix $(output_dir)/, $(addsuffix .out, $(bmarks)))
|
||||
@echo; perl -ne 'print " [$$1] $$ARGV \t$$2\n" if /\*{3}(.{8})\*{3}(.*)/' $^; echo;
|
||||
run-mt-tests: $(addprefix $(output_dir)/, $(addsuffix .out, $(mt_bmarks)))
|
||||
@echo; perl -ne 'print " [$$1] $$ARGV \t$$2\n" if /\*{3}(.{8})\*{3}(.*)/' $^; echo;
|
||||
|
||||
run-asm-tests-debug: $(addprefix $(output_dir)/, $(addsuffix .vpd, $(asm_p_tests) $(asm_v_tests)))
|
||||
run-asm-tests-debug: $(addprefix $(output_dir)/, $(addsuffix .vpd, $(asm_p_tests) $(asm_pt_tests) $(asm_v_tests)))
|
||||
@echo; perl -ne 'print " [$$1] $$ARGV \t$$2\n" if /\*{3}(.{8})\*{3}(.*)/' $(patsubst %.vpd,%.out,$^); echo;
|
||||
run-bmarks-test-debug: $(addprefix $(output_dir)/, $(addsuffix .vpd, $(bmarks)))
|
||||
@echo; perl -ne 'print " [$$1] $$ARGV \t$$2\n" if /\*{3}(.{8})\*{3}(.*)/' $(patsubst %.vpd,%.out,$^); echo;
|
||||
|
@ -56,6 +56,7 @@ simv = $(sim_dir)/simv-$(CONFIG)
|
||||
$(simv) : $(sim_vsrcs) $(sim_csrcs) $(sim_dir)/libdramsim.a
|
||||
cd $(sim_dir) && \
|
||||
$(VCS) $(VCS_OPTS) -o $(simv) \
|
||||
-debug_pp \
|
||||
|
||||
simv_debug = $(sim_dir)/simv-$(CONFIG)-debug
|
||||
$(simv_debug) : $(sim_vsrcs) $(sim_csrcs) $(sim_dir)/libdramsim.a
|
||||
|
@ -1 +1 @@
|
||||
Subproject commit 319d8d08fb074d83a3a11b39fbd1c9b2a4fba46f
|
||||
Subproject commit e2e06ff615d031be6c7d696b52718ca16cd9c87b
|
5
project/plugins.sbt
Normal file
5
project/plugins.sbt
Normal file
@ -0,0 +1,5 @@
|
||||
resolvers += "jgit-repo" at "http://download.eclipse.org/jgit/maven"
|
||||
|
||||
addSbtPlugin("com.typesafe.sbt" % "sbt-ghpages" % "0.5.3")
|
||||
|
||||
addSbtPlugin("com.typesafe.sbt" % "sbt-site" % "0.8.1")
|
31
regression.sh
Executable file
31
regression.sh
Executable file
@ -0,0 +1,31 @@
|
||||
#! /bin/bash
|
||||
#
|
||||
# See LICENSE for license details.
|
||||
|
||||
# Script to setup submodules, build rocket-chip, and run asm tests, and optionally run torture
|
||||
|
||||
echo "Starting Rocket-chip regression test"
|
||||
if [ $# -lt 1 ]
|
||||
then
|
||||
echo "Usage: ./regression.sh config [torture_config] [torture_output_dir]"
|
||||
exit
|
||||
fi
|
||||
|
||||
git submodule update --init --recursive riscv-tools
|
||||
export RISCV="$(pwd)/install"; export PATH=$PATH:$RISCV/bin
|
||||
cd riscv-tools; ./build.sh; cd ..
|
||||
git submodule update --init
|
||||
git submodule status --recursive
|
||||
cd emulator; make CONFIG=$1 run-asm-tests; cd ..
|
||||
if [ $# -ge 2 ]
|
||||
then
|
||||
git clone git@github.com:ucb-bar/riscv-torture.git
|
||||
cd riscv-torture; git submodule update --init;
|
||||
if [ $# -eq 3 ]
|
||||
then
|
||||
make cnight RTL_CONFIG=$1 OPTIONS="-C $2 -p $3 -m 30 -t 10"
|
||||
else
|
||||
make cnight RTL_CONFIG=$1 OPTIONS="-C $2 -m 30 -t 10"
|
||||
fi
|
||||
fi
|
||||
|
@ -1 +1 @@
|
||||
Subproject commit 99c6c32bc67c363ee5d5fa714e4122a8a45cbb91
|
||||
Subproject commit 8a16e3481018623bc954caeba67e2f532db5f9a9
|
2
rocket
2
rocket
@ -1 +1 @@
|
||||
Subproject commit 8449254fbdde0a1380c16eb31cf1e573bd69aae2
|
||||
Subproject commit 4ad41b4b63ac14989b70bffb651491737bb0d4e8
|
@ -4,88 +4,133 @@ package rocketchip
|
||||
|
||||
import Chisel._
|
||||
import uncore._
|
||||
import scala.reflect._
|
||||
import scala.reflect.runtime.universe._
|
||||
|
||||
object TileLinkHeaderOverwriter {
|
||||
def apply[T <: ClientSourcedMessage](in: DecoupledIO[LogicalNetworkIO[T]], clientId: Int, passThrough: Boolean): DecoupledIO[LogicalNetworkIO[T]] = {
|
||||
val out = in.clone.asDirectionless
|
||||
out.bits.payload := in.bits.payload
|
||||
out.bits.header.src := UInt(clientId)
|
||||
out.bits.header.dst := (if(passThrough) in.bits.header.dst else UInt(0))
|
||||
out.valid := in.valid
|
||||
in.ready := out.ready
|
||||
out
|
||||
/** RocketChipNetworks combine a TileLink protocol with a particular physical
|
||||
* network implementation and chip layout.
|
||||
*
|
||||
* Specifically, they provide mappings between ClientTileLinkIO/
|
||||
* ManagerTileLinkIO channels and LogicalNetwork ports (i.e. generic
|
||||
* TileLinkIO with networking headers). Channels coming into the network have
|
||||
* appropriate networking headers appended and outgoing channels have their
|
||||
* headers stripped.
|
||||
*
|
||||
* @constructor base class constructor for Rocket NoC
|
||||
* @param addrToManagerId a mapping from a physical address to the network
|
||||
* id of a coherence manager
|
||||
* @param sharerToClientId a mapping from the id of a particular coherent
|
||||
* client (as determined by e.g. the directory) and the network id
|
||||
* of that client
|
||||
* @param clientDepths the depths of the queue that should be used to buffer
|
||||
* each channel on the client side of the network
|
||||
* @param managerDepths the depths of the queue that should be used to buffer
|
||||
* each channel on the manager side of the network
|
||||
*/
|
||||
abstract class RocketChipNetwork(
|
||||
addrToManagerId: UInt => UInt,
|
||||
sharerToClientId: UInt => UInt,
|
||||
clientDepths: TileLinkDepths,
|
||||
managerDepths: TileLinkDepths) extends TLModule {
|
||||
val nClients = params(TLNClients)
|
||||
val nManagers = params(TLNManagers)
|
||||
val io = new Bundle {
|
||||
val clients = Vec.fill(nClients){new ClientTileLinkIO}.flip
|
||||
val managers = Vec.fill(nManagers){new ManagerTileLinkIO}.flip
|
||||
}
|
||||
def apply[T <: ClientSourcedMessage with HasPhysicalAddress](in: DecoupledIO[LogicalNetworkIO[T]], clientId: Int, nBanks: Int, addrConvert: UInt => UInt): DecoupledIO[LogicalNetworkIO[T]] = {
|
||||
val out: DecoupledIO[LogicalNetworkIO[T]] = apply(in, clientId, false)
|
||||
out.bits.header.dst := (if(nBanks > 1) addrConvert(in.bits.payload.addr) else UInt(0))
|
||||
out
|
||||
|
||||
val clients = io.clients.zipWithIndex.map {
|
||||
case (c, i) => {
|
||||
val p = Module(new ClientTileLinkNetworkPort(i, addrToManagerId))
|
||||
val q = Module(new TileLinkEnqueuer(clientDepths))
|
||||
p.io.client <> c
|
||||
q.io.client <> p.io.network
|
||||
q.io.manager
|
||||
}
|
||||
}
|
||||
|
||||
val managers = io.managers.zipWithIndex.map {
|
||||
case (m, i) => {
|
||||
val p = Module(new ManagerTileLinkNetworkPort(i, sharerToClientId))
|
||||
val q = Module(new TileLinkEnqueuer(managerDepths))
|
||||
m <> p.io.manager
|
||||
p.io.network <> q.io.manager
|
||||
q.io.client
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class RocketChipCrossbarNetwork extends LogicalNetwork {
|
||||
val io = new Bundle {
|
||||
val clients = Vec.fill(params(LNClients)){(new TileLinkIO).flip}
|
||||
val masters = Vec.fill(params(LNMasters)){new TileLinkIO}
|
||||
/** A simple arbiter for each channel that also deals with header-based routing.
|
||||
* Assumes a single manager agent. */
|
||||
class RocketChipTileLinkArbiter(
|
||||
sharerToClientId: UInt => UInt = (u: UInt) => u,
|
||||
clientDepths: TileLinkDepths = TileLinkDepths(0,0,0,0,0),
|
||||
managerDepths: TileLinkDepths = TileLinkDepths(0,0,0,0,0))
|
||||
extends RocketChipNetwork(u => UInt(0), sharerToClientId, clientDepths, managerDepths)
|
||||
with TileLinkArbiterLike
|
||||
with PassesId {
|
||||
val arbN = nClients
|
||||
require(nManagers == 1)
|
||||
if(arbN > 1) {
|
||||
hookupClientSource(clients.map(_.acquire), managers.head.acquire)
|
||||
hookupClientSource(clients.map(_.release), managers.head.release)
|
||||
hookupFinish(clients.map(_.finish), managers.head.finish)
|
||||
hookupManagerSourceWithHeader(clients.map(_.probe), managers.head.probe)
|
||||
hookupManagerSourceWithHeader(clients.map(_.grant), managers.head.grant)
|
||||
} else {
|
||||
managers.head <> clients.head
|
||||
}
|
||||
}
|
||||
|
||||
/** Provides a separate physical crossbar for each channel. Assumes multiple manager
|
||||
* agents. Managers are assigned to higher physical network port ids than
|
||||
* clients, and translations between logical network id and physical crossbar
|
||||
* port id are done automatically.
|
||||
*/
|
||||
class RocketChipTileLinkCrossbar(
|
||||
addrToManagerId: UInt => UInt = u => UInt(0),
|
||||
sharerToClientId: UInt => UInt = u => u,
|
||||
clientDepths: TileLinkDepths = TileLinkDepths(0,0,0,0,0),
|
||||
managerDepths: TileLinkDepths = TileLinkDepths(0,0,0,0,0))
|
||||
extends RocketChipNetwork(addrToManagerId, sharerToClientId, clientDepths, managerDepths) {
|
||||
val n = params(LNEndpoints)
|
||||
val count = params(TLDataBeats)
|
||||
// Actually instantiate the particular networks required for TileLink
|
||||
val acqNet = Module(new BasicCrossbar(n, new Acquire))
|
||||
val relNet = Module(new BasicCrossbar(n, new Release))
|
||||
val acqNet = Module(new BasicCrossbar(n, new Acquire, count, Some((a: PhysicalNetworkIO[Acquire]) => a.payload.hasMultibeatData())))
|
||||
val relNet = Module(new BasicCrossbar(n, new Release, count, Some((r: PhysicalNetworkIO[Release]) => r.payload.hasMultibeatData())))
|
||||
val prbNet = Module(new BasicCrossbar(n, new Probe))
|
||||
val gntNet = Module(new BasicCrossbar(n, new Grant))
|
||||
val gntNet = Module(new BasicCrossbar(n, new Grant, count, Some((g: PhysicalNetworkIO[Grant]) => g.payload.hasMultibeatData())))
|
||||
val ackNet = Module(new BasicCrossbar(n, new Finish))
|
||||
|
||||
// Aliases for the various network IO bundle types
|
||||
type FBCIO[T <: Data] = DecoupledIO[PhysicalNetworkIO[T]]
|
||||
type FLNIO[T <: Data] = DecoupledIO[LogicalNetworkIO[T]]
|
||||
type FromCrossbar[T <: Data] = FBCIO[T] => FLNIO[T]
|
||||
type ToCrossbar[T <: Data] = FLNIO[T] => FBCIO[T]
|
||||
type PNIO[T <: Data] = DecoupledIO[PhysicalNetworkIO[T]]
|
||||
type LNIO[T <: Data] = DecoupledIO[LogicalNetworkIO[T]]
|
||||
type FromCrossbar[T <: Data] = PNIO[T] => LNIO[T]
|
||||
type ToCrossbar[T <: Data] = LNIO[T] => PNIO[T]
|
||||
|
||||
// Shims for converting between logical network IOs and physical network IOs
|
||||
//TODO: Could be less verbose if you could override subbundles after a <>
|
||||
def DefaultFromCrossbarShim[T <: Data](in: FBCIO[T]): FLNIO[T] = {
|
||||
val out = Decoupled(new LogicalNetworkIO(in.bits.payload)).asDirectionless
|
||||
out.bits.header := in.bits.header
|
||||
out.bits.payload := in.bits.payload
|
||||
out.valid := in.valid
|
||||
in.ready := out.ready
|
||||
def crossbarToManagerShim[T <: Data](in: PNIO[T]): LNIO[T] = {
|
||||
val out = DefaultFromPhysicalShim(in)
|
||||
out.bits.header.src := in.bits.header.src - UInt(nManagers)
|
||||
out
|
||||
}
|
||||
def CrossbarToMasterShim[T <: Data](in: FBCIO[T]): FLNIO[T] = {
|
||||
val out = DefaultFromCrossbarShim(in)
|
||||
out.bits.header.src := in.bits.header.src - UInt(params(LNMasters))
|
||||
def crossbarToClientShim[T <: Data](in: PNIO[T]): LNIO[T] = {
|
||||
val out = DefaultFromPhysicalShim(in)
|
||||
out.bits.header.dst := in.bits.header.dst - UInt(nManagers)
|
||||
out
|
||||
}
|
||||
def CrossbarToClientShim[T <: Data](in: FBCIO[T]): FLNIO[T] = {
|
||||
val out = DefaultFromCrossbarShim(in)
|
||||
out.bits.header.dst := in.bits.header.dst - UInt(params(LNMasters))
|
||||
def managerToCrossbarShim[T <: Data](in: LNIO[T]): PNIO[T] = {
|
||||
val out = DefaultToPhysicalShim(n, in)
|
||||
out.bits.header.dst := in.bits.header.dst + UInt(nManagers)
|
||||
out
|
||||
}
|
||||
def DefaultToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = {
|
||||
val out = Decoupled(new PhysicalNetworkIO(n,in.bits.payload)).asDirectionless
|
||||
out.bits.header := in.bits.header
|
||||
out.bits.payload := in.bits.payload
|
||||
out.valid := in.valid
|
||||
in.ready := out.ready
|
||||
out
|
||||
}
|
||||
def MasterToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = {
|
||||
val out = DefaultToCrossbarShim(in)
|
||||
out.bits.header.dst := in.bits.header.dst + UInt(params(LNMasters))
|
||||
out
|
||||
}
|
||||
def ClientToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = {
|
||||
val out = DefaultToCrossbarShim(in)
|
||||
out.bits.header.src := in.bits.header.src + UInt(params(LNMasters))
|
||||
def clientToCrossbarShim[T <: Data](in: LNIO[T]): PNIO[T] = {
|
||||
val out = DefaultToPhysicalShim(n, in)
|
||||
out.bits.header.src := in.bits.header.src + UInt(nManagers)
|
||||
out
|
||||
}
|
||||
|
||||
// Make an individual connection between virtual and physical ports using
|
||||
// a particular shim. Also seal the unused FIFO control signal.
|
||||
def doFIFOInputHookup[T <: Data](phys_in: FBCIO[T], phys_out: FBCIO[T], log_io: FLNIO[T], shim: ToCrossbar[T]) = {
|
||||
// a particular shim. Also pin the unused Decoupled control signal low.
|
||||
def doDecoupledInputHookup[T <: Data](phys_in: PNIO[T], phys_out: PNIO[T], log_io: LNIO[T], shim: ToCrossbar[T]) = {
|
||||
val s = shim(log_io)
|
||||
phys_in.valid := s.valid
|
||||
phys_in.bits := s.bits
|
||||
@ -93,7 +138,7 @@ class RocketChipCrossbarNetwork extends LogicalNetwork {
|
||||
phys_out.ready := Bool(false)
|
||||
}
|
||||
|
||||
def doFIFOOutputHookup[T <: Data](phys_in: FBCIO[T], phys_out: FBCIO[T], log_io: FLNIO[T], shim: FromCrossbar[T]) = {
|
||||
def doDecoupledOutputHookup[T <: Data](phys_in: PNIO[T], phys_out: PNIO[T], log_io: LNIO[T], shim: FromCrossbar[T]) = {
|
||||
val s = shim(phys_out)
|
||||
log_io.valid := s.valid
|
||||
log_io.bits := s.bits
|
||||
@ -101,29 +146,31 @@ class RocketChipCrossbarNetwork extends LogicalNetwork {
|
||||
phys_in.valid := Bool(false)
|
||||
}
|
||||
|
||||
def doFIFOHookup[T <: Data](isEndpointSourceOfMessage: Boolean, physIn: FBCIO[T], physOut: FBCIO[T], logIO: FLNIO[T], inShim: ToCrossbar[T], outShim: FromCrossbar[T]) = {
|
||||
if(isEndpointSourceOfMessage) doFIFOInputHookup(physIn, physOut, logIO, inShim)
|
||||
else doFIFOOutputHookup(physIn, physOut, logIO, outShim)
|
||||
}
|
||||
|
||||
//Hookup all instances of a particular subbundle of TileLink
|
||||
def doFIFOHookups[T <: Data: TypeTag](physIO: BasicCrossbarIO[T], getLogIO: TileLinkIO => FLNIO[T]) = {
|
||||
typeTag[T].tpe match{
|
||||
case t if t <:< typeTag[ClientSourcedMessage].tpe => {
|
||||
io.masters.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](false, physIO.in(id), physIO.out(id), getLogIO(i), ClientToCrossbarShim, CrossbarToMasterShim) }
|
||||
io.clients.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](true, physIO.in(id+params(LNMasters)), physIO.out(id+params(LNMasters)), getLogIO(i), ClientToCrossbarShim, CrossbarToMasterShim) }
|
||||
def doDecoupledHookups[T <: Data](physIO: BasicCrossbarIO[T], getLogIO: TileLinkIO => LNIO[T]) = {
|
||||
physIO.in.head.bits.payload match {
|
||||
case c: ClientToManagerChannel => {
|
||||
managers.zipWithIndex.map { case (i, id) =>
|
||||
doDecoupledOutputHookup(physIO.in(id), physIO.out(id), getLogIO(i), crossbarToManagerShim[T])
|
||||
}
|
||||
clients.zipWithIndex.map { case (i, id) =>
|
||||
doDecoupledInputHookup(physIO.in(id+nManagers), physIO.out(id+nManagers), getLogIO(i), clientToCrossbarShim[T])
|
||||
}
|
||||
}
|
||||
case t if t <:< typeTag[MasterSourcedMessage].tpe => {
|
||||
io.masters.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](true, physIO.in(id), physIO.out(id), getLogIO(i), MasterToCrossbarShim, CrossbarToClientShim) }
|
||||
io.clients.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](false, physIO.in(id+params(LNMasters)), physIO.out(id+params(LNMasters)), getLogIO(i), MasterToCrossbarShim, CrossbarToClientShim) }
|
||||
case m: ManagerToClientChannel => {
|
||||
managers.zipWithIndex.map { case (i, id) =>
|
||||
doDecoupledInputHookup(physIO.in(id), physIO.out(id), getLogIO(i), managerToCrossbarShim[T])
|
||||
}
|
||||
clients.zipWithIndex.map { case (i, id) =>
|
||||
doDecoupledOutputHookup(physIO.in(id+nManagers), physIO.out(id+nManagers), getLogIO(i), crossbarToClientShim[T])
|
||||
}
|
||||
}
|
||||
case _ => require(false, "Unknown message sourcing.")
|
||||
}
|
||||
}
|
||||
|
||||
doFIFOHookups(acqNet.io, (tl: TileLinkIO) => tl.acquire)
|
||||
doFIFOHookups(relNet.io, (tl: TileLinkIO) => tl.release)
|
||||
doFIFOHookups(prbNet.io, (tl: TileLinkIO) => tl.probe)
|
||||
doFIFOHookups(gntNet.io, (tl: TileLinkIO) => tl.grant)
|
||||
doFIFOHookups(ackNet.io, (tl: TileLinkIO) => tl.finish)
|
||||
doDecoupledHookups(acqNet.io, (tl: TileLinkIO) => tl.acquire)
|
||||
doDecoupledHookups(relNet.io, (tl: TileLinkIO) => tl.release)
|
||||
doDecoupledHookups(prbNet.io, (tl: TileLinkIO) => tl.probe)
|
||||
doDecoupledHookups(gntNet.io, (tl: TileLinkIO) => tl.grant)
|
||||
doDecoupledHookups(ackNet.io, (tl: TileLinkIO) => tl.finish)
|
||||
}
|
||||
|
@ -6,14 +6,13 @@ import Chisel._
|
||||
import uncore._
|
||||
import rocket._
|
||||
import rocket.Util._
|
||||
import scala.math.max
|
||||
|
||||
class DefaultConfig extends ChiselConfig (
|
||||
topDefinitions = { (pname,site,here) =>
|
||||
type PF = PartialFunction[Any,Any]
|
||||
def findBy(sname:Any):Any = here[PF](site[Any](sname))(pname)
|
||||
pname match {
|
||||
//RocketChip Parameters
|
||||
case BuildTile => (r:Bool) => {new RocketTile(resetSignal = r)}
|
||||
//HTIF Parameters
|
||||
case HTIFWidth => Dump("HTIF_WIDTH", 16)
|
||||
case HTIFNSCR => 64
|
||||
@ -21,121 +20,165 @@ class DefaultConfig extends ChiselConfig (
|
||||
case HTIFNCores => site(NTiles)
|
||||
//Memory Parameters
|
||||
case PAddrBits => 32
|
||||
case VAddrBits => 43
|
||||
case PgIdxBits => 13
|
||||
case ASIdBits => 7
|
||||
case PermBits => 6
|
||||
case PgIdxBits => 12
|
||||
case PgLevels => if (site(XLen) == 64) 3 /* Sv39 */ else 2 /* Sv32 */
|
||||
case PgLevelBits => site(PgIdxBits) - log2Up(site(XLen)/8)
|
||||
case VPNBits => site(PgLevels) * site(PgLevelBits)
|
||||
case PPNBits => site(PAddrBits) - site(PgIdxBits)
|
||||
case VPNBits => site(VAddrBits) - site(PgIdxBits)
|
||||
case MIFTagBits => Dump("MEM_TAG_BITS", 5)
|
||||
case VAddrBits => site(VPNBits) + site(PgIdxBits)
|
||||
case ASIdBits => 7
|
||||
case MIFTagBits => Dump("MEM_TAG_BITS",
|
||||
log2Up(site(NAcquireTransactors)+2) +
|
||||
log2Up(site(NBanksPerMemoryChannel)) +
|
||||
log2Up(site(NMemoryChannels)) + /* TODO: Remove for multichannel Top */
|
||||
1)
|
||||
case MIFDataBits => Dump("MEM_DATA_BITS", 128)
|
||||
case MIFAddrBits => Dump("MEM_ADDR_BITS", site(PAddrBits) - site(CacheBlockOffsetBits))
|
||||
case MIFDataBeats => site(TLDataBits)/site(MIFDataBits)
|
||||
case MIFDataBeats => site(TLDataBits)*site(TLDataBeats)/site(MIFDataBits)
|
||||
case NASTIDataBits => site(MIFDataBits)
|
||||
case NASTIAddrBits => site(MIFAddrBits)
|
||||
case NASTIIdBits => site(MIFTagBits)
|
||||
case UseNASTI => false
|
||||
//Params used by all caches
|
||||
case NSets => findBy(CacheName)
|
||||
case NWays => findBy(CacheName)
|
||||
case RowBits => findBy(CacheName)
|
||||
case BlockOffBits => findBy(CacheName)
|
||||
case NTLBEntries => findBy(CacheName)
|
||||
case "L1I" => {
|
||||
case NSets => Knob("L1I_SETS") //128
|
||||
case NWays => Knob("L1I_WAYS") //2
|
||||
case NSets => Knob("L1I_SETS") //64
|
||||
case NWays => Knob("L1I_WAYS") //4
|
||||
case RowBits => 4*site(CoreInstBits)
|
||||
case BlockOffBits => log2Up(site(TLDataBits)/8)
|
||||
case NTLBEntries => 8
|
||||
}:PF
|
||||
case "L1D" => {
|
||||
case NSets => Knob("L1D_SETS") //128
|
||||
case NSets => Knob("L1D_SETS") //64
|
||||
case NWays => Knob("L1D_WAYS") //4
|
||||
case RowBits => 2*site(CoreDataBits)
|
||||
case BlockOffBits => log2Up(site(TLDataBits)/8)
|
||||
}:PF
|
||||
case "L2" => {
|
||||
case NSets => 512
|
||||
case NWays => 8
|
||||
case RowBits => site(TLDataBits)
|
||||
case BlockOffBits => 0
|
||||
case NTLBEntries => 8
|
||||
}:PF
|
||||
case ECCCode => None
|
||||
case WordBits => site(XprLen)
|
||||
case Replacer => () => new RandomReplacement(site(NWays))
|
||||
case AmoAluOperandBits => site(XLen)
|
||||
//L1InstCache
|
||||
case NITLBEntries => 8
|
||||
case NBTBEntries => 62
|
||||
case NRAS => 2
|
||||
//L1DataCache
|
||||
case NDTLBEntries => 8
|
||||
case WordBits => site(XLen)
|
||||
case StoreDataQueueDepth => 17
|
||||
case ReplayQueueDepth => 16
|
||||
case NMSHRs => Knob("L1D_MSHRS")
|
||||
case LRSCCycles => 32
|
||||
//L2CacheParams
|
||||
case NReleaseTransactors => Knob("L2_REL_XACTS")
|
||||
case NAcquireTransactors => Knob("L2_ACQ_XACTS")
|
||||
case NClients => site(NTiles) + 1
|
||||
//L2 Memory System Params
|
||||
case NAcquireTransactors => 7
|
||||
case L2StoreDataQueueDepth => 1
|
||||
case L2DirectoryRepresentation => new NullRepresentation(site(TLNCachingClients))
|
||||
case BuildL2CoherenceManager => () =>
|
||||
Module(new L2BroadcastHub, { case InnerTLId => "L1ToL2"; case OuterTLId => "L2ToMC" })
|
||||
//Tile Constants
|
||||
case BuildTiles =>
|
||||
List.fill(site(NTiles)){ (r:Bool) => Module(new RocketTile(resetSignal = r), {case TLId => "L1ToL2"}) }
|
||||
case BuildRoCC => None
|
||||
case NDCachePorts => 2 + (if(site(BuildRoCC).isEmpty) 0 else 1)
|
||||
case NTilePorts => 2 + (if(site(BuildRoCC).isEmpty) 0 else 1)
|
||||
case NPTWPorts => 2 + (if(site(BuildRoCC).isEmpty) 0 else 3)
|
||||
//Rocket Core Constants
|
||||
case FetchWidth => 1
|
||||
case RetireWidth => 1
|
||||
case UseVM => true
|
||||
case FastLoadWord => true
|
||||
case FastLoadByte => false
|
||||
case FastMulDiv => true
|
||||
case XprLen => 64
|
||||
case XLen => 64
|
||||
case NMultXpr => 32
|
||||
case BuildFPU => Some(() => Module(new FPU))
|
||||
case FDivSqrt => true
|
||||
case SFMALatency => 2
|
||||
case DFMALatency => 3
|
||||
case CoreInstBits => 32
|
||||
case CoreDataBits => site(XprLen)
|
||||
case CoreDataBits => site(XLen)
|
||||
case CoreDCacheReqTagBits => 7 + log2Up(here(NDCachePorts))
|
||||
case NCustomMRWCSRs => 0
|
||||
//Uncore Paramters
|
||||
case LNMasters => site(NBanks)
|
||||
case LNClients => site(NTiles)+1
|
||||
case LNEndpoints => site(LNMasters) + site(LNClients)
|
||||
case TLId => "inner"
|
||||
case TLCoherence => site(Coherence)
|
||||
case TLAddrBits => site(PAddrBits) - site(CacheBlockOffsetBits)
|
||||
case TLMasterXactIdBits => site(TLId) match {
|
||||
case "inner" => log2Up(site(NReleaseTransactors)+site(NAcquireTransactors))
|
||||
case "outer" => 1
|
||||
}
|
||||
case TLClientXactIdBits => site(TLId) match {
|
||||
case "inner" => log2Up(site(NMSHRs))+log2Up(site(NTilePorts))
|
||||
case "outer" => log2Up(site(NReleaseTransactors)+site(NAcquireTransactors))
|
||||
}
|
||||
case TLDataBits => site(CacheBlockBytes)*8
|
||||
case TLWriteMaskBits => 6
|
||||
case TLWordAddrBits => 3
|
||||
case TLAtomicOpBits => 4
|
||||
case LNEndpoints => site(TLNManagers) + site(TLNClients)
|
||||
case LNHeaderBits => log2Ceil(site(TLNManagers)) + log2Up(site(TLNClients))
|
||||
case TLBlockAddrBits => site(PAddrBits) - site(CacheBlockOffsetBits)
|
||||
case TLNClients => site(TLNCachingClients) + site(TLNCachelessClients)
|
||||
case TLDataBits => site(CacheBlockBytes)*8/site(TLDataBeats)
|
||||
case TLDataBeats => 4
|
||||
case TLNetworkIsOrderedP2P => false
|
||||
case TLNManagers => findBy(TLId)
|
||||
case TLNCachingClients => findBy(TLId)
|
||||
case TLNCachelessClients => findBy(TLId)
|
||||
case TLCoherencePolicy => findBy(TLId)
|
||||
case TLMaxManagerXacts => findBy(TLId)
|
||||
case TLMaxClientXacts => findBy(TLId)
|
||||
case TLMaxClientsPerPort => findBy(TLId)
|
||||
case "L1ToL2" => {
|
||||
case TLNManagers => site(NBanksPerMemoryChannel)*site(NMemoryChannels)
|
||||
case TLNCachingClients => site(NTiles)
|
||||
case TLNCachelessClients => site(NTiles) + 1
|
||||
case TLCoherencePolicy => new MESICoherence(site(L2DirectoryRepresentation))
|
||||
case TLMaxManagerXacts => site(NAcquireTransactors) + 2
|
||||
case TLMaxClientXacts => max(site(NMSHRs),
|
||||
if(site(BuildRoCC).isEmpty) 1
|
||||
else site(RoCCMaxTaggedMemXacts))
|
||||
case TLMaxClientsPerPort => if(site(BuildRoCC).isEmpty) 1 else 3
|
||||
}:PF
|
||||
case "L2ToMC" => {
|
||||
case TLNManagers => 1
|
||||
case TLNCachingClients => site(NBanksPerMemoryChannel)
|
||||
case TLNCachelessClients => 0
|
||||
case TLCoherencePolicy => new MEICoherence(new NullRepresentation(site(NBanksPerMemoryChannel)))
|
||||
case TLMaxManagerXacts => 1
|
||||
case TLMaxClientXacts => 1
|
||||
case TLMaxClientsPerPort => site(NAcquireTransactors) + 2
|
||||
}:PF
|
||||
case NTiles => Knob("NTILES")
|
||||
case NBanks => Knob("NBANKS")
|
||||
case NOutstandingMemReqs => 2*site(NBanks)*(site(NReleaseTransactors)+site(NAcquireTransactors))
|
||||
case BankIdLSB => 5
|
||||
case NMemoryChannels => 1
|
||||
case NBanksPerMemoryChannel => Knob("NBANKS")
|
||||
case NOutstandingMemReqsPerChannel => site(NBanksPerMemoryChannel)*(site(NAcquireTransactors)+2)
|
||||
case BankIdLSB => 0
|
||||
case CacheBlockBytes => 64
|
||||
case CacheBlockOffsetBits => log2Up(here(CacheBlockBytes))
|
||||
case UseBackupMemoryPort => true
|
||||
case BuildCoherenceMaster => (id: Int) => {
|
||||
Module(new L2CoherenceAgent(id, "inner", "outer"), { case CacheName => "L2" })
|
||||
}
|
||||
case Coherence => new MSICoherence(() => new NullRepresentation)
|
||||
}},
|
||||
knobValues = {
|
||||
case "NTILES" => 1
|
||||
case "NBANKS" => 1
|
||||
case "L2_REL_XACTS" => 1
|
||||
case "L2_ACQ_XACTS" => 7
|
||||
case "L1D_MSHRS" => 2
|
||||
case "L1D_SETS" => 128
|
||||
case "L1D_SETS" => 64
|
||||
case "L1D_WAYS" => 4
|
||||
case "L1I_SETS" => 128
|
||||
case "L1I_WAYS" => 2
|
||||
case "L1I_SETS" => 64
|
||||
case "L1I_WAYS" => 4
|
||||
}
|
||||
)
|
||||
class DefaultVLSIConfig extends DefaultConfig
|
||||
class DefaultCPPConfig extends DefaultConfig
|
||||
|
||||
class WithL2Cache extends ChiselConfig(
|
||||
(pname,site,here) => pname match {
|
||||
case "L2_CAPACITY_IN_KB" => Knob("L2_CAPACITY_IN_KB")
|
||||
case "L2Bank" => {
|
||||
case NSets => (((here[Int]("L2_CAPACITY_IN_KB")*1024) /
|
||||
site(CacheBlockBytes)) /
|
||||
site(NBanksPerMemoryChannel)*site(NMemoryChannels)) /
|
||||
site(NWays)
|
||||
case NWays => Knob("L2_WAYS")
|
||||
case RowBits => site(TLDataBits)
|
||||
}: PartialFunction[Any,Any]
|
||||
case NAcquireTransactors => 2
|
||||
case NSecondaryMisses => 4
|
||||
case L2DirectoryRepresentation => new FullRepresentation(site(TLNCachingClients))
|
||||
case BuildL2CoherenceManager => () =>
|
||||
Module(new L2HellaCacheBank, {
|
||||
case CacheName => "L2Bank"
|
||||
case InnerTLId => "L1ToL2"
|
||||
case OuterTLId => "L2ToMC"})
|
||||
},
|
||||
knobValues = { case "L2_WAYS" => 8; case "L2_CAPACITY_IN_KB" => 2048 }
|
||||
)
|
||||
|
||||
class DefaultL2Config extends ChiselConfig(new WithL2Cache ++ new DefaultConfig)
|
||||
|
||||
class FPGAConfig extends ChiselConfig (
|
||||
(pname,site,here) => pname match {
|
||||
case UseBackupMemoryPort => false
|
||||
@ -148,9 +191,8 @@ class SmallConfig extends ChiselConfig (
|
||||
topDefinitions = { (pname,site,here) => pname match {
|
||||
case BuildFPU => None
|
||||
case FastMulDiv => false
|
||||
case NITLBEntries => 4
|
||||
case NTLBEntries => 4
|
||||
case NBTBEntries => 8
|
||||
case NDTLBEntries => 4
|
||||
}},
|
||||
knobValues = {
|
||||
case "L1D_SETS" => 64
|
||||
|
@ -7,157 +7,200 @@ import uncore._
|
||||
import rocket._
|
||||
import rocket.Util._
|
||||
|
||||
case object NTiles extends Field[Int]
|
||||
case object NBanks extends Field[Int]
|
||||
case object NOutstandingMemReqs extends Field[Int]
|
||||
case object BankIdLSB extends Field[Int]
|
||||
case object CacheBlockBytes extends Field[Int]
|
||||
case object CacheBlockOffsetBits extends Field[Int]
|
||||
case object UseBackupMemoryPort extends Field[Boolean]
|
||||
case object Coherence extends Field[CoherencePolicyWithUncached]
|
||||
case object BuildCoherenceMaster extends Field[(Int) => CoherenceAgent]
|
||||
case object BuildTile extends Field[(Bool)=>Tile]
|
||||
/** Top-level parameters of RocketChip, values set in e.g. PublicConfigs.scala */
|
||||
|
||||
abstract trait TopLevelParameters extends UsesParameters {
|
||||
/** Number of tiles */
|
||||
case object NTiles extends Field[Int]
|
||||
/** Number of memory channels */
|
||||
case object NMemoryChannels extends Field[Int]
|
||||
/** Number of banks per memory channel */
|
||||
case object NBanksPerMemoryChannel extends Field[Int]
|
||||
/** Least significant bit of address used for bank partitioning */
|
||||
case object BankIdLSB extends Field[Int]
|
||||
/** Number of outstanding memory requests */
|
||||
case object NOutstandingMemReqsPerChannel extends Field[Int]
|
||||
/** Whether to use the slow backup memory port [VLSI] */
|
||||
case object UseBackupMemoryPort extends Field[Boolean]
|
||||
/** Function for building some kind of coherence manager agent */
|
||||
case object BuildL2CoherenceManager extends Field[() => CoherenceAgent]
|
||||
/** Function for building some kind of tile connected to a reset signal */
|
||||
case object BuildTiles extends Field[Seq[(Bool) => Tile]]
|
||||
/** Which protocol to use to talk to memory/devices */
|
||||
case object UseNASTI extends Field[Boolean]
|
||||
|
||||
/** Utility trait for quick access to some relevant parameters */
|
||||
trait TopLevelParameters extends UsesParameters {
|
||||
val htifW = params(HTIFWidth)
|
||||
val nTiles = params(NTiles)
|
||||
val nBanks = params(NBanks)
|
||||
val nMemChannels = params(NMemoryChannels)
|
||||
val nBanksPerMemChannel = params(NBanksPerMemoryChannel)
|
||||
val nBanks = nMemChannels*nBanksPerMemChannel
|
||||
val lsb = params(BankIdLSB)
|
||||
val refillCycles = params(MIFDataBeats)
|
||||
val nMemReqs = params(NOutstandingMemReqsPerChannel)
|
||||
val mifAddrBits = params(MIFAddrBits)
|
||||
val mifDataBeats = params(MIFDataBeats)
|
||||
require(lsb + log2Up(nBanks) < mifAddrBits)
|
||||
}
|
||||
|
||||
class OuterMemorySystem extends Module with TopLevelParameters {
|
||||
val io = new Bundle {
|
||||
val tiles = Vec.fill(params(NTiles)){new TileLinkIO}.flip
|
||||
val htif = (new TileLinkIO).flip
|
||||
val incoherent = Vec.fill(params(LNClients)){Bool()}.asInput
|
||||
val mem = new MemIO
|
||||
val mem_backup = new MemSerializedIO(params(HTIFWidth))
|
||||
val mem_backup_en = Bool(INPUT)
|
||||
}
|
||||
|
||||
// Create a simple NoC and points of coherence serialization
|
||||
val net = Module(new RocketChipCrossbarNetwork)
|
||||
val masterEndpoints = (0 until params(NBanks)).map(params(BuildCoherenceMaster))
|
||||
net.io.clients zip (io.tiles :+ io.htif) map { case (net, end) => net <> end }
|
||||
net.io.masters zip (masterEndpoints.map(_.io.inner)) map { case (net, end) => net <> end }
|
||||
masterEndpoints.map{ _.io.incoherent zip io.incoherent map { case (m, c) => m := c } }
|
||||
|
||||
// Create a converter between TileLinkIO and MemIO
|
||||
val conv = Module(new MemPipeIOUncachedTileLinkIOConverter(
|
||||
params(NOutstandingMemReqs), refillCycles),
|
||||
{ case TLId => "outer" })
|
||||
if(params(NBanks) > 1) {
|
||||
val arb = Module(new UncachedTileLinkIOArbiterThatAppendsArbiterId(params(NBanks)),
|
||||
{ case TLId => "outer" })
|
||||
arb.io.in zip masterEndpoints.map(_.io.outer) map { case (arb, cache) => arb <> cache }
|
||||
conv.io.uncached <> arb.io.out
|
||||
} else {
|
||||
conv.io.uncached <> masterEndpoints.head.io.outer
|
||||
}
|
||||
|
||||
// Create a SerDes for backup memory port
|
||||
if(params(UseBackupMemoryPort)) {
|
||||
VLSIUtils.doOuterMemorySystemSerdes(conv.io.mem, io.mem, io.mem_backup,
|
||||
io.mem_backup_en, htifW)
|
||||
} else {
|
||||
io.mem <> conv.io.mem
|
||||
}
|
||||
class MemBackupCtrlIO extends Bundle {
|
||||
val en = Bool(INPUT)
|
||||
val in_valid = Bool(INPUT)
|
||||
val out_ready = Bool(INPUT)
|
||||
val out_valid = Bool(OUTPUT)
|
||||
}
|
||||
|
||||
/** Top-level io for the chip */
|
||||
class BasicTopIO extends Bundle {
|
||||
val host = new HostIO
|
||||
val mem_backup_ctrl = new MemBackupCtrlIO
|
||||
}
|
||||
|
||||
class TopIO extends BasicTopIO {
|
||||
val mem = new MemIO
|
||||
}
|
||||
|
||||
class MultiChannelTopIO extends BasicTopIO with TopLevelParameters {
|
||||
val mem = Vec.fill(nMemChannels){ new MemIO }
|
||||
}
|
||||
|
||||
/** Top-level module for the chip */
|
||||
//TODO: Remove this wrapper once multichannel DRAM controller is provided
|
||||
class Top extends Module with TopLevelParameters {
|
||||
val io = new TopIO
|
||||
val temp = Module(new MultiChannelTop)
|
||||
val arb = Module(new MemIOArbiter(nMemChannels))
|
||||
arb.io.inner <> temp.io.mem
|
||||
io.mem <> arb.io.outer
|
||||
io.mem_backup_ctrl <> temp.io.mem_backup_ctrl
|
||||
io.host <> temp.io.host
|
||||
}
|
||||
|
||||
class MultiChannelTop extends Module with TopLevelParameters {
|
||||
val io = new MultiChannelTopIO
|
||||
|
||||
// Build an Uncore and a set of Tiles
|
||||
val uncore = Module(new Uncore, {case TLId => "L1ToL2"})
|
||||
val tileList = uncore.io.htif zip params(BuildTiles) map { case(hl, bt) => bt(hl.reset) }
|
||||
|
||||
// Connect each tile to the HTIF
|
||||
uncore.io.htif.zip(tileList).zipWithIndex.foreach {
|
||||
case ((hl, tile), i) =>
|
||||
tile.io.host.id := UInt(i)
|
||||
tile.io.host.reset := Reg(next=Reg(next=hl.reset))
|
||||
tile.io.host.pcr_req <> Queue(hl.pcr_req)
|
||||
hl.pcr_rep <> Queue(tile.io.host.pcr_rep)
|
||||
hl.ipi_req <> Queue(tile.io.host.ipi_req)
|
||||
tile.io.host.ipi_rep <> Queue(hl.ipi_rep)
|
||||
hl.debug_stats_pcr := tile.io.host.debug_stats_pcr
|
||||
}
|
||||
|
||||
// Connect the uncore to the tile memory ports, HostIO and MemIO
|
||||
uncore.io.tiles_cached <> tileList.map(_.io.cached)
|
||||
uncore.io.tiles_uncached <> tileList.map(_.io.uncached)
|
||||
uncore.io.host <> io.host
|
||||
uncore.io.mem <> io.mem
|
||||
if(params(UseBackupMemoryPort)) { uncore.io.mem_backup_ctrl <> io.mem_backup_ctrl }
|
||||
}
|
||||
|
||||
/** Wrapper around everything that isn't a Tile.
|
||||
*
|
||||
* Usually this is clocked and/or place-and-routed separately from the Tiles.
|
||||
* Contains the Host-Target InterFace module (HTIF).
|
||||
*/
|
||||
class Uncore extends Module with TopLevelParameters {
|
||||
val io = new Bundle {
|
||||
val host = new HostIO
|
||||
val mem = new MemIO
|
||||
val tiles = Vec.fill(nTiles){new TileLinkIO}.flip
|
||||
val mem = Vec.fill(nMemChannels){ new MemIO }
|
||||
val tiles_cached = Vec.fill(nTiles){new ClientTileLinkIO}.flip
|
||||
val tiles_uncached = Vec.fill(nTiles){new ClientUncachedTileLinkIO}.flip
|
||||
val htif = Vec.fill(nTiles){new HTIFIO}.flip
|
||||
val incoherent = Vec.fill(nTiles){Bool()}.asInput
|
||||
val mem_backup = new MemSerializedIO(htifW)
|
||||
val mem_backup_en = Bool(INPUT)
|
||||
val mem_backup_ctrl = new MemBackupCtrlIO
|
||||
}
|
||||
|
||||
// Used to hash physical addresses to banks
|
||||
require(params(BankIdLSB) + log2Up(params(NBanks)) < params(MIFAddrBits))
|
||||
def addrToBank(addr: Bits): UInt = {
|
||||
if(nBanks > 1) addr( lsb + log2Up(nBanks) - 1, lsb)
|
||||
else UInt(0)
|
||||
}
|
||||
|
||||
val htif = Module(new HTIF(CSRs.reset)) // One HTIF module per chip
|
||||
val htif = Module(new HTIF(CSRs.mreset)) // One HTIF module per chip
|
||||
val outmemsys = Module(new OuterMemorySystem) // NoC, LLC and SerDes
|
||||
|
||||
// Wire outer mem system to tiles and htif, adding
|
||||
// networking headers and endpoint queues
|
||||
(outmemsys.io.tiles :+ outmemsys.io.htif) // Collect outward-facing TileLink ports
|
||||
.zip(io.tiles :+ htif.io.mem) // Zip them with matching ports from clients
|
||||
.zipWithIndex // Index them
|
||||
.map { case ((outer, client), i) => // Then use the index and bank hash to
|
||||
// overwrite the networking header
|
||||
outer.acquire <> Queue(TileLinkHeaderOverwriter(client.acquire, i, nBanks, addrToBank _))
|
||||
outer.release <> Queue(TileLinkHeaderOverwriter(client.release, i, nBanks, addrToBank _))
|
||||
outer.finish <> Queue(TileLinkHeaderOverwriter(client.finish, i, true))
|
||||
client.grant <> Queue(outer.grant, 1, pipe = true)
|
||||
client.probe <> Queue(outer.probe)
|
||||
}
|
||||
outmemsys.io.incoherent := (io.incoherent :+ Bool(true).asInput)
|
||||
outmemsys.io.incoherent := htif.io.cpu.map(_.reset)
|
||||
outmemsys.io.htif_uncached <> htif.io.mem
|
||||
outmemsys.io.tiles_uncached <> io.tiles_uncached
|
||||
outmemsys.io.tiles_cached <> io.tiles_cached
|
||||
|
||||
// Wire the htif to the memory port(s) and host interface
|
||||
io.host.debug_stats_pcr := htif.io.host.debug_stats_pcr
|
||||
htif.io.cpu <> io.htif
|
||||
outmemsys.io.mem <> io.mem
|
||||
if(params(UseBackupMemoryPort)) {
|
||||
outmemsys.io.mem_backup_en := io.mem_backup_en
|
||||
VLSIUtils.padOutHTIFWithDividedClock(htif.io, outmemsys.io.mem_backup,
|
||||
io.mem_backup, io.host, io.mem_backup_en, htifW)
|
||||
outmemsys.io.mem_backup_en := io.mem_backup_ctrl.en
|
||||
VLSIUtils.padOutHTIFWithDividedClock(htif.io, outmemsys.io.mem_backup, io.mem_backup_ctrl, io.host, htifW)
|
||||
} else {
|
||||
htif.io.host.out <> io.host.out
|
||||
htif.io.host.in <> io.host.in
|
||||
}
|
||||
}
|
||||
|
||||
class TopIO extends Bundle {
|
||||
val host = new HostIO
|
||||
val mem = new MemIO
|
||||
val mem_backup_en = Bool(INPUT)
|
||||
val in_mem_ready = Bool(OUTPUT)
|
||||
val in_mem_valid = Bool(INPUT)
|
||||
val out_mem_ready = Bool(INPUT)
|
||||
val out_mem_valid = Bool(OUTPUT)
|
||||
}
|
||||
|
||||
class Top extends Module with TopLevelParameters {
|
||||
val io = new TopIO
|
||||
|
||||
val resetSigs = Vec.fill(nTiles){Bool()}
|
||||
val tileList = (0 until nTiles).map(r => Module(params(BuildTile)(resetSigs(r))))
|
||||
val uncore = Module(new Uncore)
|
||||
|
||||
for (i <- 0 until nTiles) {
|
||||
val hl = uncore.io.htif(i)
|
||||
val tl = uncore.io.tiles(i)
|
||||
val il = uncore.io.incoherent(i)
|
||||
|
||||
resetSigs(i) := hl.reset
|
||||
val tile = tileList(i)
|
||||
|
||||
tile.io.tilelink <> tl
|
||||
il := hl.reset
|
||||
tile.io.host.id := UInt(i)
|
||||
tile.io.host.reset := Reg(next=Reg(next=hl.reset))
|
||||
tile.io.host.pcr_req <> Queue(hl.pcr_req)
|
||||
hl.pcr_rep <> Queue(tile.io.host.pcr_rep)
|
||||
hl.ipi_req <> Queue(tile.io.host.ipi_req)
|
||||
tile.io.host.ipi_rep <> Queue(hl.ipi_rep)
|
||||
hl.debug_stats_pcr := tile.io.host.debug_stats_pcr
|
||||
/** The whole outer memory hierarchy, including a NoC, some kind of coherence
|
||||
* manager agent, and a converter from TileLink to MemIO.
|
||||
*/
|
||||
class OuterMemorySystem extends Module with TopLevelParameters {
|
||||
val io = new Bundle {
|
||||
val tiles_cached = Vec.fill(nTiles){new ClientTileLinkIO}.flip
|
||||
val tiles_uncached = Vec.fill(nTiles){new ClientUncachedTileLinkIO}.flip
|
||||
val htif_uncached = (new ClientUncachedTileLinkIO).flip
|
||||
val incoherent = Vec.fill(nTiles){Bool()}.asInput
|
||||
val mem = Vec.fill(nMemChannels){ new MemIO }
|
||||
val mem_backup = new MemSerializedIO(htifW)
|
||||
val mem_backup_en = Bool(INPUT)
|
||||
}
|
||||
|
||||
io.host <> uncore.io.host
|
||||
io.mem <> uncore.io.mem
|
||||
// Create a simple L1toL2 NoC between the tiles+htif and the banks of outer memory
|
||||
// Cached ports are first in client list, making sharerToClientId just an indentity function
|
||||
// addrToBank is sed to hash physical addresses (of cache blocks) to banks (and thereby memory channels)
|
||||
val ordered_clients = (io.tiles_cached ++ (io.tiles_uncached :+ io.htif_uncached).map(TileLinkIOWrapper(_)))
|
||||
def sharerToClientId(sharerId: UInt) = sharerId
|
||||
def addrToBank(addr: Bits): UInt = if(nBanks > 1) addr(lsb + log2Up(nBanks) - 1, lsb) else UInt(0)
|
||||
val preBuffering = TileLinkDepths(2,2,2,2,2)
|
||||
val postBuffering = TileLinkDepths(0,0,1,0,0) //TODO: had EOS24 crit path on inner.release
|
||||
val l1tol2net = Module(
|
||||
if(nBanks == 1) new RocketChipTileLinkArbiter(sharerToClientId, preBuffering, postBuffering)
|
||||
else new RocketChipTileLinkCrossbar(addrToBank, sharerToClientId, preBuffering, postBuffering))
|
||||
|
||||
// Create point(s) of coherence serialization
|
||||
val managerEndpoints = List.fill(nMemChannels) {
|
||||
List.fill(nBanksPerMemChannel) {
|
||||
params(BuildL2CoherenceManager)()}}
|
||||
managerEndpoints.flatten.foreach { _.incoherent := io.incoherent }
|
||||
|
||||
// Wire the tiles and htif to the TileLink client ports of the L1toL2 network,
|
||||
// and coherence manager(s) to the other side
|
||||
l1tol2net.io.clients <> ordered_clients
|
||||
l1tol2net.io.managers <> managerEndpoints.flatMap(_.map(_.innerTL))
|
||||
|
||||
// Create a converter between TileLinkIO and MemIO for each channel
|
||||
val outerTLParams = params.alterPartial({ case TLId => "L2ToMC" })
|
||||
val backendBuffering = TileLinkDepths(0,0,0,0,0)
|
||||
val mem_channels = managerEndpoints.map { banks =>
|
||||
if(!params(UseNASTI)) {
|
||||
val arb = Module(new RocketChipTileLinkArbiter(managerDepths = backendBuffering))(outerTLParams)
|
||||
val conv = Module(new MemPipeIOTileLinkIOConverter(nMemReqs))(outerTLParams)
|
||||
arb.io.clients <> banks.map(_.outerTL)
|
||||
conv.io.tl <> arb.io.managers.head
|
||||
MemIOMemPipeIOConverter(conv.io.mem)
|
||||
} else {
|
||||
val arb = Module(new RocketChipTileLinkArbiter(managerDepths = backendBuffering))(outerTLParams)
|
||||
val conv1 = Module(new NASTIMasterIOTileLinkIOConverter)(outerTLParams)
|
||||
val conv2 = Module(new MemIONASTISlaveIOConverter)
|
||||
val conv3 = Module(new MemPipeIOMemIOConverter(nMemReqs))
|
||||
arb.io.clients <> banks.map(_.outerTL)
|
||||
conv1.io.tl <> arb.io.managers.head
|
||||
conv2.io.nasti <> conv1.io.nasti
|
||||
conv3.io.cpu.req_cmd <> Queue(conv2.io.mem.req_cmd, 2)
|
||||
conv3.io.cpu.req_data <> Queue(conv2.io.mem.req_data, mifDataBeats)
|
||||
conv2.io.mem.resp <> conv3.io.cpu.resp
|
||||
MemIOMemPipeIOConverter(conv3.io.mem)
|
||||
}
|
||||
}
|
||||
|
||||
// Create a SerDes for backup memory port
|
||||
if(params(UseBackupMemoryPort)) {
|
||||
uncore.io.mem_backup.resp.valid := io.in_mem_valid
|
||||
io.out_mem_valid := uncore.io.mem_backup.req.valid
|
||||
uncore.io.mem_backup.req.ready := io.out_mem_ready
|
||||
io.mem_backup_en <> uncore.io.mem_backup_en
|
||||
}
|
||||
VLSIUtils.doOuterMemorySystemSerdes(mem_channels, io.mem, io.mem_backup, io.mem_backup_en, nMemChannels)
|
||||
} else { io.mem <> mem_channels }
|
||||
}
|
||||
|
@ -13,32 +13,43 @@ class MemDessert extends Module {
|
||||
}
|
||||
|
||||
object VLSIUtils {
|
||||
def doOuterMemorySystemSerdes(llc: MemPipeIO, mem: MemIO,
|
||||
backup: MemSerializedIO, en: Bool, w: Int) {
|
||||
val mem_serdes = Module(new MemSerdes(w))
|
||||
val wide = mem_serdes.io.wide
|
||||
llc.req_cmd.ready := Mux(en, wide.req_cmd.ready, mem.req_cmd.ready)
|
||||
mem.req_cmd.valid := llc.req_cmd.valid && !en
|
||||
mem.req_cmd.bits := llc.req_cmd.bits
|
||||
wide.req_cmd.valid := llc.req_cmd.valid && en
|
||||
wide.req_cmd.bits := llc.req_cmd.bits
|
||||
def doOuterMemorySystemSerdes(
|
||||
llcs: Seq[MemIO],
|
||||
mems: Seq[MemIO],
|
||||
backup: MemSerializedIO,
|
||||
en: Bool,
|
||||
nMemChannels: Int) {
|
||||
val arb = Module(new MemIOArbiter(nMemChannels))
|
||||
val mem_serdes = Module(new MemSerdes)
|
||||
mem_serdes.io.wide <> arb.io.outer
|
||||
mem_serdes.io.narrow <> backup
|
||||
|
||||
llc.req_data.ready := Mux(en, wide.req_data.ready, mem.req_data.ready)
|
||||
mem.req_data.valid := llc.req_data.valid && !en
|
||||
mem.req_data.bits := llc.req_data.bits
|
||||
wide.req_data.valid := llc.req_data.valid && en
|
||||
wide.req_data.bits := llc.req_data.bits
|
||||
llcs zip mems zip arb.io.inner foreach { case ((llc, mem), wide) =>
|
||||
llc.req_cmd.ready := Mux(en, wide.req_cmd.ready, mem.req_cmd.ready)
|
||||
mem.req_cmd.valid := llc.req_cmd.valid && !en
|
||||
mem.req_cmd.bits := llc.req_cmd.bits
|
||||
wide.req_cmd.valid := llc.req_cmd.valid && en
|
||||
wide.req_cmd.bits := llc.req_cmd.bits
|
||||
|
||||
llc.resp.valid := Mux(en, wide.resp.valid, mem.resp.valid)
|
||||
llc.resp.bits := Mux(en, wide.resp.bits, mem.resp.bits)
|
||||
mem.resp.ready := Bool(true)
|
||||
llc.req_data.ready := Mux(en, wide.req_data.ready, mem.req_data.ready)
|
||||
mem.req_data.valid := llc.req_data.valid && !en
|
||||
mem.req_data.bits := llc.req_data.bits
|
||||
wide.req_data.valid := llc.req_data.valid && en
|
||||
wide.req_data.bits := llc.req_data.bits
|
||||
|
||||
backup <> mem_serdes.io.narrow
|
||||
llc.resp.valid := Mux(en, wide.resp.valid, mem.resp.valid)
|
||||
llc.resp.bits := Mux(en, wide.resp.bits, mem.resp.bits)
|
||||
mem.resp.ready := llc.resp.ready && !en
|
||||
wide.resp.ready := llc.resp.ready && en
|
||||
}
|
||||
}
|
||||
|
||||
def padOutHTIFWithDividedClock(htif: HTIFModuleIO, child: MemSerializedIO,
|
||||
parent: MemSerializedIO, host: HostIO,
|
||||
en: Bool, htifW: Int) {
|
||||
def padOutHTIFWithDividedClock(
|
||||
htif: HTIFModuleIO,
|
||||
child: MemSerializedIO,
|
||||
parent: MemBackupCtrlIO,
|
||||
host: HostIO,
|
||||
htifW: Int) {
|
||||
val hio = Module((new SlowIO(512)) { Bits(width = htifW+1) })
|
||||
hio.io.set_divisor.valid := htif.scr.wen && (htif.scr.waddr === UInt(63))
|
||||
hio.io.set_divisor.bits := htif.scr.wdata
|
||||
@ -50,10 +61,10 @@ object VLSIUtils {
|
||||
child.req.ready := hio.io.out_fast.ready && !htif.host.out.valid
|
||||
host.out.valid := hio.io.out_slow.valid && hio.io.out_slow.bits(htifW)
|
||||
host.out.bits := hio.io.out_slow.bits
|
||||
parent.req.valid := hio.io.out_slow.valid && !hio.io.out_slow.bits(htifW)
|
||||
hio.io.out_slow.ready := Mux(hio.io.out_slow.bits(htifW), host.out.ready, parent.req.ready)
|
||||
parent.out_valid := hio.io.out_slow.valid && !hio.io.out_slow.bits(htifW)
|
||||
hio.io.out_slow.ready := Mux(hio.io.out_slow.bits(htifW), host.out.ready, parent.out_ready)
|
||||
|
||||
val mem_backup_resp_valid = en && parent.resp.valid
|
||||
val mem_backup_resp_valid = parent.en && parent.in_valid
|
||||
hio.io.in_slow.valid := mem_backup_resp_valid || host.in.valid
|
||||
hio.io.in_slow.bits := Cat(mem_backup_resp_valid, host.in.bits)
|
||||
host.in.ready := hio.io.in_slow.ready
|
||||
|
2
uncore
2
uncore
@ -1 +1 @@
|
||||
Subproject commit 94a91189f1c4522c3e38c4a1ec4542650c03b8aa
|
||||
Subproject commit bf608ce9144d54f372f81f237ed25f5418337f14
|
@ -54,6 +54,7 @@ simv = $(sim_dir)/simv-$(CONFIG)
|
||||
$(simv) : $(sim_vsrcs) $(sim_csrcs) $(sim_dir)/libdramsim.a
|
||||
cd $(sim_dir) && \
|
||||
$(VCS) $(VCS_OPTS) -o $(simv) \
|
||||
-debug_pp \
|
||||
|
||||
simv_debug = $(sim_dir)/simv-$(CONFIG)-debug
|
||||
$(simv_debug) : $(sim_vsrcs) $(sim_csrcs) $(sim_dir)/libdramsim.a
|
||||
|
@ -5,19 +5,19 @@
|
||||
# UCB use only
|
||||
-include $(base_dir)/vsim/Makefrag-sim-refchip
|
||||
|
||||
asm_tests_out = $(foreach test, $(asm_p_tests) $(asm_v_tests), $(output_dir)/$(test).out)
|
||||
asm_tests_out = $(foreach test, $(asm_p_tests) $(asm_pt_tests) $(asm_v_tests), $(output_dir)/$(test).out)
|
||||
bmarks_out = $(foreach test, $(bmarks), $(output_dir)/$(test).out)
|
||||
mt_bmarks_out = $(foreach test, $(mt_bmarks), $(output_dir)/$(test).out)
|
||||
|
||||
asm_tests_vcd = $(foreach test, $(asm_p_tests) $(asm_v_tests), $(output_dir)/$(test).vcd)
|
||||
asm_tests_vcd = $(foreach test, $(asm_p_tests) $(asm_pt_tests) $(asm_v_tests), $(output_dir)/$(test).vcd)
|
||||
bmarks_vcd = $(foreach test, $(bmarks), $(output_dir)/$(test).vcd)
|
||||
mt_bmarks_vcd = $(foreach test, $(mt_bmarks), $(output_dir)/$(test).vcd)
|
||||
|
||||
asm_tests_vpd = $(foreach test, $(asm_p_tests) $(asm_v_tests), $(output_dir)/$(test).vpd)
|
||||
asm_tests_vpd = $(foreach test, $(asm_p_tests) $(asm_pt_tests) $(asm_v_tests), $(output_dir)/$(test).vpd)
|
||||
bmarks_vpd = $(foreach test, $(bmarks), $(output_dir)/$(test).vpd)
|
||||
mt_bmarks_vpd = $(foreach test, $(mt_bmarks), $(output_dir)/$(test).vpd)
|
||||
|
||||
asm_tests_saif = $(foreach test, $(asm_p_tests) $(asm_v_tests), $(output_dir)/$(test).saif)
|
||||
asm_tests_saif = $(foreach test, $(asm_p_tests) $(asm_pt_tests) $(asm_v_tests), $(output_dir)/$(test).saif)
|
||||
bmarks_saif = $(foreach test, $(bmarks), $(output_dir)/$(test).saif)
|
||||
mt_bmarks_saif = $(foreach test, $(mt_bmarks), $(output_dir)/$(test).saif)
|
||||
|
||||
|
@ -82,16 +82,6 @@ def gen_mem(name, width, depth, ports):
|
||||
masked = len(maskedports)>0
|
||||
tup = (depth, width, nr, nw, nrw, masked)
|
||||
|
||||
decl.append('reg [%d:0] ram [%d:0];' % (width-1, depth-1))
|
||||
decl.append('`ifndef SYNTHESIS')
|
||||
decl.append(' integer initvar;')
|
||||
decl.append(' initial begin')
|
||||
decl.append(' #0.002;')
|
||||
decl.append(' for (initvar = 0; initvar < %d; initvar = initvar+1)' % depth)
|
||||
decl.append(' ram[initvar] = {%d {$random}};' % ((width-1)/32+1))
|
||||
decl.append(' end')
|
||||
decl.append('`endif')
|
||||
|
||||
for pid in readports:
|
||||
decl.append('reg [%d:0] reg_R%dA;' % (addr_width-1, pid))
|
||||
sequential.append('if (R%dE) reg_R%dA <= R%dA;' % (pid, pid, pid))
|
||||
@ -115,6 +105,20 @@ def gen_mem(name, width, depth, ports):
|
||||
combinational.append(' if (CLK && latch_W%dE)' % (pid))
|
||||
combinational.append(' ram[latch_W%dA] <= latch_W%dI;' % (pid, pid))
|
||||
|
||||
decl.append('reg [%d:0] ram [%d:0];' % (width-1, depth-1))
|
||||
decl.append('`ifndef SYNTHESIS')
|
||||
decl.append(' integer initvar;')
|
||||
decl.append(' initial begin')
|
||||
decl.append(' #0.002;')
|
||||
decl.append(' for (initvar = 0; initvar < %d; initvar = initvar+1)' % depth)
|
||||
decl.append(' ram[initvar] = {%d {$random}};' % ((width-1)/32+1))
|
||||
for pid in readports:
|
||||
decl.append(' reg_R%dA = {%d {$random}};' % (pid, ((addr_width-1)/32+1)))
|
||||
for pid in rwports:
|
||||
decl.append(' reg_RW%dA = {%d {$random}};' % (pid, ((addr_width-1)/32+1)))
|
||||
decl.append(' end')
|
||||
decl.append('`endif')
|
||||
|
||||
decl.append("integer i;")
|
||||
sequential.append("for (i = 0; i < %d; i=i+1) begin" % width)
|
||||
for pid in writeports:
|
||||
|
@ -18,7 +18,7 @@ extern "A" void htif_tick
|
||||
output reg htif_out_ready,
|
||||
input reg [`HTIF_WIDTH-1:0] htif_out_bits,
|
||||
|
||||
output reg [1:0] exit
|
||||
output reg [31:0] exit
|
||||
);
|
||||
|
||||
extern "A" void memory_tick
|
||||
@ -128,14 +128,13 @@ module rocketTestHarness;
|
||||
.io_host_debug_stats_pcr(htif_out_stats_delay),
|
||||
|
||||
`ifdef MEM_BACKUP_EN
|
||||
.io_mem_backup_en(1'b1),
|
||||
.io_mem_backup_ctrl_en(1'b1),
|
||||
`else
|
||||
.io_mem_backup_en(1'b0),
|
||||
.io_mem_backup_ctrl_en(1'b0),
|
||||
`endif
|
||||
.io_in_mem_ready(),
|
||||
.io_in_mem_valid(mem_bk_in_valid_delay),
|
||||
.io_out_mem_ready(mem_bk_out_ready_delay),
|
||||
.io_out_mem_valid(mem_bk_out_valid_delay),
|
||||
.io_mem_backup_ctrl_in_valid(mem_bk_in_valid_delay),
|
||||
.io_mem_backup_ctrl_out_ready(mem_bk_out_ready_delay),
|
||||
.io_mem_backup_ctrl_out_valid(mem_bk_out_valid_delay),
|
||||
`endif
|
||||
|
||||
.io_mem_req_cmd_valid(mem_req_valid_delay),
|
||||
@ -268,7 +267,7 @@ module rocketTestHarness;
|
||||
assign htif_in_bits = mem_bk_in_valid ? mem_in_bits : htif_in_bits_premux;
|
||||
assign htif_in_valid = htif_in_valid_premux && !mem_bk_in_valid;
|
||||
wire htif_in_ready_premux = htif_in_ready && !mem_bk_in_valid;
|
||||
reg [1:0] exit = 0;
|
||||
reg [31:0] exit = 0;
|
||||
|
||||
always @(posedge htif_clk)
|
||||
begin
|
||||
|
Loading…
Reference in New Issue
Block a user