vlsi verilog compiles now but doesn't simulate

Andrew Waterman 2011-12-20 22:08:27 -08:00
parent 38ea10a5f4
commit d65e1a2eee
3 changed files with 102 additions and 134 deletions

View File

@@ -37,7 +37,6 @@ class ioCtrlDpath extends Bundle()
   val mem_eret = Bool('output);
   val mem_load = Bool('output);
   val wen = Bool('output);
-  val ex_mem_type = UFix(3, 'output)
   // instruction in execute is an unconditional jump
   val ex_jmp = Bool('output);
   // enable/disable interrupts
@@ -677,7 +676,6 @@ class rocketCtrl extends Component
     io.dmem.req_kill := mem_kill_dmem;
     io.dmem.req_cmd  := ex_reg_mem_cmd;
     io.dmem.req_type := ex_reg_mem_type;
-    io.dpath.ex_mem_type:= ex_reg_mem_type
  }
}

View File

@@ -11,35 +11,32 @@ class StoreMaskGen extends Component {
     val addr = Bits(3, 'input)
     val wmask = Bits(8, 'output)
   }
-  val mask = Wire { Bits(width = io.wmask.width) }
-  switch (io.typ(1,0))
-  {
-    is (MT_B) { mask <== Bits( 1,1) << io.addr(2,0).toUFix }
-    is (MT_H) { mask <== Bits( 3,2) << Cat(io.addr(2,1), Bits(0,1)).toUFix }
-    is (MT_W) { mask <== Bits( 15,4) << Cat(io.addr(2,2), Bits(0,2)).toUFix }
-    otherwise { mask <== Bits(255,8) } // MT_D
-  }
-  io.wmask := mask
+  val word = (io.typ === MT_W) || (io.typ === MT_WU)
+  val half = (io.typ === MT_H) || (io.typ === MT_HU)
+  val byte = (io.typ === MT_B) || (io.typ === MT_BU)
+
+  io.wmask := Mux(byte, Bits( 1,1) << io.addr(2,0).toUFix,
+              Mux(half, Bits( 3,2) << Cat(io.addr(2,1), Bits(0,1)).toUFix,
+              Mux(word, Bits( 15,4) << Cat(io.addr(2), Bits(0,2)).toUFix,
+                        Bits(255,8))));
 }

 class StoreDataGen extends Component {
   val io = new Bundle {
     val typ = Bits(3, 'input)
-    val addr = Bits(3, 'input)
     val din = Bits(64, 'input)
     val dout = Bits(64, 'output)
   }
-  val data = Wire { Bits(width = io.din.width) }
-  switch (io.typ(1,0))
-  {
-    is (MT_B) { data <== Fill(8, io.din( 7,0)) }
-    is (MT_H) { data <== Fill(4, io.din(15,0)) }
-    is (MT_W) { data <== Fill(2, io.din(31,0)) }
-    otherwise { data <== io.din } // MT_D
-  }
-  io.dout := data
+  val word = (io.typ === MT_W) || (io.typ === MT_WU)
+  val half = (io.typ === MT_H) || (io.typ === MT_HU)
+  val byte = (io.typ === MT_B) || (io.typ === MT_BU)
+
+  io.dout := Mux(byte, Fill(8, io.din( 7,0)),
+             Mux(half, Fill(4, io.din(15,0)),
+             Mux(word, Fill(2, io.din(31,0)),
+                       io.din)))
 }

 // this currently requires that CPU_DATA_BITS == 64
@@ -53,26 +50,27 @@ class LoadDataGen extends Component {
     val r_dout_subword = Bits(64, 'output)
   }

+  val sext = (io.typ === MT_B) || (io.typ === MT_H) ||
+             (io.typ === MT_W) || (io.typ === MT_D)
+  val word = (io.typ === MT_W) || (io.typ === MT_WU)
+  val half = (io.typ === MT_H) || (io.typ === MT_HU)
+  val byte = (io.typ === MT_B) || (io.typ === MT_BU)
+
   val shifted = io.din >> Cat(io.addr(io.addr.width-1,2), Bits(0, 5)).toUFix
-  val extended = Wire { Bits(width = io.din.width) }
-  switch (io.typ)
-  {
-    is(MT_W)  { extended <== Cat(Fill(32, shifted(31)), shifted(31,0)) }
-    is(MT_WU) { extended <== Cat(Bits(0, 32), shifted(31,0)) }
-    otherwise { extended <== shifted }
-  }
+  val extended =
+    Mux(word, Cat(Fill(32, sext & shifted(31)), shifted(31,0)), shifted)

   val r_extended = Reg(extended)
-  val shifted_subword = r_extended >> Cat(Reg(io.addr(1,0)), Bits(0, 3)).toUFix
-  val extended_subword = Wire { Bits(width = io.din.width) }
-  switch (Reg(io.typ))
-  {
-    is (MT_B)  { extended_subword <== Cat(Fill(56, shifted_subword( 7)), shifted_subword( 7, 0)) }
-    is (MT_BU) { extended_subword <== Cat(Bits(0, 56), shifted_subword( 7, 0)) }
-    is (MT_H)  { extended_subword <== Cat(Fill(48, shifted_subword(15)), shifted_subword(15, 0)) }
-    is (MT_HU) { extended_subword <== Cat(Bits(0, 48), shifted_subword(15, 0)) }
-    otherwise  { extended_subword <== shifted_subword }
-  }
+  val r_sext = Reg(sext)
+  val r_half = Reg(half)
+  val r_byte = Reg(byte)
+  val r_addr = Reg(io.addr)
+
+  val shifted_subword = r_extended >> Cat(r_addr(1,0), Bits(0, 3)).toUFix
+  val extended_subword =
+    Mux(r_byte, Cat(Fill(56, r_sext & shifted_subword( 7)), shifted_subword( 7,0)),
+    Mux(r_half, Cat(Fill(48, r_sext & shifted_subword(15)), shifted_subword(15,0)),
+                shifted_subword))

   io.dout := extended
   io.r_dout := r_extended
@@ -237,7 +235,6 @@ class MSHRFile extends Component {
     val req_offset = Bits(OFFSET_BITS, 'input)
     val req_cmd    = Bits(4, 'input)
     val req_type   = Bits(3, 'input)
-    val req_data   = Bits(CPU_DATA_BITS, 'input)
     val req_tag    = Bits(DCACHE_TAG_BITS, 'input)
     val req_sdq_id = UFix(log2up(NSDQ), 'input)
@@ -252,11 +249,6 @@ class MSHRFile extends Component {
     val replay = (new ioDecoupled) { new Replay() }.flip()
   }

-  val idx_match = Wire { Bool() }
-  val pri_rdy = Wire { Bool() }
-  val fence = Wire { Bool() }
-  val sec_rdy = Wire { Bool() }
-
   val tag_mux = new Mux1H(NMSHR, PPN_BITS)
   val mem_resp_idx_mux = new Mux1H(NMSHR, IDX_BITS)
   val meta_req_arb = (new Arbiter(NMSHR)) { new MetaArrayReq() }
@@ -264,10 +256,14 @@ class MSHRFile extends Component {
   val replay_arb = (new Arbiter(NMSHR)) { new Replay() }
   val alloc_arb = (new Arbiter(NMSHR)) { Bool() }

-  alloc_arb.io.out.ready := io.req_val && !idx_match
-
   val tag_match = tag_mux.io.out === io.req_ppn

+  var idx_match = Bool(false)
+  var pri_rdy = Bool(false)
+  var fence = Bool(false)
+  var sec_rdy = Bool(false)
+
   for (i <- 0 to NMSHR-1) {
     val mshr = new MSHR(i)
@@ -295,15 +291,13 @@ class MSHRFile extends Component {
     mem_resp_idx_mux.io.sel(i) := (UFix(i) === io.mem_resp_tag)
     mem_resp_idx_mux.io.in(i) := mshr.io.idx

-    when (mshr.io.req_pri_rdy) { pri_rdy <== Bool(true) }
-    when (!mshr.io.req_pri_rdy) { fence <== Bool(true) }
-    when (mshr.io.req_sec_rdy) { sec_rdy <== Bool(true) }
-    when (mshr.io.idx_match) { idx_match <== Bool(true) }
+    pri_rdy = pri_rdy || mshr.io.req_pri_rdy
+    sec_rdy = sec_rdy || mshr.io.req_sec_rdy
+    fence = fence || !mshr.io.req_pri_rdy
+    idx_match = idx_match || mshr.io.idx_match
   }

-  pri_rdy <== Bool(false)
-  fence <== Bool(false)
-  sec_rdy <== Bool(false)
-  idx_match <== Bool(false)
+  alloc_arb.io.out.ready := io.req_val && !idx_match

   meta_req_arb.io.out ^^ io.meta_req
   mem_req_arb.io.out ^^ io.mem_req
@@ -329,9 +323,9 @@ class ReplayUnit extends Component {
   sdq_allocator.io.in := ~sdq_val
   val sdq_alloc_id = sdq_allocator.io.out.toUFix

-  val replay_retry = Wire { Bool() }
-  val replay_val = Reg(io.replay.valid || replay_retry, resetVal = Bool(false))
-  replay_retry <== replay_val && !io.data_req.ready
+  val replay_val = Reg(resetVal = Bool(false))
+  val replay_retry = replay_val && !io.data_req.ready
+  replay_val <== io.replay.valid || replay_retry

   val rp = Reg { new Replay() }
   when (io.replay.valid && io.replay.ready) { rp <== io.replay.bits }
@@ -350,7 +344,7 @@ class ReplayUnit extends Component {
   val sdq = Mem4(NSDQ, io.sdq_enq.bits)
   sdq.setReadLatency(0)
-  sdq.setTarget('inst)
+  // sdq.setTarget('inst)
   val sdq_dout = sdq.rw(sdq_addr, io.sdq_enq.bits, sdq_wen, cs = sdq_ren || sdq_wen)
   val sdq_free = replay_val && !replay_retry && rp_write
@@ -392,6 +386,7 @@ class WritebackUnit extends Component {
   val block_refill = valid && (io.refill_req.bits.addr(IDX_BITS-1,0) === addr.idx)
   val refill_val = io.refill_req.valid && !block_refill

+  wbq.io.q_reset := Bool(false)
   wbq.io.enq.valid := valid && Reg(io.data_req.valid && io.data_req.ready)
   wbq.io.enq.bits := io.data_resp
   wbq.io.deq.ready := io.mem_req.ready && !refill_val && (cnt === UFix(REFILL_CYCLES))
@@ -471,7 +466,7 @@ class MetaDataArray(lines: Int) extends Component {
   val tag_array = Mem4(lines, io.resp.tag)
   tag_array.setReadLatency(0)
-  tag_array.setTarget('inst)
+  // tag_array.setTarget('inst)
   val tag_rdata = tag_array.rw(io.req.bits.idx, io.req.bits.data.tag, io.req.valid && io.req.bits.rw, cs = io.req.valid)

   io.resp.valid := vd_rdata1(1).toBool
@@ -490,7 +485,7 @@ class DataArray(lines: Int) extends Component {
   val array = Mem4(lines*REFILL_CYCLES, io.resp)
   array.setReadLatency(0)
-  array.setTarget('inst)
+  // array.setTarget('inst)
   val addr = Cat(io.req.bits.idx, io.req.bits.offset)
   val rdata = array.rw(addr, io.req.bits.data, io.req.valid && io.req.bits.rw, wmask, cs = io.req.valid)
   io.resp := rdata
@@ -506,7 +501,7 @@ class AMOALU extends Component {
     val out = UFix(64, 'output)
   }

-  val signed = (io.cmd === M_XA_MIN) || (io.cmd === M_XA_MAX)
+  val sgned = (io.cmd === M_XA_MIN) || (io.cmd === M_XA_MAX)
   val sub = (io.cmd === M_XA_MIN) || (io.cmd === M_XA_MINU) || (io.cmd === M_XA_MAX) || (io.cmd === M_XA_MAXU)
   val min = (io.cmd === M_XA_MIN) || (io.cmd === M_XA_MINU)
   val word = (io.typ === MT_W) || (io.typ === MT_WU)
@@ -516,19 +511,14 @@ class AMOALU extends Component {
   val cmp_lhs  = Mux(word, io.lhs(31), io.lhs(63))
   val cmp_rhs  = Mux(word, io.rhs(31), io.rhs(63))
   val cmp_diff = Mux(word, adder_out(31), adder_out(63))
-  val less = Mux(cmp_lhs === cmp_rhs, cmp_diff, Mux(signed, cmp_lhs, cmp_rhs))
+  val less = Mux(cmp_lhs === cmp_rhs, cmp_diff, Mux(sgned, cmp_lhs, cmp_rhs))
   val cmp_out = Mux(min === less, io.lhs, io.rhs)

-  val alu_out = Wire { UFix(width = io.out.width) };
-  switch (io.cmd) {
-    is (M_XA_ADD)  { alu_out <== adder_out }
-    is (M_XA_SWAP) { alu_out <== io.rhs }
-    is (M_XA_AND)  { alu_out <== io.lhs & io.rhs }
-    is (M_XA_OR)   { alu_out <== io.lhs | io.rhs }
-  }
-  alu_out <== cmp_out
-  io.out := alu_out
+  io.out := Mux(io.cmd === M_XA_ADD,  adder_out,
+            Mux(io.cmd === M_XA_SWAP, io.rhs,
+            Mux(io.cmd === M_XA_AND,  io.lhs & io.rhs,
+            Mux(io.cmd === M_XA_OR,   io.lhs | io.rhs,
+            /* MIN[U]/MAX[U] */       cmp_out))));
 }

 class HellaCache(lines: Int) extends Component {
@@ -560,9 +550,7 @@ class HellaCache(lines: Int) extends Component {
   val p_store_idx = Reg() { Bits() }
   val p_store_cmd = Reg() { Bits() }
   val p_store_type = Reg() { Bits() }
+  val r_replay_amo = Reg(resetVal = Bool(false))
-  val store_data_wide = Wire { Bits(width = MEM_DATA_BITS) }
-  val store_wmask_wide = Wire { Bits(width = MEM_DATA_BITS) }

   val req_store = (io.cpu.req_cmd === M_XWR)
   val req_load = (io.cpu.req_cmd === M_XRD)
@@ -578,18 +566,11 @@ class HellaCache(lines: Int) extends Component {
   val r_req_write = r_req_store || r_req_amo
   val r_req_readwrite = r_req_read || r_req_write

-  val nack_wb = Wire { Bool() }
-  val nack_mshr = Wire { Bool() }
-  val nack_sdq = Wire { Bool() }
-
   // replay unit
   val replayer = new ReplayUnit
   val replay_amo_val = replayer.io.data_req.valid && replayer.io.data_req.bits.cmd(3).toBool
-  val replay_amo_rdy = replayer.io.data_req.ready
-  val replay_amo = replay_amo_val && replay_amo_rdy
-  val r_replay_amo = Reg(replay_amo, resetVal = Bool(false))

-  when (replay_amo) {
+  when (replay_amo_val) {
     r_cpu_req_data <== replayer.io.data_req.bits.data
   }
   when (io.cpu.req_val) {
@@ -645,12 +626,6 @@ class HellaCache(lines: Int) extends Component {
   data_arb.io.in(0).bits.wmask := ~UFix(0, MEM_DATA_BITS/8)
   data_arb.io.in(0).bits.data := io.mem.resp_data

-  // writeback
-  val wb_rdy = wb_arb.io.in(1).ready
-  wb_arb.io.in(1).valid := tag_miss && r_req_readwrite && dirty && !nack_wb
-  wb_arb.io.in(1).bits.ppn := meta.io.resp.tag
-  wb_arb.io.in(1).bits.idx := r_cpu_req_idx(indexmsb,indexlsb)
-
   // load hits
   data_arb.io.in(4).bits.offset := io.cpu.req_idx(offsetmsb,ramindexlsb)
   data_arb.io.in(4).bits.idx := io.cpu.req_idx(indexmsb,indexlsb)
@@ -664,22 +639,27 @@ class HellaCache(lines: Int) extends Component {
   // we nack new stores if a pending store can't retire for some reason.
   // we drain a pending store if the CPU performs a store or a
   // conflictig load, or if the cache is idle, or after a miss.
-  val p_store_match = r_cpu_req_val && r_req_read && p_store_valid && (r_cpu_req_idx(indexlsb-1,offsetlsb) === p_store_idx(indexlsb-1,offsetlsb))
+  val p_store_idx_match = p_store_valid && (r_cpu_req_idx(indexmsb,indexlsb) === p_store_idx(indexmsb,indexlsb))
+  val p_store_offset_match = (r_cpu_req_idx(indexlsb-1,offsetlsb) === p_store_idx(indexlsb-1,offsetlsb))
+  val p_store_match = r_cpu_req_val && r_req_read && p_store_idx_match && p_store_offset_match
   val drain_store_val = (p_store_valid && (!io.cpu.req_val || !req_read || Reg(tag_miss))) || p_store_match
   data_arb.io.in(2).bits.offset := p_store_idx(offsetmsb,ramindexlsb)
   data_arb.io.in(2).bits.idx := p_store_idx(indexmsb,indexlsb)
   data_arb.io.in(2).bits.rw := Bool(true)
-  data_arb.io.in(2).bits.wmask := store_wmask_wide
-  data_arb.io.in(2).bits.data := store_data_wide
   data_arb.io.in(2).valid := drain_store_val
-  val drain_store_rdy = data_arb.io.in(2).ready
-  val drain_store = drain_store_val && drain_store_rdy
+  val drain_store = drain_store_val && data_arb.io.in(2).ready
   val p_store_rdy = !p_store_valid || drain_store
-  val p_amo = Reg(tag_hit && r_req_amo && drain_store_rdy && !p_store_match || r_replay_amo, resetVal = Bool(false))
+  val p_amo = Reg(tag_hit && r_req_amo && p_store_rdy && !p_store_match || r_replay_amo, resetVal = Bool(false))
   p_store_valid <== !p_store_rdy || (tag_hit && r_req_store) || p_amo

+  // writeback
+  val wb_rdy = wb_arb.io.in(1).ready && !p_store_idx_match
+  wb_arb.io.in(1).valid := tag_miss && r_req_readwrite && dirty && !p_store_idx_match
+  wb_arb.io.in(1).bits.ppn := meta.io.resp.tag
+  wb_arb.io.in(1).bits.idx := r_cpu_req_idx(indexmsb,indexlsb)
+
   // tag update after a miss or a store to an exclusive clean line.
-  val clear_valid = tag_miss && r_req_readwrite && meta.io.resp.valid && (!dirty || wb_rdy && !nack_wb)
+  val clear_valid = tag_miss && r_req_readwrite && meta.io.resp.valid && (!dirty || wb_rdy)
   val set_dirty = tag_hit && !meta.io.resp.dirty && r_req_write
   meta.io.state_req.valid := clear_valid || set_dirty
   meta.io.state_req.bits.rw := Bool(true)
@@ -691,7 +671,6 @@ class HellaCache(lines: Int) extends Component {
   val storegen = new StoreDataGen
   val amoalu = new AMOALU
   storegen.io.typ := r_cpu_req_type
-  storegen.io.addr := r_cpu_req_idx(offsetlsb-1, 0)
   storegen.io.din := r_cpu_req_data
   when (p_amo) {
     p_store_data <== amoalu.io.out
@@ -705,10 +684,9 @@ class HellaCache(lines: Int) extends Component {
   // miss handling
   val mshr = new MSHRFile
-  mshr.io.req_val := tag_miss && r_req_readwrite && !nack_mshr
+  mshr.io.req_val := tag_miss && r_req_readwrite && (!dirty || wb_rdy) && (!r_req_write || replayer.io.sdq_enq.ready)
   mshr.io.req_ppn := io.cpu.req_ppn
   mshr.io.req_idx := r_cpu_req_idx(indexmsb,indexlsb)
-  mshr.io.req_data := p_store_data
   mshr.io.req_tag := r_cpu_req_tag
   mshr.io.req_offset := r_cpu_req_idx(offsetmsb,0)
   mshr.io.req_cmd := r_cpu_req_cmd
@@ -719,7 +697,7 @@ class HellaCache(lines: Int) extends Component {
   mshr.io.mem_req <> wb.io.refill_req
   mshr.io.meta_req <> meta_arb.io.in(1)
   mshr.io.replay <> replayer.io.replay
-  replayer.io.sdq_enq.valid := tag_miss && r_req_write && !nack_sdq
+  replayer.io.sdq_enq.valid := tag_miss && r_req_write && (!dirty || wb_rdy) && mshr.io.req_rdy
   replayer.io.sdq_enq.bits := storegen.io.dout
   data_arb.io.in(0).bits.idx := mshr.io.mem_resp_idx
@@ -727,13 +705,13 @@ class HellaCache(lines: Int) extends Component {
   val replay = replayer.io.data_req.bits
   val stall_replay = r_replay_amo || p_amo || p_store_valid
   val replay_val = replayer.io.data_req.valid && !stall_replay
+  val replay_rdy = data_arb.io.in(1).ready
   data_arb.io.in(1).bits.offset := replay.offset(offsetmsb,ramindexlsb)
   data_arb.io.in(1).bits.idx := replay.idx
   data_arb.io.in(1).bits.rw := replay.cmd === M_XWR
-  data_arb.io.in(1).bits.wmask := store_wmask_wide
-  data_arb.io.in(1).bits.data := store_data_wide
   data_arb.io.in(1).valid := replay_val
-  replayer.io.data_req.ready := data_arb.io.in(1).ready && !stall_replay
+  replayer.io.data_req.ready := replay_rdy && !stall_replay
+  r_replay_amo <== replay_amo_val && replay_rdy && !stall_replay

   // store write mask generation.
   // assumes store replays are higher-priority than pending stores.
@@ -741,14 +719,18 @@ class HellaCache(lines: Int) extends Component {
   val store_offset = Mux(!replay_val, p_store_idx(offsetmsb,0), replay.offset)
   maskgen.io.typ := Mux(!replay_val, p_store_type, replay.typ)
   maskgen.io.addr := store_offset(offsetlsb-1,0)
-  store_wmask_wide <== maskgen.io.wmask << Cat(store_offset(ramindexlsb-1,offsetlsb), Bits(0, log2up(CPU_DATA_BITS/8))).toUFix
+  val store_wmask_wide = maskgen.io.wmask << Cat(store_offset(ramindexlsb-1,offsetlsb), Bits(0, log2up(CPU_DATA_BITS/8))).toUFix
   val store_data = Mux(!replay_val, p_store_data, replay.data)
-  store_data_wide <== Fill(MEM_DATA_BITS/CPU_DATA_BITS, store_data)
+  val store_data_wide = Fill(MEM_DATA_BITS/CPU_DATA_BITS, store_data)
+  data_arb.io.in(1).bits.data := store_data_wide
+  data_arb.io.in(1).bits.wmask := store_wmask_wide
+  data_arb.io.in(2).bits.data := store_data_wide
+  data_arb.io.in(2).bits.wmask := store_wmask_wide

   // load data subword mux/sign extension.
   // subword loads are delayed by one cycle.
   val loadgen = new LoadDataGen
-  val loadgen_use_replay = Reg(replayer.io.data_req.valid && replayer.io.data_req.ready)
+  val loadgen_use_replay = Reg(replay_val && replay_rdy)
   loadgen.io.typ := Mux(loadgen_use_replay, Reg(replay.typ), r_cpu_req_type)
   loadgen.io.addr := Mux(loadgen_use_replay, Reg(replay.offset), r_cpu_req_idx)(ramindexlsb-1,0)
   loadgen.io.din := data.io.resp
@@ -758,22 +740,14 @@ class HellaCache(lines: Int) extends Component {
   amoalu.io.lhs := loadgen.io.r_dout.toUFix
   amoalu.io.rhs := p_store_data.toUFix

-  early_nack <== early_tag_nack || early_load_nack || r_cpu_req_val && r_req_amo || replay_amo || r_replay_amo
+  early_nack <== early_tag_nack || early_load_nack || r_cpu_req_val && r_req_amo || replay_amo_val || r_replay_amo

-  val nack_miss_wb = dirty && !wb_rdy
-  val nack_miss_mshr = !mshr.io.req_rdy
-  val nack_miss_sdq = r_req_write && !replayer.io.sdq_enq.ready
-  nack_wb <== nack_miss_mshr || nack_miss_sdq || !p_store_rdy || p_store_match
-  nack_mshr <== nack_miss_wb || nack_miss_sdq || !p_store_rdy || p_store_match
-  nack_sdq <== nack_miss_wb || nack_miss_mshr || !p_store_rdy || p_store_match

   // reset and flush unit
   val flusher = new FlushUnit(lines)
   val flushed = Reg(resetVal = Bool(true))
-  val fence_rdy = mshr.io.fence_rdy && wb_rdy && p_store_rdy
-  flushed <== flushed && !r_cpu_req_val || flusher.io.req.valid && flusher.io.req.ready
-  flusher.io.req.valid := r_cpu_req_val && r_req_flush && fence_rdy && !flushed
+  val flush_rdy = mshr.io.fence_rdy && wb_rdy && !p_store_valid
+  flushed <== flushed && !r_cpu_req_val || r_cpu_req_val && r_req_flush && flush_rdy && flusher.io.req.ready
+  flusher.io.req.valid := r_cpu_req_val && r_req_flush && flush_rdy && !flushed
   flusher.io.wb_req <> wb_arb.io.in(0)
   flusher.io.meta_req <> meta_arb.io.in(0)
   flusher.io.meta_resp <> meta.io.resp
@@ -782,19 +756,17 @@ class HellaCache(lines: Int) extends Component {
   // we usually nack rather than reporting that the cache is not ready.
   // fences and flushes are the exceptions.
   val pending_fence = Reg(resetVal = Bool(false))
-  pending_fence <== (r_cpu_req_val && r_req_fence || pending_fence) && !fence_rdy
+  pending_fence <== (r_cpu_req_val && r_req_fence || pending_fence) && !flush_rdy

-  val nack = p_store_match ||
-             early_nack ||
-             !fence_rdy && (r_req_fence || r_req_flush) ||
-             !p_store_rdy && r_req_write ||
-             !flushed && r_req_flush ||
-             tag_miss && r_req_readwrite && (nack_miss_wb || nack_miss_mshr || nack_miss_sdq || !p_store_rdy) ||
-             !flusher.io.req.ready
+  val nack_hit = p_store_match || r_req_write && !p_store_rdy
+  val nack_miss = dirty && !wb_rdy || !mshr.io.req_rdy || r_req_write && !replayer.io.sdq_enq.ready
+  val nack_flush = !flush_rdy && (r_req_fence || r_req_flush) ||
+                   !flushed && r_req_flush
+  val nack = early_nack || r_req_readwrite && Mux(tag_match, nack_hit, nack_miss) || nack_flush

-  io.cpu.req_rdy := flusher.io.req.ready && !pending_fence
+  io.cpu.req_rdy := flusher.io.req.ready && !(r_cpu_req_val_ && r_req_flush) && !pending_fence
   io.cpu.resp_nack := r_cpu_req_val_ && !io.cpu.req_kill && nack
-  io.cpu.resp_val := (tag_hit && !nack && r_req_read) || replayer.io.cpu_resp_val
-  io.cpu.resp_miss := tag_miss && !nack && r_req_read
+  io.cpu.resp_val := (tag_hit && !nack_hit && r_req_read) || replayer.io.cpu_resp_val
+  io.cpu.resp_miss := tag_miss && !nack_miss && r_req_read
   io.cpu.resp_tag := Mux(replayer.io.cpu_resp_val, replayer.io.cpu_resp_tag, r_cpu_req_tag)
   io.cpu.resp_data := loadgen.io.dout
   io.cpu.resp_data_subword := loadgen.io.r_dout_subword

View File

@@ -64,7 +64,6 @@ class ioArbiter[T <: Data](n: Int)(data: => T) extends Bundle {
 class Arbiter[T <: Data](n: Int)(data: => T) extends Component {
   val io = new ioArbiter(n)(data)
-  val vout = Wire { Bool() }

   io.in(0).ready := io.out.ready
   for (i <- 1 to n-1) {
@@ -75,10 +74,9 @@ class Arbiter[T <: Data](n: Int)(data: => T) extends Component {
   for (i <- 1 to n-1)
     dout = Mux(io.in(n-1-i).valid, io.in(n-1-i).bits, dout)

-  for (i <- 0 to n-2) {
-    when (io.in(i).valid) { vout <== Bool(true) }
-  }
-  vout <== io.in(n-1).valid
+  var vout = io.in(0).valid
+  for (i <- 1 to n-1)
+    vout = vout || io.in(i).valid

   vout ^^ io.out.valid
   dout ^^ io.out.bits
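
The hunks above apply the same rewrite pattern throughout: conditional updates into a Wire via switch/when and <== are replaced by purely combinational expressions, either a nested Mux chain or a Scala-level var folded across a loop. A minimal illustrative sketch of the two idioms (not part of the commit), using only constructs that already appear in the diff (Mux, Bits, Bool, ||, io.in(i).valid); the names sel, a, b, c, picked, and any_valid are made up for illustration:

  // Priority select as a nested Mux chain instead of switch { is(...) { x <== ... } }
  val picked = Mux(sel === Bits(0,2), a,
               Mux(sel === Bits(1,2), b,
                                      c))

  // OR-reduction as a Scala var fold instead of when(...) { vout <== Bool(true) }
  var any_valid = Bool(false)
  for (i <- 0 to n-1)
    any_valid = any_valid || io.in(i).valid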