// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.

package freechips.rocketchip.rocket

import Chisel._
import Chisel.ImplicitConversions._
import freechips.rocketchip.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util._
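
// Request bundles exchanged between the cache's internal units (MSHRs,
// writeback unit, probe unit) and the metadata/data arrays.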
trait HasMissInfo extends HasL1HellaCacheParameters {
  val tag_match = Bool()
  val old_meta = new L1Metadata
  val way_en = Bits(width = nWays)
}

class L1DataReadReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
  val way_en = Bits(width = nWays)
  val addr = Bits(width = untagBits)
}

class L1DataWriteReq(implicit p: Parameters) extends L1DataReadReq()(p) {
  val wmask = Bits(width = rowWords)
  val data = Bits(width = encRowBits)
}

class L1RefillReq(implicit p: Parameters) extends L1DataReadReq()(p)

class Replay(implicit p: Parameters) extends HellaCacheReqInternal()(p) with HasCoreData

class ReplayInternal(implicit p: Parameters) extends HellaCacheReqInternal()(p)
    with HasL1HellaCacheParameters {
  val sdq_id = UInt(width = log2Up(cfg.nSDQ))
}

class MSHRReq(implicit p: Parameters) extends Replay()(p) with HasMissInfo

class MSHRReqInternal(implicit p: Parameters) extends ReplayInternal()(p) with HasMissInfo

class WritebackReq(params: TLBundleParameters)(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
  val tag = Bits(width = tagBits)
  val idx = Bits(width = idxBits)
  val source = UInt(width = params.sourceBits)
  val param = UInt(width = TLPermissions.cWidth)
  val way_en = Bits(width = nWays)
  val voluntary = Bool()

  override def cloneType = new WritebackReq(params)(p).asInstanceOf[this.type]
}
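
// Handles a single outstanding uncached (MMIO) access: the request is
// issued as one TileLink Get/Put/Atomic, and the response is returned to
// the core once the D-channel acknowledgement arrives.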
class IOMSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
  val io = new Bundle {
    val req = Decoupled(new HellaCacheReq).flip
    val resp = Decoupled(new HellaCacheResp)
    val mem_access = Decoupled(new TLBundleA(edge.bundle))
    val mem_ack = Valid(new TLBundleD(edge.bundle)).flip
    val replay_next = Bool(OUTPUT)
  }

  def beatOffset(addr: UInt) = addr.extract(beatOffBits - 1, wordOffBits)

  def wordFromBeat(addr: UInt, dat: UInt) = {
    val shift = Cat(beatOffset(addr), UInt(0, wordOffBits + log2Up(wordBytes)))
    (dat >> shift)(wordBits - 1, 0)
  }

  val req = Reg(new HellaCacheReq)
  val grant_word = Reg(UInt(width = wordBits))

  val s_idle :: s_mem_access :: s_mem_ack :: s_resp :: Nil = Enum(Bits(), 4)
  val state = Reg(init = s_idle)
  io.req.ready := (state === s_idle)

  val loadgen = new LoadGen(req.typ, mtSigned(req.typ), req.addr, grant_word, false.B, wordBytes)

  val a_source = UInt(id)
  val a_address = req.addr
  val a_size = mtSize(req.typ)
  val a_data = Fill(beatWords, req.data)

  val get = edge.Get(a_source, a_address, a_size)._2
  val put = edge.Put(a_source, a_address, a_size, a_data)._2
  val atomics = if (edge.manager.anySupportLogical) {
    MuxLookup(req.cmd, Wire(new TLBundleA(edge.bundle)), Array(
      M_XA_SWAP -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.SWAP)._2,
      M_XA_XOR  -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.XOR) ._2,
      M_XA_OR   -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.OR)  ._2,
      M_XA_AND  -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.AND) ._2,
      M_XA_ADD  -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.ADD)._2,
      M_XA_MIN  -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MIN)._2,
      M_XA_MAX  -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAX)._2,
      M_XA_MINU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MINU)._2,
      M_XA_MAXU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAXU)._2))
  } else {
    // If no managers support atomics, assert fail if processor asks for them
    assert(state === s_idle || !isAMO(req.cmd))
    Wire(new TLBundleA(edge.bundle))
  }
  assert(state === s_idle || req.cmd =/= M_XSC)

  io.mem_access.valid := (state === s_mem_access)
  io.mem_access.bits := Mux(isAMO(req.cmd), atomics, Mux(isRead(req.cmd), get, put))

  io.replay_next := (state === s_mem_ack) || io.resp.valid && !io.resp.ready
  io.resp.valid := (state === s_resp)
  io.resp.bits := req
  io.resp.bits.has_data := isRead(req.cmd)
  io.resp.bits.data := loadgen.data
  io.resp.bits.store_data := req.data
  io.resp.bits.replay := Bool(true)

  when (io.req.fire()) {
    req := io.req.bits
    state := s_mem_access
  }

  when (io.mem_access.fire()) {
    state := s_mem_ack
  }

  when (state === s_mem_ack && io.mem_ack.valid) {
    state := s_resp
    when (isRead(req.cmd)) {
      grant_word := wordFromBeat(req.addr, io.mem_ack.bits.data)
    }
  }

  when (io.resp.fire()) {
    state := s_idle
  }
}
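
// Tracks one outstanding cache miss: writes back the victim if necessary,
// acquires the block with sufficient permissions, updates the metadata,
// and drains the replay queue of requests that accumulated on this block.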
class MSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
  val io = new Bundle {
    val req_pri_val = Bool(INPUT)
    val req_pri_rdy = Bool(OUTPUT)
    val req_sec_val = Bool(INPUT)
    val req_sec_rdy = Bool(OUTPUT)
    val req_bits = new MSHRReqInternal().asInput

    val idx_match = Bool(OUTPUT)
    val tag = Bits(OUTPUT, tagBits)

    val mem_acquire = Decoupled(new TLBundleA(edge.bundle))
    val mem_grant = Valid(new TLBundleD(edge.bundle)).flip
    val mem_finish = Decoupled(new TLBundleE(edge.bundle))

    val refill = new L1RefillReq().asOutput // Data is bypassed
    val meta_read = Decoupled(new L1MetaReadReq)
    val meta_write = Decoupled(new L1MetaWriteReq)
    val replay = Decoupled(new ReplayInternal)
    val wb_req = Decoupled(new WritebackReq(edge.bundle))
    val probe_rdy = Bool(OUTPUT)
  }

  val s_invalid :: s_wb_req :: s_wb_resp :: s_meta_clear :: s_refill_req :: s_refill_resp :: s_meta_write_req :: s_meta_write_resp :: s_drain_rpq :: Nil = Enum(UInt(), 9)
  val state = Reg(init=s_invalid)

  val req = Reg(new MSHRReqInternal)
  val req_idx = req.addr(untagBits-1,blockOffBits)
  val req_tag = req.addr >> untagBits
  val req_block_addr = (req.addr >> blockOffBits) << blockOffBits
  val idx_match = req_idx === io.req_bits.addr(untagBits-1,blockOffBits)

  val new_coh = Reg(init=ClientMetadata.onReset)
  val (_, shrink_param, coh_on_clear) = req.old_meta.coh.onCacheControl(M_FLUSH)
  val grow_param = new_coh.onAccess(req.cmd)._2
  val coh_on_grant = new_coh.onGrant(req.cmd, io.mem_grant.bits.param)

  // We only accept secondary misses if we haven't yet sent an Acquire to outer memory
  // or if the Acquire that was sent will obtain a Grant with sufficient permissions
  // to let us replay this new request. I.e. we don't handle multiple outstanding
  // Acquires on the same block for now.
  val (cmd_requires_second_acquire, is_hit_again, _, dirtier_coh, dirtier_cmd) =
    new_coh.onSecondaryAccess(req.cmd, io.req_bits.cmd)

  val states_before_refill = Seq(s_wb_req, s_wb_resp, s_meta_clear)
  val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant)
  val sec_rdy = idx_match &&
                  (state.isOneOf(states_before_refill) ||
                    (state.isOneOf(s_refill_req, s_refill_resp) &&
                      !cmd_requires_second_acquire && !refill_done))

  val rpq = Module(new Queue(new ReplayInternal, cfg.nRPQ))
  rpq.io.enq.valid := (io.req_pri_val && io.req_pri_rdy || io.req_sec_val && sec_rdy) && !isPrefetch(io.req_bits.cmd)
  rpq.io.enq.bits := io.req_bits
  rpq.io.deq.ready := (io.replay.ready && state === s_drain_rpq) || state === s_invalid

  when (state === s_drain_rpq && !rpq.io.deq.valid) {
    state := s_invalid
  }
  when (state === s_meta_write_resp) {
    // this wait state allows us to catch RAW hazards on the tags via nack_victim
    state := s_drain_rpq
  }
  when (state === s_meta_write_req && io.meta_write.ready) {
    state := s_meta_write_resp
  }
  when (state === s_refill_resp && refill_done) {
    new_coh := coh_on_grant
    state := s_meta_write_req
  }
  when (io.mem_acquire.fire()) { // s_refill_req
    state := s_refill_resp
  }
  when (state === s_meta_clear && io.meta_write.ready) {
    state := s_refill_req
  }
  when (state === s_wb_resp && io.mem_grant.valid) {
    state := s_meta_clear
  }
  when (io.wb_req.fire()) { // s_wb_req
    state := s_wb_resp
  }
  when (io.req_sec_val && io.req_sec_rdy) { // s_wb_req, s_wb_resp, s_refill_req
    // If we get a secondary miss that needs more permissions before we've sent
    // out the primary miss's Acquire, we can upgrade the permissions we're
    // going to ask for in s_refill_req
    req.cmd := dirtier_cmd
    when (is_hit_again) {
      new_coh := dirtier_coh
    }
  }
  when (io.req_pri_val && io.req_pri_rdy) {
    req := io.req_bits
    val old_coh = io.req_bits.old_meta.coh
    val needs_wb = old_coh.onCacheControl(M_FLUSH)._1
    val (is_hit, _, coh_on_hit) = old_coh.onAccess(io.req_bits.cmd)
    when (io.req_bits.tag_match) {
      when (is_hit) { // set dirty bit
        new_coh := coh_on_hit
        state := s_meta_write_req
      }.otherwise { // upgrade permissions
        new_coh := old_coh
        state := s_refill_req
      }
    }.otherwise { // writeback if necessary and refill
      new_coh := ClientMetadata.onReset
      state := Mux(needs_wb, s_wb_req, s_meta_clear)
    }
  }

  val grantackq = Module(new Queue(io.mem_finish.bits, 1))
  val can_finish = state.isOneOf(s_invalid, s_refill_req)
  grantackq.io.enq.valid := refill_done && edge.isRequest(io.mem_grant.bits)
  grantackq.io.enq.bits := edge.GrantAck(io.mem_grant.bits)
  io.mem_finish.valid := grantackq.io.deq.valid && can_finish
  io.mem_finish.bits := grantackq.io.deq.bits
  grantackq.io.deq.ready := io.mem_finish.ready && can_finish

  io.idx_match := (state =/= s_invalid) && idx_match
  io.refill.way_en := req.way_en
  io.refill.addr := req_block_addr | refill_address_inc
  io.tag := req_tag
  io.req_pri_rdy := state === s_invalid
  io.req_sec_rdy := sec_rdy && rpq.io.enq.ready

  val meta_hazard = Reg(init=UInt(0,2))
  when (meta_hazard =/= UInt(0)) { meta_hazard := meta_hazard + 1 }
  when (io.meta_write.fire()) { meta_hazard := 1 }
  io.probe_rdy := !idx_match || (!state.isOneOf(states_before_refill) && meta_hazard === 0)

  io.meta_write.valid := state.isOneOf(s_meta_write_req, s_meta_clear)
  io.meta_write.bits.idx := req_idx
  io.meta_write.bits.data.coh := Mux(state === s_meta_clear, coh_on_clear, new_coh)
  io.meta_write.bits.data.tag := io.tag
  io.meta_write.bits.way_en := req.way_en

  io.wb_req.valid := state === s_wb_req
  io.wb_req.bits.source := UInt(id)
  io.wb_req.bits.tag := req.old_meta.tag
  io.wb_req.bits.idx := req_idx
  io.wb_req.bits.param := shrink_param
  io.wb_req.bits.way_en := req.way_en
  io.wb_req.bits.voluntary := Bool(true)

  io.mem_acquire.valid := state === s_refill_req && grantackq.io.enq.ready
  io.mem_acquire.bits := edge.AcquireBlock(
                            fromSource = UInt(id),
                            toAddress = Cat(io.tag, req_idx) << blockOffBits,
                            lgSize = lgCacheBlockBytes,
                            growPermissions = grow_param)._2

  io.meta_read.valid := state === s_drain_rpq
  io.meta_read.bits.idx := req_idx
  io.meta_read.bits.tag := io.tag

  io.replay.valid := state === s_drain_rpq && rpq.io.deq.valid
  io.replay.bits := rpq.io.deq.bits
  io.replay.bits.phys := Bool(true)
  io.replay.bits.addr := Cat(io.tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0))

  when (!io.meta_read.ready) {
    rpq.io.deq.ready := Bool(false)
    io.replay.bits.cmd := M_FLUSH_ALL /* nop */
  }
}
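
// Fronts the array of MSHRs and IOMSHRs: steers each request to a matching
// or newly allocated entry, holds pending store data in the SDQ, and
// arbitrates the TileLink and metadata/data-array ports across entries.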
class MSHRFile(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
  val io = new Bundle {
    val req = Decoupled(new MSHRReq).flip
    val resp = Decoupled(new HellaCacheResp)
    val secondary_miss = Bool(OUTPUT)

    val mem_acquire = Decoupled(new TLBundleA(edge.bundle))
    val mem_grant = Valid(new TLBundleD(edge.bundle)).flip
    val mem_finish = Decoupled(new TLBundleE(edge.bundle))

    val refill = new L1RefillReq().asOutput
    val meta_read = Decoupled(new L1MetaReadReq)
    val meta_write = Decoupled(new L1MetaWriteReq)
    val replay = Decoupled(new Replay)
    val wb_req = Decoupled(new WritebackReq(edge.bundle))

    val probe_rdy = Bool(OUTPUT)
    val fence_rdy = Bool(OUTPUT)
    val replay_next = Bool(OUTPUT)
  }

  // determine if the request is cacheable or not
  val cacheable = edge.manager.supportsAcquireBFast(io.req.bits.addr, lgCacheBlockBytes)

  val sdq_val = Reg(init=Bits(0, cfg.nSDQ))
  val sdq_alloc_id = PriorityEncoder(~sdq_val(cfg.nSDQ-1,0))
  val sdq_rdy = !sdq_val.andR
  val sdq_enq = io.req.valid && io.req.ready && cacheable && isWrite(io.req.bits.cmd)
  val sdq = Mem(cfg.nSDQ, io.req.bits.data)
  when (sdq_enq) { sdq(sdq_alloc_id) := io.req.bits.data }

  val idxMatch = Wire(Vec(cfg.nMSHRs, Bool()))
  val tagList = Wire(Vec(cfg.nMSHRs, Bits(width = tagBits)))
  val tag_match = Mux1H(idxMatch, tagList) === io.req.bits.addr >> untagBits

  val wbTagList = Wire(Vec(cfg.nMSHRs, Bits()))
  val refillMux = Wire(Vec(cfg.nMSHRs, new L1RefillReq))
  val meta_read_arb = Module(new Arbiter(new L1MetaReadReq, cfg.nMSHRs))
  val meta_write_arb = Module(new Arbiter(new L1MetaWriteReq, cfg.nMSHRs))
  val wb_req_arb = Module(new Arbiter(new WritebackReq(edge.bundle), cfg.nMSHRs))
  val replay_arb = Module(new Arbiter(new ReplayInternal, cfg.nMSHRs))
  val alloc_arb = Module(new Arbiter(Bool(), cfg.nMSHRs))

  var idx_match = Bool(false)
  var pri_rdy = Bool(false)
  var sec_rdy = Bool(false)

  io.fence_rdy := true
  io.probe_rdy := true

  val mshrs = (0 until cfg.nMSHRs) map { i =>
    val mshr = Module(new MSHR(i))

    idxMatch(i) := mshr.io.idx_match
    tagList(i) := mshr.io.tag
    wbTagList(i) := mshr.io.wb_req.bits.tag

    alloc_arb.io.in(i).valid := mshr.io.req_pri_rdy
    mshr.io.req_pri_val := alloc_arb.io.in(i).ready

    mshr.io.req_sec_val := io.req.valid && sdq_rdy && tag_match
    mshr.io.req_bits := io.req.bits
    mshr.io.req_bits.sdq_id := sdq_alloc_id

    meta_read_arb.io.in(i) <> mshr.io.meta_read
    meta_write_arb.io.in(i) <> mshr.io.meta_write
    wb_req_arb.io.in(i) <> mshr.io.wb_req
    replay_arb.io.in(i) <> mshr.io.replay

    mshr.io.mem_grant.valid := io.mem_grant.valid && io.mem_grant.bits.source === UInt(i)
    mshr.io.mem_grant.bits := io.mem_grant.bits
    refillMux(i) := mshr.io.refill

    pri_rdy = pri_rdy || mshr.io.req_pri_rdy
    sec_rdy = sec_rdy || mshr.io.req_sec_rdy
    idx_match = idx_match || mshr.io.idx_match

    when (!mshr.io.req_pri_rdy) { io.fence_rdy := false }
    when (!mshr.io.probe_rdy) { io.probe_rdy := false }

    mshr
  }

  alloc_arb.io.out.ready := io.req.valid && sdq_rdy && cacheable && !idx_match

  io.meta_read <> meta_read_arb.io.out
  io.meta_write <> meta_write_arb.io.out
  io.wb_req <> wb_req_arb.io.out

  val mmio_alloc_arb = Module(new Arbiter(Bool(), nIOMSHRs))
  val resp_arb = Module(new Arbiter(new HellaCacheResp, nIOMSHRs))

  var mmio_rdy = Bool(false)
  io.replay_next := Bool(false)

  val mmios = (0 until nIOMSHRs) map { i =>
    val id = cfg.nMSHRs + i
    val mshr = Module(new IOMSHR(id))

    mmio_alloc_arb.io.in(i).valid := mshr.io.req.ready
    mshr.io.req.valid := mmio_alloc_arb.io.in(i).ready
    mshr.io.req.bits := io.req.bits

    mmio_rdy = mmio_rdy || mshr.io.req.ready

    mshr.io.mem_ack.bits := io.mem_grant.bits
    mshr.io.mem_ack.valid := io.mem_grant.valid && io.mem_grant.bits.source === UInt(id)

    resp_arb.io.in(i) <> mshr.io.resp

    when (!mshr.io.req.ready) { io.fence_rdy := Bool(false) }
    when (mshr.io.replay_next) { io.replay_next := Bool(true) }

    mshr
  }

  mmio_alloc_arb.io.out.ready := io.req.valid && !cacheable

  TLArbiter.lowestFromSeq(edge, io.mem_acquire, mshrs.map(_.io.mem_acquire) ++ mmios.map(_.io.mem_access))
  TLArbiter.lowestFromSeq(edge, io.mem_finish, mshrs.map(_.io.mem_finish))

  io.resp <> resp_arb.io.out
  io.req.ready := Mux(!cacheable,
                    mmio_rdy,
                    sdq_rdy && Mux(idx_match, tag_match && sec_rdy, pri_rdy))
  io.secondary_miss := idx_match
  io.refill := refillMux(io.mem_grant.bits.source)

  val free_sdq = io.replay.fire() && isWrite(io.replay.bits.cmd)
  io.replay.bits.data := sdq(RegEnable(replay_arb.io.out.bits.sdq_id, free_sdq))
  io.replay <> replay_arb.io.out

  when (io.replay.valid || sdq_enq) {
    sdq_val := sdq_val & ~(UIntToOH(replay_arb.io.out.bits.sdq_id) & Fill(cfg.nSDQ, free_sdq)) |
               PriorityEncoderOH(~sdq_val(cfg.nSDQ-1,0)) & Fill(cfg.nSDQ, sdq_enq)
  }
}
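
// Reads a victim block out of the data array one row at a time and sends
// it on the TileLink C channel, either as a voluntary Release or as a
// ProbeAck with data.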
class WritebackUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
  val io = new Bundle {
    val req = Decoupled(new WritebackReq(edge.bundle)).flip
    val meta_read = Decoupled(new L1MetaReadReq)
    val data_req = Decoupled(new L1DataReadReq)
    val data_resp = Bits(INPUT, encRowBits)
    val release = Decoupled(new TLBundleC(edge.bundle))
  }

  val req = Reg(new WritebackReq(edge.bundle))
  val active = Reg(init=Bool(false))
  val r1_data_req_fired = Reg(init=Bool(false))
  val r2_data_req_fired = Reg(init=Bool(false))
  val data_req_cnt = Reg(init = UInt(0, width = log2Up(refillCycles+1))) //TODO Zero width
  val (_, last_beat, all_beats_done, beat_count) = edge.count(io.release)

  io.release.valid := false
  when (active) {
    r1_data_req_fired := false
    r2_data_req_fired := r1_data_req_fired
    when (io.data_req.fire() && io.meta_read.fire()) {
      r1_data_req_fired := true
      data_req_cnt := data_req_cnt + 1
    }
    when (r2_data_req_fired) {
      io.release.valid := true
      when(!io.release.ready) {
        r1_data_req_fired := false
        r2_data_req_fired := false
        data_req_cnt := data_req_cnt - Mux[UInt](Bool(refillCycles > 1) && r1_data_req_fired, 2, 1)
      }
      when(!r1_data_req_fired) {
        // We're done if this is the final data request and the Release can be sent
        active := data_req_cnt < UInt(refillCycles) || !io.release.ready
      }
    }
  }
  when (io.req.fire()) {
    active := true
    data_req_cnt := 0
    req := io.req.bits
  }

  io.req.ready := !active

  val fire = active && data_req_cnt < UInt(refillCycles)

  // We reissue the meta read as it sets up the mux ctrl for s2_data_muxed
  io.meta_read.valid := fire
  io.meta_read.bits.idx := req.idx
  io.meta_read.bits.tag := req.tag

  io.data_req.valid := fire
  io.data_req.bits.way_en := req.way_en
  io.data_req.bits.addr := (if(refillCycles > 1)
                              Cat(req.idx, data_req_cnt(log2Up(refillCycles)-1,0))
                            else req.idx) << rowOffBits

  val r_address = Cat(req.tag, req.idx) << blockOffBits
  val probeResponse = edge.ProbeAck(
                          fromSource = req.source,
                          toAddress = r_address,
                          lgSize = lgCacheBlockBytes,
                          reportPermissions = req.param,
                          data = io.data_resp)

  val voluntaryRelease = edge.Release(
                          fromSource = req.source,
                          toAddress = r_address,
                          lgSize = lgCacheBlockBytes,
                          shrinkPermissions = req.param,
                          data = io.data_resp)._2

  io.release.bits := Mux(req.voluntary, voluntaryRelease, probeResponse)
}
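
// Services B-channel probes: looks up the block's metadata, defers to the
// WritebackUnit when dirty data must be returned, sends the ProbeAck, and
// downgrades the block's coherence state.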
class ProbeUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) {
  val io = new Bundle {
    val req = Decoupled(new TLBundleB(edge.bundle)).flip
    val rep = Decoupled(new TLBundleC(edge.bundle))
    val meta_read = Decoupled(new L1MetaReadReq)
    val meta_write = Decoupled(new L1MetaWriteReq)
    val wb_req = Decoupled(new WritebackReq(edge.bundle))
    val way_en = Bits(INPUT, nWays)
    val mshr_rdy = Bool(INPUT)
    val block_state = new ClientMetadata().asInput
  }

  val (s_invalid :: s_meta_read :: s_meta_resp :: s_mshr_req ::
       s_mshr_resp :: s_release :: s_writeback_req :: s_writeback_resp ::
       s_meta_write :: Nil) = Enum(UInt(), 9)
  val state = Reg(init=s_invalid)

  val req = Reg(new TLBundleB(edge.bundle))
  val req_idx = req.address(idxMSB, idxLSB)
  val req_tag = req.address >> untagBits

  val way_en = Reg(Bits())
  val tag_matches = way_en.orR
  val old_coh = Reg(new ClientMetadata)
  val miss_coh = ClientMetadata.onReset
  val reply_coh = Mux(tag_matches, old_coh, miss_coh)
  val (is_dirty, report_param, new_coh) = reply_coh.onProbe(req.param)

  io.req.ready := state === s_invalid
  io.rep.valid := state === s_release
  io.rep.bits := edge.ProbeAck(req, report_param)

  assert(!io.rep.valid || !edge.hasData(io.rep.bits),
    "ProbeUnit should not send ProbeAcks with data, WritebackUnit should handle it")

  io.meta_read.valid := state === s_meta_read
  io.meta_read.bits.idx := req_idx
  io.meta_read.bits.tag := req_tag

  io.meta_write.valid := state === s_meta_write
  io.meta_write.bits.way_en := way_en
  io.meta_write.bits.idx := req_idx
  io.meta_write.bits.data.tag := req_tag
  io.meta_write.bits.data.coh := new_coh

  io.wb_req.valid := state === s_writeback_req
  io.wb_req.bits.source := req.source
  io.wb_req.bits.idx := req_idx
  io.wb_req.bits.tag := req_tag
  io.wb_req.bits.param := report_param
  io.wb_req.bits.way_en := way_en
  io.wb_req.bits.voluntary := Bool(false)

  // state === s_invalid
  when (io.req.fire()) {
    state := s_meta_read
    req := io.req.bits
  }

  // state === s_meta_read
  when (io.meta_read.fire()) {
    state := s_meta_resp
  }

  // we need to wait one cycle for the metadata to be read from the array
  when (state === s_meta_resp) {
    state := s_mshr_req
  }

  when (state === s_mshr_req) {
    old_coh := io.block_state
    way_en := io.way_en
    // if the read didn't go through, we need to retry
    state := Mux(io.mshr_rdy, s_mshr_resp, s_meta_read)
  }

  when (state === s_mshr_resp) {
    state := Mux(tag_matches && is_dirty, s_writeback_req, s_release)
  }

  when (state === s_release && io.rep.ready) {
    state := Mux(tag_matches, s_meta_write, s_invalid)
  }

  // state === s_writeback_req
  when (io.wb_req.fire()) {
    state := s_writeback_resp
  }

  // wait for the writeback request to finish before updating the metadata
  when (state === s_writeback_resp && io.wb_req.ready) {
    state := s_meta_write
  }

  when (io.meta_write.fire()) {
    state := s_invalid
  }
}
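
// The data array proper: one sequential memory per way (grouped when
// narrow reads are enabled), written with per-word masks for sub-row
// stores.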
class DataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) {
  val io = new Bundle {
    val read = Decoupled(new L1DataReadReq).flip
    val write = Decoupled(new L1DataWriteReq).flip
    val resp = Vec(nWays, Bits(OUTPUT, encRowBits))
  }

  val waddr = io.write.bits.addr >> rowOffBits
  val raddr = io.read.bits.addr >> rowOffBits

  if (doNarrowRead) {
    for (w <- 0 until nWays by rowWords) {
      val wway_en = io.write.bits.way_en(w+rowWords-1,w)
      val rway_en = io.read.bits.way_en(w+rowWords-1,w)
      val resp = Wire(Vec(rowWords, Bits(width = encRowBits)))
      val r_raddr = RegEnable(io.read.bits.addr, io.read.valid)
      for (i <- 0 until resp.size) {
        val array = SeqMem(nSets*refillCycles, Vec(rowWords, Bits(width=encDataBits)))
        when (wway_en.orR && io.write.valid && io.write.bits.wmask(i)) {
          val data = Vec.fill(rowWords)(io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i))
          array.write(waddr, data, wway_en.toBools)
        }
        resp(i) := array.read(raddr, rway_en.orR && io.read.valid).asUInt
      }
      for (dw <- 0 until rowWords) {
        val r = Vec(resp.map(_(encDataBits*(dw+1)-1,encDataBits*dw)))
        val resp_mux =
          if (r.size == 1) r
          else Vec(r(r_raddr(rowOffBits-1,wordOffBits)), r.tail:_*)
        io.resp(w+dw) := resp_mux.asUInt
      }
    }
  } else {
    for (w <- 0 until nWays) {
      val array = SeqMem(nSets*refillCycles, Vec(rowWords, Bits(width=encDataBits)))
      when (io.write.bits.way_en(w) && io.write.valid) {
        val data = Vec.tabulate(rowWords)(i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i))
        array.write(waddr, data, io.write.bits.wmask.toBools)
      }
      io.resp(w) := array.read(raddr, io.read.bits.way_en(w) && io.read.valid).asUInt
    }
  }

  io.read.ready := Bool(true)
  io.write.ready := Bool(true)
}
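
// Diplomatic wrapper for the non-blocking data cache; the implementation
// is in NonBlockingDCacheModule below.
//
// A minimal instantiation sketch (hypothetical tile context; the name
// `tlMasterXbar` and the exact node hookup are assumptions, as the real
// wiring is done by the tile-construction code):
//
//   val dcache = LazyModule(new NonBlockingDCache(hartid))
//   tlMasterXbar.node := dcache.node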
class NonBlockingDCache(hartid: Int)(implicit p: Parameters) extends HellaCache(hartid)(p) {
  override lazy val module = new NonBlockingDCacheModule(this)
}
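
// The cache pipeline: stage 0 accepts a request and issues the tag/data
// reads, stage 1 translates the address and compares tags, stage 2 picks
// the hit way and generates the response or allocates an MSHR, and stage 3
// writes store/AMO data into the array.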
class NonBlockingDCacheModule(outer: NonBlockingDCache) extends HellaCacheModule(outer) {

  require(isPow2(nWays)) // TODO: relax this
  require(dataScratchpadSize == 0)

  // ECC is only supported on the data array
  require(cacheParams.tagECC.isInstanceOf[IdentityCode])
  val dECC = cacheParams.dataECC

  val wb = Module(new WritebackUnit)
  val prober = Module(new ProbeUnit)
  val mshrs = Module(new MSHRFile)
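
  // Pipeline registers: s1 holds the request while the TLB and tag array
  // respond, s2 holds it for the hit/miss decision, and s3/s4 hold store
  // data for the array write and for store->load bypassing.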
  io.cpu.req.ready := Bool(true)
  val s1_valid = Reg(next=io.cpu.req.fire(), init=Bool(false))
  val s1_req = Reg(io.cpu.req.bits)
  val s1_valid_masked = s1_valid && !io.cpu.s1_kill
  val s1_replay = Reg(init=Bool(false))
  val s1_clk_en = Reg(Bool())
  val s1_sfence = s1_req.cmd === M_SFENCE

  val s2_valid = Reg(next=s1_valid_masked && !s1_sfence, init=Bool(false)) && !io.cpu.s2_xcpt.asUInt.orR
  val s2_req = Reg(io.cpu.req.bits)
  val s2_replay = Reg(next=s1_replay, init=Bool(false)) && s2_req.cmd =/= M_FLUSH_ALL
  val s2_recycle = Wire(Bool())
  val s2_valid_masked = Wire(Bool())

  val s3_valid = Reg(init=Bool(false))
  val s3_req = Reg(io.cpu.req.bits)
  val s3_way = Reg(Bits())

  val s1_recycled = RegEnable(s2_recycle, Bool(false), s1_clk_en)
  val s1_read = isRead(s1_req.cmd)
  val s1_write = isWrite(s1_req.cmd)
  val s1_readwrite = s1_read || s1_write || isPrefetch(s1_req.cmd)
  // check for unsupported operations
  assert(!s1_valid || !s1_req.cmd.isOneOf(M_PWR))

  val dtlb = Module(new TLB(false, log2Ceil(coreDataBytes), nTLBEntries))
  io.ptw <> dtlb.io.ptw
  dtlb.io.req.valid := s1_valid && !io.cpu.s1_kill && (s1_readwrite || s1_sfence)
  dtlb.io.req.bits.sfence.valid := s1_sfence
  dtlb.io.req.bits.sfence.bits.rs1 := s1_req.typ(0)
  dtlb.io.req.bits.sfence.bits.rs2 := s1_req.typ(1)
  dtlb.io.req.bits.sfence.bits.addr := s1_req.addr
  dtlb.io.req.bits.sfence.bits.asid := io.cpu.s1_data.data
  dtlb.io.req.bits.passthrough := s1_req.phys
  dtlb.io.req.bits.vaddr := s1_req.addr
  dtlb.io.req.bits.size := s1_req.typ
  dtlb.io.req.bits.cmd := s1_req.cmd
  when (!dtlb.io.req.ready && !io.cpu.req.bits.phys) { io.cpu.req.ready := Bool(false) }

  when (io.cpu.req.valid) {
    s1_req := io.cpu.req.bits
  }
  when (wb.io.meta_read.valid) {
    s1_req.addr := Cat(wb.io.meta_read.bits.tag, wb.io.meta_read.bits.idx) << blockOffBits
    s1_req.phys := Bool(true)
  }
  when (prober.io.meta_read.valid) {
    s1_req.addr := Cat(prober.io.meta_read.bits.tag, prober.io.meta_read.bits.idx) << blockOffBits
    s1_req.phys := Bool(true)
  }
  when (mshrs.io.replay.valid) {
    s1_req := mshrs.io.replay.bits
  }
  when (s2_recycle) {
    s1_req := s2_req
  }
  val s1_addr = dtlb.io.resp.paddr

  when (s1_clk_en) {
    s2_req.typ := s1_req.typ
    s2_req.phys := s1_req.phys
    s2_req.addr := s1_addr
    when (s1_write) {
      s2_req.data := Mux(s1_replay, mshrs.io.replay.bits.data, io.cpu.s1_data.data)
    }
    when (s1_recycled) { s2_req.data := s1_req.data }
    s2_req.tag := s1_req.tag
    s2_req.cmd := s1_req.cmd
  }

  // tags
  def onReset = L1Metadata(UInt(0), ClientMetadata.onReset)
  val meta = Module(new L1MetadataArray(onReset _))
  val metaReadArb = Module(new Arbiter(new L1MetaReadReq, 5))
  val metaWriteArb = Module(new Arbiter(new L1MetaWriteReq, 2))
  meta.io.read <> metaReadArb.io.out
  meta.io.write <> metaWriteArb.io.out

  // data
  val data = Module(new DataArray)
  val readArb = Module(new Arbiter(new L1DataReadReq, 4))
  val writeArb = Module(new Arbiter(new L1DataWriteReq, 2))
  data.io.write.valid := writeArb.io.out.valid
  writeArb.io.out.ready := data.io.write.ready
  data.io.write.bits := writeArb.io.out.bits
  val wdata_encoded = (0 until rowWords).map(i => dECC.encode(writeArb.io.out.bits.data(coreDataBits*(i+1)-1,coreDataBits*i)))
  data.io.write.bits.data := wdata_encoded.asUInt

  // tag read for new requests
  metaReadArb.io.in(4).valid := io.cpu.req.valid
  metaReadArb.io.in(4).bits.idx := io.cpu.req.bits.addr >> blockOffBits
  when (!metaReadArb.io.in(4).ready) { io.cpu.req.ready := Bool(false) }

  // data read for new requests
  readArb.io.in(3).valid := io.cpu.req.valid
  readArb.io.in(3).bits.addr := io.cpu.req.bits.addr
  readArb.io.in(3).bits.way_en := ~UInt(0, nWays)
  when (!readArb.io.in(3).ready) { io.cpu.req.ready := Bool(false) }

  // recycled requests
  metaReadArb.io.in(0).valid := s2_recycle
  metaReadArb.io.in(0).bits.idx := s2_req.addr >> blockOffBits
  readArb.io.in(0).valid := s2_recycle
  readArb.io.in(0).bits.addr := s2_req.addr
  readArb.io.in(0).bits.way_en := ~UInt(0, nWays)

  // tag check and way muxing
  def wayMap[T <: Data](f: Int => T) = Vec((0 until nWays).map(f))
  val s1_tag_eq_way = wayMap((w: Int) => meta.io.resp(w).tag === (s1_addr >> untagBits)).asUInt
  val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta.io.resp(w).coh.isValid()).asUInt
  s1_clk_en := metaReadArb.io.out.valid //TODO: should be metaReadArb.io.out.fire(), but triggers Verilog backend bug
  val s1_writeback = s1_clk_en && !s1_valid && !s1_replay
  val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_clk_en)
  val s2_tag_match = s2_tag_match_way.orR
  val s2_hit_state = Mux1H(s2_tag_match_way, wayMap((w: Int) => RegEnable(meta.io.resp(w).coh, s1_clk_en)))
  val (s2_has_permission, _, s2_new_hit_state) = s2_hit_state.onAccess(s2_req.cmd)
  val s2_hit = s2_tag_match && s2_has_permission && s2_hit_state === s2_new_hit_state
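
  // An LR reserves its cache block for up to lrscCycles; an SC succeeds
  // only while the reservation is still live and its block address matches.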
  // load-reserved/store-conditional
  val lrsc_count = Reg(init=UInt(0))
  val lrsc_valid = lrsc_count > lrscBackoff
  val lrsc_addr = Reg(UInt())
  val (s2_lr, s2_sc) = (s2_req.cmd === M_XLR, s2_req.cmd === M_XSC)
  val s2_lrsc_addr_match = lrsc_valid && lrsc_addr === (s2_req.addr >> blockOffBits)
  val s2_sc_fail = s2_sc && !s2_lrsc_addr_match
  when (lrsc_count > 0) { lrsc_count := lrsc_count - 1 }
  when (s2_valid_masked && s2_hit || s2_replay) {
    when (s2_lr) {
      lrsc_count := lrscCycles - 1
      lrsc_addr := s2_req.addr >> blockOffBits
    }
    when (lrsc_count > 0) {
      lrsc_count := 0
    }
  }
  when (io.cpu.invalidate_lr) { lrsc_count := 0 }

  val s2_data = Wire(Vec(nWays, Bits(width=encRowBits)))
  for (w <- 0 until nWays) {
    val regs = Reg(Vec(rowWords, Bits(width = encDataBits)))
    val en1 = s1_clk_en && s1_tag_eq_way(w)
    for (i <- 0 until regs.size) {
      val en = en1 && ((Bool(i == 0) || !Bool(doNarrowRead)) || s1_writeback)
      when (en) { regs(i) := data.io.resp(w) >> encDataBits*i }
    }
    s2_data(w) := regs.asUInt
  }
  val s2_data_muxed = Mux1H(s2_tag_match_way, s2_data)
  val s2_data_decoded = (0 until rowWords).map(i => dECC.decode(s2_data_muxed(encDataBits*(i+1)-1,encDataBits*i)))
  val s2_data_corrected = s2_data_decoded.map(_.corrected).asUInt
  val s2_data_uncorrected = s2_data_decoded.map(_.uncorrected).asUInt
  val s2_word_idx = if(doNarrowRead) UInt(0) else s2_req.addr(log2Up(rowWords*coreDataBytes)-1,log2Up(wordBytes))
  val s2_data_correctable = s2_data_decoded.map(_.correctable).asUInt()(s2_word_idx)

  // store/amo hits
  s3_valid := (s2_valid_masked && s2_hit || s2_replay) && !s2_sc_fail && isWrite(s2_req.cmd)
  val amoalu = Module(new AMOALU(xLen))
  when ((s2_valid || s2_replay) && (isWrite(s2_req.cmd) || s2_data_correctable)) {
    s3_req := s2_req
    s3_req.data := Mux(s2_data_correctable, s2_data_corrected, amoalu.io.out)
    s3_way := s2_tag_match_way
  }

  writeArb.io.in(0).bits.addr := s3_req.addr
  writeArb.io.in(0).bits.wmask := UIntToOH(s3_req.addr.extract(rowOffBits-1,offsetlsb))
  writeArb.io.in(0).bits.data := Fill(rowWords, s3_req.data)
  writeArb.io.in(0).valid := s3_valid
  writeArb.io.in(0).bits.way_en := s3_way

  // replacement policy
  val replacer = cacheParams.replacement
  val s1_replaced_way_en = UIntToOH(replacer.way)
  val s2_replaced_way_en = UIntToOH(RegEnable(replacer.way, s1_clk_en))
  val s2_repl_meta = Mux1H(s2_replaced_way_en, wayMap((w: Int) => RegEnable(meta.io.resp(w), s1_clk_en && s1_replaced_way_en(w))).toSeq)

  // miss handling
  mshrs.io.req.valid := s2_valid_masked && !s2_hit && (isPrefetch(s2_req.cmd) || isRead(s2_req.cmd) || isWrite(s2_req.cmd))
  mshrs.io.req.bits := s2_req
  mshrs.io.req.bits.tag_match := s2_tag_match
  mshrs.io.req.bits.old_meta := Mux(s2_tag_match, L1Metadata(s2_repl_meta.tag, s2_hit_state), s2_repl_meta)
  mshrs.io.req.bits.way_en := Mux(s2_tag_match, s2_tag_match_way, s2_replaced_way_en)
  mshrs.io.req.bits.data := s2_req.data
  when (mshrs.io.req.fire()) { replacer.miss }
  tl_out.a <> mshrs.io.mem_acquire

  // replays
  readArb.io.in(1).valid := mshrs.io.replay.valid
  readArb.io.in(1).bits := mshrs.io.replay.bits
  readArb.io.in(1).bits.way_en := ~UInt(0, nWays)
  mshrs.io.replay.ready := readArb.io.in(1).ready
  s1_replay := mshrs.io.replay.valid && readArb.io.in(1).ready
  metaReadArb.io.in(1) <> mshrs.io.meta_read
  metaWriteArb.io.in(0) <> mshrs.io.meta_write

  // probes and releases
  prober.io.req.valid := tl_out.b.valid && !lrsc_valid
  tl_out.b.ready := prober.io.req.ready && !lrsc_valid
  prober.io.req.bits := tl_out.b.bits
  prober.io.way_en := s2_tag_match_way
  prober.io.block_state := s2_hit_state
  metaReadArb.io.in(2) <> prober.io.meta_read
  metaWriteArb.io.in(1) <> prober.io.meta_write
  prober.io.mshr_rdy := mshrs.io.probe_rdy

  // refills
  val grant_has_data = edge.hasData(tl_out.d.bits)
  mshrs.io.mem_grant.valid := tl_out.d.fire()
  mshrs.io.mem_grant.bits := tl_out.d.bits
  tl_out.d.ready := writeArb.io.in(1).ready || !grant_has_data
  /* The last clause here is necessary in order to prevent the responses for
   * the IOMSHRs from being written into the data array. It works because the
   * IOMSHR ids start right after the ones for the regular MSHRs. */
  writeArb.io.in(1).valid := tl_out.d.valid && grant_has_data &&
                               tl_out.d.bits.source < UInt(cfg.nMSHRs)
  writeArb.io.in(1).bits.addr := mshrs.io.refill.addr
  writeArb.io.in(1).bits.way_en := mshrs.io.refill.way_en
  writeArb.io.in(1).bits.wmask := ~UInt(0, rowWords)
  writeArb.io.in(1).bits.data := tl_out.d.bits.data(encRowBits-1,0)
  data.io.read <> readArb.io.out
  readArb.io.out.ready := !tl_out.d.valid || tl_out.d.ready // insert bubble if refill gets blocked
  tl_out.e <> mshrs.io.mem_finish

  // writebacks
  val wbArb = Module(new Arbiter(new WritebackReq(edge.bundle), 2))
  wbArb.io.in(0) <> prober.io.wb_req
  wbArb.io.in(1) <> mshrs.io.wb_req
  wb.io.req <> wbArb.io.out
  metaReadArb.io.in(3) <> wb.io.meta_read
  readArb.io.in(2) <> wb.io.data_req
  wb.io.data_resp := s2_data_corrected
  TLArbiter.lowest(edge, tl_out.c, wb.io.release, prober.io.rep)
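
  // Forward data from in-flight stores (s2/s3/s4) to a younger load of the
  // same word, so the load need not wait for the array write to land.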
  // store->load bypassing
  val s4_valid = Reg(next=s3_valid, init=Bool(false))
  val s4_req = RegEnable(s3_req, s3_valid && metaReadArb.io.out.valid)
  val bypasses = List(
    ((s2_valid_masked || s2_replay) && !s2_sc_fail, s2_req, amoalu.io.out),
    (s3_valid, s3_req, s3_req.data),
    (s4_valid, s4_req, s4_req.data)
  ).map(r => (r._1 && (s1_addr >> wordOffBits === r._2.addr >> wordOffBits) && isWrite(r._2.cmd), r._3))
  val s2_store_bypass_data = Reg(Bits(width = coreDataBits))
  val s2_store_bypass = Reg(Bool())
  when (s1_clk_en) {
    s2_store_bypass := false
    when (bypasses.map(_._1).reduce(_||_)) {
      s2_store_bypass_data := PriorityMux(bypasses)
      s2_store_bypass := true
    }
  }

  // load data subword mux/sign extension
  val s2_data_word_prebypass = s2_data_uncorrected >> Cat(s2_word_idx, Bits(0,log2Up(coreDataBits)))
  val s2_data_word = Mux(s2_store_bypass, s2_store_bypass_data, s2_data_word_prebypass)
  val loadgen = new LoadGen(s2_req.typ, mtSigned(s2_req.typ), s2_req.addr, s2_data_word, s2_sc, wordBytes)

  amoalu.io.mask := new StoreGen(s2_req.typ, s2_req.addr, 0.U, xLen/8).mask
  amoalu.io.cmd := s2_req.cmd
  amoalu.io.lhs := s2_data_word
  amoalu.io.rhs := s2_req.data

  // nack it like it's hot
  val s1_nack = dtlb.io.req.valid && dtlb.io.resp.miss ||
                s1_req.addr(idxMSB,idxLSB) === prober.io.meta_write.bits.idx && !prober.io.req.ready
  val s2_nack_hit = RegEnable(s1_nack, s1_valid || s1_replay)
  when (s2_nack_hit) { mshrs.io.req.valid := Bool(false) }
  val s2_nack_victim = s2_hit && mshrs.io.secondary_miss
  val s2_nack_miss = !s2_hit && !mshrs.io.req.ready
  val s2_nack = s2_nack_hit || s2_nack_victim || s2_nack_miss
  s2_valid_masked := s2_valid && !s2_nack

  val s2_recycle_ecc = (s2_valid || s2_replay) && s2_hit && s2_data_correctable
  val s2_recycle_next = Reg(init=Bool(false))
  when (s1_valid || s1_replay) { s2_recycle_next := s2_recycle_ecc }
  s2_recycle := s2_recycle_ecc || s2_recycle_next

  // after a nack, block until nack condition resolves to save energy
  val block_miss = Reg(init=Bool(false))
  block_miss := (s2_valid || block_miss) && s2_nack_miss
  when (block_miss) {
    io.cpu.req.ready := Bool(false)
  }

  val cache_resp = Wire(Valid(new HellaCacheResp))
  cache_resp.valid := (s2_replay || s2_valid_masked && s2_hit) && !s2_data_correctable
  cache_resp.bits := s2_req
  cache_resp.bits.has_data := isRead(s2_req.cmd)
  cache_resp.bits.data := loadgen.data | s2_sc_fail
  cache_resp.bits.store_data := s2_req.data
  cache_resp.bits.replay := s2_replay

  val uncache_resp = Wire(Valid(new HellaCacheResp))
  uncache_resp.bits := mshrs.io.resp.bits
  uncache_resp.valid := mshrs.io.resp.valid
  mshrs.io.resp.ready := Reg(next= !(s1_valid || s1_replay))

  io.cpu.s2_nack := s2_valid && s2_nack
  io.cpu.resp := Mux(mshrs.io.resp.ready, uncache_resp, cache_resp)
  io.cpu.resp.bits.data_word_bypass := loadgen.wordData
  io.cpu.resp.bits.data_raw := s2_data_word
  io.cpu.ordered := mshrs.io.fence_rdy && !s1_valid && !s2_valid
  io.cpu.replay_next := (s1_replay && s1_read) || mshrs.io.replay_next

  val s1_xcpt_valid = dtlb.io.req.valid && !s1_nack
  val s1_xcpt = dtlb.io.resp
  io.cpu.s2_xcpt := Mux(RegNext(s1_xcpt_valid), RegEnable(s1_xcpt, s1_clk_en), 0.U.asTypeOf(s1_xcpt))

  // performance events
  io.cpu.perf.acquire := edge.done(tl_out.a)
  io.cpu.perf.release := edge.done(tl_out.c)
  io.cpu.perf.tlbMiss := io.ptw.req.fire()
}
|