// See LICENSE for license details.

package uncore

import Chisel._
import scala.reflect.ClassTag
import junctions._
import cde.{Parameters, Field}

case object CacheName extends Field[String]
case object NSets extends Field[Int]
case object NWays extends Field[Int]
case object RowBits extends Field[Int]
case object Replacer extends Field[() => ReplacementPolicy]
case object L2Replacer extends Field[() => SeqReplacementPolicy]
case object AmoAluOperandBits extends Field[Int]
case object NPrimaryMisses extends Field[Int]
case object NSecondaryMisses extends Field[Int]
case object CacheBlockBytes extends Field[Int]
case object CacheBlockOffsetBits extends Field[Int]
case object ECCCode extends Field[Option[Code]]
case object CacheIdBits extends Field[Int]
case object CacheId extends Field[Int]
case object SplitMetadata extends Field[Boolean]

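// Geometry and configuration parameters shared by all cache-like structures.
// Derived values (index, tag, and row widths) are computed from the cde
// Parameters instance supplied by the enclosing design.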
trait HasCacheParameters {
  implicit val p: Parameters
  val nSets = p(NSets)
  val blockOffBits = p(CacheBlockOffsetBits)
  val cacheIdBits = p(CacheIdBits)
  val idxBits = log2Up(nSets)
  val untagBits = blockOffBits + cacheIdBits + idxBits
  val tagBits = p(PAddrBits) - untagBits
  val nWays = p(NWays)
  val wayBits = log2Up(nWays)
  val isDM = nWays == 1
  val rowBits = p(RowBits)
  val rowBytes = rowBits/8
  val rowOffBits = log2Up(rowBytes)
  val code = p(ECCCode).getOrElse(new IdentityCode)
  val hasSplitMetadata = p(SplitMetadata)
}

abstract class CacheModule(implicit val p: Parameters) extends Module
  with HasCacheParameters

abstract class CacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
  with HasCacheParameters

abstract class ReplacementPolicy {
  def way: UInt
  def miss: Unit
  def hit: Unit
}

class RandomReplacement(ways: Int) extends ReplacementPolicy {
  private val replace = Wire(Bool())
  replace := Bool(false)
  val lfsr = LFSR16(replace)

  def way = if(ways == 1) UInt(0) else lfsr(log2Up(ways)-1,0)
  def miss = replace := Bool(true)
  def hit = {}
}

abstract class SeqReplacementPolicy {
  def access(set: UInt): Unit
  def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit
  def way: UInt
}

class SeqRandom(n_ways: Int) extends SeqReplacementPolicy {
  val logic = new RandomReplacement(n_ways)
  def access(set: UInt) = { }
  def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
    when (valid && !hit) { logic.miss }
  }
  def way = logic.way
}

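// Tree-based pseudo-LRU: the state encodes a binary tree whose internal
// nodes each point away from the most recently accessed half of the ways;
// following those pointers from the root yields the victim way.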
class PseudoLRU(n: Int)
{
  val state_reg = Reg(Bits(width = n))
  def access(way: UInt) {
    state_reg := get_next_state(state_reg,way)
  }
  def get_next_state(state: UInt, way: UInt) = {
    var next_state = state
    var idx = UInt(1,1)
    for (i <- log2Up(n)-1 to 0 by -1) {
      val bit = way(i)
      val mask = (UInt(1,n) << idx)(n-1,0)
      next_state = next_state & ~mask | Mux(bit, UInt(0), mask)
      //next_state.bitSet(idx, !bit)
      idx = Cat(idx, bit)
    }
    next_state
  }
  def replace = get_replace_way(state_reg)
  def get_replace_way(state: Bits) = {
    var idx = UInt(1,1)
    for (i <- 0 until log2Up(n))
      idx = Cat(idx, state(idx))
    idx(log2Up(n)-1,0)
  }
}

class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy {
  val state = SeqMem(n_sets, Bits(width = n_ways-1))
  val logic = new PseudoLRU(n_ways)
  val current_state = Wire(Bits())
  val plru_way = logic.get_replace_way(current_state)
  val next_state = Wire(Bits())

  def access(set: UInt) = {
    current_state := Cat(state.read(set), Bits(0, width = 1))
  }

  def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = {
    val update_way = Mux(hit, way, plru_way)
    next_state := logic.get_next_state(current_state, update_way)
    when (valid) { state.write(set, next_state(n_ways-1,1)) }
  }

  def way = plru_way
}

abstract class Metadata(implicit p: Parameters) extends CacheBundle()(p) {
  val tag = Bits(width = tagBits)
  val coh: CoherenceMetadata
}

class MetaReadReq(implicit p: Parameters) extends CacheBundle()(p) {
  val idx = Bits(width = idxBits)
  val way_en = Bits(width = nWays)
}

class MetaWriteReq[T <: Metadata](gen: T)(implicit p: Parameters) extends MetaReadReq()(p) {
  val data = gen.cloneType
  override def cloneType = new MetaWriteReq(gen)(p).asInstanceOf[this.type]
}

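// Reset-initialized array of cache metadata (tags plus coherence state).
// After reset every set is written with the value produced by onReset();
// reads and writes are not accepted until that initialization sweep completes.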
class MetadataArray[T <: Metadata](onReset: () => T)(implicit p: Parameters) extends CacheModule()(p) {
  val rstVal = onReset()
  val io = new Bundle {
    val read = Decoupled(new MetaReadReq).flip
    val write = Decoupled(new MetaWriteReq(rstVal)).flip
    val resp = Vec(nWays, rstVal.cloneType).asOutput
  }
  val rst_cnt = Reg(init=UInt(0, log2Up(nSets+1)))
  val rst = rst_cnt < UInt(nSets)
  val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
  val wdata = Mux(rst, rstVal, io.write.bits.data).toBits
  val wmask = Mux(rst || Bool(nWays == 1), SInt(-1), io.write.bits.way_en.toSInt).toBools
  val rmask = Mux(rst || Bool(nWays == 1), SInt(-1), io.read.bits.way_en.toSInt).toBools
  when (rst) { rst_cnt := rst_cnt+UInt(1) }

  val metabits = rstVal.getWidth

  if (hasSplitMetadata) {
    val tag_arrs = List.fill(nWays){ SeqMem(nSets, UInt(width = metabits)) }
    val tag_readout = Wire(Vec(nWays,rstVal.cloneType))
    val tags_vec = Wire(Vec(nWays, UInt(width = metabits)))
    (0 until nWays).foreach { (i) =>
      when (rst || (io.write.valid && wmask(i))) {
        tag_arrs(i).write(waddr, wdata)
      }
      tags_vec(i) := tag_arrs(i).read(io.read.bits.idx, io.read.valid && rmask(i))
    }
    io.resp := io.resp.fromBits(tags_vec.toBits)
  } else {
    val tag_arr = SeqMem(nSets, Vec(nWays, UInt(width = metabits)))
    when (rst || io.write.valid) {
      tag_arr.write(waddr, Vec.fill(nWays)(wdata), wmask)
    }
    val tags = tag_arr.read(io.read.bits.idx, io.read.valid).toBits
    io.resp := io.resp.fromBits(tags)
  }

  io.read.ready := !rst && !io.write.valid // so really this could be a 6T RAM
  io.write.ready := !rst
}

case object L2DirectoryRepresentation extends Field[DirectoryRepresentation]

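// Parameters for a cache sitting on the outer (TileLink) side of the
// hierarchy. Block addresses are sliced as {tag, idx, cacheId}, so multiple
// banks of the same cache interleave sets by their cacheId bits.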
trait HasOuterCacheParameters extends HasCacheParameters with HasCoherenceAgentParameters {
  val cacheId = p(CacheId)
  val idxLSB = cacheIdBits
  val idxMSB = idxLSB + idxBits - 1
  val tagLSB = idxLSB + idxBits
  def inSameSet(block: HasCacheBlockAddress, addr: UInt): Bool = {
    block.addr_block(idxMSB,idxLSB) === addr(idxMSB,idxLSB)
  }
  def haveSameTag(block: HasCacheBlockAddress, addr: UInt): Bool = {
    block.addr_block >> UInt(tagLSB) === addr >> UInt(tagLSB)
  }
  //val blockAddrBits = p(TLBlockAddrBits)
  val refillCyclesPerBeat = outerDataBits/rowBits
  val refillCycles = refillCyclesPerBeat*outerDataBeats
  val internalDataBeats = p(CacheBlockBytes)*8/rowBits
  require(refillCyclesPerBeat == 1)
  val amoAluOperandBits = p(AmoAluOperandBits)
  require(amoAluOperandBits <= innerDataBits)
  require(rowBits == innerDataBits) // TODO: relax this by improving s_data_* states
  val nSecondaryMisses = p(NSecondaryMisses)
  val isLastLevelCache = true
  val alwaysWriteFullBeat = !p(ECCCode).isEmpty
}

abstract class L2HellaCacheModule(implicit val p: Parameters) extends Module
    with HasOuterCacheParameters {
  def doInternalOutputArbitration[T <: Data : ClassTag](
      out: DecoupledIO[T],
      ins: Seq[DecoupledIO[T]]) {
    val arb = Module(new RRArbiter(out.bits, ins.size))
    out <> arb.io.out
    arb.io.in <> ins
  }

  def doInternalInputRouting[T <: Bundle with HasL2Id](in: ValidIO[T], outs: Seq[ValidIO[T]]) {
    outs.map(_.bits := in.bits)
    outs.zipWithIndex.map { case (o,i) => o.valid := in.valid && in.bits.id === UInt(i) }
  }
}

abstract class L2HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
  with HasOuterCacheParameters

trait HasL2Id extends HasCoherenceAgentParameters {
  val id = UInt(width = log2Up(nTransactors + 1))
}

trait HasL2InternalRequestState extends HasOuterCacheParameters {
  val tag_match = Bool()
  val meta = new L2Metadata
  val way_en = Bits(width = nWays)
}

trait HasL2BeatAddr extends HasOuterCacheParameters {
  val addr_beat = UInt(width = log2Up(refillCycles))
}

trait HasL2Data extends HasOuterCacheParameters
    with HasL2BeatAddr {
  val data = UInt(width = rowBits)
  def hasData(dummy: Int = 0) = Bool(true)
  def hasMultibeatData(dummy: Int = 0) = Bool(refillCycles > 1)
}

class L2Metadata(implicit p: Parameters) extends Metadata()(p) with HasOuterCacheParameters {
  val coh = new HierarchicalMetadata
}

object L2Metadata {
  def apply(tag: Bits, coh: HierarchicalMetadata)
           (implicit p: Parameters): L2Metadata = {
    val meta = Wire(new L2Metadata)
    meta.tag := tag
    meta.coh := coh
    meta
  }

  def apply(
        tag: Bits,
        inner: ManagerMetadata,
        outer: ClientMetadata)(implicit p: Parameters): L2Metadata = {
    val coh = Wire(new HierarchicalMetadata)
    coh.inner := inner
    coh.outer := outer
    apply(tag, coh)
  }
}

class L2MetaReadReq(implicit p: Parameters) extends MetaReadReq()(p) with HasL2Id {
  val tag = Bits(width = tagBits)
}

class L2MetaWriteReq(implicit p: Parameters) extends MetaWriteReq[L2Metadata](new L2Metadata)(p)
    with HasL2Id {
  override def cloneType = new L2MetaWriteReq().asInstanceOf[this.type]
}

class L2MetaResp(implicit p: Parameters) extends L2HellaCacheBundle()(p)
  with HasL2Id
  with HasL2InternalRequestState

trait HasL2MetaReadIO extends HasOuterCacheParameters {
  val read = Decoupled(new L2MetaReadReq)
  val resp = Valid(new L2MetaResp).flip
}

trait HasL2MetaWriteIO extends HasOuterCacheParameters {
  val write = Decoupled(new L2MetaWriteReq)
}

class L2MetaRWIO(implicit p: Parameters) extends L2HellaCacheBundle()(p)
  with HasL2MetaReadIO
  with HasL2MetaWriteIO

trait HasL2MetaRWIO extends HasOuterCacheParameters {
  val meta = new L2MetaRWIO
}

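// Wraps MetadataArray with the L2's tag-match logic: stage 1 compares the
// stored tags against the requested tag, stage 2 selects either the hitting
// way or a victim chosen by the configured L2 replacement policy.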
class L2MetadataArray(implicit p: Parameters) extends L2HellaCacheModule()(p) {
  val io = new L2MetaRWIO().flip

  def onReset = L2Metadata(UInt(0), HierarchicalMetadata.onReset)
  val meta = Module(new MetadataArray(onReset _))
  meta.io.read <> io.read
  meta.io.write <> io.write
  val way_en_1h = (Vec.fill(nWays){Bool(true)}).toBits
  val s1_way_en_1h = RegEnable(way_en_1h, io.read.valid)
  meta.io.read.bits.way_en := way_en_1h

  val s1_tag = RegEnable(io.read.bits.tag, io.read.valid)
  val s1_id = RegEnable(io.read.bits.id, io.read.valid)
  def wayMap[T <: Data](f: Int => T) = Vec((0 until nWays).map(f))
  val s1_clk_en = Reg(next = io.read.fire())
  val s1_tag_eq_way = wayMap((w: Int) => meta.io.resp(w).tag === s1_tag)
  val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta.io.resp(w).coh.outer.isValid() && s1_way_en_1h(w).toBool).toBits
  val s1_idx = RegEnable(io.read.bits.idx, io.read.valid) // deal with stalls?
  val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_clk_en)
  val s2_tag_match = s2_tag_match_way.orR
  val s2_hit_coh = Mux1H(s2_tag_match_way, wayMap((w: Int) => RegEnable(meta.io.resp(w).coh, s1_clk_en)))

  val replacer = p(L2Replacer)()
  val s1_hit_way = Wire(Bits())
  s1_hit_way := Bits(0)
  (0 until nWays).foreach(i => when (s1_tag_match_way(i)) { s1_hit_way := Bits(i) })
  replacer.access(io.read.bits.idx)
  replacer.update(s1_clk_en, s1_tag_match_way.orR, s1_idx, s1_hit_way)

  val s1_replaced_way_en = UIntToOH(replacer.way)
  val s2_replaced_way_en = UIntToOH(RegEnable(replacer.way, s1_clk_en))
  val s2_repl_meta = Mux1H(s2_replaced_way_en, wayMap((w: Int) =>
    RegEnable(meta.io.resp(w), s1_clk_en && s1_replaced_way_en(w))).toSeq)

  io.resp.valid := Reg(next = s1_clk_en)
  io.resp.bits.id := RegEnable(s1_id, s1_clk_en)
  io.resp.bits.tag_match := s2_tag_match
  io.resp.bits.meta := Mux(s2_tag_match,
    L2Metadata(s2_repl_meta.tag, s2_hit_coh),
    s2_repl_meta)
  io.resp.bits.way_en := Mux(s2_tag_match, s2_tag_match_way, s2_replaced_way_en)
}

class L2DataReadReq(implicit p: Parameters) extends L2HellaCacheBundle()(p)
    with HasL2BeatAddr
    with HasL2Id {
  val addr_idx = UInt(width = idxBits)
  val way_en = Bits(width = nWays)
}

object L2DataReadReq {
  def apply(
        id: UInt,
        way_en: UInt,
        addr_idx: UInt,
        addr_beat: UInt)(implicit p: Parameters) = {
    val req = Wire(new L2DataReadReq)
    req.id := id
    req.way_en := way_en
    req.addr_idx := addr_idx
    req.addr_beat := addr_beat
    req
  }
}

class L2DataWriteReq(implicit p: Parameters) extends L2DataReadReq()(p)
    with HasL2Data {
  val wmask = Bits(width = rowBits/8)
}

object L2DataWriteReq {
  def apply(
        id: UInt,
        way_en: UInt,
        addr_idx: UInt,
        addr_beat: UInt,
        wmask: UInt,
        data: UInt)(implicit p: Parameters) = {
    val req = Wire(new L2DataWriteReq)
    req.id := id
    req.way_en := way_en
    req.addr_idx := addr_idx
    req.addr_beat := addr_beat
    req.wmask := wmask
    req.data := data
    req
  }
}

class L2DataResp(implicit p: Parameters) extends L2HellaCacheBundle()(p)
  with HasL2Id
  with HasL2Data

trait HasL2DataReadIO extends HasOuterCacheParameters {
  val read = Decoupled(new L2DataReadReq)
  val resp = Valid(new L2DataResp).flip
}

trait HasL2DataWriteIO extends HasOuterCacheParameters {
  val write = Decoupled(new L2DataWriteReq)
}

class L2DataRWIO(implicit p: Parameters) extends L2HellaCacheBundle()(p)
  with HasL2DataReadIO
  with HasL2DataWriteIO

trait HasL2DataRWIO extends HasOuterCacheParameters {
  val data = new L2DataRWIO
}

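// Single-ported data array covering all ways and sets; the physical address
// is formed as {way, set index, beat}. Writes take priority over reads, and
// read responses are returned after a configurable pipeline delay.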
class L2DataArray(delay: Int)(implicit p: Parameters) extends L2HellaCacheModule()(p) {
  val io = new L2DataRWIO().flip

  val array = SeqMem(nWays*nSets*refillCycles, Vec(rowBits/8, Bits(width=8)))
  val ren = !io.write.valid && io.read.valid
  val raddr = Cat(OHToUInt(io.read.bits.way_en), io.read.bits.addr_idx, io.read.bits.addr_beat)
  val waddr = Cat(OHToUInt(io.write.bits.way_en), io.write.bits.addr_idx, io.write.bits.addr_beat)
  val wdata = Vec.tabulate(rowBits/8)(i => io.write.bits.data(8*(i+1)-1,8*i))
  val wmask = io.write.bits.wmask.toBools
  when (io.write.valid) { array.write(waddr, wdata, wmask) }

  val r_req = Pipe(io.read.fire(), io.read.bits)
  io.resp := Pipe(r_req.valid, r_req.bits, delay)
  io.resp.bits.data := Pipe(r_req.valid, array.read(raddr, ren).toBits, delay).bits
  io.read.ready := !io.write.valid
  io.write.ready := Bool(true)
}

class L2HellaCacheBank(implicit p: Parameters) extends HierarchicalCoherenceAgent()(p)
    with HasOuterCacheParameters {
  require(isPow2(nSets))
  require(isPow2(nWays))

  val meta = Module(new L2MetadataArray) // TODO: add delay knob
  val data = Module(new L2DataArray(1))
  val tshrfile = Module(new TSHRFile)
  io.inner <> tshrfile.io.inner
  io.outer <> tshrfile.io.outer
  tshrfile.io.incoherent <> io.incoherent
  meta.io <> tshrfile.io.meta
  data.io <> tshrfile.io.data
}

class TSHRFileIO(implicit p: Parameters) extends HierarchicalTLIO()(p)
  with HasL2MetaRWIO
  with HasL2DataRWIO

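// The transaction status handling register (TSHR) file: a set of trackers
// for inner Releases and Acquires plus one writeback unit, with arbiters
// routing them onto the shared metadata array, data array, and outer port.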
class TSHRFile(implicit p: Parameters) extends L2HellaCacheModule()(p)
    with HasCoherenceAgentWiringHelpers {
  val io = new TSHRFileIO

  // Create TSHRs for outstanding transactions
  val irelTrackerList =
    (0 until nReleaseTransactors).map(id =>
      Module(new CacheVoluntaryReleaseTracker(id)))
  val iacqTrackerList =
    (nReleaseTransactors until nTransactors).map(id =>
      Module(new CacheAcquireTracker(id)))
  val trackerList = irelTrackerList ++ iacqTrackerList

  // WritebackUnit evicts data from L2, including invalidating L1s
  val wb = Module(new L2WritebackUnit(nTransactors))
  val trackerAndWbIOs = trackerList.map(_.io) :+ wb.io
  doInternalOutputArbitration(wb.io.wb.req, trackerList.map(_.io.wb.req))
  doInternalInputRouting(wb.io.wb.resp, trackerList.map(_.io.wb.resp))

  // Propagate incoherence flags
  (trackerList.map(_.io.incoherent) :+ wb.io.incoherent) foreach { _ := io.incoherent }

  // Handle acquire transaction initiation
  val irel_vs_iacq_conflict =
    io.inner.acquire.valid &&
    io.inner.release.valid &&
    inSameSet(io.inner.acquire.bits, io.inner.release.bits.addr_block)
  doInputRoutingWithAllocation(
    in = io.inner.acquire,
    outs = trackerList.map(_.io.inner.acquire),
    allocs = trackerList.map(_.io.alloc_iacq),
    allocOverride = !irel_vs_iacq_conflict)

  assert(PopCount(trackerList.map(_.io.alloc_iacq.should)) <= UInt(1),
    "At most a single tracker should now be allocated for any given Acquire")

  // Wire releases from clients
  doInputRoutingWithAllocation(
    in = io.inner.release,
    outs = trackerAndWbIOs.map(_.inner.release),
    allocs = trackerAndWbIOs.map(_.alloc_irel))

  assert(PopCount(trackerAndWbIOs.map(_.alloc_irel.should)) <= UInt(1),
    "At most a single tracker should now be allocated for any given Release")

  // Wire probe requests and grant reply to clients, finish acks from clients
  doOutputArbitration(io.inner.probe, trackerList.map(_.io.inner.probe) :+ wb.io.inner.probe)
  doOutputArbitration(io.inner.grant, trackerList.map(_.io.inner.grant) :+ wb.io.inner.grant)
  doInputRouting(io.inner.finish, trackerList.map(_.io.inner.finish))

  // Create an arbiter for the one memory port
  val outerList = trackerList.map(_.io.outer) :+ wb.io.outer
  val outer_arb = Module(new ClientTileLinkIOArbiter(outerList.size)
                        (p.alterPartial({ case TLId => p(OuterTLId)})))
  outer_arb.io.in <> outerList
  io.outer <> outer_arb.io.out

  // Wire local memory arrays
  doInternalOutputArbitration(io.meta.read, trackerList.map(_.io.meta.read))
  doInternalOutputArbitration(io.meta.write, trackerList.map(_.io.meta.write))
  doInternalOutputArbitration(io.data.read, trackerList.map(_.io.data.read) :+ wb.io.data.read)
  doInternalOutputArbitration(io.data.write, trackerList.map(_.io.data.write))
  doInternalInputRouting(io.meta.resp, trackerList.map(_.io.meta.resp))
  doInternalInputRouting(io.data.resp, trackerList.map(_.io.data.resp) :+ wb.io.data.resp)
}

class L2XactTrackerIO(implicit p: Parameters) extends HierarchicalXactTrackerIO()(p)
  with HasL2DataRWIO
  with HasL2MetaRWIO
  with HasL2WritebackIO

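// Counters and pending-bit helpers for multi-beat data movement: each helper
// adds or drops one bit per beat of a block as read, write, and response
// messages fire, so trackers can score-board outstanding beats.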
trait HasRowBeatCounters extends HasOuterCacheParameters with HasPendingBitHelpers {
  def mergeData(dataBits: Int)(beat: UInt, incoming: UInt): Unit

  def connectDataBeatCounter[S <: L2HellaCacheBundle](inc: Bool, data: S, beat: UInt, full_block: Bool) = {
    if(data.refillCycles > 1) {
      val (multi_cnt, multi_done) = Counter(full_block && inc, data.refillCycles)
      (Mux(!full_block, beat, multi_cnt), Mux(!full_block, inc, multi_done))
    } else { (UInt(0), inc) }
  }

  def connectInternalDataBeatCounter[T <: L2HellaCacheBundle with HasL2BeatAddr](
      in: DecoupledIO[T],
      beat: UInt = UInt(0),
      full_block: Bool = Bool(true)): (UInt, Bool) = {
    connectDataBeatCounter(in.fire(), in.bits, beat, full_block)
  }

  def connectInternalDataBeatCounter[T <: L2HellaCacheBundle with HasL2Data](
      in: ValidIO[T],
      full_block: Bool): Bool = {
    connectDataBeatCounter(in.valid, in.bits, UInt(0), full_block)._2
  }

  def addPendingBitInternal[T <: L2HellaCacheBundle with HasL2BeatAddr](in: DecoupledIO[T]) =
    Fill(in.bits.refillCycles, in.fire()) & UIntToOH(in.bits.addr_beat)

  def addPendingBitInternal[T <: L2HellaCacheBundle with HasL2BeatAddr](in: ValidIO[T]) =
    Fill(in.bits.refillCycles, in.valid) & UIntToOH(in.bits.addr_beat)

  def dropPendingBit[T <: L2HellaCacheBundle with HasL2BeatAddr] (in: DecoupledIO[T]) =
    ~Fill(in.bits.refillCycles, in.fire()) | ~UIntToOH(in.bits.addr_beat)

  def dropPendingBitInternal[T <: L2HellaCacheBundle with HasL2BeatAddr] (in: ValidIO[T]) =
    ~Fill(in.bits.refillCycles, in.valid) | ~UIntToOH(in.bits.addr_beat)

  // TODO: Deal with the possibility that rowBits != tlDataBits
  def mergeDataInternal[T <: L2HellaCacheBundle with HasL2Data with HasL2BeatAddr](in: ValidIO[T]) {
    when(in.valid) { mergeData(rowBits)(in.bits.addr_beat, in.bits.data) }
  }
}

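// Mixins that give a transaction tracker access to the shared L2 data array.
// Each keeps a per-beat pending vector and issues one read or write request
// per outstanding beat while the tracker is in its busy state.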
trait ReadsFromOuterCacheDataArray extends HasCoherenceMetadataBuffer
    with HasRowBeatCounters
    with HasDataBuffer {
  def io: HasL2DataRWIO

  val pending_reads = Reg(init=Bits(0, width = innerDataBeats))
  val pending_resps = Reg(init=Bits(0, width = innerDataBeats))
  val curr_read_beat = PriorityEncoder(pending_reads)

  def readDataArray(drop_pending_bit: UInt,
                    add_pending_bit: UInt = UInt(0),
                    block_pending_read: Bool = Bool(false)) {
    val port = io.data
    pending_reads := (pending_reads & dropPendingBit(port.read) & drop_pending_bit) | add_pending_bit
    port.read.valid := state === s_busy && pending_reads.orR && !block_pending_read
    port.read.bits := L2DataReadReq(
      id = UInt(trackerId),
      way_en = xact_way_en,
      addr_idx = xact_addr_idx,
      addr_beat = curr_read_beat)

    pending_resps := (pending_resps & dropPendingBitInternal(port.resp)) |
                       addPendingBitInternal(port.read)

    scoreboard += (pending_reads.orR, pending_resps.orR)

    mergeDataInternal(port.resp)
  }
}

trait WritesToOuterCacheDataArray extends HasCoherenceMetadataBuffer
    with HasRowBeatCounters
    with HasDataBuffer {
  def io: HasL2DataRWIO

  val pending_writes = Reg(init=Bits(0, width = innerDataBeats))
  val curr_write_beat = PriorityEncoder(pending_writes)

  def writeDataArray(add_pending_bit: UInt = UInt(0),
                     block_pending_write: Bool = Bool(false)) {
    val port = io.data
    pending_writes := (pending_writes & dropPendingBit(port.write)) | add_pending_bit
    port.write.valid := state === s_busy && pending_writes.orR && !block_pending_write
    port.write.bits := L2DataWriteReq(
      id = UInt(trackerId),
      way_en = xact_way_en,
      addr_idx = xact_addr_idx,
      addr_beat = curr_write_beat,
      wmask = ~UInt(0, port.write.bits.wmask.getWidth),
      data = data_buffer(curr_write_beat))

    scoreboard += pending_writes.orR
  }
}

trait HasAMOALU extends HasAcquireMetadataBuffer
    with HasByteWriteMaskBuffer
    with HasRowBeatCounters {
  val io: L2XactTrackerIO

  // Provide a single ALU per tracker to merge Puts and AMOs with data being
  // refilled, written back, or extant in the cache
  val amoalu = Module(new AMOALU(rhsIsAligned = true))
  val amo_result = Reg(init = UInt(0, innerDataBits))

  def initializeAMOALUIOs() {
    amoalu.io.addr := Cat(xact_addr_block, xact_addr_beat, xact_addr_byte)
    amoalu.io.cmd := xact_op_code
    amoalu.io.typ := xact_op_size
    amoalu.io.lhs := io.data.resp.bits.data // default, overwritten by calls to mergeData
    amoalu.io.rhs := data_buffer.head // default, overwritten by calls to mergeData
  }

  // Utility function for applying any buffered stored data to the cache line
  // before storing it back into the data array
  override def mergeData(dataBits: Int)(beat: UInt, incoming: UInt) {
    val old_data = incoming // Refilled, written back, or de-cached data
    val new_data = data_buffer(beat) // Newly Put data is already in the buffer
    val amo_shift_bits = xact_amo_shift_bytes << 3
    amoalu.io.lhs := old_data >> amo_shift_bits
    amoalu.io.rhs := new_data >> amo_shift_bits
    val wmask = FillInterleaved(8, wmask_buffer(beat))
    data_buffer(beat) := ~wmask & old_data |
                          wmask & Mux(xact_iacq.isAtomic(), amoalu.io.out << amo_shift_bits, new_data)
    when(xact_iacq.isAtomic() && xact_addr_beat === beat) { amo_result := old_data }
  }
}

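// Buffers the way, old metadata, and in-flight coherence state of a single
// transaction, and provides the metaRead/metaWrite helpers used by the
// tracker state machines.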
trait HasCoherenceMetadataBuffer extends HasOuterCacheParameters
    with HasBlockAddressBuffer
    with HasXactTrackerStates {
  def trackerId: Int

  val xact_way_en = Reg{ Bits(width = nWays) }
  val xact_old_meta = Reg{ new L2Metadata }
  val pending_coh = Reg{ xact_old_meta.coh }
  val pending_meta_write = Reg{ Bool() } // pending_meta_write has own state (s_meta_write)

  val inner_coh = pending_coh.inner
  val outer_coh = pending_coh.outer

  val xact_addr_idx = xact_addr_block(idxMSB,idxLSB)
  val xact_addr_tag = xact_addr_block >> UInt(tagLSB)

  // Utility function for updating the metadata that will be kept in this cache
  def updatePendingCohWhen(flag: Bool, next: HierarchicalMetadata) {
    when(flag && pending_coh =/= next) {
      pending_meta_write := Bool(true)
      pending_coh := next
    }
  }

  def metaRead(port: HasL2MetaReadIO, next_state: UInt) {
    port.read.valid := state === s_meta_read
    port.read.bits.id := UInt(trackerId)
    port.read.bits.idx := xact_addr_idx
    port.read.bits.tag := xact_addr_tag

    when(state === s_meta_read && port.read.ready) { state := s_meta_resp }

    when(state === s_meta_resp && port.resp.valid) {
      xact_old_meta := port.resp.bits.meta
      xact_way_en := port.resp.bits.way_en
      state := next_state
    }
  }

  def metaWrite(port: HasL2MetaWriteIO, to_write: L2Metadata, next_state: UInt) {
    port.write.valid := state === s_meta_write
    port.write.bits.id := UInt(trackerId)
    port.write.bits.idx := xact_addr_idx
    port.write.bits.way_en := xact_way_en
    port.write.bits.data := to_write

    when(state === s_meta_write && port.write.ready) { state := next_state }
  }
}

trait TriggersWritebacks extends HasCoherenceMetadataBuffer {
  def triggerWriteback(wb: L2WritebackIO, next_state: UInt) {
    wb.req.valid := state === s_wb_req
    wb.req.bits.id := UInt(trackerId)
    wb.req.bits.idx := xact_addr_idx
    wb.req.bits.tag := xact_old_meta.tag
    wb.req.bits.coh := xact_old_meta.coh
    wb.req.bits.way_en := xact_way_en

    when(state === s_wb_req && wb.req.ready) { state := s_wb_resp }
    when(state === s_wb_resp && wb.resp.valid) { state := s_outer_acquire }
  }
}

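// Tracker for voluntary writebacks from inner clients: it buffers the
// released data, writes it into this cache's data array, and then updates
// the block's metadata to record the new inner and outer coherence state.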
class CacheVoluntaryReleaseTracker(trackerId: Int)(implicit p: Parameters)
    extends VoluntaryReleaseTracker(trackerId)(p)
    with HasDataBuffer
    with WritesToOuterCacheDataArray {
  val io = new L2XactTrackerIO
  pinAllReadyValidLow(io)

  // Avoid metadata races with writebacks
  routeInParent(iacqMatches = inSameSet(_, xact_addr_block))
  io.alloc_iacq.can := Bool(false)

  // Initialize and accept pending Release beats
  innerRelease(
    block_vol_ignt = pending_writes.orR,
    next = s_meta_read)

  io.inner.release.ready := state === s_idle || irel_can_merge || irel_same_xact

  // Begin a transaction by getting the current block metadata
  metaRead(io.meta, s_busy)

  // Write the voluntarily written back data to this cache
  writeDataArray(add_pending_bit = addPendingBitWhenBeatHasData(io.inner.release))

  // End a transaction by updating the block metadata
  metaWrite(
    io.meta,
    L2Metadata(
      tag = xact_addr_tag,
      inner = xact_old_meta.coh.inner.onRelease(xact_vol_irel),
      outer = Mux(xact_vol_irel.hasData(),
        xact_old_meta.coh.outer.onHit(M_XWR),
        xact_old_meta.coh.outer)),
    s_idle)

  when(io.inner.release.fire()) { data_buffer(io.irel().addr_beat) := io.irel().data }

  when(irel_is_allocating) {
    pending_writes := addPendingBitWhenBeatHasData(io.inner.release)
  }

  quiesce(s_meta_write) {}

  // Checks for illegal behavior
  assert(!(state === s_meta_resp && io.meta.resp.valid && !io.meta.resp.bits.tag_match),
    "VoluntaryReleaseTracker accepted Release for a block not resident in this cache!")
}

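// Tracker for inner Acquire transactions: it reads the block's metadata,
// triggers a writeback and/or inner probes if needed, fetches data from
// outer memory on a miss, merges Puts and AMOs, and finally responds with
// Grants while updating the stored metadata.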
class CacheAcquireTracker(trackerId: Int)(implicit p: Parameters)
    extends AcquireTracker(trackerId)(p)
    with HasByteWriteMaskBuffer
    with HasAMOALU
    with TriggersWritebacks
    with ReadsFromOuterCacheDataArray
    with WritesToOuterCacheDataArray {
  val io = new L2XactTrackerIO
  pinAllReadyValidLow(io)
  initializeAMOALUIOs()

  val pending_coh_on_ognt = HierarchicalMetadata(
    ManagerMetadata.onReset,
    pending_coh.outer.onGrant(io.outer.grant.bits, xact_op_code))

  val pending_coh_on_ignt = HierarchicalMetadata(
    pending_coh.inner.onGrant(io.ignt()),
    Mux(ognt_counter.down.done,
      pending_coh_on_ognt.outer,
      pending_coh.outer))

  val pending_coh_on_irel = HierarchicalMetadata(
    pending_coh.inner.onRelease(io.irel()), // Drop sharer
    Mux(io.irel().hasData(), // Dirty writeback
      pending_coh.outer.onHit(M_XWR),
      pending_coh.outer))

  val pending_coh_on_hit = HierarchicalMetadata(
    io.meta.resp.bits.meta.coh.inner,
    io.meta.resp.bits.meta.coh.outer.onHit(xact_op_code))

  val pending_coh_on_miss = HierarchicalMetadata.onReset

  // Setup IOs used for routing in the parent
  val before_wb_alloc = Vec(s_meta_read, s_meta_resp, s_wb_req).contains(state)

  routeInParent(
    iacqMatches = inSameSet(_, xact_addr_block),
    irelMatches = (irel: HasCacheBlockAddress) =>
      Mux(before_wb_alloc, inSameSet(irel, xact_addr_block), exactAddrMatch(irel)))
  io.alloc_irel.can := Bool(false)

  // TileLink allows for Gets-under-Get
  // and Puts-under-Put, and either may also merge with a preceding prefetch
  // that requested the correct permissions (via op_code)
  def acquiresAreMergeable(sec: AcquireMetadata): Bool = {
    val allowedTypes = List((Acquire.getType, Acquire.getType),
      (Acquire.putType, Acquire.putType),
      (Acquire.putBlockType, Acquire.putBlockType),
      (Acquire.getPrefetchType, Acquire.getPrefetchType),
      (Acquire.putPrefetchType, Acquire.putPrefetchType),
      (Acquire.getPrefetchType, Acquire.getType),
      (Acquire.putPrefetchType, Acquire.putType),
      (Acquire.putPrefetchType, Acquire.putBlockType))
    allowedTypes.map { case(a, b) => xact_iacq.isBuiltInType(a) && sec.isBuiltInType(b) }.reduce(_||_) &&
      xact_op_code === sec.op_code() &&
      sec.conflicts(xact_addr_block) &&
      xact_allocate
  }

  // First, take care of accepting new acquires or secondary misses
  // Handling of primary and secondary misses' data and write mask merging
  def iacq_can_merge = acquiresAreMergeable(io.iacq()) &&
    state =/= s_idle &&
    state =/= s_meta_resp &&
    state =/= s_meta_write &&
    !all_pending_done &&
    !io.inner.release.fire() &&
    !io.outer.grant.fire() &&
    !io.data.resp.valid &&
    ignt_q.io.enq.ready && ignt_q.io.deq.valid

  innerAcquire(
    can_alloc = Bool(true),
    next = s_meta_read)

  io.inner.acquire.ready := state === s_idle || iacq_can_merge || iacq_same_xact

  // Begin a transaction by getting the current block metadata
  // Defined here because of Chisel default wire demands, used in s_meta_resp
  val coh = io.meta.resp.bits.meta.coh
  val tag_match = io.meta.resp.bits.tag_match
  val is_hit = (if(!isLastLevelCache) tag_match && coh.outer.isHit(xact_op_code)
                else tag_match && coh.outer.isValid())
  val needs_writeback = !tag_match &&
    xact_allocate &&
    (coh.outer.requiresVoluntaryWriteback() ||
     coh.inner.requiresProbesOnVoluntaryWriteback())
  val needs_inner_probes = tag_match && coh.inner.requiresProbes(xact_iacq)
  val should_update_meta = !tag_match && xact_allocate ||
    is_hit && pending_coh_on_hit =/= coh
  def full_representation = io.meta.resp.bits.meta.coh.inner.full()

  metaRead(
    io.meta,
    Mux(needs_writeback, s_wb_req,
      Mux(needs_inner_probes, s_inner_probe,
        Mux(!is_hit, s_outer_acquire, s_busy))))

  updatePendingCohWhen(
    io.meta.resp.valid,
    Mux(is_hit, pending_coh_on_hit,
      Mux(tag_match, coh, pending_coh_on_miss)))

  // Issue a request to the writeback unit
  triggerWriteback(io.wb, s_outer_acquire)

  // Track which clients yet need to be probed and make Probe message
  // If we're probing, we know the tag matches, so if this is the
  // last level cache, we can use the data without upgrading permissions
  val skip_outer_acquire =
    (if(!isLastLevelCache) xact_old_meta.coh.outer.isHit(xact_op_code)
     else xact_old_meta.coh.outer.isValid())

  innerProbe(
    inner_coh.makeProbe(curr_probe_dst, xact_iacq, xact_addr_block),
    Mux(!skip_outer_acquire, s_outer_acquire, s_busy))

  // Handle incoming releases from clients, which may reduce sharer counts
  // and/or write back dirty data, and may be unexpected voluntary releases

  innerRelease() // Don't block on pending_writes because they won't happen until s_busy

  def irel_can_merge = io.irel().conflicts(xact_addr_block) &&
    io.irel().isVoluntary() &&
    !Vec(s_idle, s_meta_read, s_meta_resp, s_meta_write).contains(state) &&
    !all_pending_done &&
    !io.outer.grant.fire() &&
    !io.inner.grant.fire() &&
    !vol_ignt_counter.pending

  io.inner.release.ready := irel_can_merge || irel_same_xact

  updatePendingCohWhen(io.inner.release.fire(), pending_coh_on_irel)

  mergeDataInner(io.inner.release)

  // Send outer request
  outerAcquire(
    caching = xact_allocate,
    coh = xact_old_meta.coh.outer, // TODO outer_coh?
    data = data_buffer(ognt_counter.up.idx),
    wmask = wmask_buffer(ognt_counter.up.idx),
    next = s_busy)

  // Handle the response from outer memory
  updatePendingCohWhen(ognt_counter.down.done, pending_coh_on_ognt)
  mergeDataOuter(io.outer.grant)

  // Send read request and get resp
  // Going back to the original inner transaction:
  // We read from the cache at this level if data wasn't written back or refilled.
  // We may still merge further Gets, requiring further beats to be read.
  // If ECC requires a full writemask, we'll read out data on partial writes as well.
  readDataArray(
    drop_pending_bit = (dropPendingBitWhenBeatHasData(io.inner.release) &
                         dropPendingBitWhenBeatHasData(io.outer.grant)),
    add_pending_bit = addPendingBitWhenBeatNeedsRead(io.inner.acquire, Bool(alwaysWriteFullBeat)),
    block_pending_read = ognt_counter.pending)

  // No override for first accepted acquire
  val alloc_override = xact_allocate && (state =/= s_idle)

  // Do write
  // We write data to the cache at this level if it was Put here with allocate flag,
  // written back dirty, or refilled from outer memory.
  writeDataArray(
    add_pending_bit = (addPendingBitWhenBeatHasDataAndAllocs(io.inner.acquire, alloc_override) |
                        addPendingBitWhenBeatHasData(io.inner.release) |
                        addPendingBitWhenBeatHasData(io.outer.grant, xact_allocate)),
    block_pending_write = (ognt_counter.pending ||
                            pending_put_data.orR ||
                            pending_reads(curr_write_beat) ||
                            pending_resps(curr_write_beat)))

  // Acknowledge or respond with data
  innerGrant(
    data = Mux(xact_iacq.isAtomic(), amo_result, data_buffer(ignt_data_idx)),
    external_pending = pending_writes.orR || ognt_counter.pending,
    add = addPendingBitInternal(io.data.resp))

  updatePendingCohWhen(io.inner.grant.fire() && io.ignt().last(), pending_coh_on_ignt)

  // End a transaction by updating the block metadata
  metaWrite(io.meta, L2Metadata(xact_addr_tag, pending_coh), s_idle)

  // Initialization of some scoreboard logic based on the original
  // Acquire message and the results of the metadata read:
  when(state === s_meta_resp && io.meta.resp.valid) {
    // If some kind of Put is marked no-allocate but is already in the cache,
    // we need to write its data to the data array
    when(is_hit && !xact_allocate && xact_iacq.hasData()) {
      pending_writes := addPendingBitsFromAcquire(xact_iacq)
      xact_allocate := Bool(true)
    }
    when (needs_inner_probes) { initializeProbes() }
    pending_meta_write := should_update_meta //TODO what edge case was this covering?
  }

  // Initialize more transaction metadata
  when(iacq_is_allocating) {
    amo_result := UInt(0)
    pending_meta_write := Bool(false)
    pending_reads := Mux( // Pick out the specific beats of data that need to be read
      io.iacq().isBuiltInType(Acquire.getBlockType) || !io.iacq().isBuiltInType(),
      ~UInt(0, width = innerDataBeats),
      addPendingBitWhenBeatNeedsRead(io.inner.acquire, Bool(alwaysWriteFullBeat)))
    pending_writes := addPendingBitWhenBeatHasDataAndAllocs(io.inner.acquire)
    pending_resps := UInt(0)
  }

  initDataInner(io.inner.acquire, iacq_is_allocating || iacq_is_merging)

  // Wait for everything to quiesce
  quiesce(Mux(pending_meta_write, s_meta_write, s_idle)) { clearWmaskBuffer() }
}

class L2WritebackReq(implicit p: Parameters) extends L2Metadata()(p) with HasL2Id {
  val idx = Bits(width = idxBits)
  val way_en = Bits(width = nWays)
}

class L2WritebackResp(implicit p: Parameters) extends L2HellaCacheBundle()(p) with HasL2Id

class L2WritebackIO(implicit p: Parameters) extends L2HellaCacheBundle()(p) {
  val req = Decoupled(new L2WritebackReq)
  val resp = Valid(new L2WritebackResp).flip
}

trait HasL2WritebackIO extends HasOuterCacheParameters {
  val wb = new L2WritebackIO()
}

class L2WritebackUnitIO(implicit p: Parameters) extends HierarchicalXactTrackerIO()(p)
    with HasL2DataRWIO {
  val wb = new L2WritebackIO().flip()
}

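// Evicts a block on behalf of a tracker: probes inner clients to collect any
// dirty copies, reads remaining data from the data array, releases the block
// to outer memory if required, and signals the requesting tracker when done.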
class L2WritebackUnit(val trackerId: Int)(implicit p: Parameters) extends XactTracker()(p)
    with AcceptsVoluntaryReleases
    with EmitsVoluntaryReleases
    with EmitsInnerProbes
    with ReadsFromOuterCacheDataArray
    with RoutesInParent {
  val io = new L2WritebackUnitIO
  pinAllReadyValidLow(io)

  val xact_id = Reg{ io.wb.req.bits.id }

  val pending_coh_on_irel = HierarchicalMetadata(
    inner_coh.onRelease(io.irel()), // Drop sharer
    Mux(io.irel().hasData(), // Dirty writeback
      outer_coh.onHit(M_XWR),
      outer_coh))

  routeInParent()

  // Start the writeback sub-transaction
  io.wb.req.ready := state === s_idle

  // Track which clients yet need to be probed and make Probe message
  innerProbe(
    inner_coh.makeProbeForVoluntaryWriteback(curr_probe_dst, xact_addr_block),
    s_busy)

  // Handle incoming releases from clients, which may reduce sharer counts
  // and/or write back dirty data
  innerRelease()

  def irel_can_merge = io.irel().conflicts(xact_addr_block) &&
    io.irel().isVoluntary() &&
    (state =/= s_idle) &&
    !(state === s_busy && all_pending_done) &&
    !vol_ignt_counter.pending

  io.inner.release.ready := irel_can_merge || irel_same_xact

  updatePendingCohWhen(io.inner.release.fire(), pending_coh_on_irel)

  mergeDataInner(io.inner.release)

  // If a release didn't write back data, have to read it from data array
  readDataArray(drop_pending_bit = dropPendingBitWhenBeatHasData(io.inner.release))

  // Once the data is buffered we can write it back to outer memory
  outerRelease(
    coh = outer_coh,
    data = data_buffer(vol_ognt_counter.up.idx),
    add_pending_bit = addPendingBitInternal(io.data.resp))

  // Respond to the initiating transaction handler signalling completion of the writeback
  io.wb.resp.valid := state === s_busy && all_pending_done
  io.wb.resp.bits.id := xact_id

  quiesce() {}

  def full_representation = io.wb.req.bits.coh.inner.full()

  // State machine updates and transaction handler metadata initialization
  when(state === s_idle && io.wb.req.valid) {
    xact_id := io.wb.req.bits.id
    xact_way_en := io.wb.req.bits.way_en
    xact_addr_block := (if (cacheIdBits == 0) Cat(io.wb.req.bits.tag, io.wb.req.bits.idx)
                        else Cat(io.wb.req.bits.tag, io.wb.req.bits.idx, UInt(cacheId, cacheIdBits)))
    val coh = io.wb.req.bits.coh
    val needs_inner_probes = coh.inner.requiresProbesOnVoluntaryWriteback()
    val needs_outer_release = coh.outer.requiresVoluntaryWriteback()
    when(needs_inner_probes) { initializeProbes() }
    pending_reads := Mux(needs_outer_release, ~UInt(0, width = innerDataBeats), UInt(0))
    pending_resps := UInt(0)
    pending_orel := needs_outer_release
    //pending_orel_data := UInt(0)
    pending_coh := coh
    state := Mux(needs_inner_probes, s_inner_probe, s_busy)
  }
}