1
0
rocket-chip/uncore/src/main/scala/cache.scala

1042 lines
45 KiB
Scala
Raw Normal View History

2014-09-13 00:31:38 +02:00
// See LICENSE for license details.
2014-04-23 01:55:35 +02:00
package uncore
import Chisel._
import scala.reflect.ClassTag
2015-07-30 03:04:30 +02:00
import junctions._
2015-10-22 03:16:44 +02:00
import cde.{Parameters, Field}
2014-04-23 01:55:35 +02:00
2014-08-23 10:19:36 +02:00
// Configuration keys for cache geometry and behavior, looked up through the
// cde Parameters system (p(Key)) by HasCacheParameters and friends below.
case object CacheName extends Field[String]
case object NSets extends Field[Int]
case object NWays extends Field[Int]
case object RowBits extends Field[Int]
// Factory producing the replacement policy used to pick victim ways
case object Replacer extends Field[() => ReplacementPolicy]
case object AmoAluOperandBits extends Field[Int]
2015-03-18 06:53:50 +01:00
case object NPrimaryMisses extends Field[Int]
case object NSecondaryMisses extends Field[Int]
case object CacheBlockBytes extends Field[Int]
case object CacheBlockOffsetBits extends Field[Int]
// Optional ECC code; None falls back to IdentityCode (no ECC) — see HasCacheParameters.code
case object ECCCode extends Field[Option[Code]]
2015-10-06 06:41:46 +02:00
/** Cache geometry parameters derived from the implicit Parameters instance.
 *  Mixed into both Modules and Bundles that need set/way/row dimensions. */
trait HasCacheParameters {
implicit val p: Parameters
val nSets = p(NSets)
val blockOffBits = p(CacheBlockOffsetBits)
val idxBits = log2Up(nSets)
// Address bits below the tag: block offset plus set index
val untagBits = blockOffBits + idxBits
2015-10-06 06:41:46 +02:00
val tagBits = p(PAddrBits) - untagBits
val nWays = p(NWays)
val wayBits = log2Up(nWays)
val isDM = nWays == 1 // direct-mapped when there is only one way
2015-10-06 06:41:46 +02:00
val rowBits = p(RowBits)
2014-08-12 23:55:44 +02:00
val rowBytes = rowBits/8
val rowOffBits = log2Up(rowBytes)
2015-10-06 06:41:46 +02:00
// ECC code applied to stored data; identity (no-op) code when ECC is not configured
val code = p(ECCCode).getOrElse(new IdentityCode)
}
2015-10-06 06:41:46 +02:00
// Convenience base classes that mix cache geometry parameters into
// Chisel Modules and Bundles respectively.
abstract class CacheModule(implicit val p: Parameters) extends Module
with HasCacheParameters
abstract class CacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
with HasCacheParameters
2014-04-23 01:55:35 +02:00
/** Interface for way-replacement policies.
 *  `way` is the combinationally-selected victim; `miss`/`hit` are update
 *  hooks invoked by the cache on the corresponding events. */
abstract class ReplacementPolicy {
def way: UInt
def miss: Unit
def hit: Unit
}
/** Pseudo-random replacement: a 16-bit LFSR is advanced on every miss and
 *  its low-order bits select the victim way. With a single way the victim
 *  is trivially way 0. */
class RandomReplacement(ways: Int) extends ReplacementPolicy {
  // Pulsed high for one cycle on each miss to step the LFSR.
  private val advance = Wire(Bool())
  advance := Bool(false)
  val lfsr = LFSR16(advance)
  def way = if (ways > 1) lfsr(log2Up(ways)-1, 0) else UInt(0)
  def miss = advance := Bool(true)
  def hit = {}
}
2015-10-06 06:41:46 +02:00
/** Base class for cache metadata: a tag plus coherence state supplied by subclasses. */
abstract class Metadata(implicit p: Parameters) extends CacheBundle()(p) {
val tag = Bits(width = tagBits)
val coh: CoherenceMetadata
2014-04-23 01:55:35 +02:00
}
2015-10-06 06:41:46 +02:00
/** Metadata read request: just the set index to look up. */
class MetaReadReq(implicit p: Parameters) extends CacheBundle()(p) {
val idx = Bits(width = idxBits)
2014-04-23 01:55:35 +02:00
}
2015-10-06 06:41:46 +02:00
/** Metadata write request: set index (inherited), per-way write-enable mask,
 *  and the metadata payload to store. */
class MetaWriteReq[T <: Metadata](gen: T)(implicit p: Parameters) extends MetaReadReq()(p) {
val way_en = Bits(width = nWays)
2015-07-16 03:06:27 +02:00
val data = gen.cloneType
2015-10-06 06:41:46 +02:00
override def cloneType = new MetaWriteReq(gen)(p).asInstanceOf[this.type]
2014-04-23 01:55:35 +02:00
}
2015-10-06 06:41:46 +02:00
/** Synchronous-read metadata array covering all sets and ways.
 *  After reset, every set is initialized to onReset(), one set per cycle;
 *  reads and writes are refused (not ready) until the sweep completes. */
class MetadataArray[T <: Metadata](onReset: () => T)(implicit p: Parameters) extends CacheModule()(p) {
val rstVal = onReset()
2014-04-23 01:55:35 +02:00
val io = new Bundle {
2014-04-24 01:24:20 +02:00
val read = Decoupled(new MetaReadReq).flip
2015-07-16 03:06:27 +02:00
val write = Decoupled(new MetaWriteReq(rstVal)).flip
2015-09-12 00:41:39 +02:00
val resp = Vec(rstVal.cloneType, nWays).asOutput
2014-04-23 01:55:35 +02:00
}
// Reset sweep: counts 0..nSets; while rst is high, writes are forced to
// (rst_cnt, rstVal, all ways enabled) regardless of io.write.
val rst_cnt = Reg(init=UInt(0, log2Up(nSets+1)))
val rst = rst_cnt < UInt(nSets)
val waddr = Mux(rst, rst_cnt, io.write.bits.idx)
val wdata = Mux(rst, rstVal, io.write.bits.data).toBits
2015-09-12 00:41:39 +02:00
// SInt(-1) = all ones, i.e. write every way during the reset sweep
val wmask = Mux(rst, SInt(-1), io.write.bits.way_en.toSInt).toBools
2014-04-23 01:55:35 +02:00
when (rst) { rst_cnt := rst_cnt+UInt(1) }
2015-03-01 02:02:13 +01:00
val metabits = rstVal.getWidth
2015-09-12 00:41:39 +02:00
// One row per set; each row holds the packed metadata for all ways.
val tag_arr = SeqMem(Vec(UInt(width = metabits), nWays), nSets)
2014-04-23 01:55:35 +02:00
when (rst || io.write.valid) {
tag_arr.write(waddr, Vec.fill(nWays)(wdata), wmask)
2014-04-23 01:55:35 +02:00
}
2015-09-12 00:41:39 +02:00
// Synchronous read: data appears on io.resp the cycle after read.valid
val tags = tag_arr.read(io.read.bits.idx, io.read.valid).toBits
2015-03-01 02:02:13 +01:00
io.resp := io.resp.fromBits(tags)
2014-04-23 01:55:35 +02:00
// Single-ported: a write (or the reset sweep) blocks a concurrent read.
io.read.ready := !rst && !io.write.valid // so really this could be a 6T RAM
io.write.ready := !rst
}
2014-09-30 23:48:02 +02:00
2015-10-07 03:19:45 +02:00
// Configuration key for the DirectoryRepresentation used by the L2
case object L2DirectoryRepresentation extends Field[DirectoryRepresentation]
2015-10-06 06:41:46 +02:00
/** Parameters specific to the L2 HellaCache: combines basic cache geometry
 *  with coherence-agent parameters and derives refill/beat relationships. */
trait HasL2HellaCacheParameters extends HasCacheParameters with HasCoherenceAgentParameters {
2014-12-17 23:28:14 +01:00
// Set-index bit positions within addr_block
val idxMSB = idxBits-1
val idxLSB = 0
2015-10-14 08:42:28 +02:00
//val blockAddrBits = p(TLBlockAddrBits)
2015-03-01 02:02:13 +01:00
// Internal array rows consumed/produced per outer TileLink beat
val refillCyclesPerBeat = outerDataBits/rowBits
val refillCycles = refillCyclesPerBeat*outerDataBeats
2015-10-06 06:41:46 +02:00
val internalDataBeats = p(CacheBlockBytes)*8/rowBits
require(refillCyclesPerBeat == 1)
2015-10-06 06:41:46 +02:00
val amoAluOperandBits = p(AmoAluOperandBits)
2015-03-01 02:02:13 +01:00
require(amoAluOperandBits <= innerDataBits)
require(rowBits == innerDataBits) // TODO: relax this by improving s_data_* states
2015-10-06 06:41:46 +02:00
val nSecondaryMisses = p(NSecondaryMisses)
val isLastLevelCache = true
2015-10-06 06:41:46 +02:00
// With ECC enabled, partial write masks cannot be applied directly
// (see addPendingBitWhenBeatHasPartialWritemask in L2XactTracker)
val ignoresWriteMask = !p(ECCCode).isEmpty
2014-12-17 23:28:14 +01:00
}
2014-09-30 23:48:02 +02:00
2015-10-06 06:41:46 +02:00
/** Base class for L2 modules; provides helpers for wiring many trackers
 *  to the shared internal metadata/data arrays. */
abstract class L2HellaCacheModule(implicit val p: Parameters) extends Module
with HasL2HellaCacheParameters {
2015-04-14 04:00:40 +02:00
// Round-robin arbitration from several tracker request ports onto one
// internal (meta/data array) port.
def doInternalOutputArbitration[T <: Data : ClassTag](
out: DecoupledIO[T],
ins: Seq[DecoupledIO[T]]) {
2015-07-16 03:06:27 +02:00
val arb = Module(new RRArbiter(out.bits, ins.size))
2015-04-14 04:00:40 +02:00
out <> arb.io.out
arb.io.in <> ins
2015-04-14 04:00:40 +02:00
}
2015-10-06 06:41:46 +02:00
// Route a response back to the tracker whose id matches in.bits.id.
def doInternalInputRouting[T <: Bundle with HasL2Id](in: ValidIO[T], outs: Seq[ValidIO[T]]) {
2015-04-14 04:00:40 +02:00
outs.map(_.bits := in.bits)
2015-04-22 07:23:04 +02:00
outs.zipWithIndex.map { case (o,i) => o.valid := in.valid && in.bits.id === UInt(i) }
2015-04-14 04:00:40 +02:00
}
}
2014-09-30 23:48:02 +02:00
2015-10-06 06:41:46 +02:00
abstract class L2HellaCacheBundle(implicit val p: Parameters) extends ParameterizedBundle()(p)
with HasL2HellaCacheParameters
/** Tags a bundle with the id of the transaction tracker that owns it.
 *  One extra id value beyond nTransactors is reserved (used by the
 *  writeback unit — see TSHRFile). */
trait HasL2Id extends HasCoherenceAgentParameters {
val id = UInt(width = log2Up(nTransactors + 1))
2014-09-30 23:48:02 +02:00
}
2015-10-06 06:41:46 +02:00
/** Result of a metadata lookup: whether the tag matched, the (hit or victim)
 *  metadata, and the way it was found in / should be written to. */
trait HasL2InternalRequestState extends HasL2HellaCacheParameters {
2014-09-30 23:48:02 +02:00
val tag_match = Bool()
val meta = new L2Metadata
val way_en = Bits(width = nWays)
}
2015-10-06 06:41:46 +02:00
/** Adds a beat index within a refilled block. */
trait HasL2BeatAddr extends HasL2HellaCacheParameters {
2015-03-01 02:02:13 +01:00
val addr_beat = UInt(width = log2Up(refillCycles))
}
2015-10-06 06:41:46 +02:00
/** Adds one row of data plus its beat address. */
trait HasL2Data extends HasL2HellaCacheParameters
2015-03-01 02:02:13 +01:00
with HasL2BeatAddr {
val data = UInt(width = rowBits)
def hasData(dummy: Int = 0) = Bool(true)
2015-03-01 02:02:13 +01:00
def hasMultibeatData(dummy: Int = 0) = Bool(refillCycles > 1)
}
2015-10-06 06:41:46 +02:00
/** L2 metadata: tag plus hierarchical (inner + outer) coherence state. */
class L2Metadata(implicit p: Parameters) extends Metadata()(p) with HasL2HellaCacheParameters {
2015-03-01 02:02:13 +01:00
val coh = new HierarchicalMetadata
}
2014-09-30 23:48:02 +02:00
object L2Metadata {
  /** Factory: builds an L2Metadata wire from a tag and coherence state. */
  def apply(tag: Bits, coh: HierarchicalMetadata)(implicit p: Parameters) = {
    val m = Wire(new L2Metadata)
    m.tag := tag
    m.coh := coh
    m
  }
}
2015-10-06 06:41:46 +02:00
/** Metadata read request, carrying the expected tag so the array itself
 *  can perform the comparison, and the requesting tracker's id. */
class L2MetaReadReq(implicit p: Parameters) extends MetaReadReq()(p) with HasL2Id {
2014-09-30 23:48:02 +02:00
val tag = Bits(width = tagBits)
}
2015-10-06 06:41:46 +02:00
/** Metadata write request tagged with the requesting tracker's id. */
class L2MetaWriteReq(implicit p: Parameters) extends MetaWriteReq[L2Metadata](new L2Metadata)(p)
with HasL2Id {
2015-07-16 03:06:27 +02:00
override def cloneType = new L2MetaWriteReq().asInstanceOf[this.type]
}
2015-10-06 06:41:46 +02:00
/** Metadata lookup response: tracker id plus match/metadata/way results. */
class L2MetaResp(implicit p: Parameters) extends L2HellaCacheBundle()(p)
2014-09-30 23:48:02 +02:00
with HasL2Id
with HasL2InternalRequestState
2015-10-06 06:41:46 +02:00
// Request/response port pairs for the shared L2 metadata array.
trait HasL2MetaReadIO extends HasL2HellaCacheParameters {
2014-11-20 00:55:25 +01:00
val read = Decoupled(new L2MetaReadReq)
val resp = Valid(new L2MetaResp).flip
}
2015-10-06 06:41:46 +02:00
trait HasL2MetaWriteIO extends HasL2HellaCacheParameters {
2014-11-20 00:55:25 +01:00
val write = Decoupled(new L2MetaWriteReq)
}
2015-10-06 06:41:46 +02:00
/** Combined read + write port to the L2 metadata array. */
class L2MetaRWIO(implicit p: Parameters) extends L2HellaCacheBundle()(p)
with HasL2MetaReadIO
with HasL2MetaWriteIO
2014-11-20 00:55:25 +01:00
2015-10-06 06:41:46 +02:00
/** L2 metadata array with a two-stage lookup pipeline:
 *  s0: read issued; s1: tag comparison per way; s2: registered match result,
 *  hit coherence state, and replacement-victim selection. */
class L2MetadataArray(implicit p: Parameters) extends L2HellaCacheModule()(p) {
2014-11-20 00:55:25 +01:00
val io = new L2MetaRWIO().flip
2014-09-30 23:48:02 +02:00
2015-03-01 02:02:13 +01:00
def onReset = L2Metadata(UInt(0), HierarchicalMetadata.onReset)
val meta = Module(new MetadataArray(onReset _))
2014-09-30 23:48:02 +02:00
meta.io.read <> io.read
meta.io.write <> io.write
// Stage 1: latch the request while the array performs its synchronous read
val s1_tag = RegEnable(io.read.bits.tag, io.read.valid)
val s1_id = RegEnable(io.read.bits.id, io.read.valid)
def wayMap[T <: Data](f: Int => T) = Vec((0 until nWays).map(f))
val s1_clk_en = Reg(next = io.read.fire())
2014-09-30 23:48:02 +02:00
// A way hits if its tag matches and its outer coherence state is valid
val s1_tag_eq_way = wayMap((w: Int) => meta.io.resp(w).tag === s1_tag)
2015-03-01 02:02:13 +01:00
val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && meta.io.resp(w).coh.outer.isValid()).toBits
2014-09-30 23:48:02 +02:00
// Stage 2: registered hit result and coherence state of the hit way
val s2_tag_match_way = RegEnable(s1_tag_match_way, s1_clk_en)
val s2_tag_match = s2_tag_match_way.orR
val s2_hit_coh = Mux1H(s2_tag_match_way, wayMap((w: Int) => RegEnable(meta.io.resp(w).coh, s1_clk_en)))
2015-10-06 06:41:46 +02:00
// Victim selection for the miss case
val replacer = p(Replacer)()
2014-09-30 23:48:02 +02:00
val s1_replaced_way_en = UIntToOH(replacer.way)
val s2_replaced_way_en = UIntToOH(RegEnable(replacer.way, s1_clk_en))
val s2_repl_meta = Mux1H(s2_replaced_way_en, wayMap((w: Int) =>
RegEnable(meta.io.resp(w), s1_clk_en && s1_replaced_way_en(w))).toSeq)
2014-10-24 06:50:03 +02:00
when(!s2_tag_match) { replacer.miss }
2014-09-30 23:48:02 +02:00
io.resp.valid := Reg(next = s1_clk_en)
io.resp.bits.id := RegEnable(s1_id, s1_clk_en)
io.resp.bits.tag_match := s2_tag_match
// NOTE(review): on a hit the returned tag comes from the replacement
// metadata, not the hit way — presumably don't-care when tag_match is
// set, since the requester already knows the tag; confirm against users.
io.resp.bits.meta := Mux(s2_tag_match,
L2Metadata(s2_repl_meta.tag, s2_hit_coh),
s2_repl_meta)
io.resp.bits.way_en := Mux(s2_tag_match, s2_tag_match_way, s2_replaced_way_en)
}
2015-10-06 06:41:46 +02:00
/** Data array read request: set index, way enable, beat, and tracker id. */
class L2DataReadReq(implicit p: Parameters) extends L2HellaCacheBundle()(p)
2015-03-01 02:02:13 +01:00
with HasL2BeatAddr
with HasL2Id {
val addr_idx = UInt(width = idxBits)
2014-09-30 23:48:02 +02:00
val way_en = Bits(width = nWays)
}
2015-10-06 06:41:46 +02:00
/** Data array write request: read-request fields plus data and a byte mask. */
class L2DataWriteReq(implicit p: Parameters) extends L2DataReadReq()(p)
with HasL2Data {
2015-03-01 02:02:13 +01:00
val wmask = Bits(width = rowBits/8)
2014-09-30 23:48:02 +02:00
}
2015-10-06 06:41:46 +02:00
/** Data array read response: one row of data, beat address, and tracker id. */
class L2DataResp(implicit p: Parameters) extends L2HellaCacheBundle()(p)
with HasL2Id
with HasL2Data
2014-09-30 23:48:02 +02:00
2015-10-06 06:41:46 +02:00
// Request/response port pairs for the shared L2 data array.
trait HasL2DataReadIO extends HasL2HellaCacheParameters {
2014-11-20 00:55:25 +01:00
val read = Decoupled(new L2DataReadReq)
val resp = Valid(new L2DataResp).flip
}
2015-10-06 06:41:46 +02:00
trait HasL2DataWriteIO extends HasL2HellaCacheParameters {
2014-11-20 00:55:25 +01:00
val write = Decoupled(new L2DataWriteReq)
}
2015-10-06 06:41:46 +02:00
/** Combined read + write port to the L2 data array. */
class L2DataRWIO(implicit p: Parameters) extends L2HellaCacheBundle()(p)
with HasL2DataReadIO
with HasL2DataWriteIO
2014-11-20 00:55:25 +01:00
2015-10-06 06:41:46 +02:00
/** L2 data array: one byte-masked SeqMem holding every row of every way and
 *  set. Responses are returned `delay` cycles after the synchronous read. */
class L2DataArray(delay: Int)(implicit p: Parameters) extends L2HellaCacheModule()(p) {
2014-11-20 00:55:25 +01:00
val io = new L2DataRWIO().flip
2014-09-30 23:48:02 +02:00
2015-09-12 00:41:39 +02:00
// Byte-sliced rows so the write mask can enable individual bytes
val array = SeqMem(Vec(Bits(width=8), rowBits/8), nWays*nSets*refillCycles)
2015-07-11 22:36:14 +02:00
// Single port: writes take priority over reads (see io.read.ready below)
val ren = !io.write.valid && io.read.valid
val raddr = Cat(OHToUInt(io.read.bits.way_en), io.read.bits.addr_idx, io.read.bits.addr_beat)
2015-07-11 22:36:14 +02:00
val waddr = Cat(OHToUInt(io.write.bits.way_en), io.write.bits.addr_idx, io.write.bits.addr_beat)
2015-09-12 00:41:39 +02:00
// Slice the incoming row into bytes to match the array layout
val wdata = Vec.tabulate(rowBits/8)(i => io.write.bits.data(8*(i+1)-1,8*i))
val wmask = io.write.bits.wmask.toBools
when (io.write.valid) { array.write(waddr, wdata, wmask) }
2015-01-26 00:37:04 +01:00
// Delay the request metadata alongside the read data so id/beat line up
val r_req = Pipe(io.read.fire(), io.read.bits)
io.resp := Pipe(r_req.valid, r_req.bits, delay)
2015-09-12 00:41:39 +02:00
io.resp.bits.data := Pipe(r_req.valid, array.read(raddr, ren).toBits, delay).bits
2015-01-26 00:37:04 +01:00
io.read.ready := !io.write.valid
2014-09-30 23:48:02 +02:00
io.write.ready := Bool(true)
}
2015-10-06 06:41:46 +02:00
/** One bank of the L2: a transaction-handler file (TSHRFile) plus its shared
 *  metadata and data arrays, wired to the inner/outer TileLink ports. */
class L2HellaCacheBank(implicit p: Parameters) extends HierarchicalCoherenceAgent()(p)
with HasL2HellaCacheParameters {
2014-09-30 23:48:02 +02:00
require(isPow2(nSets))
require(isPow2(nWays))
val meta = Module(new L2MetadataArray) // TODO: add delay knob
val data = Module(new L2DataArray(1))
val tshrfile = Module(new TSHRFile)
2015-09-12 00:41:39 +02:00
io.inner <> tshrfile.io.inner
2014-09-30 23:48:02 +02:00
io.outer <> tshrfile.io.outer
2015-09-12 00:41:39 +02:00
tshrfile.io.incoherent <> io.incoherent
meta.io <> tshrfile.io.meta
data.io <> tshrfile.io.data
2014-09-30 23:48:02 +02:00
}
2015-10-06 06:41:46 +02:00
/** TSHRFile IO: hierarchical TileLink ports plus the ports to the
 *  bank-local metadata and data arrays. */
class TSHRFileIO(implicit p: Parameters) extends HierarchicalTLIO()(p) {
2015-03-01 02:02:13 +01:00
val meta = new L2MetaRWIO
val data = new L2DataRWIO
}
2014-09-30 23:48:02 +02:00
2015-10-06 06:41:46 +02:00
/** Transaction Status/Handler Register file: holds all outstanding-transaction
 *  trackers plus the writeback unit, routes incoming Acquires/Releases to the
 *  right tracker, and arbitrates their access to the outer port and to the
 *  shared metadata/data arrays. */
class TSHRFile(implicit p: Parameters) extends L2HellaCacheModule()(p)
with HasCoherenceAgentWiringHelpers {
2015-03-01 02:02:13 +01:00
val io = new TSHRFileIO
2014-09-30 23:48:02 +02:00
// Create TSHRs for outstanding transactions
val trackerList = (0 until nReleaseTransactors).map(id => Module(new L2VoluntaryReleaseTracker(id))) ++
(nReleaseTransactors until nTransactors).map(id => Module(new L2AcquireTracker(id)))
2014-09-30 23:48:02 +02:00
// WritebackUnit evicts data from L2, including invalidating L1s
// (it takes the id one past the last tracker, matching HasL2Id's width)
val wb = Module(new L2WritebackUnit(nTransactors))
val trackerAndWbIOs = trackerList.map(_.io) :+ wb.io
2015-04-14 04:00:40 +02:00
doInternalOutputArbitration(wb.io.wb.req, trackerList.map(_.io.wb.req))
doInternalInputRouting(wb.io.wb.resp, trackerList.map(_.io.wb.resp))
2014-09-30 23:48:02 +02:00
// Propagate incoherence flags
2015-08-27 18:57:36 +02:00
(trackerList.map(_.io.incoherent) :+ wb.io.incoherent) foreach { _ := io.incoherent }
2014-09-30 23:48:02 +02:00
// Handle acquire transaction initiation: prefer a tracker that already
// matches this transaction (secondary miss), else any ready tracker;
// block entirely while any tracker reports an address conflict.
val trackerAcquireIOs = trackerList.map(_.io.inner.acquire)
val acquireConflicts = Vec(trackerList.map(_.io.has_acquire_conflict)).toBits
val acquireMatches = Vec(trackerList.map(_.io.has_acquire_match)).toBits
val acquireReadys = Vec(trackerAcquireIOs.map(_.ready)).toBits
val acquire_idx = Mux(acquireMatches.orR,
OHToUInt(acquireMatches),
PriorityEncoder(acquireReadys))
val block_acquires = acquireConflicts.orR
io.inner.acquire.ready := acquireReadys.orR && !block_acquires
trackerAcquireIOs.zipWithIndex.foreach {
case(tracker, i) =>
tracker.bits := io.inner.acquire.bits
tracker.valid := io.inner.acquire.valid && !block_acquires && (acquire_idx === UInt(i))
2014-09-30 23:48:02 +02:00
}
assert(PopCount(acquireMatches) <= UInt(1),
"At most a single tracker should match for any given Acquire")
2014-09-30 23:48:02 +02:00
// Wire releases from clients: broadcast to all trackers and the writeback
// unit; each tracker raises has_release_match if the Release is for it.
val releaseReadys = Vec(trackerAndWbIOs.map(_.inner.release.ready)).toBits
val releaseMatches = Vec(trackerAndWbIOs.map(_.has_release_match)).toBits
io.inner.release.ready := (releaseMatches & releaseReadys).orR
trackerAndWbIOs foreach { tracker =>
tracker.inner.release.bits := io.inner.release.bits
tracker.inner.release.valid := io.inner.release.valid && tracker.has_release_match
2014-09-30 23:48:02 +02:00
}
// NOTE(review): the message says "a single tracker" but the condition
// permits up to nReleaseTransactors matches — confirm which is intended.
assert(PopCount(releaseMatches) <= UInt(nReleaseTransactors),
"At most a single tracker should match for any given Release")
assert(!(io.inner.release.valid && !releaseMatches.orR),
"Non-voluntary release should always have a Tracker waiting for it.")
2014-09-30 23:48:02 +02:00
2015-03-01 02:02:13 +01:00
// Wire probe requests and grant reply to clients, finish acks from clients
doOutputArbitration(io.inner.probe, trackerList.map(_.io.inner.probe) :+ wb.io.inner.probe)
2015-03-01 02:02:13 +01:00
doOutputArbitration(io.inner.grant, trackerList.map(_.io.inner.grant))
doInputRouting(io.inner.finish, trackerList.map(_.io.inner.finish))
// Create an arbiter for the one memory port
val outerList = trackerList.map(_.io.outer) :+ wb.io.outer
2015-10-14 08:42:28 +02:00
val outer_arb = Module(new ClientTileLinkIOArbiter(outerList.size)
(p.alterPartial({ case TLId => p(OuterTLId)})))
outer_arb.io.in <> outerList
2014-09-30 23:48:02 +02:00
io.outer <> outer_arb.io.out
2015-04-14 04:00:40 +02:00
// Wire local memory arrays
doInternalOutputArbitration(io.meta.read, trackerList.map(_.io.meta.read))
doInternalOutputArbitration(io.meta.write, trackerList.map(_.io.meta.write))
doInternalOutputArbitration(io.data.read, trackerList.map(_.io.data.read) :+ wb.io.data.read)
doInternalOutputArbitration(io.data.write, trackerList.map(_.io.data.write))
doInternalInputRouting(io.meta.resp, trackerList.map(_.io.meta.resp))
doInternalInputRouting(io.data.resp, trackerList.map(_.io.data.resp) :+ wb.io.data.resp)
}
2015-10-06 06:41:46 +02:00
/** Per-tracker IO: the generic tracker ports plus ports to the shared
 *  data array, metadata array, and writeback unit. */
class L2XactTrackerIO(implicit p: Parameters) extends HierarchicalXactTrackerIO()(p) {
2015-03-01 02:02:13 +01:00
val data = new L2DataRWIO
val meta = new L2MetaRWIO
val wb = new L2WritebackIO
2014-09-30 23:48:02 +02:00
}
2015-10-06 06:41:46 +02:00
/** Common machinery for L2 transaction trackers: beat counters for data
 *  flowing through the internal arrays, and helpers to set/clear per-beat
 *  pending bits as data beats are sent or received. */
abstract class L2XactTracker(implicit p: Parameters) extends XactTracker()(p)
with HasL2HellaCacheParameters {
2015-04-27 21:56:33 +02:00
class CacheBlockBuffer { // TODO
2015-10-06 06:41:46 +02:00
val buffer = Reg(Bits(width = p(CacheBlockBytes)*8))
def internal = Vec(Bits(width = rowBits), internalDataBeats).fromBits(buffer)
def inner = Vec(Bits(width = innerDataBits), innerDataBeats).fromBits(buffer)
def outer = Vec(Bits(width = outerDataBits), outerDataBeats).fromBits(buffer)
}
2015-03-01 02:02:13 +01:00
// Returns (beat index, done): counts beats of a whole block when
// full_block is set, otherwise passes through the single supplied beat.
def connectDataBeatCounter[S <: L2HellaCacheBundle](inc: Bool, data: S, beat: UInt, full_block: Bool) = {
if(data.refillCycles > 1) {
2015-03-11 09:56:47 +01:00
val (multi_cnt, multi_done) = Counter(full_block && inc, data.refillCycles)
2015-03-01 02:02:13 +01:00
(Mux(!full_block, beat, multi_cnt), Mux(!full_block, inc, multi_done))
} else { (UInt(0), inc) }
}
2015-03-18 01:51:00 +01:00
2015-10-06 06:41:46 +02:00
// Beat counter for a Decoupled internal-array port (counts on fire()).
def connectInternalDataBeatCounter[T <: L2HellaCacheBundle with HasL2BeatAddr](
2015-03-01 02:02:13 +01:00
in: DecoupledIO[T],
beat: UInt = UInt(0),
full_block: Bool = Bool(true)): (UInt, Bool) = {
2015-03-01 02:02:13 +01:00
connectDataBeatCounter(in.fire(), in.bits, beat, full_block)
}
2015-03-18 01:51:00 +01:00
2015-10-06 06:41:46 +02:00
// Done signal for a Valid internal-array response port (counts on valid).
def connectInternalDataBeatCounter[T <: L2HellaCacheBundle with HasL2Data](
2015-03-01 02:02:13 +01:00
in: ValidIO[T],
full_block: Bool): Bool = {
2015-03-01 02:02:13 +01:00
connectDataBeatCounter(in.valid, in.bits, UInt(0), full_block)._2
2014-09-30 23:48:02 +02:00
}
2015-03-18 01:51:00 +01:00
// Pending-bit helpers: one bit per beat; add* returns the bits to OR in,
// drop* returns the mask to AND with, keyed off the port's addr_beat.
2015-10-06 06:41:46 +02:00
def addPendingBitInternal[T <: L2HellaCacheBundle with HasL2BeatAddr](in: DecoupledIO[T]) =
2015-03-18 01:51:00 +01:00
Fill(in.bits.refillCycles, in.fire()) & UIntToOH(in.bits.addr_beat)
2015-10-06 06:41:46 +02:00
def addPendingBitInternal[T <: L2HellaCacheBundle with HasL2BeatAddr](in: ValidIO[T]) =
2015-03-18 06:44:53 +01:00
Fill(in.bits.refillCycles, in.valid) & UIntToOH(in.bits.addr_beat)
2015-10-06 06:41:46 +02:00
def dropPendingBit[T <: L2HellaCacheBundle with HasL2BeatAddr] (in: DecoupledIO[T]) =
2015-03-18 02:07:52 +01:00
~Fill(in.bits.refillCycles, in.fire()) | ~UIntToOH(in.bits.addr_beat)
2015-03-18 01:51:00 +01:00
2015-10-06 06:41:46 +02:00
def dropPendingBitInternal[T <: L2HellaCacheBundle with HasL2BeatAddr] (in: ValidIO[T]) =
2015-03-18 02:07:52 +01:00
~Fill(in.bits.refillCycles, in.valid) | ~UIntToOH(in.bits.addr_beat)
// Beats with a partial write mask need a read-modify-write when the data
// array cannot honor byte masks (ignoresWriteMask, i.e. ECC enabled).
def addPendingBitWhenBeatHasPartialWritemask(in: DecoupledIO[AcquireFromSrc]): UInt = {
val a = in.bits
val isPartial = a.wmask() != Acquire.fullWriteMask
addPendingBitWhenBeat(in.fire() && isPartial && Bool(ignoresWriteMask), a)
}
2015-04-27 21:56:33 +02:00
// Recursively drive all output ready/valid signals in a bundle low, so a
// tracker only asserts the ones it explicitly uses.
def pinAllReadyValidLow[T <: Data](b: Bundle) {
b.elements.foreach {
_._2 match {
2015-10-06 06:41:46 +02:00
case d: DecoupledIO[_] =>
2015-04-27 21:56:33 +02:00
if(d.ready.dir == OUTPUT) d.ready := Bool(false)
else if(d.valid.dir == OUTPUT) d.valid := Bool(false)
2015-10-06 06:41:46 +02:00
case v: ValidIO[_] => if(v.valid.dir == OUTPUT) v.valid := Bool(false)
2015-04-27 21:56:33 +02:00
case b: Bundle => pinAllReadyValidLow(b)
case _ =>
}
}
}
2014-09-30 23:48:02 +02:00
}
2015-10-06 06:41:46 +02:00
/** Tracker for voluntary writebacks (Releases) from inner clients.
 *  FSM: idle -> meta_read -> meta_resp -> busy (write data, send ack)
 *  -> meta_write -> idle. Data beats are buffered in xact.data_buffer and
 *  written to the data array one beat at a time. */
class L2VoluntaryReleaseTracker(trackerId: Int)(implicit p: Parameters) extends L2XactTracker()(p) {
2015-03-01 02:02:13 +01:00
val io = new L2XactTrackerIO
2015-04-27 21:56:33 +02:00
pinAllReadyValidLow(io)
2015-03-01 02:02:13 +01:00
2015-04-27 21:56:33 +02:00
val s_idle :: s_meta_read :: s_meta_resp :: s_busy :: s_meta_write :: Nil = Enum(UInt(), 5)
2014-09-30 23:48:02 +02:00
val state = Reg(init=s_idle)
2015-10-22 03:16:44 +02:00
// Buffered copy of the Release being handled, plus the metadata lookup results
val xact = Reg(new BufferedReleaseFromSrc()(p.alterPartial({case TLId => p(InnerTLId)})))
val xact_way_en = Reg{ Bits(width = nWays) }
2015-04-27 21:56:33 +02:00
val xact_old_meta = Reg{ new L2Metadata }
val coh = xact_old_meta.coh
2015-04-27 21:56:33 +02:00
// Per-beat progress bits: beats still expected from the client, beats
// still to be written to the data array, and whether an ack is still owed
val pending_irels = Reg(init=Bits(0, width = io.inner.tlDataBeats))
val pending_writes = Reg(init=Bits(0, width = io.inner.tlDataBeats))
val pending_ignt = Reg(init=Bool(false))
2014-09-30 23:48:02 +02:00
2015-04-27 21:56:33 +02:00
val all_pending_done =
!(pending_writes.orR ||
pending_ignt)
2014-09-30 23:48:02 +02:00
2015-04-27 21:56:33 +02:00
// Accept a voluntary Release (and any further beats of data)
pending_irels := (pending_irels & dropPendingBitWhenBeatHasData(io.inner.release))
io.inner.release.ready := state === s_idle || pending_irels.orR
2015-10-14 08:42:28 +02:00
when(io.inner.release.fire()) { xact.data_buffer(io.irel().addr_beat) := io.irel().data }
2014-10-24 06:50:03 +02:00
2015-04-27 21:56:33 +02:00
// Begin a transaction by getting the current block metadata
io.meta.read.valid := state === s_meta_read
io.meta.read.bits.id := UInt(trackerId)
io.meta.read.bits.idx := xact.addr_block(idxMSB,idxLSB)
io.meta.read.bits.tag := xact.addr_block >> UInt(idxBits)
2014-09-30 23:48:02 +02:00
2015-04-27 21:56:33 +02:00
// Write the voluntarily written back data to this cache, draining the
// pending_writes bits lowest-beat-first
pending_writes := (pending_writes & dropPendingBit(io.data.write)) |
addPendingBitWhenBeatHasData(io.inner.release)
val curr_write_beat = PriorityEncoder(pending_writes)
io.data.write.valid := state === s_busy && pending_writes.orR
2014-11-20 00:55:25 +01:00
io.data.write.bits.id := UInt(trackerId)
io.data.write.bits.way_en := xact_way_en
2015-03-01 02:02:13 +01:00
io.data.write.bits.addr_idx := xact.addr_block(idxMSB,idxLSB)
2015-04-27 21:56:33 +02:00
io.data.write.bits.addr_beat := curr_write_beat
2015-08-27 18:57:36 +02:00
io.data.write.bits.wmask := ~UInt(0, io.data.write.bits.wmask.getWidth)
2015-10-14 08:42:28 +02:00
io.data.write.bits.data := xact.data_buffer(curr_write_beat)
2015-04-27 21:56:33 +02:00
// Send an acknowledgement (only after all data beats have arrived)
io.inner.grant.valid := state === s_busy && pending_ignt && !pending_irels
io.inner.grant.bits := coh.inner.makeGrant(xact, UInt(trackerId))
when(io.inner.grant.fire()) { pending_ignt := Bool(false) }
// End a transaction by updating the block metadata
io.meta.write.valid := state === s_meta_write
2014-11-20 00:55:25 +01:00
io.meta.write.bits.id := UInt(trackerId)
2015-03-01 02:02:13 +01:00
io.meta.write.bits.idx := xact.addr_block(idxMSB,idxLSB)
io.meta.write.bits.way_en := xact_way_en
2015-03-01 02:02:13 +01:00
io.meta.write.bits.data.tag := xact.addr_block >> UInt(idxBits)
io.meta.write.bits.data.coh.inner := xact_old_meta.coh.inner.onRelease(xact)
2015-04-27 21:56:33 +02:00
// A Release carrying data makes the outer copy dirty (treated as a write hit)
io.meta.write.bits.data.coh.outer := Mux(xact.hasData(),
xact_old_meta.coh.outer.onHit(M_XWR),
xact_old_meta.coh.outer)
2014-09-30 23:48:02 +02:00
2015-04-27 21:56:33 +02:00
// State machine updates and transaction handler metadata intialization
when(state === s_idle && io.inner.release.valid) {
xact := io.irel()
when(io.irel().hasMultibeatData()) {
pending_irels := dropPendingBitWhenBeatHasData(io.inner.release)
}. otherwise {
pending_irels := UInt(0)
2014-09-30 23:48:02 +02:00
}
2015-04-27 21:56:33 +02:00
pending_writes := addPendingBitWhenBeatHasData(io.inner.release)
pending_ignt := io.irel().requiresAck()
state := s_meta_read
2014-09-30 23:48:02 +02:00
}
2015-04-27 21:56:33 +02:00
when(state === s_meta_read && io.meta.read.ready) { state := s_meta_resp }
when(state === s_meta_resp && io.meta.resp.valid) {
xact_old_meta := io.meta.resp.bits.meta
xact_way_en := io.meta.resp.bits.way_en
state := s_busy
}
when(state === s_busy && all_pending_done) { state := s_meta_write }
when(state === s_meta_write && io.meta.write.ready) { state := s_idle }
// These IOs are used for routing in the parent
io.has_release_match := io.irel().isVoluntary()
io.has_acquire_match := Bool(false)
io.has_acquire_conflict := Bool(false)
2015-04-14 00:57:06 +02:00
// Checks for illegal behavior
assert(!(state === s_meta_resp && io.meta.resp.valid && !io.meta.resp.bits.tag_match),
"VoluntaryReleaseTracker accepted Release for a block not resident in this cache!")
2015-04-27 21:56:33 +02:00
assert(!(state === s_idle && io.inner.release.fire() && !io.irel().isVoluntary()),
"VoluntaryReleaseTracker accepted Release that wasn't voluntary!")
2014-09-30 23:48:02 +02:00
}
2015-03-17 07:41:56 +01:00
2015-10-06 06:41:46 +02:00
class L2AcquireTracker(trackerId: Int)(implicit p: Parameters) extends L2XactTracker()(p) {
2015-03-01 02:02:13 +01:00
val io = new L2XactTrackerIO
2015-04-27 21:56:33 +02:00
pinAllReadyValidLow(io)
val s_idle :: s_meta_read :: s_meta_resp :: s_wb_req :: s_wb_resp :: s_inner_probe :: s_outer_acquire :: s_busy :: s_meta_write :: Nil = Enum(UInt(), 9)
2014-09-30 23:48:02 +02:00
val state = Reg(init=s_idle)
// State holding transaction metadata
2015-10-22 03:16:44 +02:00
val xact = Reg(new BufferedAcquireFromSrc()(p.alterPartial({ case TLId => p(InnerTLId) })))
val wmask_buffer = Reg(init=Vec.fill(innerDataBeats)(UInt(0, width = innerDataBits/8)))
val xact_tag_match = Reg{ Bool() }
val xact_way_en = Reg{ Bits(width = nWays) }
val xact_old_meta = Reg{ new L2Metadata }
2015-07-16 03:06:27 +02:00
val pending_coh = Reg{ xact_old_meta.coh }
2015-03-01 02:02:13 +01:00
// Secondary miss queue
2015-10-14 08:42:28 +02:00
val ignt_q = Module(new Queue(new SecondaryMissInfo()(p.alterPartial({ case TLId => p(InnerTLId) })),
nSecondaryMisses))
// State holding progress made on processing this transaction
val iacq_data_done = connectIncomingDataBeatCounter(io.inner.acquire)
val pending_irels = connectTwoWayBeatCounter(
max = io.inner.tlNCachingClients,
up = io.inner.probe,
down = io.inner.release)._1
2015-04-22 07:23:04 +02:00
val (pending_ognt, oacq_data_idx, oacq_data_done, ognt_data_idx, ognt_data_done) =
connectTwoWayBeatCounter(
max = 1,
up = io.outer.acquire,
down = io.outer.grant,
beat = xact.addr_beat)
val (ignt_data_idx, ignt_data_done) = connectOutgoingDataBeatCounter(io.inner.grant, ignt_q.io.deq.bits.addr_beat)
val pending_ifins = connectTwoWayBeatCounter(
max = nSecondaryMisses,
up = io.inner.grant,
down = io.inner.finish,
track = (g: Grant) => g.requiresAck())._1
2015-04-14 04:00:40 +02:00
val pending_puts = Reg(init=Bits(0, width = io.inner.tlDataBeats))
val pending_iprbs = Reg(init = Bits(0, width = io.inner.tlNCachingClients))
2015-04-14 04:00:40 +02:00
val pending_reads = Reg(init=Bits(0, width = io.inner.tlDataBeats))
val pending_writes = Reg(init=Bits(0, width = io.inner.tlDataBeats))
val pending_resps = Reg(init=Bits(0, width = io.inner.tlDataBeats))
val pending_ignt_data = Reg(init=Bits(0, width = io.inner.tlDataBeats))
val pending_ignt_ack = Reg(init = Bool(false))
val pending_meta_write = Reg{ Bool() }
val all_pending_done =
!(pending_reads.orR ||
pending_writes.orR ||
pending_resps.orR ||
pending_puts.orR ||
2015-04-22 07:23:04 +02:00
pending_ognt ||
ignt_q.io.count > UInt(0) ||
//pending_meta_write || // Has own state: s_meta_write
pending_ifins)
// Provide a single ALU per tracker to merge Puts and AMOs with data being
// refilled, written back, or extant in the cache
val amoalu = Module(new AMOALU(rhsIsAligned = true))
2015-05-20 03:40:34 +02:00
amoalu.io.addr := xact.full_addr()
amoalu.io.cmd := xact.op_code()
amoalu.io.typ := xact.op_size()
amoalu.io.lhs := io.data.resp.bits.data // default, overwritten by calls to mergeData
amoalu.io.rhs := xact.data_buffer.head // default, overwritten by calls to mergeData
2015-10-14 08:42:28 +02:00
val amo_result = Reg(init = UInt(0, xact.tlDataBits))
// Utility functions for updating the data and metadata that will be kept in
// the cache or granted to the original requestor after this transaction:
def updatePendingCohWhen(flag: Bool, next: HierarchicalMetadata) {
when(flag && pending_coh != next) {
pending_meta_write := Bool(true)
pending_coh := next
}
}
2015-03-17 07:41:56 +01:00
def mergeData(dataBits: Int)(beat: UInt, incoming: UInt) {
2015-03-16 21:27:05 +01:00
val old_data = incoming // Refilled, written back, or de-cached data
2015-10-14 08:42:28 +02:00
val new_data = xact.data_buffer(beat) // Newly Put data is already in the buffer
2015-03-17 07:41:56 +01:00
amoalu.io.lhs := old_data >> xact.amo_shift_bits()
amoalu.io.rhs := new_data >> xact.amo_shift_bits()
2015-03-17 23:54:21 +01:00
val wmask = FillInterleaved(8, wmask_buffer(beat))
2015-10-14 08:42:28 +02:00
xact.data_buffer(beat) := ~wmask & old_data |
2015-03-17 12:58:54 +01:00
wmask & Mux(xact.isBuiltInType(Acquire.putAtomicType),
2015-03-17 07:41:56 +01:00
amoalu.io.out << xact.amo_shift_bits(),
new_data)
2015-08-27 18:57:36 +02:00
wmask_buffer(beat) := ~UInt(0, wmask_buffer.head.getWidth)
2015-03-17 23:54:21 +01:00
when(xact.is(Acquire.putAtomicType) && xact.addr_beat === beat) { amo_result := old_data }
}
2015-10-06 06:41:46 +02:00
def mergeDataInternal[T <: L2HellaCacheBundle with HasL2Data with HasL2BeatAddr](in: ValidIO[T]) {
when(in.valid) { mergeData(rowBits)(in.bits.addr_beat, in.bits.data) }
}
2015-10-06 06:41:46 +02:00
def mergeDataInner[T <: TLBundle with HasTileLinkData with HasTileLinkBeatId](in: DecoupledIO[T]) {
when(in.fire() && in.bits.hasData()) {
mergeData(innerDataBits)(in.bits.addr_beat, in.bits.data)
}
}
2015-10-06 06:41:46 +02:00
def mergeDataOuter[T <: TLBundle with HasTileLinkData with HasTileLinkBeatId](in: DecoupledIO[T]) {
when(in.fire() && in.bits.hasData()) {
mergeData(outerDataBits)(in.bits.addr_beat, in.bits.data)
}
}
2015-03-17 07:41:56 +01:00
def addPendingBitFromBufferedAcquire(xact: AcquireMetadata): UInt =
Mux(xact.hasMultibeatData(),
Fill(innerDataBeats, UInt(1, 1)),
UIntToOH(xact.addr_beat))
// Actual transaction processing logic begins here:
//
// First, take care of accepting new acquires or secondary misses
// For now, the only allowed secondary miss types are Gets-under-Get
// and Puts-under-Put from the same client

// A new Get may merge into the in-flight transaction only when it hits the
// same block from the same client, the tracker is past s_idle but not yet
// writing back metadata, the block is being allocated, no competing data
// movement is occurring this cycle, and the grant queue has room.
val can_merge_iacq_get = (xact.isBuiltInType(Acquire.getType) &&
                           io.iacq().isBuiltInType(Acquire.getType)) &&
                           xact.client_id === io.iacq().client_id && //TODO remove
                           xact.conflicts(io.iacq()) &&
                           state != s_idle && state != s_meta_write &&
                           !all_pending_done &&
                           xact.allocate() &&
                           !io.inner.release.fire() &&
                           !io.outer.grant.fire() &&
                           !io.data.resp.valid &&
                           ignt_q.io.enq.ready

// This logic also allows the tail beats of a PutBlock to be merged in
val can_merge_iacq_put = ((xact.isBuiltInType(Acquire.putType) &&
                            io.iacq().isBuiltInType(Acquire.putType)) ||
                           (xact.isBuiltInType(Acquire.putBlockType) &&
                             io.iacq().isBuiltInType(Acquire.putBlockType))) &&
                           xact.client_id === io.iacq().client_id && //TODO remove
                           xact.conflicts(io.iacq()) &&
                           state != s_idle && state != s_meta_write &&
                           !all_pending_done &&
                           // PutBlock tail beats must merge even when not allocating
                           (xact.allocate() || xact.isBuiltInType(Acquire.putBlockType)) &&
                           !io.inner.release.fire() &&
                           !io.outer.grant.fire() &&
                           !io.data.resp.valid &&
                           ignt_q.io.enq.ready

// Accept a fresh Acquire when idle, or merge a compatible secondary miss
io.inner.acquire.ready := state === s_idle ||
                            can_merge_iacq_put ||
                            can_merge_iacq_get

// Enqueue secondary miss information
ignt_q.io.enq.valid := iacq_data_done
ignt_q.io.enq.bits.client_xact_id := io.iacq().client_xact_id
ignt_q.io.enq.bits.addr_beat := io.iacq().addr_beat
// TODO add ignt.dst <- iacq.src

// Track whether any beats are missing from a PutBlock
pending_puts := (pending_puts & dropPendingBitWhenBeatHasData(io.inner.acquire))
2014-09-30 23:48:02 +02:00
2015-04-27 21:56:33 +02:00
// Begin a transaction by getting the current block metadata
io.meta.read.valid := state === s_meta_read
io.meta.read.bits.id := UInt(trackerId)
io.meta.read.bits.idx := xact.addr_block(idxMSB,idxLSB)
io.meta.read.bits.tag := xact.addr_block >> UInt(idxBits)

// Issue a request to the writeback unit (evicts the old tag/coherence state)
io.wb.req.valid := state === s_wb_req
io.wb.req.bits.id := UInt(trackerId)
io.wb.req.bits.idx := xact.addr_block(idxMSB,idxLSB)
io.wb.req.bits.tag := xact_old_meta.tag
io.wb.req.bits.coh := xact_old_meta.coh
io.wb.req.bits.way_en := xact_way_en

// Track which clients yet need to be probed and make Probe message
pending_iprbs := pending_iprbs & dropPendingBitAtDest(io.inner.probe)
val curr_probe_dst = PriorityEncoder(pending_iprbs)
io.inner.probe.valid := state === s_inner_probe && pending_iprbs.orR
io.inner.probe.bits := pending_coh.inner.makeProbe(curr_probe_dst, xact)

// Handle incoming releases from clients, which may reduce sharer counts
// and/or write back dirty data
io.inner.release.ready := state === s_inner_probe
val pending_coh_on_irel = HierarchicalMetadata(
  pending_coh.inner.onRelease(io.irel()), // Drop sharer
  Mux(io.irel().hasData(), // Dirty writeback
    pending_coh.outer.onHit(M_XWR),
    pending_coh.outer))
updatePendingCohWhen(io.inner.release.fire(), pending_coh_on_irel)
mergeDataInner(io.inner.release)
2015-04-14 00:57:06 +02:00
// Handle misses or coherence permission upgrades by initiating a new transaction in the outer memory:
//
// If we're allocating in this cache, we can use the current metadata
// to make an appropriate custom Acquire, otherwise we copy over the
// built-in Acquire from the inner TL to the outer TL
io.outer.acquire.valid := state === s_outer_acquire &&
                            (xact.allocate() || !pending_puts(oacq_data_idx))
io.outer.acquire.bits := Mux(xact.allocate(), xact_old_meta.coh.outer, ClientMetadata.onReset)
                           .makeAcquire(
                             client_xact_id = UInt(0),
                             addr_block = xact.addr_block,
                             op_code = xact.op_code())
io.oacq().data := xact.data_buffer(oacq_data_idx)

// Handle the response from outer memory
io.outer.grant.ready := state === s_busy
val pending_coh_on_ognt = HierarchicalMetadata(
  ManagerMetadata.onReset,
  pending_coh.outer.onGrant(io.outer.grant.bits, xact.op_code()))
updatePendingCohWhen(ognt_data_done, pending_coh_on_ognt)
mergeDataOuter(io.outer.grant)

// Going back to the original inner transaction, we can issue a Grant as
// soon as the data is released, granted, put, or read from the cache
pending_ignt_data := pending_ignt_data |
                       addPendingBitWhenBeatHasData(io.inner.release) |
                       addPendingBitWhenBeatHasData(io.outer.grant) |
                       addPendingBitInternal(io.data.resp)

// We can issue a grant for a pending write once the write is committed
pending_ignt_ack := pending_ignt_ack |
                      io.data.write.fire() |
                      io.outer.grant.fire() && !io.outer.grant.bits.hasData()

ignt_q.io.deq.ready := ignt_data_done
io.inner.grant.valid := state === s_busy &&
                          ignt_q.io.deq.valid &&
                          Mux(io.ignt().hasData(),
                            pending_ignt_data(ignt_data_idx),
                            pending_ignt_ack)

// Make the Grant message using the data stored in the secondary miss queue;
// atomics return the pre-op value captured in amo_result
io.inner.grant.bits := pending_coh.inner.makeGrant(
                         pri = xact,
                         sec = ignt_q.io.deq.bits,
                         manager_xact_id = UInt(trackerId),
                         data = Mux(xact.is(Acquire.putAtomicType),
                           amo_result,
                           xact.data_buffer(ignt_data_idx)))
io.inner.grant.bits.addr_beat := ignt_data_idx // override based on outgoing counter

// Fold outer-grant effects into the coherence state recorded on inner grant
val pending_coh_on_ignt = HierarchicalMetadata(
  pending_coh.inner.onGrant(io.ignt()),
  Mux(ognt_data_done,
    pending_coh_on_ognt.outer,
    pending_coh.outer))
updatePendingCohWhen(io.inner.grant.fire(), pending_coh_on_ignt)

// We must wait for as many Finishes as we sent Grants
io.inner.finish.ready := state === s_busy
2014-09-30 23:48:02 +02:00
2015-04-14 00:57:06 +02:00
// We read from the cache at this level if data wasn't written back or refilled.
// We may merge Gets, requiring further beats to be read.
// If ECC requires a full writemask, we'll read out data on partial writes as well.
pending_reads := (pending_reads &
                   dropPendingBit(io.data.read) &
                   dropPendingBitWhenBeatHasData(io.inner.release) &
                   dropPendingBitWhenBeatHasData(io.outer.grant)) |
                   addPendingBitWhenBeatIsGetOrAtomic(io.inner.acquire) |
                   addPendingBitWhenBeatHasPartialWritemask(io.inner.acquire)
val curr_read_beat = PriorityEncoder(pending_reads)
io.data.read.valid := state === s_busy && pending_reads.orR && !pending_ognt
io.data.read.bits.id := UInt(trackerId)
io.data.read.bits.way_en := xact_way_en
io.data.read.bits.addr_idx := xact.addr_block(idxMSB,idxLSB)
io.data.read.bits.addr_beat := curr_read_beat

// Outstanding data-array responses; merge each one into the buffer
pending_resps := (pending_resps & dropPendingBitInternal(io.data.resp)) |
                   addPendingBitInternal(io.data.read)
mergeDataInternal(io.data.resp)

// We write data to the cache at this level if it was Put here with allocate flag,
// written back dirty, or refilled from outer memory.
pending_writes := (pending_writes & dropPendingBit(io.data.write)) |
                    addPendingBitWhenBeatHasDataAndAllocs(io.inner.acquire) |
                    addPendingBitWhenBeatHasData(io.inner.release) |
                    addPendingBitWhenBeatHasData(io.outer.grant)
val curr_write_beat = PriorityEncoder(pending_writes)
// A write must not race a still-pending read or response for the same beat
io.data.write.valid := state === s_busy &&
                         pending_writes.orR &&
                         !pending_ognt &&
                         !pending_reads(curr_write_beat) &&
                         !pending_resps(curr_write_beat)
io.data.write.bits.id := UInt(trackerId)
io.data.write.bits.way_en := xact_way_en
io.data.write.bits.addr_idx := xact.addr_block(idxMSB,idxLSB)
io.data.write.bits.addr_beat := curr_write_beat
io.data.write.bits.wmask := wmask_buffer(curr_write_beat)
io.data.write.bits.data := xact.data_buffer(curr_write_beat)
2015-04-27 21:56:33 +02:00
// End a transaction by updating the block metadata
io.meta.write.valid := state === s_meta_write
io.meta.write.bits.id := UInt(trackerId)
io.meta.write.bits.idx := xact.addr_block(idxMSB,idxLSB)
io.meta.write.bits.way_en := xact_way_en
io.meta.write.bits.data.tag := xact.addr_block >> UInt(idxBits)
io.meta.write.bits.data.coh := pending_coh

// Handling of secondary misses (Gets and Puts only for now):
// merge each accepted data beat into the buffer under its byte writemask
when(io.inner.acquire.fire() && io.iacq().hasData()) { // state <= s_meta_write
  val beat = io.iacq().addr_beat
  val wmask = io.iacq().wmask()
  val full = FillInterleaved(8, wmask)
  xact.data_buffer(beat) := (~full & xact.data_buffer(beat)) | (full & io.iacq().data)
  // A primary miss (s_idle) starts a fresh mask; secondary misses accumulate
  wmask_buffer(beat) := wmask | Mux(state === s_idle, Bits(0), wmask_buffer(beat))
}

// Defined here because of Chisel default wire demands, used in s_meta_resp
val pending_coh_on_hit = HierarchicalMetadata(
  io.meta.resp.bits.meta.coh.inner,
  io.meta.resp.bits.meta.coh.outer.onHit(xact.op_code()))
val pending_coh_on_miss = HierarchicalMetadata.onReset
// State machine updates and transaction handler metadata initialization
when(state === s_idle && io.inner.acquire.valid) {
  xact := io.iacq()
  amo_result := UInt(0)
  pending_puts := Mux( // Make sure to collect all data from a PutBlock
    io.iacq().isBuiltInType(Acquire.putBlockType),
    dropPendingBitWhenBeatHasData(io.inner.acquire),
    UInt(0))
  pending_reads := Mux( // GetBlocks and custom types read all beats
    io.iacq().isBuiltInType(Acquire.getBlockType) || !io.iacq().isBuiltInType(),
    SInt(-1), // all-ones mask
    (addPendingBitWhenBeatIsGetOrAtomic(io.inner.acquire) |
      addPendingBitWhenBeatHasPartialWritemask(io.inner.acquire)).toSInt).toUInt
  pending_writes := addPendingBitWhenBeatHasDataAndAllocs(io.inner.acquire)
  pending_resps := UInt(0)
  pending_ignt_data := UInt(0)
  pending_ignt_ack := Bool(false)
  pending_meta_write := UInt(0)
  state := s_meta_read
}
when(state === s_meta_read && io.meta.read.ready) { state := s_meta_resp }
when(state === s_meta_resp && io.meta.resp.valid) {
  xact_tag_match := io.meta.resp.bits.tag_match
  xact_old_meta := io.meta.resp.bits.meta
  xact_way_en := io.meta.resp.bits.way_en
  val coh = io.meta.resp.bits.meta.coh
  val tag_match = io.meta.resp.bits.tag_match
  // In a last-level cache a whole-block Put hits regardless of permissions
  val is_hit = (if(!isLastLevelCache) tag_match && coh.outer.isHit(xact.op_code())
                else xact.isBuiltInType(Acquire.putBlockType) ||
                     tag_match && coh.outer.isValid())
  val needs_writeback = !tag_match &&
                        xact.allocate() &&
                        (coh.outer.requiresVoluntaryWriteback() ||
                           coh.inner.requiresProbesOnVoluntaryWriteback())
  val needs_inner_probes = tag_match && coh.inner.requiresProbes(xact)
  when(!tag_match || is_hit && pending_coh_on_hit != coh) { pending_meta_write := Bool(true) }
  pending_coh := Mux(is_hit, pending_coh_on_hit, Mux(tag_match, coh, pending_coh_on_miss))
  when(needs_inner_probes) {
    val full_sharers = coh.inner.full()
    // Optionally include the requester itself in the probe set
    val mask_self = Mux(
      xact.requiresSelfProbe(),
      coh.inner.full() | UIntToOH(xact.client_id),
      coh.inner.full() & ~UIntToOH(xact.client_id))
    val mask_incoherent = mask_self & ~io.incoherent.toBits
    pending_iprbs := mask_incoherent
  }
  // If the write is marked no-allocate but is already in the cache,
  // we do, in fact, need to write the data to the cache
  when (is_hit && !xact.allocate() && xact.hasData()) {
    pending_writes := addPendingBitFromBufferedAcquire(xact)
  }
  state := Mux(needs_writeback, s_wb_req,
             Mux(needs_inner_probes, s_inner_probe,
               Mux(!is_hit, s_outer_acquire, s_busy)))
}
when(state === s_wb_req && io.wb.req.ready) { state := s_wb_resp }
when(state === s_wb_resp && io.wb.resp.valid) {
  // If we're overwriting the whole block in a last level cache we can
  // just do it without fetching any data from memory
  val skip_outer_acquire = Bool(isLastLevelCache) && xact.isBuiltInType(Acquire.putBlockType)
  state := Mux(!skip_outer_acquire, s_outer_acquire, s_busy)
}
when(state === s_inner_probe && !(pending_iprbs.orR || pending_irels)) {
  // Tag matches, so if this is the last level cache we can use the data without upgrading permissions
  val skip_outer_acquire =
    (if(!isLastLevelCache) xact_old_meta.coh.outer.isHit(xact.op_code())
     else xact.isBuiltInType(Acquire.putBlockType) || xact_old_meta.coh.outer.isValid())
  state := Mux(!skip_outer_acquire, s_outer_acquire, s_busy)
}
when(state === s_outer_acquire && oacq_data_done) { state := s_busy }
when(state === s_busy && all_pending_done) { state := s_meta_write }
when(state === s_meta_write && (io.meta.write.ready || !pending_meta_write)) {
  // Clear the writemasks so the next transaction starts clean
  wmask_buffer.foreach { w => w := UInt(0) }
  state := s_idle
}
// These IOs are used for routing in the parent
val in_same_set = xact.addr_block(idxMSB,idxLSB) === io.iacq().addr_block(idxMSB,idxLSB)
io.has_release_match := xact.conflicts(io.irel()) && !io.irel().isVoluntary() && io.inner.release.ready
io.has_acquire_match := can_merge_iacq_put || can_merge_iacq_get
io.has_acquire_conflict := in_same_set && (state != s_idle) && !io.has_acquire_match
//TODO: relax from in_same_set to xact.conflicts(io.iacq())?

// Checks for illegal behavior: secondary-miss beats must come from the
// client that started the transaction
assert(!(state != s_idle && io.inner.acquire.fire() &&
         io.inner.acquire.bits.client_id != xact.client_id),
  "AcquireTracker accepted data beat from different network source than initial request.")
2014-09-30 23:48:02 +02:00
}
2015-03-01 02:02:13 +01:00
2015-10-06 06:41:46 +02:00
/** Request from a transaction tracker to the writeback unit: the victim
  * block's metadata (tag + coherence, via L2Metadata) plus its set index
  * and way, tagged with the requester's id (HasL2Id) for the response. */
class L2WritebackReq(implicit p: Parameters) extends L2Metadata()(p) with HasL2Id {
  val idx = Bits(width = idxBits)  // set index of the block to evict
  val way_en = Bits(width = nWays) // one-hot way select within the set
}
2015-10-06 06:41:46 +02:00
class L2WritebackResp(implicit p: Parameters) extends L2HellaCacheBundle()(p) with HasL2Id
2015-03-01 02:02:13 +01:00
2015-10-06 06:41:46 +02:00
/** Request/response channel pair a tracker uses to drive the writeback unit. */
class L2WritebackIO(implicit p: Parameters) extends L2HellaCacheBundle()(p) {
  val req = Decoupled(new L2WritebackReq)
  val resp = Valid(new L2WritebackResp).flip // completion comes back flipped
}
2015-10-06 06:41:46 +02:00
/** IO of the writeback unit: the tracker-facing writeback channels (flipped,
  * since the unit is the server side) plus a port into the data array, in
  * addition to the hierarchical TileLink ports inherited from the tracker IO. */
class L2WritebackUnitIO(implicit p: Parameters) extends HierarchicalXactTrackerIO()(p) {
  val wb = new L2WritebackIO().flip
  val data = new L2DataRWIO
}
2015-10-06 06:41:46 +02:00
/** Evicts a victim block from the L2: probes inner caching clients for
  * their copies, gathers the freshest data (from Releases or the data
  * array), and, if the block is dirty, issues a voluntary writeback to
  * outer memory before acknowledging the requesting tracker.
  *
  * @param trackerId id used to tag data-array and outer-memory requests
  */
class L2WritebackUnit(trackerId: Int)(implicit p: Parameters) extends L2XactTracker()(p) {
  val io = new L2WritebackUnitIO
  pinAllReadyValidLow(io)

  val s_idle :: s_inner_probe :: s_busy :: s_outer_grant :: s_wb_resp :: Nil = Enum(UInt(), 5)
  val state = Reg(init=s_idle)

  val xact = Reg(new L2WritebackReq)
  val data_buffer = Reg(init=Vec.fill(innerDataBeats)(UInt(0, width = innerDataBits)))
  val xact_addr_block = Cat(xact.tag, xact.idx) // reconstruct full block address

  // Beat/response bookkeeping for the probe/release and release/grant pairs
  val pending_irels =
    connectTwoWayBeatCounter(max = io.inner.tlNCachingClients, up = io.inner.probe, down = io.inner.release)._1
  val (pending_ognt, orel_data_idx, orel_data_done, ognt_data_idx, ognt_data_done) =
    connectTwoWayBeatCounter(max = 1, up = io.outer.release, down = io.outer.grant)
  val pending_iprbs = Reg(init = Bits(0, width = io.inner.tlNCachingClients))
  val pending_reads = Reg(init=Bits(0, width = io.inner.tlDataBeats))
  val pending_resps = Reg(init=Bits(0, width = io.inner.tlDataBeats))
  val pending_orel_data = Reg(init=Bits(0, width = io.inner.tlDataBeats))

  // Start the writeback sub-transaction
  io.wb.req.ready := state === s_idle

  // Track which clients yet need to be probed and make Probe message
  pending_iprbs := pending_iprbs & dropPendingBitAtDest(io.inner.probe)
  val curr_probe_dst = PriorityEncoder(pending_iprbs)
  io.inner.probe.valid := state === s_inner_probe && pending_iprbs.orR
  io.inner.probe.bits := xact.coh.inner.makeProbeForVoluntaryWriteback(curr_probe_dst, xact_addr_block)

  // Handle incoming releases from clients, which may reduce sharer counts
  // and/or write back dirty data
  val inner_coh_on_irel = xact.coh.inner.onRelease(io.irel())
  val outer_coh_on_irel = xact.coh.outer.onHit(M_XWR)
  io.inner.release.ready := state === s_inner_probe || state === s_busy
  when(io.inner.release.fire()) {
    xact.coh.inner := inner_coh_on_irel
    data_buffer(io.inner.release.bits.addr_beat) := io.inner.release.bits.data
  }
  when(io.inner.release.valid && io.irel().conflicts(xact_addr_block) && io.irel().hasData()) {
    xact.coh.outer := outer_coh_on_irel // must writeback dirty data supplied by any matching release, even voluntary ones
  }

  // If a release didn't write back data, have to read it from data array
  pending_reads := (pending_reads &
                     dropPendingBit(io.data.read) &
                     dropPendingBitWhenBeatHasData(io.inner.release))
  val curr_read_beat = PriorityEncoder(pending_reads)
  io.data.read.valid := state === s_busy && pending_reads.orR
  io.data.read.bits.id := UInt(trackerId)
  io.data.read.bits.way_en := xact.way_en
  io.data.read.bits.addr_idx := xact.idx
  io.data.read.bits.addr_beat := curr_read_beat
  io.data.write.valid := Bool(false) // the writeback unit never writes the array

  pending_resps := (pending_resps & dropPendingBitInternal(io.data.resp)) |
                     addPendingBitInternal(io.data.read)
  when(io.data.resp.valid) {
    data_buffer(io.data.resp.bits.addr_beat) := io.data.resp.bits.data
  }

  // Once the data is buffered we can write it back to outer memory
  pending_orel_data := pending_orel_data |
                         addPendingBitWhenBeatHasData(io.inner.release) |
                         addPendingBitInternal(io.data.resp)
  io.outer.release.valid := state === s_busy &&
                              (!io.orel().hasData() || pending_orel_data(orel_data_idx))
  io.outer.release.bits := xact.coh.outer.makeVoluntaryWriteback(
                             client_xact_id = UInt(trackerId),
                             addr_block = xact_addr_block,
                             addr_beat = orel_data_idx,
                             data = data_buffer(orel_data_idx))

  // Wait for an acknowledgement
  io.outer.grant.ready := state === s_outer_grant

  // Respond to the initiating transaction handler signalling completion of the writeback
  io.wb.resp.valid := state === s_wb_resp
  io.wb.resp.bits.id := xact.id

  // State machine updates and transaction handler metadata initialization
  when(state === s_idle && io.wb.req.valid) {
    xact := io.wb.req.bits
    val coh = io.wb.req.bits.coh
    val needs_inner_probes = coh.inner.requiresProbesOnVoluntaryWriteback()
    when(needs_inner_probes) { pending_iprbs := coh.inner.full() & ~io.incoherent.toBits }
    pending_reads := ~UInt(0, width = innerDataBeats) // assume every beat must be read
    pending_resps := UInt(0)
    pending_orel_data := UInt(0)
    state := Mux(needs_inner_probes, s_inner_probe, s_busy)
  }
  when(state === s_inner_probe && !(pending_iprbs.orR || pending_irels)) {
    // Skip the outer writeback entirely if the block turned out clean
    state := Mux(xact.coh.outer.requiresVoluntaryWriteback(), s_busy, s_wb_resp)
  }
  when(state === s_busy && orel_data_done) {
    state := Mux(io.orel().requiresAck(), s_outer_grant, s_wb_resp)
  }
  when(state === s_outer_grant && ognt_data_done) { state := s_wb_resp }
  when(state === s_wb_resp ) { state := s_idle }

  // These IOs are used for routing in the parent
  io.has_release_match := io.irel().conflicts(xact_addr_block) && !io.irel().isVoluntary() && io.inner.release.ready
  io.has_acquire_match := Bool(false)
  io.has_acquire_conflict := Bool(false)
}