Support for uncached sub-block reads and writes, major TileLink and CoherencePolicy refactor.

parent 7b4e9dd137
commit 3aa030f960
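For orientation before the diff: TileLink Acquires now carry a block address (addr_block) plus a beat address (addr_beat), so uncached accesses can target a single beat (sub-block) of a cache block as well as the whole block. Below is a minimal sketch of how a client might use the new factories defined later in this diff; the transaction id and address values are hypothetical, and the calls assume the surrounding module mixes in TileLinkParameters:

    // Sketch only: exercises the Acquire factories introduced by this commit.
    // xactId, blockAddr, beat, and storeData are hypothetical example values.
    val xactId    = UInt(0)            // client_xact_id tags the outstanding transaction
    val blockAddr = UInt("h100")       // addr_block selects the cache block
    val beat      = UInt(2)            // addr_beat selects one beat within the block
    val storeData = UInt("hdeadbeef")

    // Sub-block access: read or write a single beat of the block.
    val beatRead  = UncachedRead(client_xact_id = xactId, addr_block = blockAddr, addr_beat = beat)
    val beatWrite = UncachedWrite(client_xact_id = xactId, addr_block = blockAddr,
                                  addr_beat = beat, data = storeData)

    // Whole-block access: addr_beat is driven from a beat counter as the data streams.
    val blockRead  = UncachedReadBlock(client_xact_id = xactId, addr_block = blockAddr)
    val blockWrite = UncachedWriteBlock(client_xact_id = xactId, addr_block = blockAddr,
                                        addr_beat = beat, data = storeData)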
@@ -2,32 +2,26 @@

package uncore
import Chisel._
import scala.reflect.ClassTag

case object CacheName extends Field[String]
case object NSets extends Field[Int]
case object NWays extends Field[Int]
case object BlockOffBits extends Field[Int]
case object RowBits extends Field[Int]
case object WordBits extends Field[Int]
case object Replacer extends Field[() => ReplacementPolicy]
case object AmoAluOperandBits extends Field[Int]

abstract trait CacheParameters extends UsesParameters {
  val paddrBits = params(PAddrBits)
  val vaddrBits = params(VAddrBits)
  val pgIdxBits = params(PgIdxBits)
  val nSets = params(NSets)
  val blockOffBits = params(BlockOffBits)
  val idxBits = log2Up(nSets)
  val untagBits = blockOffBits + idxBits
  val tagBits = paddrBits - untagBits
  val tagBits = params(PAddrBits) - untagBits
  val nWays = params(NWays)
  val wayBits = log2Up(nWays)
  val isDM = nWays == 1
  val wordBits = params(WordBits)
  val wordBytes = wordBits/8
  val wordOffBits = log2Up(wordBytes)
  val rowBits = params(RowBits)
  val rowWords = rowBits/wordBits
  val rowBytes = rowBits/8
  val rowOffBits = log2Up(rowBytes)
}
@@ -35,6 +29,79 @@ abstract trait CacheParameters extends UsesParameters {
abstract class CacheBundle extends Bundle with CacheParameters
abstract class CacheModule extends Module with CacheParameters

class StoreGen(typ: Bits, addr: Bits, dat: Bits) {
  val byte = typ === MT_B || typ === MT_BU
  val half = typ === MT_H || typ === MT_HU
  val word = typ === MT_W || typ === MT_WU
  def mask =
    Mux(byte, Bits( 1) << addr(2,0),
    Mux(half, Bits( 3) << Cat(addr(2,1), Bits(0,1)),
    Mux(word, Bits( 15) << Cat(addr(2), Bits(0,2)),
    Bits(255))))
  def data =
    Mux(byte, Fill(8, dat( 7,0)),
    Mux(half, Fill(4, dat(15,0)),
    wordData))
  lazy val wordData =
    Mux(word, Fill(2, dat(31,0)),
    dat)
}

class LoadGen(typ: Bits, addr: Bits, dat: Bits, zero: Bool) {
  val t = new StoreGen(typ, addr, dat)
  val sign = typ === MT_B || typ === MT_H || typ === MT_W || typ === MT_D

  val wordShift = Mux(addr(2), dat(63,32), dat(31,0))
  val word = Cat(Mux(t.word, Fill(32, sign && wordShift(31)), dat(63,32)), wordShift)
  val halfShift = Mux(addr(1), word(31,16), word(15,0))
  val half = Cat(Mux(t.half, Fill(48, sign && halfShift(15)), word(63,16)), halfShift)
  val byteShift = Mux(zero, UInt(0), Mux(addr(0), half(15,8), half(7,0)))
  val byte = Cat(Mux(zero || t.byte, Fill(56, sign && byteShift(7)), half(63,8)), byteShift)
}

class AMOALU extends CacheModule {
  val operandBits = params(AmoAluOperandBits)
  require(operandBits == 64)
  val io = new Bundle {
    val addr = Bits(INPUT, blockOffBits)
    val cmd = Bits(INPUT, M_SZ)
    val typ = Bits(INPUT, MT_SZ)
    val lhs = Bits(INPUT, operandBits)
    val rhs = Bits(INPUT, operandBits)
    val out = Bits(OUTPUT, operandBits)
  }

  val storegen = new StoreGen(io.typ, io.addr, io.rhs)
  val rhs = storegen.wordData

  val sgned = io.cmd === M_XA_MIN || io.cmd === M_XA_MAX
  val max = io.cmd === M_XA_MAX || io.cmd === M_XA_MAXU
  val min = io.cmd === M_XA_MIN || io.cmd === M_XA_MINU
  val word = io.typ === MT_W || io.typ === MT_WU || // Logic minimization:
             io.typ === MT_B || io.typ === MT_BU

  val mask = SInt(-1,64) ^ (io.addr(2) << UInt(31))
  val adder_out = (io.lhs & mask).toUInt + (rhs & mask)

  val cmp_lhs = Mux(word && !io.addr(2), io.lhs(31), io.lhs(63))
  val cmp_rhs = Mux(word && !io.addr(2), rhs(31), rhs(63))
  val lt_lo = io.lhs(31,0) < rhs(31,0)
  val lt_hi = io.lhs(63,32) < rhs(63,32)
  val eq_hi = io.lhs(63,32) === rhs(63,32)
  val lt = Mux(word, Mux(io.addr(2), lt_hi, lt_lo), lt_hi || eq_hi && lt_lo)
  val less = Mux(cmp_lhs === cmp_rhs, lt, Mux(sgned, cmp_lhs, cmp_rhs))

  val out = Mux(io.cmd === M_XA_ADD, adder_out,
            Mux(io.cmd === M_XA_AND, io.lhs & rhs,
            Mux(io.cmd === M_XA_OR, io.lhs | rhs,
            Mux(io.cmd === M_XA_XOR, io.lhs ^ rhs,
            Mux(Mux(less, min, max), io.lhs,
            storegen.data)))))

  val wmask = FillInterleaved(8, storegen.mask)
  io.out := wmask & out | ~wmask & io.lhs
}

abstract class ReplacementPolicy {
  def way: UInt
  def miss: Unit
@@ -96,16 +163,35 @@ class MetadataArray[T <: Metadata](makeRstVal: () => T) extends CacheModule {
  io.write.ready := !rst
}

abstract trait L2HellaCacheParameters extends CacheParameters
  with CoherenceAgentParameters {
abstract trait L2HellaCacheParameters extends CacheParameters with CoherenceAgentParameters {
  val idxMSB = idxBits-1
  val idxLSB = 0
  val refillCyclesPerBeat = params(TLDataBits)/rowBits
  val refillCycles = refillCyclesPerBeat*params(TLDataBeats)
  require(refillCyclesPerBeat == 1)
}

abstract class L2HellaCacheBundle extends Bundle with L2HellaCacheParameters
abstract class L2HellaCacheModule extends Module with L2HellaCacheParameters
abstract class L2HellaCacheBundle extends TLBundle with L2HellaCacheParameters
abstract class L2HellaCacheModule extends TLModule with L2HellaCacheParameters {
  def connectDataBeatCounter[S <: HasTileLinkData](inc: Bool, data: S) = {
    val (cnt, cnt_done) =
      Counter(inc && data.hasMultibeatData(), tlDataBeats)
    val done = (inc && !data.hasMultibeatData()) || cnt_done
    (cnt, done)
  }
  def connectOutgoingDataBeatCounter[T <: HasTileLinkData : ClassTag](in: DecoupledIO[LogicalNetworkIO[T]]) = {
    connectDataBeatCounter(in.fire(), in.bits.payload)
  }
  def connectIncomingDataBeatCounter[T <: HasTileLinkData](in: DecoupledIO[LogicalNetworkIO[T]]) = {
    connectDataBeatCounter(in.fire(), in.bits.payload)._2
  }
  def connectOutgoingDataBeatCounter[T <: HasTileLinkData](in: DecoupledIO[T]) = {
    connectDataBeatCounter(in.fire(), in.bits)
  }
  def connectIncomingDataBeatCounter[T <: HasTileLinkData](in: ValidIO[T]) = {
    connectDataBeatCounter(in.valid, in.bits)._2
  }
}

trait HasL2Id extends Bundle with CoherenceAgentParameters {
  val id = UInt(width = log2Up(nTransactors + 1))
@@ -117,6 +203,11 @@ trait HasL2InternalRequestState extends L2HellaCacheBundle {
  val way_en = Bits(width = nWays)
}

trait HasL2Data extends HasTileLinkData {
  def hasData(dummy: Int = 0) = Bool(true)
  def hasMultibeatData(dummy: Int = 0) = Bool(tlDataBeats > 1)
}

object L2Metadata {
  def apply(tag: Bits, coh: ManagerMetadata) = {
    val meta = new L2Metadata
@@ -186,19 +277,19 @@ class L2MetadataArray extends L2HellaCacheModule {
  io.resp.bits.way_en := Mux(s2_tag_match, s2_tag_match_way, s2_replaced_way_en)
}

class L2DataReadReq extends L2HellaCacheBundle with HasL2Id {
class L2DataReadReq extends L2HellaCacheBundle
  with HasCacheBlockAddress
  with HasTileLinkBeatId
  with HasL2Id {
  val way_en = Bits(width = nWays)
  val addr = Bits(width = tlAddrBits)
}

class L2DataWriteReq extends L2DataReadReq {
class L2DataWriteReq extends L2DataReadReq
  with HasL2Data {
  val wmask = Bits(width = tlWriteMaskBits)
  val data = Bits(width = tlDataBits)
}

class L2DataResp extends Bundle with HasL2Id with TileLinkParameters {
  val data = Bits(width = tlDataBits)
}
class L2DataResp extends L2HellaCacheBundle with HasL2Id with HasL2Data

trait HasL2DataReadIO extends L2HellaCacheBundle {
  val read = Decoupled(new L2DataReadReq)
@@ -217,8 +308,8 @@ class L2DataArray extends L2HellaCacheModule {
  val wmask = FillInterleaved(8, io.write.bits.wmask)
  val reg_raddr = Reg(UInt())
  val array = Mem(Bits(width=rowBits), nWays*nSets*refillCycles, seqRead = true)
  val waddr = Cat(OHToUInt(io.write.bits.way_en), io.write.bits.addr)
  val raddr = Cat(OHToUInt(io.read.bits.way_en), io.read.bits.addr)
  val waddr = Cat(OHToUInt(io.write.bits.way_en), io.write.bits.addr_block, io.write.bits.addr_beat)
  val raddr = Cat(OHToUInt(io.read.bits.way_en), io.read.bits.addr_block, io.read.bits.addr_beat)

  when (io.write.bits.way_en.orR && io.write.valid) {
    array.write(waddr, io.write.bits.data, wmask)
@@ -228,6 +319,7 @@ class L2DataArray extends L2HellaCacheModule {

  io.resp.valid := ShiftRegister(io.read.fire(), 1)
  io.resp.bits.id := ShiftRegister(io.read.bits.id, 1)
  io.resp.bits.addr_beat := ShiftRegister(io.read.bits.addr_beat, 1)
  io.resp.bits.data := array(reg_raddr)
  io.read.ready := !io.write.valid
  io.write.ready := Bool(true)
@@ -261,10 +353,11 @@ class TSHRFile(bankId: Int, innerId: String, outerId: String) extends L2HellaCac
}

// Wiring helper funcs
def doOutputArbitration[T <: Data](out: DecoupledIO[T],
                                   ins: Seq[DecoupledIO[T]],
                                   count: Int = 1,
                                   lock: T => Bool = (a: T) => Bool(true)) {
def doOutputArbitration[T <: Data](
    out: DecoupledIO[T],
    ins: Seq[DecoupledIO[T]],
    count: Int = 1,
    lock: T => Bool = (a: T) => Bool(true)) {
  val arb = Module(new LockingRRArbiter(out.bits.clone, ins.size, count, lock))
  out <> arb.io.out
  arb.io.in zip ins map { case (a, in) => a <> in }
@@ -331,8 +424,11 @@ class TSHRFile(bankId: Int, innerId: String, outerId: String) extends L2HellaCac
doOutputArbitration(io.inner.probe, trackerList.map(_.io.inner.probe) :+ wb.io.inner.probe)

// Wire grant reply to initiating client
def hasData(m: LogicalNetworkIO[Grant]) = co.messageHasData(m.payload)
doOutputArbitration(io.inner.grant, trackerList.map(_.io.inner.grant), tlDataBeats, hasData _)
doOutputArbitration(
  io.inner.grant,
  trackerList.map(_.io.inner.grant),
  tlDataBeats,
  (m: LogicalNetworkIO[Grant]) => m.payload.hasMultibeatData())

// Create an arbiter for the one memory port
val outerList = trackerList.map(_.io.outer) :+ wb.io.outer
@@ -344,15 +440,15 @@ class TSHRFile(bankId: Int, innerId: String, outerId: String) extends L2HellaCac
// Wire local memories
doOutputArbitration(io.meta.read, trackerList.map(_.io.meta.read))
doOutputArbitration(io.meta.write, trackerList.map(_.io.meta.write))
doOutputArbitration(io.data.read, trackerList.map(_.io.data.read) :+ wb.io.data.read, tlDataBeats)
doOutputArbitration(io.data.write, trackerList.map(_.io.data.write), tlDataBeats)
doOutputArbitration(io.data.read, trackerList.map(_.io.data.read) :+ wb.io.data.read)
doOutputArbitration(io.data.write, trackerList.map(_.io.data.write))
doInputRouting(io.meta.resp, trackerList.map(_.io.meta.resp))
doInputRouting(io.data.resp, trackerList.map(_.io.data.resp) :+ wb.io.data.resp)
}

class L2WritebackReq extends L2HellaCacheBundle
  with HasL2Id {
  val addr = UInt(width = tlAddrBits)
  val addr_block = UInt(width = tlBlockAddrBits)
  val coh = new ManagerMetadata
  val way_en = Bits(width = nWays)
}
@@ -373,16 +469,16 @@ class L2WritebackUnit(trackerId: Int, bankId: Int, innerId: String, outerId: Str
    val has_release_match = Bool(OUTPUT)
    val data = new L2DataRWIO
  }
  val c_acq = io.inner.acquire.bits
  val c_rel = io.inner.release.bits
  val c_gnt = io.inner.grant.bits
  val cacq = io.inner.acquire.bits
  val crel = io.inner.release.bits
  val cgnt = io.inner.grant.bits
  val c_ack = io.inner.finish.bits
  val m_gnt = io.outer.grant.bits
  val mgnt = io.outer.grant.bits

  val s_idle :: s_probe :: s_data_read :: s_data_resp :: s_outer_write :: Nil = Enum(UInt(), 5)
  val state = Reg(init=s_idle)

  val xact_addr = Reg(io.inner.acquire.bits.payload.addr.clone)
  val xact_addr_block = Reg(io.inner.acquire.bits.payload.addr_block.clone)
  val xact_coh = Reg{ new ManagerMetadata }
  val xact_way_en = Reg{ Bits(width = nWays) }
  val xact_data = Vec.fill(tlDataBeats){ Reg(io.inner.acquire.bits.payload.data.clone) }
@@ -393,30 +489,30 @@ class L2WritebackUnit(trackerId: Int, bankId: Int, innerId: String, outerId: Str
  val pending_probes = Reg(init = co.dir.flush)
  val curr_p_id = co.dir.next(pending_probes)

  val (crel_data_cnt, crel_data_done) =
    Counter(io.inner.release.fire() && co.messageHasData(io.inner.release.bits.payload), tlDataBeats)
  val (outer_data_write_cnt, outer_data_write_done) =
    Counter(io.outer.acquire.fire() && co.messageHasData(io.outer.acquire.bits.payload), tlDataBeats)
  val (local_data_read_cnt, local_data_read_done) = Counter(io.data.read.fire(), tlDataBeats)
  val (local_data_resp_cnt, local_data_resp_done) = Counter(io.data.resp.valid, tlDataBeats)
  val crel_data_done = connectIncomingDataBeatCounter(io.inner.release)
  val (macq_data_cnt, macq_data_done) = connectOutgoingDataBeatCounter(io.outer.acquire)
  val (read_data_cnt, read_data_done) = Counter(io.data.read.fire(), tlDataBeats)
  val resp_data_done = connectIncomingDataBeatCounter(io.data.resp)

  io.has_release_match := !co.isVoluntary(c_rel.payload) &&
                          co.isCoherenceConflict(xact_addr, c_rel.payload.addr) &&
  io.has_release_match := !crel.payload.isVoluntary() &&
                          co.isCoherenceConflict(xact_addr_block, crel.payload.addr_block) &&
                          (state === s_probe)

  val next_coh_on_rel = co.managerMetadataOnRelease(c_rel.payload, xact_coh, c_rel.header.src)
  val next_coh_on_rel = co.managerMetadataOnRelease(crel.payload, xact_coh, crel.header.src)

  io.outer.acquire.valid := Bool(false)
  io.outer.acquire.bits.payload := Bundle(UncachedWrite(xact_addr,
                                                        UInt(trackerId),
                                                        xact_data(outer_data_write_cnt)),
  io.outer.acquire.bits.payload := Bundle(UncachedWriteBlock(
                                            client_xact_id = UInt(trackerId),
                                            addr_block = xact_addr_block,
                                            addr_beat = macq_data_cnt,
                                            data = xact_data(macq_data_cnt)),
                                          { case TLId => outerId })
  io.outer.grant.ready := Bool(false) // Never gets mgnts

  io.inner.probe.valid := Bool(false)
  io.inner.probe.bits.header.src := UInt(bankId)
  io.inner.probe.bits.header.dst := curr_p_id
  io.inner.probe.bits.payload := Probe(co.getProbeTypeOnVoluntaryWriteback, xact_addr)
  io.inner.probe.bits.payload := Probe.onVoluntaryWriteback(xact_coh, xact_addr_block)

  io.inner.grant.valid := Bool(false)
  io.inner.acquire.ready := Bool(false)
@@ -426,7 +522,8 @@ class L2WritebackUnit(trackerId: Int, bankId: Int, innerId: String, outerId: Str
  io.data.read.valid := Bool(false)
  io.data.read.bits.id := UInt(trackerId)
  io.data.read.bits.way_en := xact_way_en
  io.data.read.bits.addr := Cat(xact_addr, local_data_read_cnt)
  io.data.read.bits.addr_block := xact_addr_block
  io.data.read.bits.addr_beat := read_data_cnt
  io.data.write.valid := Bool(false)

  io.wb.req.ready := Bool(false)
@@ -437,7 +534,7 @@ class L2WritebackUnit(trackerId: Int, bankId: Int, innerId: String, outerId: Str
    is(s_idle) {
      io.wb.req.ready := Bool(true)
      when(io.wb.req.valid) {
        xact_addr := io.wb.req.bits.addr
        xact_addr_block := io.wb.req.bits.addr_block
        xact_coh := io.wb.req.bits.coh
        xact_way_en := io.wb.req.bits.way_en
        xact_id := io.wb.req.bits.id
@@ -463,12 +560,12 @@ class L2WritebackUnit(trackerId: Int, bankId: Int, innerId: String, outerId: Str
      when(io.inner.release.valid) {
        xact_coh := next_coh_on_rel
        // Handle released dirty data
        when(co.messageHasData(c_rel.payload)) {
        when(crel.payload.hasData()) {
          crel_had_data := Bool(true)
          xact_data(crel_data_cnt) := c_rel.payload.data
          xact_data(crel.payload.addr_beat) := crel.payload.data
        }
        // We don't decrement release_count until we've received all the data beats.
        when(!co.messageHasData(c_rel.payload) || crel_data_done) {
        when(!crel.payload.hasData() || crel_data_done) {
          release_count := release_count - UInt(1)
        }
      }
@@ -478,16 +575,16 @@ class L2WritebackUnit(trackerId: Int, bankId: Int, innerId: String, outerId: Str
    }
    is(s_data_read) {
      io.data.read.valid := Bool(true)
      when(io.data.resp.valid) { xact_data(local_data_resp_cnt) := io.data.resp.bits.data }
      when(local_data_read_done) { state := s_data_resp }
      when(io.data.resp.valid) { xact_data(io.data.resp.bits.addr_beat) := io.data.resp.bits.data }
      when(read_data_done) { state := s_data_resp }
    }
    is(s_data_resp) {
      when(io.data.resp.valid) { xact_data(local_data_resp_cnt) := io.data.resp.bits.data }
      when(local_data_resp_done) { state := s_outer_write }
      when(io.data.resp.valid) { xact_data(io.data.resp.bits.addr_beat) := io.data.resp.bits.data }
      when(resp_data_done) { state := s_outer_write }
    }
    is(s_outer_write) {
      io.outer.acquire.valid := Bool(true)
      when(outer_data_write_done) {
      when(macq_data_done) {
        io.wb.resp.valid := Bool(true)
        state := s_idle
      }
@@ -508,41 +605,40 @@ abstract class L2XactTracker(innerId: String, outerId: String) extends L2HellaCa
    val wb = new L2WritebackIO
  }

  val c_acq = io.inner.acquire.bits
  val c_rel = io.inner.release.bits
  val c_gnt = io.inner.grant.bits
  val c_ack = io.inner.finish.bits
  val m_gnt = io.outer.grant.bits

  def mergeData(acq: Acquire, old_data: UInt, new_data: UInt): UInt = {
    //TODO apply acq's write mask
    Mux(co.messageHasData(acq), old_data, new_data)
  }
  val cacq = io.inner.acquire.bits
  val crel = io.inner.release.bits
  val cgnt = io.inner.grant.bits
  val cack = io.inner.finish.bits
  val mgnt = io.outer.grant.bits
}

class L2VoluntaryReleaseTracker(trackerId: Int, bankId: Int, innerId: String, outerId: String) extends L2XactTracker(innerId, outerId) {
  val s_idle :: s_meta_read :: s_meta_resp :: s_data_write :: s_meta_write :: s_grant :: s_busy :: Nil = Enum(UInt(), 7)
  val s_idle :: s_meta_read :: s_meta_resp :: s_data_write :: s_meta_write :: s_grant :: s_ack :: Nil = Enum(UInt(), 7)
  val state = Reg(init=s_idle)

  val xact_src = Reg(io.inner.release.bits.header.src.clone)
  val xact_r_type = Reg(io.inner.release.bits.payload.r_type)
  val xact_addr = Reg(io.inner.release.bits.payload.addr.clone)
  val xact_addr_block = Reg(io.inner.release.bits.payload.addr_block.clone)
  val xact_addr_beat = Reg(io.inner.release.bits.payload.addr_beat.clone)
  val xact_client_xact_id = Reg(io.inner.release.bits.payload.client_xact_id.clone)
  val xact_data = Vec.fill(tlDataBeats){ Reg(io.inner.release.bits.payload.data.clone) }
  val xact_tag_match = Reg{ Bool() }
  val xact_meta = Reg{ new L2Metadata }
  val xact_way_en = Reg{ Bits(width = nWays) }
  val xact = Release(xact_r_type, xact_addr, xact_client_xact_id)
  val xact = Release(
    voluntary = Bool(true),
    r_type = xact_r_type,
    client_xact_id = xact_client_xact_id,
    addr_block = xact_addr_block)

  val collect_inner_data = Reg(init=Bool(false))
  val (inner_data_cnt, inner_data_done) =
    Counter(io.inner.release.fire() && co.messageHasData(io.inner.release.bits.payload), tlDataBeats)
  val (local_data_cnt, local_data_done) =
    Counter(io.data.write.fire(), tlDataBeats)
  val collect_crel_data = Reg(init=Bool(false))
  val crel_data_valid = Reg(init=Bits(0, width = tlDataBeats))
  val crel_data_done = connectIncomingDataBeatCounter(io.inner.release)
  val (write_data_cnt, write_data_done) = connectOutgoingDataBeatCounter(io.data.write)

  io.has_acquire_conflict := Bool(false)
  io.has_acquire_match := Bool(false)
  io.has_release_match := co.isVoluntary(c_rel.payload)
  io.has_release_match := crel.payload.isVoluntary()

  io.outer.grant.ready := Bool(false)
  io.outer.acquire.valid := Bool(false)
@@ -554,50 +650,51 @@ class L2VoluntaryReleaseTracker(trackerId: Int, bankId: Int, innerId: String, ou

  io.inner.grant.bits.header.src := UInt(bankId)
  io.inner.grant.bits.header.dst := xact_src
  io.inner.grant.bits.payload := Grant(Bool(false),
                                       co.getGrantTypeOnVoluntaryWriteback(xact_meta.coh),
                                       xact_client_xact_id,
                                       UInt(trackerId))
  io.inner.grant.bits.payload := xact.makeGrant(UInt(trackerId), xact_meta.coh)

  io.data.read.valid := Bool(false)
  io.data.write.valid := Bool(false)
  io.data.write.bits.id := UInt(trackerId)
  io.data.write.bits.way_en := xact_way_en
  io.data.write.bits.addr := Cat(xact_addr, local_data_cnt)
  io.data.write.bits.addr_block := xact_addr_block
  io.data.write.bits.addr_beat := write_data_cnt
  io.data.write.bits.wmask := SInt(-1)
  io.data.write.bits.data := xact_data(local_data_cnt)
  io.data.write.bits.data := xact_data(write_data_cnt)
  io.meta.read.valid := Bool(false)
  io.meta.read.bits.id := UInt(trackerId)
  io.meta.read.bits.idx := xact_addr(idxMSB,idxLSB)
  io.meta.read.bits.tag := xact_addr >> UInt(idxBits)
  io.meta.read.bits.idx := xact_addr_block(idxMSB,idxLSB)
  io.meta.read.bits.tag := xact_addr_block >> UInt(idxBits)
  io.meta.write.valid := Bool(false)
  io.meta.write.bits.id := UInt(trackerId)
  io.meta.write.bits.idx := xact_addr(idxMSB,idxLSB)
  io.meta.write.bits.idx := xact_addr_block(idxMSB,idxLSB)
  io.meta.write.bits.way_en := xact_way_en
  io.meta.write.bits.data.tag := xact_addr >> UInt(idxBits)
  io.meta.write.bits.data.tag := xact_addr_block >> UInt(idxBits)
  io.meta.write.bits.data.coh := co.managerMetadataOnRelease(xact,
                                                             xact_meta.coh,
                                                             xact_src)
  io.wb.req.valid := Bool(false)

  when(collect_inner_data) {
  when(collect_crel_data) {
    io.inner.release.ready := Bool(true)
    when(io.inner.release.valid) {
      xact_data(inner_data_cnt) := c_rel.payload.data
      xact_data(crel.payload.addr_beat) := crel.payload.data
      crel_data_valid(crel.payload.addr_beat) := Bool(true)
    }
    when(inner_data_done) { collect_inner_data := Bool(false) }
    when(crel_data_done) { collect_crel_data := Bool(false) }
  }

  switch (state) {
    is(s_idle) {
      io.inner.release.ready := Bool(true)
      when( io.inner.release.valid ) {
        xact_src := c_rel.header.src
        xact_r_type := c_rel.payload.r_type
        xact_addr := c_rel.payload.addr
        xact_client_xact_id := c_rel.payload.client_xact_id
        xact_data(UInt(0)) := c_rel.payload.data
        collect_inner_data := co.messageHasData(c_rel.payload)
        xact_src := crel.header.src
        xact_r_type := crel.payload.r_type
        xact_addr_block := crel.payload.addr_block
        xact_addr_beat := crel.payload.addr_beat
        xact_client_xact_id := crel.payload.client_xact_id
        xact_data(UInt(0)) := crel.payload.data
        collect_crel_data := crel.payload.hasMultibeatData()
        crel_data_valid := Bits(1)
        state := s_meta_read
      }
    }
@@ -611,27 +708,30 @@ class L2VoluntaryReleaseTracker(trackerId: Int, bankId: Int, innerId: String, ou
        xact_meta := io.meta.resp.bits.meta
        xact_way_en := io.meta.resp.bits.way_en
        state := Mux(io.meta.resp.bits.tag_match,
                   Mux(co.messageHasData(xact), s_data_write, s_meta_write),
                   s_grant)
                   Mux(xact.hasData(), s_data_write, s_meta_write),
                   Mux(xact.requiresAck(), s_grant, s_idle))
      }
    }
    is(s_data_write) {
      io.data.write.valid := (if(tlDataBeats == 1) Bool(true)
                              else !collect_inner_data || (local_data_cnt < inner_data_cnt))
      when(local_data_done) { state := s_meta_write }
      io.data.write.valid := !collect_crel_data || crel_data_valid(write_data_cnt)
      when(write_data_done) { state := s_meta_write }
    }
    is(s_meta_write) {
      io.meta.write.valid := Bool(true)
      when(io.meta.write.ready) { state := s_grant }
      when(io.meta.write.ready) {
        state := Mux(xact.requiresAck(), s_grant, s_idle) // Need a Grant.voluntaryAck?
      }
    }
    is(s_grant) {
      io.inner.grant.valid := Bool(true)
      when(io.inner.grant.ready) {
        state := Mux(co.requiresAckForGrant(c_gnt.payload),
                   s_busy, s_idle)
        state := Mux(cgnt.payload.requiresAck(), s_ack, s_idle)
      }
    }
    is(s_busy) {
    is(s_ack) {
      // TODO: This state is unnecessary if no client will ever issue the
      // pending Acquire that caused this writeback until it receives the
      // Grant.voluntaryAck for this writeback
      io.inner.finish.ready := Bool(true)
      when(io.inner.finish.valid) { state := s_idle }
    }
@@ -640,72 +740,109 @@ class L2VoluntaryReleaseTracker(trackerId: Int, bankId: Int, innerId: String, ou


class L2AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: String) extends L2XactTracker(innerId, outerId) {
  val s_idle :: s_meta_read :: s_meta_resp :: s_wb_req :: s_wb_resp :: s_probe :: s_outer_read :: s_outer_resp :: s_data_read :: s_data_resp :: s_data_write :: s_meta_write :: s_grant :: s_busy :: Nil = Enum(UInt(), 14)
  val s_idle :: s_meta_read :: s_meta_resp :: s_wb_req :: s_wb_resp :: s_probe :: s_outer_read :: s_outer_resp :: s_data_read :: s_data_resp :: s_data_write :: s_meta_write :: s_grant :: s_ack :: Nil = Enum(UInt(), 14)
  val state = Reg(init=s_idle)

  val xact_src = Reg(io.inner.acquire.bits.header.src.clone)
  val xact_uncached = Reg(io.inner.acquire.bits.payload.uncached.clone)
  val xact_a_type = Reg(io.inner.acquire.bits.payload.a_type.clone)
  val xact_addr = Reg(io.inner.acquire.bits.payload.addr.clone)
  val xact_addr_block = Reg(io.inner.acquire.bits.payload.addr_block.clone)
  val xact_addr_beat = Reg(io.inner.acquire.bits.payload.addr_beat.clone)
  val xact_client_xact_id = Reg(io.inner.acquire.bits.payload.client_xact_id.clone)
  val xact_subblock = Reg(io.inner.acquire.bits.payload.subblock.clone)
  val xact_data = Vec.fill(tlDataBeats){ Reg(io.inner.acquire.bits.payload.data.clone) }
  val xact_tag_match = Reg{ Bool() }
  val xact_meta = Reg{ new L2Metadata }
  val xact_way_en = Reg{ Bits(width = nWays) }
  val xact = Acquire(xact_uncached, xact_a_type, xact_addr, xact_client_xact_id, UInt(0), xact_subblock)
  val xact = Acquire(
    uncached = xact_uncached,
    a_type = xact_a_type,
    client_xact_id = xact_client_xact_id,
    addr_block = xact_addr_block,
    addr_beat = xact_addr_beat,
    data = UInt(0),
    subblock = xact_subblock)

  val collect_cacq_data = Reg(init=Bool(false))
  val cacq_data_valid = Reg(init=Bits(0, width = tlDataBeats))
  val crel_had_data = Reg(init = Bool(false))
  val release_count = Reg(init = UInt(0, width = log2Up(nClients+1)))
  val pending_probes = Reg(init = UInt(0, width = nCoherentClients))
  val curr_p_id = co.dir.next(pending_probes)
  val full_sharers = co.dir.full(io.meta.resp.bits.meta.coh.sharers)
  val mask_self = Mux(co.requiresSelfProbe(xact),
  val mask_self = Mux(xact.requiresSelfProbe(),
                      full_sharers | (UInt(1) << xact_src),
                      full_sharers & ~UInt(UInt(1) << xact_src, width = nClients))
  val mask_incoherent = mask_self & ~io.tile_incoherent

  val collect_cacq_data = Reg(init=Bool(false))
  //TODO: zero width wires
  val (cacq_data_cnt, cacq_data_done) =
    Counter(io.inner.acquire.fire() && co.messageHasData(io.inner.acquire.bits.payload), tlDataBeats)
  val (crel_data_cnt, crel_data_done) =
    Counter(io.inner.release.fire() && co.messageHasData(io.inner.release.bits.payload), tlDataBeats)
  val (cgnt_data_cnt, cgnt_data_done) =
    Counter(io.inner.grant.fire() && co.messageHasData(io.inner.grant.bits.payload), tlDataBeats)
  val (outer_data_write_cnt, outer_data_write_done) =
    Counter(io.outer.acquire.fire() && co.messageHasData(io.outer.acquire.bits.payload), tlDataBeats)
  val (outer_data_resp_cnt, outer_data_resp_done) =
    Counter(io.outer.grant.fire() && co.messageHasData(io.outer.grant.bits.payload), tlDataBeats)
  val (local_data_read_cnt, local_data_read_done) = Counter(io.data.read.fire(), tlDataBeats)
  val (local_data_write_cnt, local_data_write_done) = Counter(io.data.write.fire(), tlDataBeats)
  val (local_data_resp_cnt, local_data_resp_done) = Counter(io.data.resp.valid, tlDataBeats)
  val cacq_data_done = connectIncomingDataBeatCounter(io.inner.acquire)
  val crel_data_done = connectIncomingDataBeatCounter(io.inner.release)
  val (macq_data_cnt, macq_data_done) = connectOutgoingDataBeatCounter(io.outer.acquire)
  val mgnt_data_done = connectIncomingDataBeatCounter(io.outer.grant)
  val cgnt_data_cnt = Reg(init = UInt(0, width = tlBeatAddrBits+1))
  val cgnt_data_max = Reg(init = UInt(0, width = tlBeatAddrBits+1))
  val read_data_cnt = Reg(init = UInt(0, width = log2Up(refillCycles)+1))
  val read_data_max = Reg(init = UInt(0, width = log2Up(refillCycles)+1))
  val write_data_cnt = Reg(init = UInt(0, width = log2Up(refillCycles)+1))
  val write_data_max = Reg(init = UInt(0, width = log2Up(refillCycles)+1))
  val resp_data_cnt = Reg(init = UInt(0, width = log2Up(refillCycles)+1))
  val resp_data_max = Reg(init = UInt(0, width = log2Up(refillCycles)+1))

  val needs_writeback = !xact_tag_match && co.needsWriteback(xact_meta.coh)
  val needs_writeback = !xact_tag_match && co.isValid(xact_meta.coh) // TODO: dirty bit
  val is_hit = xact_tag_match && co.isHit(xact, xact_meta.coh)
  val needs_probes = co.requiresProbes(xact, xact_meta.coh)
  //TODO: uncached does or does not allocate
  //val do_allocate = !xact_uncached || xact.allocate()

  val amoalu = Module(new AMOALU)
  amoalu.io.addr := xact.addr()
  amoalu.io.cmd := xact.op_code()
  amoalu.io.typ := xact.op_size()
  amoalu.io.lhs := io.data.resp.bits.data //default
  amoalu.io.rhs := xact.data(0) // default

  def mergeData[T <: HasTileLinkData](buffer: Vec[UInt], incoming: T) {
    val old_data = incoming.data
    val new_data = buffer(incoming.addr_beat)
    amoalu.io.lhs := old_data
    amoalu.io.rhs := new_data
    val wmask = FillInterleaved(8, xact.write_mask())
    buffer(incoming.addr_beat) :=
      Mux(xact.is(Acquire.uncachedAtomic), amoalu.io.out,
      Mux(xact.is(Acquire.uncachedWriteBlock) || xact.is(Acquire.uncachedWrite),
        wmask & new_data | ~wmask & old_data, old_data))
  }

  //TODO: Are there any races between lines with the same idx?
  //TODO: Allow hit under miss for stores
  io.has_acquire_conflict := (co.isCoherenceConflict(xact.addr, c_acq.payload.addr) ||
                              xact.addr(idxMSB,idxLSB) === c_acq.payload.addr(idxMSB,idxLSB)) &&
  io.has_acquire_conflict := (co.isCoherenceConflict(xact.addr_block, cacq.payload.addr_block) ||
                              xact.addr_block(idxMSB,idxLSB) === cacq.payload.addr_block(idxMSB,idxLSB)) &&
                             (state != s_idle) &&
                             !collect_cacq_data
  io.has_acquire_match := co.messageHasData(xact) &&
                          (xact.addr === c_acq.payload.addr) &&
  io.has_acquire_match := xact.hasMultibeatData() &&
                          (xact.addr_block === cacq.payload.addr_block) &&
                          collect_cacq_data
  io.has_release_match := !co.isVoluntary(c_rel.payload) &&
                          (xact.addr === c_rel.payload.addr) &&
  io.has_release_match := !crel.payload.isVoluntary() &&
                          (xact.addr_block === crel.payload.addr_block) &&
                          (state === s_probe)

  val next_coh_on_rel = co.managerMetadataOnRelease(c_rel.payload, xact_meta.coh, c_rel.header.src)
  val next_coh_on_gnt = co.managerMetadataOnGrant(c_gnt.payload, xact_meta.coh,
                                                  c_gnt.header.dst)
  val next_coh_on_rel = co.managerMetadataOnRelease(
                          incoming = crel.payload,
                          meta = xact_meta.coh,
                          src = crel.header.src)
  val next_coh_on_gnt = co.managerMetadataOnGrant(
                          outgoing = cgnt.payload,
                          meta = xact_meta.coh,
                          dst = cgnt.header.dst)

  val outer_write = Bundle(UncachedWrite(xact_addr, UInt(trackerId), xact_data(outer_data_write_cnt)),
                           { case TLId => outerId })
  val outer_read = Bundle(UncachedRead( xact_addr, UInt(trackerId)), { case TLId => outerId })
  val outer_write = Bundle(UncachedWriteBlock(
                             client_xact_id = UInt(trackerId),
                             addr_block = xact_addr_block,
                             addr_beat = macq_data_cnt,
                             data = xact_data(macq_data_cnt)),
                           { case TLId => outerId })
  val outer_read = Bundle(UncachedReadBlock(
                            client_xact_id = UInt(trackerId),
                            addr_block = xact_addr_block),
                          { case TLId => outerId })

  io.outer.acquire.valid := Bool(false)
  io.outer.acquire.bits.payload := outer_read //default
@@ -714,15 +851,16 @@ class L2AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: St
  io.inner.probe.valid := Bool(false)
  io.inner.probe.bits.header.src := UInt(bankId)
  io.inner.probe.bits.header.dst := curr_p_id
  io.inner.probe.bits.payload := Probe(co.getProbeType(xact, xact_meta.coh), xact_addr)
  io.inner.probe.bits.payload := xact.makeProbe(xact_meta.coh)

  io.inner.grant.valid := Bool(false)
  io.inner.grant.bits.header.src := UInt(bankId)
  io.inner.grant.bits.header.dst := xact_src
  io.inner.grant.bits.payload := Grant(xact_uncached, co.getGrantType(xact, xact_meta.coh),
                                       xact_client_xact_id,
                                       UInt(trackerId),
                                       xact_data(cgnt_data_cnt))
  io.inner.grant.bits.payload := xact.makeGrant(
                                   manager_xact_id = UInt(trackerId),
                                   meta = xact_meta.coh,
                                   addr_beat = cgnt_data_cnt,
                                   data = xact_data(cgnt_data_cnt))

  io.inner.acquire.ready := Bool(false)
  io.inner.release.ready := Bool(false)
@@ -731,26 +869,28 @@ class L2AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: St
  io.data.read.valid := Bool(false)
  io.data.read.bits.id := UInt(trackerId)
  io.data.read.bits.way_en := xact_way_en
  io.data.read.bits.addr := Cat(xact_addr, local_data_read_cnt)
  io.data.read.bits.addr_block := xact_addr_block
  io.data.read.bits.addr_beat := read_data_cnt
  io.data.write.valid := Bool(false)
  io.data.write.bits.id := UInt(trackerId)
  io.data.write.bits.way_en := xact_way_en
  io.data.write.bits.addr := Cat(xact_addr, local_data_write_cnt)
  io.data.write.bits.addr_block := xact_addr_block
  io.data.write.bits.addr_beat := write_data_cnt
  io.data.write.bits.wmask := SInt(-1)
  io.data.write.bits.data := xact_data(local_data_write_cnt)
  io.data.write.bits.data := xact_data(write_data_cnt)
  io.meta.read.valid := Bool(false)
  io.meta.read.bits.id := UInt(trackerId)
  io.meta.read.bits.idx := xact_addr(idxMSB,idxLSB)
  io.meta.read.bits.tag := xact_addr >> UInt(idxBits)
  io.meta.read.bits.idx := xact_addr_block(idxMSB,idxLSB)
  io.meta.read.bits.tag := xact_addr_block >> UInt(idxBits)
  io.meta.write.valid := Bool(false)
  io.meta.write.bits.id := UInt(trackerId)
  io.meta.write.bits.idx := xact_addr(idxMSB,idxLSB)
  io.meta.write.bits.idx := xact_addr_block(idxMSB,idxLSB)
  io.meta.write.bits.way_en := xact_way_en
  io.meta.write.bits.data.tag := xact_addr >> UInt(idxBits)
  io.meta.write.bits.data.tag := xact_addr_block >> UInt(idxBits)
  io.meta.write.bits.data.coh := next_coh_on_gnt

  io.wb.req.valid := Bool(false)
  io.wb.req.bits.addr := Cat(xact_meta.tag, xact_addr(idxMSB,idxLSB))
  io.wb.req.bits.addr_block := Cat(xact_meta.tag, xact_addr_block(idxMSB,idxLSB))
  io.wb.req.bits.coh := xact_meta.coh
  io.wb.req.bits.way_en := xact_way_en
  io.wb.req.bits.id := UInt(trackerId)
@@ -758,7 +898,8 @@ class L2AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: St
  when(collect_cacq_data) {
    io.inner.acquire.ready := Bool(true)
    when(io.inner.acquire.valid) {
      xact_data(cacq_data_cnt) := c_acq.payload.data
      xact_data(cacq.payload.addr_beat) := cacq.payload.data
      cacq_data_valid(cacq.payload.addr_beat) := Bool(true)
    }
    when(cacq_data_done) { collect_cacq_data := Bool(false) }
  }
@@ -767,14 +908,15 @@ class L2AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: St
    is(s_idle) {
      io.inner.acquire.ready := Bool(true)
      when( io.inner.acquire.valid ) {
        xact_uncached := c_acq.payload.uncached
        xact_a_type := c_acq.payload.a_type
        xact_addr := c_acq.payload.addr
        xact_client_xact_id := c_acq.payload.client_xact_id
        xact_data(UInt(0)) := c_acq.payload.data
        xact_subblock := c_acq.payload.subblock
        xact_src := c_acq.header.src
        collect_cacq_data := co.messageHasData(c_acq.payload)
        xact_uncached := cacq.payload.uncached
        xact_a_type := cacq.payload.a_type
        xact_addr_block := cacq.payload.addr_block
        xact_addr_beat := cacq.payload.addr_beat
        xact_client_xact_id := cacq.payload.client_xact_id
        xact_data(UInt(0)) := cacq.payload.data
        xact_subblock := cacq.payload.subblock
        xact_src := cacq.header.src
        collect_cacq_data := cacq.payload.hasMultibeatData()
        state := s_meta_read
      }
    }
@@ -789,9 +931,20 @@ class L2AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: St
        xact_way_en := io.meta.resp.bits.way_en
        val coh = io.meta.resp.bits.meta.coh
        val _tag_match = io.meta.resp.bits.tag_match
        val _needs_writeback = !_tag_match && co.needsWriteback(coh)
        val _needs_writeback = !_tag_match && co.isValid(coh) //TODO: dirty bit
        val _needs_probes = _tag_match && co.requiresProbes(xact, coh)
        val _is_hit = _tag_match && co.isHit(xact, coh)
        val full_block = !xact.uncached ||
                         xact.is(Acquire.uncachedReadBlock) ||
                         xact.is(Acquire.uncachedWriteBlock)
        read_data_cnt := Mux(full_block, UInt(0), xact_addr_beat)
        read_data_max := Mux(full_block, UInt(refillCycles-1), xact_addr_beat)
        write_data_cnt := Mux(full_block || !_is_hit, UInt(0), xact_addr_beat)
        write_data_max := Mux(full_block || !_is_hit, UInt(refillCycles-1), xact_addr_beat)
        resp_data_cnt := Mux(full_block, UInt(0), xact_addr_beat)
        resp_data_max := Mux(full_block, UInt(refillCycles-1), xact_addr_beat)
        cgnt_data_cnt := Mux(full_block, UInt(0), xact_addr_beat)
        cgnt_data_max := Mux(full_block, UInt(tlDataBeats-1), xact_addr_beat)
        when(_needs_probes) {
          pending_probes := mask_incoherent(nCoherentClients-1,0)
          release_count := co.dir.count(mask_incoherent)
@@ -816,19 +969,18 @@ class L2AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: St
        pending_probes := co.dir.pop(pending_probes, curr_p_id)
      }
      // Handle releases, which may have data being written back
      //TODO: make sure cacq data is actually present before accepting
      //      release data to merge!
      io.inner.release.ready := Bool(true)
      when(io.inner.release.valid) {
        xact_meta.coh := next_coh_on_rel
        // Handle released dirty data
        when(co.messageHasData(c_rel.payload)) {
        when(crel.payload.hasData()) {
          crel_had_data := Bool(true)
          //TODO make sure cacq data is actually present before merging
          xact_data(crel_data_cnt) := mergeData(xact,
                                                xact_data(crel_data_cnt),
                                                c_rel.payload.data)
          mergeData(xact_data, crel.payload)
        }
        // We don't decrement release_count until we've received all the data beats.
        when(!co.messageHasData(c_rel.payload) || crel_data_done) {
        when(!crel.payload.hasMultibeatData() || crel_data_done) {
          release_count := release_count - UInt(1)
        }
      }
@@ -847,37 +999,39 @@ class L2AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: St
      io.outer.grant.ready := Bool(true)
      when(io.outer.grant.valid) {
        //TODO make sure cacq data is actually present before merging
        xact_data(outer_data_resp_cnt) := mergeData(xact, xact_data(outer_data_resp_cnt),
                                                    io.outer.grant.bits.payload.data)
        //TODO: set pending client state in xact_meta.coh
        when(outer_data_resp_done) {
          state := Mux(co.messageHasData(io.outer.grant.bits.payload),
                     s_data_write, s_data_read)
        mergeData(xact_data, mgnt.payload)
        when(mgnt_data_done) {
          state := Mux(mgnt.payload.hasData(), s_data_write, s_data_read)
        }
      }
    }
    is(s_data_read) {
      io.data.read.valid := (if(tlDataBeats == 1) Bool(true)
                             else !collect_cacq_data || (local_data_resp_cnt < cacq_data_cnt))
      io.data.read.valid := !collect_cacq_data || cacq_data_valid(read_data_cnt)
      when(io.data.resp.valid) {
        xact_data(local_data_resp_cnt) := mergeData(xact, xact_data(local_data_resp_cnt),
                                                    io.data.resp.bits.data)
        mergeData(xact_data, io.data.resp.bits)
        resp_data_cnt := resp_data_cnt + UInt(1)
      }
      when(io.data.read.ready) {
        read_data_cnt := read_data_cnt + UInt(1)
        when(read_data_cnt === read_data_max) { state := s_data_resp }
      }
      when(local_data_read_done) { state := s_data_resp }
    }
    is(s_data_resp) {
      when(io.data.resp.valid) {
        xact_data(local_data_resp_cnt) := mergeData(xact, xact_data(local_data_resp_cnt),
                                                    io.data.resp.bits.data)
        mergeData(xact_data, io.data.resp.bits)
        resp_data_cnt := resp_data_cnt + UInt(1)
      }
      when(local_data_resp_done) {
        state := Mux(co.messageHasData(xact), s_data_write, s_meta_write)
      when(resp_data_cnt === resp_data_max) {
        state := Mux(xact.hasData(), s_data_write, s_meta_write)
      }
    }
    is(s_data_write) {
      io.data.write.valid := Bool(true)
      when(local_data_write_done) {
        state := s_meta_write
      when(io.data.write.ready) {
        write_data_cnt := write_data_cnt + UInt(1)
        when(write_data_cnt === write_data_max) {
          state := s_meta_write
        }
      }
    }
    is(s_meta_write) {
@@ -886,12 +1040,14 @@ class L2AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: St
    }
    is(s_grant) {
      io.inner.grant.valid := Bool(true)
      when(!co.messageHasData(c_gnt.payload) || cgnt_data_done) {
        state := Mux(co.requiresAckForGrant(c_gnt.payload),
                   s_busy, s_idle)
      when(io.inner.grant.ready) {
        cgnt_data_cnt := cgnt_data_cnt + UInt(1)
        when(cgnt_data_cnt === cgnt_data_max) {
          state := Mux(cgnt.payload.requiresAck(), s_ack, s_idle)
        }
      }
    }
    is(s_busy) {
    is(s_ack) {
      io.inner.finish.ready := Bool(true)
      when(io.inner.finish.valid) { state := s_idle }
    }
File diff suppressed because it is too large
@@ -16,7 +16,6 @@ trait MemoryOpConstants {
  val MT_BU = Bits("b100")
  val MT_HU = Bits("b101")
  val MT_WU = Bits("b110")
  val MT_CB = Bits("b111") // cache block

  val NUM_XA_OPS = 9
  val M_SZ = 5
@@ -37,8 +36,9 @@ trait MemoryOpConstants {
  val M_XA_MAX = Bits("b01101");
  val M_XA_MINU = Bits("b01110");
  val M_XA_MAXU = Bits("b01111");
  val M_INV = Bits("b10000"); // write back and invalidate line
  val M_CLN = Bits("b10001"); // write back line
  val M_FLUSH = Bits("b10000") // write back dirty data and cede R/W permissions
  val M_PRODUCE = Bits("b10001") // write back dirty data and cede W permissions
  val M_CLEAN = Bits("b10011") // write back dirty data and retain R/W permissions

  def isAMO(cmd: Bits) = cmd(3) || cmd === M_XA_SWAP
  def isPrefetch(cmd: Bits) = cmd === M_PFR || cmd === M_PFW
@@ -136,7 +136,7 @@ class HTIF(pcr_RESET: Int) extends Module with HTIFParameters {
    mem_acked := Bool(true)
    mem_gxid := io.mem.grant.bits.payload.manager_xact_id
    mem_gsrc := io.mem.grant.bits.header.src
    mem_needs_ack := co.requiresAckForGrant(io.mem.grant.bits.payload)
    mem_needs_ack := io.mem.grant.bits.payload.requiresAck()
  }
  io.mem.grant.ready := Bool(true)
@@ -184,22 +184,26 @@ class HTIF(pcr_RESET: Int) extends Module with HTIFParameters {
    state := Mux(cmd === cmd_readmem && pos != UInt(0), state_mem_rreq, state_rx)
  }

  var mem_req_data: UInt = null
  for (i <- 0 until dataBits/short_request_bits) {
    val idx = Cat(cnt, UInt(i, log2Up(dataBits/short_request_bits)))
  val n = dataBits/short_request_bits
  val mem_req_data = (0 until n).map { i =>
    val ui = UInt(i, log2Up(n))
    when (state === state_mem_rresp && io.mem.grant.valid) {
      packet_ram(idx) := io.mem.grant.bits.payload.data((i+1)*short_request_bits-1, i*short_request_bits)
      packet_ram(Cat(io.mem.grant.bits.payload.addr_beat, ui)) :=
        io.mem.grant.bits.payload.data((i+1)*short_request_bits-1, i*short_request_bits)
    }
    mem_req_data = Cat(packet_ram(idx), mem_req_data)
  }
    packet_ram(Cat(cnt, ui))
  }.reverse.reduce(_##_)

  val init_addr = addr.toUInt >> UInt(offsetBits-3)
  io.mem.acquire.valid := state === state_mem_rreq || state === state_mem_wreq
  io.mem.acquire.bits.payload := Mux(cmd === cmd_writemem,
    UncachedWrite(init_addr, mem_req_data),
    UncachedRead(init_addr))
    UncachedWriteBlock(
      addr_block = init_addr,
      addr_beat = cnt,
      client_xact_id = UInt(0),
      data = mem_req_data),
    UncachedReadBlock(addr_block = init_addr))
  io.mem.acquire.bits.payload.data := mem_req_data
  io.mem.acquire.bits.header.src := UInt(params(LNClients)) // By convention HTIF is the client with the largest id
  io.mem.acquire.bits.header.dst := UInt(0) // DNC; Overwritten outside module
  io.mem.finish.valid := (state === state_mem_finish) && mem_needs_ack
  io.mem.finish.bits.payload.manager_xact_id := mem_gxid
  io.mem.finish.bits.header.dst := mem_gsrc
@@ -228,7 +228,9 @@ class MemIOUncachedTileLinkIOConverter(qDepth: Int) extends Module {
  mem_cmd_q.io.enq.valid := Bool(false)
  mem_data_q.io.enq.valid := Bool(false)

  val acq_has_data = co.messageHasData(io.uncached.acquire.bits.payload)
  //TODO: Assert that only WriteUncachedBlock and ReadUncachedBlock are
  //acceptable Acquire types
  val acq_has_data = io.uncached.acquire.bits.payload.hasData()
  val (tl_cnt_out, tl_wrap_out) = Counter(io.uncached.acquire.fire() && acq_has_data, tlDataBeats)
  val (mif_cnt_out, mif_wrap_out) = Counter(mem_data_q.io.enq.fire(), mifDataBeats)
  val active_out = Reg(init=Bool(false))
@@ -250,7 +252,7 @@ class MemIOUncachedTileLinkIOConverter(qDepth: Int) extends Module {
    active_out := Bool(true)
    cmd_sent_out := Bool(false)
    tag_out := io.uncached.acquire.bits.payload.client_xact_id
    addr_out := io.uncached.acquire.bits.payload.addr
    addr_out := io.uncached.acquire.bits.payload.addr_block
    has_data := acq_has_data
    tl_done_out := tl_wrap_out
    mif_done_out := Bool(false)
@@ -323,8 +325,12 @@ class MemIOUncachedTileLinkIOConverter(qDepth: Int) extends Module {
    when(tl_wrap_in) { active_in := Bool(false) }
  }

  io.uncached.grant.bits.payload := Grant(Bool(true), Grant.uncachedRead, tag_in, UInt(0),
                                          tl_buf_in(tl_cnt_in))
  io.uncached.grant.bits.payload := Grant(uncached = Bool(true),
                                          g_type = Grant.uncachedReadBlock,
                                          client_xact_id = tag_in,
                                          manager_xact_id = UInt(0),
                                          addr_beat = tl_cnt_in,
                                          data = tl_buf_in(tl_cnt_in))
}

class HellaFlowQueue[T <: Data](val entries: Int)(data: => T) extends Module
@@ -16,20 +16,22 @@ case object TLDataBits extends Field[Int]
case object TLDataBeats extends Field[Int]

abstract trait TileLinkParameters extends UsesParameters {
  val tlAddrBits = params(TLAddrBits)
  val tlBlockAddrBits = params(TLAddrBits)
  val tlClientXactIdBits = params(TLClientXactIdBits)
  val tlManagerXactIdBits = params(TLManagerXactIdBits)
  val tlDataBits = params(TLDataBits)
  val tlDataBeats = params(TLDataBeats)
  val tlWriteMaskBits = if(tlDataBits/8 < 1) 1 else tlDataBits
  val tlSubblockAddrBits = log2Up(tlWriteMaskBits)
  val tlAtomicOpcodeBits = log2Up(NUM_XA_OPS)
  val tlWriteMaskBits = if(tlDataBits/8 < 1) 1 else tlDataBits/8
  val tlBeatAddrBits = log2Up(tlDataBeats)
  val tlByteAddrBits = log2Up(tlWriteMaskBits)
  val tlAtomicOpcodeBits = M_SZ
  val tlUncachedOperandSizeBits = MT_SZ
  val tlSubblockUnionBits = max(tlWriteMaskBits,
                                (tlSubblockAddrBits +
                                 tlUncachedOperandSizeBits +
                                 tlAtomicOpcodeBits)) + 1
                                (tlByteAddrBits +
                                 tlUncachedOperandSizeBits +
                                 tlAtomicOpcodeBits)) + 1
  val co = params(TLCoherence)
  val networkPreservesPointToPointOrdering = false //TODO: check physical network type
}

abstract class TLBundle extends Bundle with TileLinkParameters
@@ -45,8 +47,12 @@ trait ClientToClientChannel extends TileLinkChannel // Unused for now
// Common signals that are used in multiple channels.
// These traits are useful for type parameterization.
//
trait HasPhysicalAddress extends TLBundle {
  val addr = UInt(width = tlAddrBits)
trait HasCacheBlockAddress extends TLBundle {
  val addr_block = UInt(width = tlBlockAddrBits)
}

trait HasTileLinkBeatId extends TLBundle {
  val addr_beat = UInt(width = tlBeatAddrBits)
}

trait HasClientTransactionId extends TLBundle {
@@ -57,55 +63,109 @@ trait HasManagerTransactionId extends TLBundle {
  val manager_xact_id = Bits(width = tlManagerXactIdBits)
}

trait HasTileLinkData extends TLBundle {
abstract trait HasTileLinkData extends HasTileLinkBeatId {
  val data = UInt(width = tlDataBits)
  def hasData(dummy: Int = 0): Bool
  def hasMultibeatData(dummy: Int = 0): Bool
}

// Actual TileLink channel bundle definitions

class Acquire extends ClientToManagerChannel
  with HasPhysicalAddress
  with HasCacheBlockAddress
  with HasClientTransactionId
  with HasTileLinkData {
  // Actual bundle fields
  val uncached = Bool()
  val a_type = UInt(width = max(log2Up(Acquire.nUncachedAcquireTypes), co.acquireTypeWidth))
  val a_type = UInt(width = max(log2Up(Acquire.nBuiltinAcquireTypes), co.acquireTypeWidth))
  val subblock = Bits(width = tlSubblockUnionBits)
  val sbAddrOff = tlSubblockAddrBits + tlUncachedOperandSizeBits
  val opSzOff = tlUncachedOperandSizeBits + sbAddrOff

  // Utility funcs for accessing uncached/subblock union
  val opSizeOff = tlByteAddrBits + 1
  val opCodeOff = tlUncachedOperandSizeBits + opSizeOff
  val opMSB = tlAtomicOpcodeBits + opCodeOff
  def allocate(dummy: Int = 0) = subblock(0)
  def operand_sz(dummy: Int = 0) = subblock(tlUncachedOperandSizeBits, 1)
  def subblock_addr(dummy: Int = 0) = subblock(sbAddrOff, tlUncachedOperandSizeBits+1)
  def atomic_op(dummy: Int = 0) = subblock(opSzOff, sbAddrOff+1)
  def addr_byte(dummy: Int = 0) = subblock(opSizeOff-1, 1)
  def op_size(dummy: Int = 0) = subblock(opCodeOff-1, opSizeOff)
  def op_code(dummy: Int = 0) = subblock(opMSB-1, opCodeOff)
  def write_mask(dummy: Int = 0) = subblock(tlWriteMaskBits, 1)
  def addr(dummy: Int = 0) = Cat(addr_block, addr_beat, this.addr_byte(0))

  // Other helper funcs
  def is(t: UInt) = a_type === t

  def hasData(dummy: Int = 0): Bool = uncached && Acquire.typesWithData.contains(a_type)

  def hasMultibeatData(dummy: Int = 0): Bool = Bool(tlDataBeats > 1) && uncached &&
                                               Acquire.typesWithMultibeatData.contains(a_type)

  //TODO: This function is a hack to support Rocket icache snooping Rocket nbdcache:
  def requiresSelfProbe(dummy: Int = 0) = uncached && Acquire.requiresSelfProbe(a_type)

  def makeProbe(meta: ManagerMetadata = co.managerMetadataOnFlush): Probe =
    Probe(co.getProbeType(this, meta), this.addr_block)

  def makeGrant(
      manager_xact_id: UInt,
      meta: ManagerMetadata = co.managerMetadataOnFlush,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0)): Grant = {
    Grant(
      uncached = this.uncached,
      g_type = co.getGrantType(this, meta),
      client_xact_id = this.client_xact_id,
      manager_xact_id = manager_xact_id,
      addr_beat = addr_beat,
      data = data
    )
  }
}

object Acquire {
  val nUncachedAcquireTypes = 3
  //TODO: val uncachedRead :: uncachedWrite :: uncachedAtomic :: Nil = Enum(UInt(), nUncachedAcquireTypes)
  def uncachedRead = UInt(0)
  def uncachedWrite = UInt(1)
  def uncachedAtomic = UInt(2)
  def hasData(a_type: UInt) = Vec(uncachedWrite, uncachedAtomic).contains(a_type)
  def requiresOuterRead(a_type: UInt) = a_type != uncachedWrite
  def requiresOuterWrite(a_type: UInt) = a_type === uncachedWrite
  val nBuiltinAcquireTypes = 5
  //TODO: Use Enum
  def uncachedRead = UInt(0)
  def uncachedReadBlock = UInt(1)
  def uncachedWrite = UInt(2)
  def uncachedWriteBlock = UInt(3)
  def uncachedAtomic = UInt(4)
  def typesWithData = Vec(uncachedWrite, uncachedWriteBlock, uncachedAtomic)
  def typesWithMultibeatData = Vec(uncachedWriteBlock)
  def requiresOuterRead(a_type: UInt) = a_type != uncachedWriteBlock
  def requiresOuterWrite(a_type: UInt) = typesWithData.contains(a_type)
  //TODO: This function is a hack to support Rocket icache snooping Rocket nbdcache:
  def requiresSelfProbe(a_type: UInt) = a_type === uncachedReadBlock

  def apply(uncached: Bool, a_type: Bits, addr: UInt, client_xact_id: UInt, data: UInt, subblock: UInt): Acquire = {
  def fullWriteMask = SInt(-1, width = new Acquire().tlWriteMaskBits).toUInt

  // Most generic constructor
  def apply(
      uncached: Bool,
      a_type: Bits,
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0),
      subblock: UInt = UInt(0)): Acquire = {
    val acq = new Acquire
    acq.uncached := uncached
    acq.a_type := a_type
    acq.addr := addr
    acq.client_xact_id := client_xact_id
    acq.addr_block := addr_block
    acq.addr_beat := addr_beat
    acq.data := data
    acq.subblock := subblock
    acq
  }
  def apply(a_type: Bits, addr: UInt, client_xact_id: UInt, data: UInt): Acquire = {
    apply(Bool(false), a_type, addr, client_xact_id, data, UInt(0))
  }
  def apply(a_type: Bits, addr: UInt, client_xact_id: UInt): Acquire = {
    apply(a_type, addr, client_xact_id, UInt(0))
  // For cached types
  def apply(a_type: Bits, client_xact_id: UInt, addr_block: UInt): Acquire = {
    apply(
      uncached = Bool(false),
      a_type = a_type,
      client_xact_id = client_xact_id,
      addr_block = addr_block)
  }
  // Copy constructor
  def apply(a: Acquire): Acquire = {
    val acq = new Acquire
    acq := a
@ -113,83 +173,189 @@ object Acquire {
|
||||
}
|
||||
}

// Asks for a single TileLink beat of data
object UncachedRead {
  def apply(addr: UInt, client_xact_id: UInt, subblock_addr: UInt, operand_sz: UInt, alloc: Bool): Acquire = {
    val acq = Acquire(Acquire.uncachedRead, addr, client_xact_id)
    acq.uncached := Bool(true)
    acq.subblock := Cat(subblock_addr, operand_sz, alloc)
    acq
  def apply(
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      alloc: Bool = Bool(true)): Acquire = {
    Acquire(
      uncached = Bool(true),
      a_type = Acquire.uncachedRead,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      subblock = alloc)
  }
  def apply(addr: UInt, client_xact_id: UInt): Acquire = {
    apply(addr, client_xact_id, UInt(0), MT_CB, Bool(true))
  }
  def apply(addr: UInt): Acquire = {
    apply(addr, UInt(0))
  }
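
// Editor's example (not part of this commit): requesting one beat of a block
// without allocating in intervening caches; the literals are illustrative:
//
//   val acq = UncachedRead(
//     client_xact_id = UInt(0),
//     addr_block = UInt(0x100),
//     addr_beat = UInt(2),
//     alloc = Bool(false))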

// Asks for an entire cache block of data
object UncachedReadBlock {
  def apply(
      client_xact_id: UInt = UInt(0),
      addr_block: UInt,
      alloc: Bool = Bool(true)): Acquire = {
    Acquire(
      uncached = Bool(true),
      a_type = Acquire.uncachedReadBlock,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      subblock = alloc.toUInt)
  }
}
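
// Editor's example (not part of this commit): fetching a whole cache block;
// the block address is illustrative:
//
//   val acq = UncachedReadBlock(addr_block = UInt(0x100))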

object UncachedWrite {
  def apply(addr: UInt, client_xact_id: UInt, write_mask: Bits, alloc: Bool, data: UInt): Acquire = {
    val acq = Acquire(Acquire.uncachedWrite, addr, client_xact_id, data)
    acq.uncached := Bool(true)
    acq.subblock := Cat(write_mask, alloc)
    acq
  def apply(
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      data: UInt,
      write_mask: UInt = Acquire.fullWriteMask,
      alloc: Bool = Bool(true)): Acquire = {
    Acquire(
      uncached = Bool(true),
      a_type = Acquire.uncachedWrite,
      addr_block = addr_block,
      addr_beat = addr_beat,
      client_xact_id = client_xact_id,
      data = data,
      subblock = Cat(write_mask, alloc))
  }
  def apply(addr: UInt, client_xact_id: UInt, data: UInt): Acquire = {
    apply(addr, client_xact_id, SInt(-1), Bool(true), data)
  }
  def apply(addr: UInt, data: UInt): Acquire = {
    apply(addr, UInt(0), data)
  }
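
// Editor's example (not part of this commit): a sub-block write of one beat
// with an explicit byte mask; the mask value and literals are illustrative:
//
//   val acq = UncachedWrite(
//     client_xact_id = UInt(0),
//     addr_block = UInt(0x100),
//     addr_beat = UInt(1),
//     data = UInt(0xbeef),
//     write_mask = UInt(0x0f))  // enable only the low four bytes of the beat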

// For full block of data
object UncachedWriteBlock {
  def apply(
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      data: UInt,
      alloc: Bool = Bool(true)): Acquire = {
    Acquire(
      uncached = Bool(true),
      a_type = Acquire.uncachedWriteBlock,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      data = data,
      subblock = Cat(Acquire.fullWriteMask, alloc))
  }
}
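
// Editor's note (not part of this commit): a block write sends tlDataBeats
// Acquires, one per beat, as the trackers below do with their beat counters;
// `beat_cnt` and `buffer` are made-up names:
//
//   io.mem.acquire.bits.payload := UncachedWriteBlock(
//     client_xact_id = UInt(0),
//     addr_block = UInt(0x100),
//     addr_beat = beat_cnt,
//     data = buffer(beat_cnt))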

object UncachedAtomic {
  def apply(addr: UInt, client_xact_id: UInt, atomic_opcode: UInt,
      subblock_addr: UInt, operand_sz: UInt, data: UInt): Acquire = {
    val acq = Acquire(Acquire.uncachedAtomic, addr, client_xact_id, data)
    acq.uncached := Bool(true)
    acq.subblock := Cat(atomic_opcode, subblock_addr, operand_sz, Bool(true))
    acq
  def apply(
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      addr_byte: UInt,
      atomic_opcode: UInt,
      operand_size: UInt,
      data: UInt): Acquire = {
    Acquire(
      uncached = Bool(true),
      a_type = Acquire.uncachedAtomic,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      data = data,
      subblock = Cat(atomic_opcode, operand_size, addr_byte, Bool(true)))
  }
}
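
// Editor's example (not part of this commit): an atomic add on a doubleword;
// M_XA_ADD and MT_D are taken to be the usual rocket memory-op encodings, and
// the address literals are illustrative:
//
//   val acq = UncachedAtomic(
//     client_xact_id = UInt(0),
//     addr_block = UInt(0x100),
//     addr_beat = UInt(0),
//     addr_byte = UInt(8),
//     atomic_opcode = M_XA_ADD,
//     operand_size = MT_D,
//     data = UInt(1))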

class Probe extends ManagerToClientChannel
    with HasPhysicalAddress {
    with HasCacheBlockAddress {
  val p_type = UInt(width = co.probeTypeWidth)

  def is(t: UInt) = p_type === t
  def makeRelease(
      client_xact_id: UInt,
      meta: ClientMetadata = co.clientMetadataOnFlush,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0)): Release = {
    Release(
      voluntary = Bool(false),
      r_type = co.getReleaseType(this, meta),
      client_xact_id = client_xact_id,
      addr_block = this.addr_block,
      addr_beat = addr_beat,
      data = data)
  }
}

object Probe {
  def apply(p_type: UInt, addr: UInt) = {
    val co = new Probe().co
  def apply(p_type: UInt, addr_block: UInt) = {
    val prb = new Probe
    prb.p_type := p_type
    prb.addr := addr
    prb.addr_block := addr_block
    prb
  }

  def onVoluntaryWriteback(meta: ManagerMetadata, addr_block: UInt): Probe = {
    apply(co.getProbeType(M_FLUSH, meta), addr_block)
  }
}
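
// Editor's example (not part of this commit): a client answering a probe with
// a Release built by the helper above; `id` and the port names are made up:
//
//   val prb = io.probe.bits.payload
//   io.release.bits.payload := prb.makeRelease(client_xact_id = id)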

class Release extends ClientToManagerChannel
    with HasPhysicalAddress
    with HasCacheBlockAddress
    with HasClientTransactionId
    with HasTileLinkData {
  val r_type = UInt(width = co.releaseTypeWidth)
  val voluntary = Bool()

  // Helper funcs
  def is(t: UInt) = r_type === t
  def hasData(dummy: Int = 0) = co.releaseTypesWithData.contains(r_type)
  def hasMultibeatData(dummy: Int = 0) = Bool(tlDataBeats > 1) && co.releaseTypesWithData.contains(r_type)
  def isVoluntary(dummy: Int = 0) = voluntary
  def requiresAck(dummy: Int = 0) = !Bool(networkPreservesPointToPointOrdering)

  def makeGrant(
      manager_xact_id: UInt,
      meta: ManagerMetadata = co.managerMetadataOnFlush): Grant = {
    Grant(
      g_type = Grant.voluntaryAck,
      uncached = Bool(true), // Grant.voluntaryAck is built-in type
      client_xact_id = this.client_xact_id,
      manager_xact_id = manager_xact_id
    )
  }
}

object Release {
  def apply(r_type: UInt, addr: UInt, client_xact_id: UInt, data: UInt): Release = {
    val co = new Release().co
  def apply(
      voluntary: Bool,
      r_type: UInt,
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0)): Release = {
    val rel = new Release
    rel.r_type := r_type
    rel.addr := addr
    rel.client_xact_id := client_xact_id
    rel.addr_block := addr_block
    rel.addr_beat := addr_beat
    rel.data := data
    rel.voluntary := voluntary
    rel
  }
  def apply(r_type: UInt, addr: UInt, client_xact_id: UInt): Release = {
    apply(r_type, addr, client_xact_id, UInt(0))
  }
  def apply(r_type: UInt, addr: UInt): Release = {
    apply(r_type, addr, UInt(0), UInt(0))

  def makeVoluntaryWriteback(
      meta: ClientMetadata,
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0)): Release = {
    Release(
      voluntary = Bool(true),
      r_type = co.getReleaseType(M_FLUSH, meta),
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      data = data)
  }
}
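
// Editor's example (not part of this commit): a client evicting a dirty block
// voluntarily, one beat at a time; `meta`, `id`, `victim_block`, `cnt` and
// `dirty_data` are made-up names:
//
//   io.release.bits.payload := Release.makeVoluntaryWriteback(
//     meta = meta,
//     client_xact_id = id,
//     addr_block = victim_block,
//     addr_beat = cnt,
//     data = dirty_data(cnt))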

@ -198,29 +364,62 @@ class Grant extends ManagerToClientChannel
    with HasClientTransactionId
    with HasManagerTransactionId {
  val uncached = Bool()
  val g_type = UInt(width = max(log2Up(Grant.nUncachedGrantTypes), co.grantTypeWidth))
  val g_type = UInt(width = max(log2Up(Grant.nBuiltinGrantTypes), co.grantTypeWidth))

  // Helper funcs
  def is(t: UInt) = g_type === t
  def hasData(dummy: Int = 0): Bool = Mux(uncached,
    Grant.typesWithData.contains(g_type),
    co.grantTypesWithData.contains(g_type))
  def hasMultibeatData(dummy: Int = 0): Bool =
    Bool(tlDataBeats > 1) && Mux(uncached,
      Grant.typesWithMultibeatData.contains(g_type),
      co.grantTypesWithData.contains(g_type))
  def isVoluntary(dummy: Int = 0): Bool = uncached && (g_type === Grant.voluntaryAck)
  def requiresAck(dummy: Int = 0): Bool = !Bool(networkPreservesPointToPointOrdering) && !isVoluntary()
  def makeFinish(dummy: Int = 0): Finish = {
    val f = new Finish
    f.manager_xact_id := this.manager_xact_id
    f
  }
}

object Grant {
  val nUncachedGrantTypes = 3
  //TODO val uncachedRead :: uncachedWrite :: uncachedAtomic :: Nil = Enum(UInt(), nUncachedGrantTypes)
  def uncachedRead = UInt(0)
  def uncachedWrite = UInt(1)
  def uncachedAtomic = UInt(2)
  def hasData(g_type: UInt) = Vec(uncachedRead, uncachedAtomic).contains(g_type)
  val nBuiltinGrantTypes = 5
  //TODO Use Enum
  def voluntaryAck = UInt(0)
  def uncachedRead = UInt(1)
  def uncachedReadBlock = UInt(2)
  def uncachedWrite = UInt(3)
  def uncachedAtomic = UInt(4)
  def typesWithData = Vec(uncachedRead, uncachedReadBlock, uncachedAtomic)
  def typesWithMultibeatData = Vec(uncachedReadBlock)

  def apply(uncached: Bool, g_type: UInt, client_xact_id: UInt, manager_xact_id: UInt, data: UInt): Grant = {
  def apply(
      uncached: Bool,
      g_type: UInt,
      client_xact_id: UInt,
      manager_xact_id: UInt,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0)): Grant = {
    val gnt = new Grant
    gnt.uncached := uncached
    gnt.g_type := g_type
    gnt.client_xact_id := client_xact_id
    gnt.manager_xact_id := manager_xact_id
    gnt.addr_beat := addr_beat
    gnt.data := data
    gnt
  }
  def apply(uncached: Bool, g_type: UInt, client_xact_id: UInt, manager_xact_id: UInt): Grant = {
    apply(uncached, g_type, client_xact_id, manager_xact_id, UInt(0))

  def getGrantTypeForUncached(a: Acquire): UInt = {
    MuxLookup(a.a_type, Grant.uncachedRead, Array(
      Acquire.uncachedRead -> Grant.uncachedRead,
      Acquire.uncachedReadBlock -> Grant.uncachedReadBlock,
      Acquire.uncachedWrite -> Grant.uncachedWrite,
      Acquire.uncachedWriteBlock -> Grant.uncachedWrite,
      Acquire.uncachedAtomic -> Grant.uncachedAtomic
    ))
  }
}
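
// Editor's example (not part of this commit): the client-side tail of a
// transaction, acknowledging a Grant that requires one; the port names are
// made up:
//
//   val gnt = io.grant.bits.payload
//   when(io.grant.fire() && gnt.requiresAck()) {
//     io.finish.bits := gnt.makeFinish()
//   }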

@ -260,10 +459,17 @@ object TileLinkIOWrapper {
  }
}

// Utility functions for constructing TileLinkIO arbiters
abstract class TileLinkArbiterLike(val arbN: Int) extends TLModule {
abstract trait HasArbiterTypes {
  val arbN: Int
  type ManagerSourcedWithId = ManagerToClientChannel with HasClientTransactionId
  type ClientSourcedWithId = ClientToManagerChannel with HasClientTransactionId
  type ClientSourcedWithIdAndData = ClientToManagerChannel with
                                      HasClientTransactionId with
                                      HasTileLinkData
}
// Utility functions for constructing TileLinkIO arbiters
abstract class TileLinkArbiterLike(val arbN: Int) extends TLModule
    with HasArbiterTypes {

  // These are filled in depending on whether the arbiter mucks with the
  // client ids and then needs to revert them on the way back
@ -271,10 +477,10 @@ abstract class TileLinkArbiterLike(val arbN: Int) extends TLModule {
  def managerSourcedClientXactId(in: ManagerSourcedWithId): Bits
  def arbIdx(in: ManagerSourcedWithId): UInt

  def hookupClientSource[M <: ClientSourcedWithId]
  def hookupClientSource[M <: ClientSourcedWithIdAndData]
      (ins: Seq[DecoupledIO[LogicalNetworkIO[M]]],
       out: DecoupledIO[LogicalNetworkIO[M]]) {
    def hasData(m: LogicalNetworkIO[M]) = co.messageHasData(m.payload)
    def hasData(m: LogicalNetworkIO[M]) = m.payload.hasMultibeatData()
    val arb = Module(new LockingRRArbiter(out.bits.clone, arbN, params(TLDataBeats), Some(hasData _)))
    out <> arb.io.out
    ins.zipWithIndex.zip(arb.io.in).map{ case ((req,id), arb) => {
@ -336,35 +542,26 @@ abstract class TileLinkIOArbiter(n: Int) extends TileLinkArbiterLike(n) {
}

// Appends the port index of the arbiter to the client_xact_id
abstract trait AppendsArbiterId {
  val arbN: Int
  def clientSourcedClientXactId(in: ClientToManagerChannel with HasClientTransactionId, id: Int) =
abstract trait AppendsArbiterId extends HasArbiterTypes {
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int) =
    Cat(in.client_xact_id, UInt(id, log2Up(arbN)))
  def managerSourcedClientXactId(in: ManagerToClientChannel with HasClientTransactionId) =
  def managerSourcedClientXactId(in: ManagerSourcedWithId) =
    in.client_xact_id >> UInt(log2Up(arbN))
  def arbIdx(in: ManagerToClientChannel with HasClientTransactionId) =
    in.client_xact_id(log2Up(arbN)-1,0).toUInt
  def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id(log2Up(arbN)-1,0).toUInt
}

// Uses the client_xact_id as is (assumes it has been set to port index)
abstract trait PassesId {
  def clientSourcedClientXactId(in: ClientToManagerChannel with HasClientTransactionId, id: Int) =
    in.client_xact_id
  def managerSourcedClientXactId(in: ManagerToClientChannel with HasClientTransactionId) =
    in.client_xact_id
  def arbIdx(in: ManagerToClientChannel with HasClientTransactionId) =
    in.client_xact_id
abstract trait PassesId extends HasArbiterTypes {
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int) = in.client_xact_id
  def managerSourcedClientXactId(in: ManagerSourcedWithId) = in.client_xact_id
  def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id
}

// Overwrites some default client_xact_id with the port idx
abstract trait UsesNewId {
  val arbN: Int
  def clientSourcedClientXactId(in: ClientToManagerChannel with HasClientTransactionId, id: Int) =
    UInt(id, log2Up(arbN))
  def managerSourcedClientXactId(in: ManagerToClientChannel with HasClientTransactionId) =
    UInt(0)
  def arbIdx(in: ManagerToClientChannel with HasClientTransactionId) =
    in.client_xact_id
abstract trait UsesNewId extends HasArbiterTypes {
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int) = UInt(id, log2Up(arbN))
  def managerSourcedClientXactId(in: ManagerSourcedWithId) = UInt(0)
  def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id
}

// Mix-in id generation traits to make concrete arbiter classes
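// e.g. (editor's sketch, class names hypothetical):
//
//   class TileLinkIOArbiterThatAppendsArbiterId(n: Int)
//     extends TileLinkIOArbiter(n) with AppendsArbiterId
//   class TileLinkIOArbiterThatPassesId(n: Int)
//     extends TileLinkIOArbiter(n) with PassesId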

@ -67,7 +67,7 @@ class L2BroadcastHub(bankId: Int, innerId: String, outerId: String) extends
  val sdq_val = Reg(init=Bits(0, sdqDepth))
  val sdq_alloc_id = PriorityEncoder(~sdq_val)
  val sdq_rdy = !sdq_val.andR
  val sdq_enq = acquire.fire() && co.messageHasData(acquire.bits.payload)
  val sdq_enq = acquire.fire() && acquire.bits.payload.hasData()
  val sdq = Vec.fill(sdqDepth){ Reg(io.inner.acquire.bits.payload.data) }
  when (sdq_enq) { sdq(sdq_alloc_id) := acquire.bits.payload.data }

@ -87,8 +87,8 @@ class L2BroadcastHub(bankId: Int, innerId: String, outerId: String) extends

  // Queue to store impending Voluntary Release data
  val release = io.inner.release
  val voluntary = co.isVoluntary(release.bits.payload)
  val vwbdq_enq = release.fire() && voluntary && co.messageHasData(release.bits.payload)
  val voluntary = release.bits.payload.isVoluntary()
  val vwbdq_enq = release.fire() && voluntary && release.bits.payload.hasData()
  val (rel_data_cnt, rel_data_done) = Counter(vwbdq_enq, tlDataBeats) //TODO Zero width
  val vwbdq = Vec.fill(tlDataBeats){ Reg(release.bits.payload.data) } //TODO Assumes nReleaseTransactors == 1
  when(vwbdq_enq) { vwbdq(rel_data_cnt) := release.bits.payload.data }
@ -117,9 +117,10 @@ class L2BroadcastHub(bankId: Int, innerId: String, outerId: String) extends
  probe_arb.io.in zip trackerList map { case (arb, t) => arb <> t.io.inner.probe }

  // Wire grant reply to initiating client
  def hasData(m: LogicalNetworkIO[Grant]) = co.messageHasData(m.payload)
  def hasData(m: LogicalNetworkIO[Grant]) = m.payload.hasMultibeatData()
  val grant_arb = Module(new LockingArbiter(new LogicalNetworkIO(new Grant), trackerList.size, tlDataBeats, Some(hasData _)))
  io.inner.grant.bits.payload.data := io.outer.grant.bits.payload.data
  io.inner.grant.bits.payload.addr_beat := io.outer.grant.bits.payload.addr_beat
  io.inner.grant <> grant_arb.io.out
  grant_arb.io.in zip trackerList map { case (arb, t) => arb <> t.io.inner.grant }

@ -130,7 +131,7 @@ class L2BroadcastHub(bankId: Int, innerId: String, outerId: String) extends
  val outer_data_ptr = new DataQueueLocation().fromBits(outer_arb.io.out.acquire.bits.payload.data)
  val is_in_sdq = outer_data_ptr.loc === inStoreQueue
  val free_sdq = io.outer.acquire.fire() &&
                   co.messageHasData(io.outer.acquire.bits.payload) &&
                   io.outer.acquire.bits.payload.hasData() &&
                   outer_data_ptr.loc === inStoreQueue
  io.outer.acquire.bits.payload.data := MuxLookup(outer_data_ptr.loc, release.bits.payload.data, Array(
    inStoreQueue -> sdq(outer_data_ptr.idx),
@ -156,32 +157,39 @@ abstract class XactTracker(innerId: String, outerId: String) extends Module {
    val has_release_match = Bool(OUTPUT)
  }

  val c_acq = io.inner.acquire.bits
  val c_rel = io.inner.release.bits
  val c_gnt = io.inner.grant.bits
  val c_ack = io.inner.finish.bits
  val m_gnt = io.outer.grant.bits
  val cacq = io.inner.acquire.bits
  val crel = io.inner.release.bits
  val cgnt = io.inner.grant.bits
  val cfin = io.inner.finish.bits
  val macq = io.outer.acquire.bits
  val mgnt = io.outer.grant.bits

}

class VoluntaryReleaseTracker(trackerId: Int, bankId: Int, innerId: String, outerId: String) extends XactTracker(innerId, outerId) {
  val s_idle :: s_outer :: s_ack :: s_busy :: Nil = Enum(UInt(), 4)
  val s_idle :: s_outer :: s_grant :: s_ack :: Nil = Enum(UInt(), 4)
  val state = Reg(init=s_idle)

  val xact_src = Reg(io.inner.release.bits.header.src.clone)
  val xact_r_type = Reg(io.inner.release.bits.payload.r_type)
  val xact_addr = Reg(io.inner.release.bits.payload.addr.clone)
  val xact_addr_block = Reg(io.inner.release.bits.payload.addr_block.clone)
  val xact_client_xact_id = Reg(io.inner.release.bits.payload.client_xact_id.clone)
  val xact_data = Vec.fill(tlDataBeats){ Reg(io.inner.release.bits.payload.data.clone) }
  val xact = Release(
    voluntary = Bool(true),
    r_type = xact_r_type,
    client_xact_id = xact_client_xact_id,
    addr_block = xact_addr_block)

  val collect_inner_data = Reg(init=Bool(false))
  // TODO: assert that all releases have full blocks of data
  val (inner_data_cnt, inner_data_done) =
    Counter(io.inner.release.fire() && co.messageHasData(io.inner.release.bits.payload), tlDataBeats)
    Counter(io.inner.release.fire() && io.inner.release.bits.payload.hasMultibeatData(), tlDataBeats)
  val (outer_data_cnt, outer_data_done) =
    Counter(io.outer.acquire.fire() && co.messageHasData(io.outer.acquire.bits.payload), tlDataBeats)
    Counter(io.outer.acquire.fire() && io.outer.acquire.bits.payload.hasMultibeatData(), tlDataBeats)

  io.has_acquire_conflict := Bool(false)
  io.has_release_match := co.isVoluntary(c_rel.payload)
  io.has_release_match := crel.payload.isVoluntary()

  io.outer.grant.ready := Bool(false)
  io.outer.acquire.valid := Bool(false)
@ -193,21 +201,19 @@ class VoluntaryReleaseTracker(trackerId: Int, bankId: Int, innerId: String, oute

  io.inner.grant.bits.header.src := UInt(bankId)
  io.inner.grant.bits.header.dst := xact_src
  io.inner.grant.bits.payload := Grant(Bool(false),
                                       co.getGrantTypeOnVoluntaryWriteback(co.managerMetadataOnFlush),
                                       xact_client_xact_id,
                                       UInt(trackerId))
  io.inner.grant.bits.payload := xact.makeGrant(UInt(trackerId))

  io.outer.acquire.bits.payload := Bundle(UncachedWrite(
                                            xact_addr,
                                            UInt(trackerId),
                                            xact_data(outer_data_cnt)),
  io.outer.acquire.bits.payload := Bundle(UncachedWriteBlock(
                                            client_xact_id = UInt(trackerId),
                                            addr_block = xact_addr_block,
                                            addr_beat = outer_data_cnt,
                                            data = xact_data(outer_data_cnt)),
                                          { case TLId => outerId })

  when(collect_inner_data) {
    io.inner.release.ready := Bool(true)
    when(io.inner.release.valid) {
      xact_data(inner_data_cnt) := c_rel.payload.data
      xact_data(inner_data_cnt) := crel.payload.data
    }
    when(inner_data_done) { collect_inner_data := Bool(false) }
  }
@ -216,72 +222,107 @@ class VoluntaryReleaseTracker(trackerId: Int, bankId: Int, innerId: String, oute
    is(s_idle) {
      io.inner.release.ready := Bool(true)
      when( io.inner.release.valid ) {
        xact_src := c_rel.header.src
        xact_r_type := c_rel.payload.r_type
        xact_addr := c_rel.payload.addr
        xact_client_xact_id := c_rel.payload.client_xact_id
        xact_data(UInt(0)) := c_rel.payload.data
        collect_inner_data := co.messageHasData(c_rel.payload)
        state := Mux(co.messageHasData(c_rel.payload), s_outer, s_ack)
        xact_src := crel.header.src
        xact_r_type := crel.payload.r_type
        xact_addr_block := crel.payload.addr_block
        xact_client_xact_id := crel.payload.client_xact_id
        xact_data(UInt(0)) := crel.payload.data
        collect_inner_data := crel.payload.hasMultibeatData()
        state := Mux(crel.payload.hasData(), s_outer,
                   Mux(crel.payload.requiresAck(), s_ack, s_idle))
      }
    }
    is(s_outer) {
      io.outer.acquire.valid := (if(tlDataBeats == 1) Bool(true)
                                 else !collect_inner_data || (outer_data_cnt < inner_data_cnt))
      when(outer_data_done) { state := s_ack }
      when(outer_data_done) {
        state := Mux(xact.requiresAck(), s_grant, s_idle)
      }
    }
    is(s_grant) {
      io.inner.grant.valid := Bool(true)
      when(io.inner.grant.ready) {
        state := Mux(cgnt.payload.requiresAck(), s_ack, s_idle)
      }
    }
    is(s_ack) {
      io.inner.grant.valid := Bool(true)
      when(io.inner.grant.ready) { state := s_idle }
      // TODO: This state is unnecessary if no client will ever issue the
      // pending Acquire that caused this writeback until it receives the
      // Grant.voluntaryAck for this writeback
      io.inner.finish.ready := Bool(true)
      when(io.inner.finish.valid) { state := s_idle }
    }
  }
}
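
// Editor's note (not part of this commit): the resulting state machine is
//   s_idle -> s_outer (stream beats to outer memory) -> s_grant (ack the
//   writeback) -> s_ack (await Finish) -> s_idle,
// skipping s_outer for dataless releases and skipping s_grant/s_ack when no
// acknowledgement is required.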

class AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: String) extends XactTracker(innerId, outerId) {
  val s_idle :: s_probe :: s_mem_read :: s_mem_write :: s_make_grant :: s_busy :: Nil = Enum(UInt(), 6)
  val s_idle :: s_probe :: s_mem_read :: s_mem_write :: s_make_grant :: s_mem_resp :: s_ack :: Nil = Enum(UInt(), 7)
  val state = Reg(init=s_idle)

  val xact_src = Reg(io.inner.acquire.bits.header.src.clone)
  val xact_uncached = Reg(io.inner.acquire.bits.payload.uncached.clone)
  val xact_a_type = Reg(io.inner.acquire.bits.payload.a_type.clone)
  val xact_addr = Reg(io.inner.acquire.bits.payload.addr.clone)
  val xact_client_xact_id = Reg(io.inner.acquire.bits.payload.client_xact_id.clone)
  val xact_addr_block = Reg(io.inner.acquire.bits.payload.addr_block.clone)
  val xact_addr_beat = Reg(io.inner.acquire.bits.payload.addr_beat.clone)
  val xact_subblock = Reg(io.inner.acquire.bits.payload.subblock.clone)
  val xact_data = Vec.fill(tlDataBeats){ Reg(io.inner.acquire.bits.payload.data.clone) }
  val xact = Acquire(xact_uncached, xact_a_type, xact_addr, xact_client_xact_id, UInt(0), xact_subblock)
  val xact = Acquire(
    uncached = xact_uncached,
    a_type = xact_a_type,
    client_xact_id = xact_client_xact_id,
    addr_block = xact_addr_block,
    addr_beat = xact_addr_beat,
    data = UInt(0),
    subblock = xact_subblock)

  val collect_inner_data = Reg(init=Bool(false))
  //TODO: Assert that if xact.uncached, xact_a_type is ReadBlock or WriteBlock
  val (inner_data_cnt, inner_data_done) =
    Counter(io.inner.acquire.fire() && co.messageHasData(io.inner.acquire.bits.payload), tlDataBeats)
    Counter(io.inner.acquire.fire() && cacq.payload.hasMultibeatData(), tlDataBeats)
  val (outer_data_cnt, outer_data_done) =
    Counter(io.outer.acquire.fire() && co.messageHasData(io.outer.acquire.bits.payload), tlDataBeats)
    Counter(io.outer.acquire.fire() && macq.payload.hasMultibeatData(), tlDataBeats)
  val (cgnt_data_cnt, cgnt_data_done) =
    Counter(io.inner.grant.fire() && cgnt.payload.hasMultibeatData(), tlDataBeats)


  val release_count = Reg(init=UInt(0, width = log2Up(nClients)))
  val probe_flags = Reg(init=Bits(0, width = nClients))
  val curr_p_id = PriorityEncoder(probe_flags)

  val pending_outer_write = co.messageHasData(xact)
  val pending_outer_write = xact.hasData()
  val pending_outer_read = co.requiresOuterRead(xact, co.managerMetadataOnFlush)

  val probe_initial_flags = Bits(width = nClients)
  probe_initial_flags := Bits(0)
  // issue self-probes for uncached read xacts to facilitate I$ coherence
  val probe_self = co.requiresSelfProbe(io.inner.acquire.bits.payload)
  val myflag = Mux(probe_self, Bits(0), UIntToOH(c_acq.header.src(log2Up(nClients)-1,0)))
  val probe_self = io.inner.acquire.bits.payload.requiresSelfProbe()
  val myflag = Mux(probe_self, Bits(0), UIntToOH(cacq.header.src(log2Up(nClients)-1,0)))
  probe_initial_flags := ~(io.tile_incoherent | myflag)

  io.has_acquire_conflict := co.isCoherenceConflict(xact_addr, c_acq.payload.addr) &&
  io.has_acquire_conflict := co.isCoherenceConflict(xact_addr_block, cacq.payload.addr_block) &&
                               (state != s_idle) &&
                               !collect_inner_data
  io.has_release_match := co.isCoherenceConflict(xact_addr, c_rel.payload.addr) &&
                            !co.isVoluntary(c_rel.payload) &&
  io.has_release_match := co.isCoherenceConflict(xact_addr_block, crel.payload.addr_block) &&
                            !crel.payload.isVoluntary() &&
                            (state != s_idle)

  val outer_write_acq = Bundle(UncachedWrite(xact_addr, UInt(trackerId), xact_data(outer_data_cnt)),
  val outer_write_acq = Bundle(UncachedWriteBlock(
                                 client_xact_id = UInt(trackerId),
                                 addr_block = xact_addr_block,
                                 addr_beat = outer_data_cnt,
                                 data = xact_data(outer_data_cnt)),
                               { case TLId => outerId })
  val outer_write_rel = Bundle(UncachedWrite(xact_addr, UInt(trackerId), c_rel.payload.data),
  val outer_write_rel = Bundle(UncachedWriteBlock(
                                 client_xact_id = UInt(trackerId),
                                 addr_block = xact_addr_block,
                                 addr_beat = crel.payload.addr_beat,
                                 data = crel.payload.data),
                               { case TLId => outerId })
  val outer_read = Bundle(UncachedReadBlock(
                            client_xact_id = UInt(trackerId),
                            addr_block = xact_addr_block),
                          { case TLId => outerId })
  val outer_read = Bundle(UncachedRead(xact_addr, UInt(trackerId)), { case TLId => outerId })

  io.outer.acquire.valid := Bool(false)
  io.outer.acquire.bits.payload := outer_read //default
@ -290,16 +331,12 @@ class AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: Stri
  io.inner.probe.valid := Bool(false)
  io.inner.probe.bits.header.src := UInt(bankId)
  io.inner.probe.bits.header.dst := curr_p_id
  io.inner.probe.bits.payload := Probe(co.getProbeType(xact, co.managerMetadataOnFlush), xact_addr)
  io.inner.probe.bits.payload := xact.makeProbe()

  io.inner.grant.valid := Bool(false)
  io.inner.grant.bits.header.src := UInt(bankId)
  io.inner.grant.bits.header.dst := xact_src
  io.inner.grant.bits.payload := Grant(xact_uncached,
                                       co.getGrantType(xact, co.managerMetadataOnFlush),
                                       xact_client_xact_id,
                                       UInt(trackerId),
                                       UInt(0)) // Data bypassed in parent
  io.inner.grant.bits.payload := xact.makeGrant(UInt(trackerId)) // Data bypassed in parent

  io.inner.acquire.ready := Bool(false)
  io.inner.release.ready := Bool(false)
@ -307,7 +344,7 @@ class AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: Stri
  when(collect_inner_data) {
    io.inner.acquire.ready := Bool(true)
    when(io.inner.acquire.valid) {
      xact_data(inner_data_cnt) := c_acq.payload.data
      xact_data(inner_data_cnt) := cacq.payload.data
    }
    when(inner_data_done) { collect_inner_data := Bool(false) }
  }
@ -315,17 +352,18 @@ class AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: Stri
  switch (state) {
    is(s_idle) {
      io.inner.acquire.ready := Bool(true)
      val needs_outer_write = co.messageHasData(c_acq.payload)
      val needs_outer_read = co.requiresOuterRead(c_acq.payload, co.managerMetadataOnFlush)
      when( io.inner.acquire.valid ) {
        xact_uncached := c_acq.payload.uncached
        xact_a_type := c_acq.payload.a_type
        xact_addr := c_acq.payload.addr
        xact_client_xact_id := c_acq.payload.client_xact_id
        xact_data(UInt(0)) := c_acq.payload.data
        xact_subblock := c_acq.payload.subblock
        xact_src := c_acq.header.src
        collect_inner_data := co.messageHasData(c_acq.payload)
      val needs_outer_write = cacq.payload.hasData()
      val needs_outer_read = co.requiresOuterRead(cacq.payload, co.managerMetadataOnFlush)
      when(io.inner.acquire.valid) {
        xact_uncached := cacq.payload.uncached
        xact_a_type := cacq.payload.a_type
        xact_addr_block := cacq.payload.addr_block
        xact_addr_beat := cacq.payload.addr_beat
        xact_client_xact_id := cacq.payload.client_xact_id
        xact_data(UInt(0)) := cacq.payload.data
        xact_subblock := cacq.payload.subblock
        xact_src := cacq.header.src
        collect_inner_data := cacq.payload.hasMultibeatData()
        probe_flags := probe_initial_flags
        release_count := PopCount(probe_initial_flags)
        state := Mux(probe_initial_flags.orR, s_probe,
@ -341,9 +379,9 @@ class AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: Stri
      }

      // Handle releases, which may have data to be written back
      io.inner.release.ready := !co.messageHasData(c_rel.payload) || io.outer.acquire.ready
      io.inner.release.ready := !crel.payload.hasData() || io.outer.acquire.ready
      when(io.inner.release.valid) {
        when(co.messageHasData(c_rel.payload)) {
        when(crel.payload.hasData()) {
          io.outer.acquire.valid := Bool(true)
          io.outer.acquire.bits.payload := outer_write_rel
          when(io.outer.acquire.ready) {
@ -364,14 +402,12 @@ class AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: Stri
          }
        }
      }
    is(s_mem_read) {
    is(s_mem_read) { // Read data from outer memory (possibly what was just written)
      io.outer.acquire.valid := Bool(true)
      io.outer.acquire.bits.payload := outer_read
      when(io.outer.acquire.ready) {
        state := Mux(co.requiresAckForGrant(io.inner.grant.bits.payload), s_busy, s_idle)
      }
      when(io.outer.acquire.ready) { state := s_mem_resp }
    }
    is(s_mem_write) {
    is(s_mem_write) { // Write data to outer memory
      io.outer.acquire.valid := (if(tlDataBeats == 1) Bool(true)
                                 else !collect_inner_data || (outer_data_cnt < inner_data_cnt))
      io.outer.acquire.bits.payload := outer_write_acq
@ -379,17 +415,22 @@ class AcquireTracker(trackerId: Int, bankId: Int, innerId: String, outerId: Stri
        state := Mux(pending_outer_read, s_mem_read, s_make_grant)
      }
    }
    is(s_make_grant) {
    is(s_make_grant) { // Manufacture a local grant (some kind of permission upgrade)
      io.inner.grant.valid := Bool(true)
      when(io.inner.grant.ready) {
        state := Mux(co.requiresAckForGrant(io.inner.grant.bits.payload), s_busy, s_idle)
        state := Mux(cgnt.payload.requiresAck(), s_ack, s_idle)
      }
    }
    is(s_busy) { // Nothing left to do but wait for transaction to complete
      when(io.outer.grant.valid && m_gnt.payload.client_xact_id === UInt(trackerId)) {
    is(s_mem_resp) { // Wait to forward grants from outer memory
      when(io.outer.grant.valid && mgnt.payload.client_xact_id === UInt(trackerId)) {
        io.inner.grant.valid := Bool(true)
      }
      when(io.inner.finish.valid && c_ack.payload.manager_xact_id === UInt(trackerId)) {
      when(cgnt_data_done) {
        state := Mux(cgnt.payload.requiresAck(), s_ack, s_idle)
      }
    }
    is(s_ack) { // Wait for transaction to complete
      when(io.inner.finish.valid && cfin.payload.manager_xact_id === UInt(trackerId)) {
        state := s_idle
      }
    }
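
// Editor's note (not part of this commit): for a miss that must write back and
// refill, the tracker now walks
//   s_idle -> (s_probe) -> s_mem_write -> s_mem_read -> s_mem_resp -> s_ack -> s_idle,
// with s_make_grant short-circuiting the outer read for pure permission
// upgrades; the old catch-all s_busy state is split into s_mem_resp and s_ack.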

@ -34,7 +34,7 @@ object ZCounter {
  }
}

class FlowThroughSerializer[T <: HasTileLinkData](gen: LogicalNetworkIO[T], n: Int, doSer: T => Bool) extends Module {
class FlowThroughSerializer[T <: HasTileLinkData](gen: LogicalNetworkIO[T], n: Int) extends Module {
  val io = new Bundle {
    val in = Decoupled(gen.clone).flip
    val out = Decoupled(gen.clone)
@ -65,12 +65,12 @@ class FlowThroughSerializer[T <: HasTileLinkData](gen: LogicalNetworkIO[T], n: I
  io.out.valid := active || io.in.valid
  io.out.bits := io.in.bits
  when(!active && io.in.valid) {
    when(doSer(io.in.bits.payload)) {
    when(io.in.bits.payload.hasData()) {
      cnt := Mux(io.out.ready, UInt(1), UInt(0))
      rbits := io.in.bits
      active := Bool(true)
    }
    io.done := !doSer(io.in.bits.payload)
    io.done := !io.in.bits.payload.hasData()
  }
  when(active) {
    io.out.bits := rbits