Refactor the TransactionTracker logic in all of the L2 TileLink managers.
They now share common sub-transactions via traits, and use a common set of state transitions and scoreboarding logic. Tracker allocation logic has also been updated. There are no changes to the external IOs or to the TileLink protocol. A new bufferless broadcast hub is also included, but it does not yet pass fuzzing checks.
This commit is contained in:
parent 2d2096e509
commit 16bfbda3c9
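Before the diff, a minimal, plain-Scala sketch of the composition pattern this refactor moves the trackers toward: shared sub-transactions live in traits, each trait registers the conditions it is still waiting on in a common scoreboard, and a concrete tracker is just the mix of those traits. This is an illustration only, not the actual uncore code; the real trackers use Chisel hardware types (Bool, Reg, Wire) and all names below are invented for the sketch.

object TrackerSketch {
  trait HasScoreboard {
    // Each mixed-in sub-transaction trait appends its pending conditions here.
    val scoreboard = scala.collection.mutable.ListBuffer.empty[() => Boolean]
    def allPendingDone: Boolean = !scoreboard.exists(f => f())
  }

  trait AcceptsInnerReleases extends HasScoreboard {
    var pendingIrelData = false // beats of inner release data still expected
    def innerRelease(): Unit = scoreboard += (() => pendingIrelData)
  }

  trait EmitsOuterReleases extends HasScoreboard {
    var pendingOrel = false // outer writeback still outstanding
    def outerRelease(): Unit = scoreboard += (() => pendingOrel)
  }

  // A concrete tracker is just the mix of the sub-transaction traits.
  class VoluntaryReleaseTrackerSketch
      extends AcceptsInnerReleases with EmitsOuterReleases {
    innerRelease()
    outerRelease()
    def quiesce(): Boolean = allPendingDone // "return to s_idle" when true
  }

  def main(args: Array[String]): Unit = {
    val t = new VoluntaryReleaseTrackerSketch
    t.pendingIrelData = true
    println(t.quiesce()) // false: still waiting on inner release data
    t.pendingIrelData = false
    println(t.quiesce()) // true: all sub-transactions have completed
  }
}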
@@ -4,390 +4,198 @@ package uncore
|
||||
import Chisel._
|
||||
import cde.{Parameters, Field}
|
||||
|
||||
case object L2StoreDataQueueDepth extends Field[Int]
|
||||
class L2BroadcastHub(implicit p: Parameters) extends HierarchicalCoherenceAgent()(p) {
|
||||
|
||||
trait HasBroadcastHubParameters extends HasCoherenceAgentParameters {
|
||||
val sdqDepth = p(L2StoreDataQueueDepth)*innerDataBeats
|
||||
val dqIdxBits = math.max(log2Up(nReleaseTransactors) + 1, log2Up(sdqDepth))
|
||||
val nDataQueueLocations = 3 //Stores, VoluntaryWBs, Releases
|
||||
}
|
||||
|
||||
class DataQueueLocation(implicit p: Parameters) extends CoherenceAgentBundle()(p)
|
||||
with HasBroadcastHubParameters {
|
||||
val idx = UInt(width = dqIdxBits)
|
||||
val loc = UInt(width = log2Ceil(nDataQueueLocations))
|
||||
}
|
||||
|
||||
object DataQueueLocation {
|
||||
def apply(idx: UInt, loc: UInt)(implicit p: Parameters) = {
|
||||
val d = Wire(new DataQueueLocation)
|
||||
d.idx := idx
|
||||
d.loc := loc
|
||||
d
|
||||
}
|
||||
}
|
||||
|
||||
class L2BroadcastHub(implicit p: Parameters) extends ManagerCoherenceAgent()(p)
|
||||
with HasBroadcastHubParameters {
|
||||
val internalDataBits = new DataQueueLocation().getWidth
|
||||
val inStoreQueue :: inVolWBQueue :: inClientReleaseQueue :: Nil = Enum(UInt(), nDataQueueLocations)
|
||||
|
||||
val usingStoreDataQueue = p.alterPartial({
|
||||
case TLKey(`innerTLId`) => innerTLParams.copy(overrideDataBitsPerBeat = Some(internalDataBits))
|
||||
case TLKey(`outerTLId`) => outerTLParams.copy(overrideDataBitsPerBeat = Some(internalDataBits))
|
||||
})
|
||||
|
||||
// Create SHRs for outstanding transactions
|
||||
val trackerList =
|
||||
// Create TSHRs for outstanding transactions
|
||||
val irelTrackerList =
|
||||
(0 until nReleaseTransactors).map(id =>
|
||||
Module(new BroadcastVoluntaryReleaseTracker(id)(usingStoreDataQueue))) ++
|
||||
Module(new BufferedBroadcastVoluntaryReleaseTracker(id)))
|
||||
val iacqTrackerList =
|
||||
(nReleaseTransactors until nTransactors).map(id =>
|
||||
Module(new BroadcastAcquireTracker(id)(usingStoreDataQueue)))
|
||||
Module(new BufferedBroadcastAcquireTracker(id)))
|
||||
val trackerList = irelTrackerList ++ iacqTrackerList
|
||||
|
||||
// Propagate incoherence flags
|
||||
trackerList.map(_.io.incoherent := io.incoherent)
|
||||
trackerList.map(_.io.incoherent) foreach { _ := io.incoherent }
|
||||
|
||||
// Queue to store impending Put data
|
||||
val sdq = Reg(Vec(sdqDepth, io.iacq().data))
|
||||
val sdq_val = Reg(init=Bits(0, sdqDepth))
|
||||
val sdq_alloc_id = PriorityEncoder(~sdq_val)
|
||||
val sdq_rdy = !sdq_val.andR
|
||||
val sdq_enq = trackerList.map( t =>
|
||||
(t.io.alloc.iacq || t.io.matches.iacq) &&
|
||||
t.io.inner.acquire.fire() &&
|
||||
t.io.iacq().hasData()
|
||||
).reduce(_||_)
|
||||
when (sdq_enq) { sdq(sdq_alloc_id) := io.iacq().data }
|
||||
// Create an arbiter for the one memory port
|
||||
val outerList = trackerList.map(_.io.outer)
|
||||
val outer_arb = Module(new ClientTileLinkIOArbiter(outerList.size)
|
||||
(p.alterPartial({ case TLId => p(OuterTLId) })))
|
||||
outer_arb.io.in <> outerList
|
||||
io.outer <> outer_arb.io.out
|
||||
|
||||
// Handle acquire transaction initiation
|
||||
val irel_vs_iacq_conflict =
|
||||
io.inner.acquire.valid &&
|
||||
io.inner.release.valid &&
|
||||
io.irel().conflicts(io.iacq())
|
||||
val sdqLoc = List.fill(nTransactors) {
|
||||
DataQueueLocation(sdq_alloc_id, inStoreQueue).toBits
|
||||
}
|
||||
doInputRoutingWithAllocation(
|
||||
io.inner.acquire,
|
||||
trackerList.map(_.io.inner.acquire),
|
||||
trackerList.map(_.io.matches.iacq),
|
||||
trackerList.map(_.io.alloc.iacq),
|
||||
Some(sdqLoc),
|
||||
Some(sdq_rdy && !irel_vs_iacq_conflict),
|
||||
Some(sdq_rdy))
|
||||
io.inner.acquire.valid &&
|
||||
io.inner.release.valid &&
|
||||
io.irel().conflicts(io.iacq())
|
||||
|
||||
// Queue to store impending Voluntary Release data
|
||||
val voluntary = io.irel().isVoluntary()
|
||||
val vwbdq_enq = io.inner.release.fire() && voluntary && io.irel().hasData()
|
||||
val (rel_data_cnt, rel_data_done) = Counter(vwbdq_enq, innerDataBeats) //TODO Zero width
|
||||
val vwbdq = Reg(Vec(innerDataBeats, io.irel().data)) //TODO Assumes nReleaseTransactors == 1
|
||||
when(vwbdq_enq) { vwbdq(rel_data_cnt) := io.irel().data }
|
||||
doInputRoutingWithAllocation(
|
||||
in = io.inner.acquire,
|
||||
outs = trackerList.map(_.io.inner.acquire),
|
||||
allocs = trackerList.map(_.io.alloc_iacq),
|
||||
allocOverride = !irel_vs_iacq_conflict)
|
||||
|
||||
// Handle releases, which might be voluntary and might have data
|
||||
val vwbqLoc = (0 until nTransactors).map(i =>
|
||||
(DataQueueLocation(rel_data_cnt,
|
||||
(if(i < nReleaseTransactors) inVolWBQueue
|
||||
else inClientReleaseQueue)).toBits))
|
||||
doInputRoutingWithAllocation(
|
||||
io.inner.release,
|
||||
trackerList.map(_.io.inner.release),
|
||||
trackerList.map(_.io.matches.irel),
|
||||
trackerList.map(_.io.alloc.irel),
|
||||
Some(vwbqLoc))
|
||||
in = io.inner.release,
|
||||
outs = trackerList.map(_.io.inner.release),
|
||||
allocs = trackerList.map(_.io.alloc_irel))
|
||||
|
||||
// Wire probe requests and grant reply to clients, finish acks from clients
|
||||
// Note that we bypass the Grant data subbundles
|
||||
doOutputArbitration(io.inner.grant, trackerList.map(_.io.inner.grant))
|
||||
io.inner.grant.bits.data := io.outer.grant.bits.data
|
||||
io.inner.grant.bits.addr_beat := io.outer.grant.bits.addr_beat
|
||||
doOutputArbitration(io.inner.probe, trackerList.map(_.io.inner.probe))
|
||||
|
||||
doOutputArbitration(io.inner.grant, trackerList.map(_.io.inner.grant))
|
||||
|
||||
doInputRouting(io.inner.finish, trackerList.map(_.io.inner.finish))
|
||||
|
||||
// Create an arbiter for the one memory port
|
||||
val outer_arb = Module(new ClientUncachedTileLinkIOArbiter(trackerList.size)
|
||||
(usingStoreDataQueue.alterPartial({ case TLId => p(OuterTLId) })))
|
||||
outer_arb.io.in <> trackerList.map(_.io.outer)
|
||||
// Get the pending data out of the store data queue
|
||||
val outer_data_ptr = new DataQueueLocation().fromBits(outer_arb.io.out.acquire.bits.data)
|
||||
val is_in_sdq = outer_data_ptr.loc === inStoreQueue
|
||||
val free_sdq = io.outer.acquire.fire() &&
|
||||
io.outer.acquire.bits.hasData() &&
|
||||
outer_data_ptr.loc === inStoreQueue
|
||||
io.outer <> outer_arb.io.out
|
||||
io.outer.acquire.bits.data := MuxLookup(outer_data_ptr.loc, io.irel().data, Array(
|
||||
inStoreQueue -> sdq(outer_data_ptr.idx),
|
||||
inVolWBQueue -> vwbdq(outer_data_ptr.idx)))
|
||||
|
||||
// Update SDQ valid bits
|
||||
when (io.outer.acquire.valid || sdq_enq) {
|
||||
sdq_val := sdq_val & ~(UIntToOH(outer_data_ptr.idx) & Fill(sdqDepth, free_sdq)) |
|
||||
PriorityEncoderOH(~sdq_val(sdqDepth-1,0)) & Fill(sdqDepth, sdq_enq)
|
||||
}
|
||||
}
|
||||
|
||||
class BroadcastXactTracker(implicit p: Parameters) extends XactTracker()(p) {
|
||||
val io = new ManagerXactTrackerIO
|
||||
val io = new HierarchicalXactTrackerIO
|
||||
pinAllReadyValidLow(io)
|
||||
}
|
||||
|
||||
class BroadcastVoluntaryReleaseTracker(trackerId: Int)
|
||||
(implicit p: Parameters) extends BroadcastXactTracker()(p) {
|
||||
val s_idle :: s_busy :: Nil = Enum(UInt(), 2)
|
||||
val state = Reg(init=s_idle)
|
||||
trait BroadcastsToAllClients extends HasCoherenceAgentParameters {
|
||||
val coh = HierarchicalMetadata.onReset
|
||||
val inner_coh = coh.inner
|
||||
val outer_coh = coh.outer
|
||||
def full_representation = ~UInt(0, width = innerNCachingClients)
|
||||
}
|
||||
|
||||
val xact = Reg(new BufferedReleaseFromSrc()(p.alterPartial({ case TLId => innerTLId })))
|
||||
val coh = ManagerMetadata.onReset
|
||||
|
||||
val pending_irels = Reg(init=Bits(0, width = io.inner.tlDataBeats))
|
||||
val pending_writes = Reg(init=Bits(0, width = io.outer.tlDataBeats))
|
||||
val pending_ignt = Reg(init=Bool(false))
|
||||
|
||||
val all_pending_done = !(pending_irels.orR || pending_writes.orR || pending_ignt)
|
||||
|
||||
// Accept a voluntary Release (and any further beats of data)
|
||||
pending_irels := (pending_irels & dropPendingBitWhenBeatHasData(io.inner.release))
|
||||
io.inner.release.ready := ((state === s_idle) && io.irel().isVoluntary()) || pending_irels.orR
|
||||
when(io.inner.release.fire()) { xact.data_buffer(io.irel().addr_beat) := io.irel().data }
|
||||
|
||||
// Write the voluntarily written back data to outer memory using an Acquire.PutBlock
|
||||
//TODO: Use io.outer.release instead?
|
||||
pending_writes := (pending_writes & dropPendingBitWhenBeatHasData(io.outer.acquire)) |
|
||||
addPendingBitWhenBeatHasData(io.inner.release)
|
||||
val curr_write_beat = PriorityEncoder(pending_writes)
|
||||
io.outer.acquire.valid := state === s_busy && pending_writes.orR
|
||||
io.outer.acquire.bits := PutBlock(
|
||||
client_xact_id = UInt(trackerId),
|
||||
addr_block = xact.addr_block,
|
||||
addr_beat = curr_write_beat,
|
||||
data = xact.data_buffer(curr_write_beat))
|
||||
(p.alterPartial({ case TLId => outerTLId }))
|
||||
|
||||
// Send an acknowledgement
|
||||
io.inner.grant.valid := state === s_busy && pending_ignt && !pending_irels && io.outer.grant.valid
|
||||
io.inner.grant.bits := coh.makeGrant(xact)
|
||||
when(io.inner.grant.fire()) { pending_ignt := Bool(false) }
|
||||
io.outer.grant.ready := state === s_busy && io.inner.grant.ready
|
||||
|
||||
// State machine updates and transaction handler metadata initialization
|
||||
when(state === s_idle && io.inner.release.valid && io.alloc.irel) {
|
||||
xact := io.irel()
|
||||
when(io.irel().hasMultibeatData()) {
|
||||
pending_irels := dropPendingBitWhenBeatHasData(io.inner.release)
|
||||
}.otherwise {
|
||||
pending_irels := UInt(0)
|
||||
}
|
||||
pending_writes := addPendingBitWhenBeatHasData(io.inner.release)
|
||||
pending_ignt := io.irel().requiresAck()
|
||||
state := s_busy
|
||||
}
|
||||
when(state === s_busy && all_pending_done) { state := s_idle }
|
||||
|
||||
// These IOs are used for routing in the parent
|
||||
io.matches.iacq := (state =/= s_idle) && xact.conflicts(io.iacq())
|
||||
io.matches.irel := (state =/= s_idle) && xact.conflicts(io.irel()) && io.irel().isVoluntary()
|
||||
io.matches.oprb := Bool(false)
|
||||
abstract class BroadcastVoluntaryReleaseTracker(trackerId: Int)(implicit p: Parameters)
|
||||
extends VoluntaryReleaseTracker(trackerId)(p)
|
||||
with EmitsVoluntaryReleases
|
||||
with BroadcastsToAllClients {
|
||||
val io = new HierarchicalXactTrackerIO
|
||||
pinAllReadyValidLow(io)
|
||||
|
||||
// Checks for illegal behavior
|
||||
assert(!(state === s_idle && io.inner.release.fire() && !io.irel().isVoluntary()),
|
||||
assert(!(state === s_idle && io.inner.release.fire() && io.alloc_irel.should && !io.irel().isVoluntary()),
|
||||
"VoluntaryReleaseTracker accepted Release that wasn't voluntary!")
|
||||
}
|
||||
|
||||
class BroadcastAcquireTracker(trackerId: Int)
|
||||
(implicit p: Parameters) extends BroadcastXactTracker()(p) {
|
||||
val s_idle :: s_probe :: s_mem_read :: s_mem_write :: s_make_grant :: s_mem_resp :: s_ack :: Nil = Enum(UInt(), 7)
|
||||
val state = Reg(init=s_idle)
|
||||
abstract class BroadcastAcquireTracker(trackerId: Int)(implicit p: Parameters)
|
||||
extends AcquireTracker(trackerId)(p)
|
||||
with EmitsVoluntaryReleases
|
||||
with BroadcastsToAllClients {
|
||||
val io = new HierarchicalXactTrackerIO
|
||||
pinAllReadyValidLow(io)
|
||||
|
||||
val xact = Reg(new BufferedAcquireFromSrc()(p.alterPartial({ case TLId => innerTLId })))
|
||||
val coh = ManagerMetadata.onReset
|
||||
val alwaysWriteFullBeat = false
|
||||
val nSecondaryMisses = 1
|
||||
def iacq_can_merge = Bool(false)
|
||||
|
||||
assert(!(state =/= s_idle && xact.isBuiltInType() &&
|
||||
Vec(Acquire.putAtomicType, Acquire.getPrefetchType, Acquire.putPrefetchType).contains(xact.a_type)),
|
||||
"Broadcast Hub does not support PutAtomics or prefetches") // TODO
|
||||
|
||||
val release_count = Reg(init=UInt(0, width = log2Up(io.inner.tlNCachingClients+1)))
|
||||
val pending_probes = Reg(init=Bits(0, width = io.inner.tlNCachingClients))
|
||||
val curr_p_id = PriorityEncoder(pending_probes)
|
||||
val mask_self = SInt(-1, width = io.inner.tlNCachingClients)
|
||||
.toUInt
|
||||
.bitSet(io.inner.acquire.bits.client_id, io.inner.acquire.bits.requiresSelfProbe())
|
||||
val mask_incoherent = mask_self & ~io.incoherent.toBits
|
||||
|
||||
val collect_iacq_data = Reg(init=Bool(false))
|
||||
val iacq_data_valid = Reg(init=Bits(0, width = innerDataBeats))
|
||||
val iacq_data_done = connectIncomingDataBeatCounter(io.inner.acquire)
|
||||
val irel_data_done = connectIncomingDataBeatCounter(io.inner.release)
|
||||
val (ignt_data_cnt, ignt_data_done) = connectOutgoingDataBeatCounter(io.inner.grant)
|
||||
val (oacq_data_cnt, oacq_data_done) = connectOutgoingDataBeatCounter(io.outer.acquire)
|
||||
val ognt_data_done = connectIncomingDataBeatCounter(io.outer.grant)
|
||||
val pending_ognt_ack = Reg(init=Bool(false))
|
||||
val pending_outer_write = xact.hasData()
|
||||
val pending_outer_write_ = io.iacq().hasData()
|
||||
val pending_outer_read = io.ignt().hasData()
|
||||
val pending_outer_read_ = coh.makeGrant(io.iacq(), UInt(trackerId)).hasData()
|
||||
val subblock_type = xact.isSubBlockType()
|
||||
|
||||
// These IOs are used for routing in the parent
|
||||
io.matches.iacq := (state =/= s_idle) && xact.conflicts(io.iacq())
|
||||
io.matches.irel := (state =/= s_idle) && xact.conflicts(io.irel()) && !io.irel().isVoluntary()
|
||||
io.matches.oprb := Bool(false)
|
||||
|
||||
val outerParams = p.alterPartial({ case TLId => outerTLId })
|
||||
|
||||
val oacq_probe = PutBlock(
|
||||
client_xact_id = UInt(trackerId),
|
||||
addr_block = io.irel().addr_block,
|
||||
addr_beat = io.irel().addr_beat,
|
||||
data = io.irel().data)(outerParams)
|
||||
|
||||
val oacq_write_beat = Put(
|
||||
client_xact_id = UInt(trackerId),
|
||||
addr_block = xact.addr_block,
|
||||
addr_beat = xact.addr_beat,
|
||||
data = xact.data_buffer(0),
|
||||
wmask = xact.wmask())(outerParams)
|
||||
|
||||
val oacq_write_block = PutBlock(
|
||||
client_xact_id = UInt(trackerId),
|
||||
addr_block = xact.addr_block,
|
||||
addr_beat = oacq_data_cnt,
|
||||
data = xact.data_buffer(oacq_data_cnt),
|
||||
wmask = xact.wmask_buffer(oacq_data_cnt))(outerParams)
|
||||
|
||||
val oacq_read_beat = Get(
|
||||
client_xact_id = UInt(trackerId),
|
||||
addr_block = xact.addr_block,
|
||||
addr_beat = xact.addr_beat,
|
||||
addr_byte = xact.addr_byte(),
|
||||
operand_size = xact.op_size(),
|
||||
alloc = Bool(false))(outerParams)
|
||||
|
||||
val oacq_read_block = GetBlock(
|
||||
client_xact_id = UInt(trackerId),
|
||||
addr_block = xact.addr_block)(outerParams)
|
||||
|
||||
io.outer.acquire.valid := Bool(false)
|
||||
io.outer.acquire.bits := Mux(state === s_probe, oacq_probe,
|
||||
Mux(state === s_mem_write,
|
||||
Mux(subblock_type, oacq_write_beat, oacq_write_block),
|
||||
Mux(subblock_type, oacq_read_beat, oacq_read_block)))
|
||||
io.outer.grant.ready := Bool(false)
|
||||
|
||||
io.inner.probe.valid := Bool(false)
|
||||
io.inner.probe.bits := coh.makeProbe(curr_p_id, xact)
|
||||
|
||||
io.inner.grant.valid := Bool(false)
|
||||
io.inner.grant.bits := coh.makeGrant(xact, UInt(trackerId)) // Data bypassed in parent
|
||||
|
||||
io.inner.acquire.ready := Bool(false)
|
||||
io.inner.release.ready := Bool(false)
|
||||
io.inner.finish.ready := Bool(false)
|
||||
|
||||
assert(!(state =/= s_idle && collect_iacq_data && io.inner.acquire.fire() &&
|
||||
io.iacq().client_id =/= xact.client_id),
|
||||
"AcquireTracker accepted data beat from different network source than initial request.")
|
||||
|
||||
assert(!(state =/= s_idle && collect_iacq_data && io.inner.acquire.fire() &&
|
||||
io.iacq().client_xact_id =/= xact.client_xact_id),
|
||||
"AcquireTracker accepted data beat from different client transaction than initial request.")
|
||||
|
||||
assert(!(state === s_idle && io.inner.acquire.fire() && io.alloc.iacq &&
|
||||
io.iacq().hasMultibeatData() && io.iacq().addr_beat =/= UInt(0)),
|
||||
// Checks for illegal behavior
|
||||
// TODO: this could be allowed, but is a useful check against allocation gone wild
|
||||
assert(!(state === s_idle && io.inner.acquire.fire() && io.alloc_iacq.should &&
|
||||
io.iacq().hasMultibeatData() && !io.iacq().first()),
|
||||
"AcquireTracker initialized with a tail data beat.")
|
||||
|
||||
when(collect_iacq_data) {
|
||||
io.inner.acquire.ready := Bool(true)
|
||||
when(io.inner.acquire.valid) {
|
||||
xact.data_buffer(io.iacq().addr_beat) := io.iacq().data
|
||||
xact.wmask_buffer(io.iacq().addr_beat) := io.iacq().wmask()
|
||||
iacq_data_valid := iacq_data_valid.bitSet(io.iacq().addr_beat, Bool(true))
|
||||
}
|
||||
when(iacq_data_done) { collect_iacq_data := Bool(false) }
|
||||
}
|
||||
assert(!(state =/= s_idle && pending_ignt && xact_iacq.isPrefetch()),
|
||||
"Broadcast Hub does not support Prefetches.")
|
||||
|
||||
assert(!(state =/= s_idle && pending_ignt && xact_iacq.isAtomic()),
|
||||
"Broadcast Hub does not support PutAtomics.")
|
||||
}
|
||||
|
||||
class BufferedBroadcastVoluntaryReleaseTracker(trackerId: Int)(implicit p: Parameters)
|
||||
extends BroadcastVoluntaryReleaseTracker(trackerId)(p)
|
||||
with HasDataBuffer {
|
||||
|
||||
// Tell the parent if any incoming messages conflict with the ongoing transaction
|
||||
routeInParent()
|
||||
io.alloc_iacq.can := Bool(false)
|
||||
|
||||
// Start transaction by accepting inner release
|
||||
innerRelease(block_vol_ignt = pending_orel || vol_ognt_counter.pending)
|
||||
|
||||
// A release beat can be accepted if we are idle, if it's a mergeable transaction, or if it's a tail beat
|
||||
io.inner.release.ready := state === s_idle || irel_can_merge || irel_same_xact
|
||||
|
||||
when(irel_is_allocating) { pending_orel := io.irel().hasData() }
|
||||
|
||||
when(io.inner.release.fire()) { data_buffer(io.irel().addr_beat) := io.irel().data }
|
||||
|
||||
// Dispatch outer release
|
||||
outerRelease(
|
||||
coh = outer_coh.onHit(M_XWR),
|
||||
data = data_buffer(vol_ognt_counter.up.idx))
|
||||
|
||||
quiesce()
|
||||
}
|
||||
|
||||
class BufferedBroadcastAcquireTracker(trackerId: Int)(implicit p: Parameters)
|
||||
extends BroadcastAcquireTracker(trackerId)(p)
|
||||
with HasByteWriteMaskBuffer {
|
||||
|
||||
// Setup IOs used for routing in the parent
|
||||
routeInParent()
|
||||
io.alloc_irel.can := Bool(false)
|
||||
|
||||
// First, take care of accepting new acquires or secondary misses
|
||||
// Handling of primary and secondary misses' data and write mask merging
|
||||
innerAcquire(
|
||||
can_alloc = Bool(false),
|
||||
next = s_inner_probe)
|
||||
|
||||
io.inner.acquire.ready := state === s_idle || iacq_can_merge || iacq_same_xact
|
||||
|
||||
// Track which clients still need to be probed and build the Probe message
|
||||
// If a writeback occurs, we can forward its data via the buffer,
|
||||
// and skip having to go outwards
|
||||
val skip_outer_acquire = pending_ignt_data.andR
|
||||
|
||||
innerProbe(
|
||||
inner_coh.makeProbe(curr_probe_dst, xact_iacq, xact_addr_block),
|
||||
Mux(!skip_outer_acquire, s_outer_acquire, s_busy))
|
||||
|
||||
// Handle incoming releases from clients, which may reduce sharer counts
|
||||
// and/or write back dirty data, and may be unexpected voluntary releases
|
||||
def irel_can_merge = io.irel().conflicts(xact_addr_block) &&
|
||||
io.irel().isVoluntary() &&
|
||||
!Vec(s_idle, s_meta_write).contains(state) &&
|
||||
!all_pending_done &&
|
||||
!io.outer.grant.fire() &&
|
||||
!io.inner.grant.fire() &&
|
||||
!vol_ignt_counter.pending
|
||||
|
||||
innerRelease(block_vol_ignt = vol_ognt_counter.pending)
|
||||
|
||||
//TODO: accept vol irels when state === s_idle, operate like the VolRelTracker
|
||||
io.inner.release.ready := irel_can_merge || irel_same_xact
|
||||
|
||||
mergeDataInner(io.inner.release)
|
||||
|
||||
// If there was a writeback, forward it outwards
|
||||
outerRelease(
|
||||
coh = outer_coh.onHit(M_XWR),
|
||||
data = data_buffer(vol_ognt_counter.up.idx))
|
||||
|
||||
// Send outer request for miss
|
||||
outerAcquire(
|
||||
caching = !xact_iacq.isBuiltInType(),
|
||||
coh = outer_coh,
|
||||
data = data_buffer(ognt_counter.up.idx),
|
||||
wmask = wmask_buffer(ognt_counter.up.idx),
|
||||
next = s_busy)
|
||||
|
||||
// Handle the response from outer memory
|
||||
mergeDataOuter(io.outer.grant)
|
||||
|
||||
// Acknowledge or respond with data
|
||||
innerGrant(
|
||||
data = data_buffer(ignt_data_idx),
|
||||
external_pending = pending_orel || ognt_counter.pending || vol_ognt_counter.pending)
|
||||
|
||||
when(iacq_is_allocating) {
|
||||
wmask_buffer.foreach { w => w := UInt(0) } // This is the only reg that must be clear in s_idle
|
||||
initializeProbes()
|
||||
}
|
||||
|
||||
initDataInner(io.inner.acquire)
|
||||
|
||||
// Wait for everything to quiesce
|
||||
quiesce()
|
||||
|
||||
when(pending_ognt_ack) {
|
||||
io.outer.grant.ready := Bool(true)
|
||||
when(io.outer.grant.valid) { pending_ognt_ack := Bool(false) }
|
||||
// TODO: add a finish queue if this isn't the last-level manager
|
||||
}
|
||||
|
||||
switch (state) {
|
||||
is(s_idle) {
|
||||
io.inner.acquire.ready := Bool(true)
|
||||
when(io.inner.acquire.valid && io.alloc.iacq) {
|
||||
xact := io.iacq()
|
||||
xact.data_buffer(UInt(0)) := io.iacq().data
|
||||
xact.wmask_buffer(UInt(0)) := io.iacq().wmask()
|
||||
collect_iacq_data := io.iacq().hasMultibeatData()
|
||||
iacq_data_valid := io.iacq().hasData() << io.iacq().addr_beat
|
||||
val needs_probes = mask_incoherent.orR
|
||||
when(needs_probes) {
|
||||
pending_probes := mask_incoherent
|
||||
release_count := PopCount(mask_incoherent)
|
||||
}
|
||||
state := Mux(needs_probes, s_probe,
|
||||
Mux(pending_outer_write_, s_mem_write,
|
||||
Mux(pending_outer_read_, s_mem_read, s_make_grant)))
|
||||
}
|
||||
}
|
||||
is(s_probe) {
|
||||
// Generate probes
|
||||
io.inner.probe.valid := pending_probes.orR
|
||||
when(io.inner.probe.ready) {
|
||||
pending_probes := pending_probes & ~UIntToOH(curr_p_id)
|
||||
}
|
||||
|
||||
// Handle releases, which may have data to be written back
|
||||
val matches = io.matches.irel
|
||||
io.inner.release.ready := (!io.irel().hasData() || io.outer.acquire.ready) && matches
|
||||
when(io.inner.release.valid && matches) {
|
||||
when(io.irel().hasData()) {
|
||||
io.outer.acquire.valid := Bool(true)
|
||||
when(io.outer.acquire.ready) {
|
||||
when(oacq_data_done) {
|
||||
pending_ognt_ack := Bool(true)
|
||||
release_count := release_count - UInt(1)
|
||||
when(release_count === UInt(1)) {
|
||||
state := Mux(pending_outer_write, s_mem_write,
|
||||
Mux(pending_outer_read, s_mem_read, s_make_grant))
|
||||
}
|
||||
}
|
||||
}
|
||||
} .otherwise {
|
||||
release_count := release_count - UInt(1)
|
||||
when(release_count === UInt(1)) {
|
||||
state := Mux(pending_outer_write, s_mem_write,
|
||||
Mux(pending_outer_read, s_mem_read, s_make_grant))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
is(s_mem_write) { // Write data to outer memory
|
||||
io.outer.acquire.valid := !pending_ognt_ack && (!collect_iacq_data || iacq_data_valid(oacq_data_cnt))
|
||||
when(oacq_data_done) {
|
||||
pending_ognt_ack := Bool(true)
|
||||
state := Mux(pending_outer_read, s_mem_read, s_mem_resp)
|
||||
}
|
||||
}
|
||||
is(s_mem_read) { // Read data from outer memory (possibly what was just written)
|
||||
io.outer.acquire.valid := !pending_ognt_ack
|
||||
when(io.outer.acquire.fire()) { state := s_mem_resp }
|
||||
}
|
||||
is(s_mem_resp) { // Wait to forward grants from outer memory
|
||||
io.outer.grant.ready := io.inner.grant.ready
|
||||
io.inner.grant.valid := io.outer.grant.valid
|
||||
when(ignt_data_done) {
|
||||
state := Mux(io.ignt().requiresAck(), s_ack, s_idle)
|
||||
}
|
||||
}
|
||||
is(s_make_grant) { // Manufacture a local grant (some kind of permission upgrade)
|
||||
io.inner.grant.valid := Bool(true)
|
||||
when(io.inner.grant.ready) {
|
||||
state := Mux(io.ignt().requiresAck(), s_ack, s_idle)
|
||||
}
|
||||
}
|
||||
is(s_ack) { // Wait for transaction to complete
|
||||
io.inner.finish.ready := Bool(true)
|
||||
when(io.inner.finish.valid) { state := s_idle }
|
||||
}
|
||||
}
|
||||
}
|
||||
uncore/src/main/scala/bufferless.scala (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
// See LICENSE for license details.
|
||||
|
||||
package uncore
|
||||
import Chisel._
|
||||
import cde.{Parameters, Field}
|
||||
|
||||
|
||||
class BufferlessBroadcastHub(implicit p: Parameters) extends HierarchicalCoherenceAgent()(p) {
|
||||
|
||||
// Create TSHRs for outstanding transactions
|
||||
val irelTrackerList =
|
||||
(0 until nReleaseTransactors).map(id =>
|
||||
Module(new BufferlessBroadcastVoluntaryReleaseTracker(id)))
|
||||
val iacqTrackerList =
|
||||
(nReleaseTransactors until nTransactors).map(id =>
|
||||
Module(new BufferlessBroadcastAcquireTracker(id)))
|
||||
val trackerList = irelTrackerList ++ iacqTrackerList
|
||||
|
||||
// Propagate incoherence flags
|
||||
trackerList.map(_.io.incoherent) foreach { _ := io.incoherent }
|
||||
|
||||
// Create an arbiter for the one memory port
|
||||
val outerList = trackerList.map(_.io.outer)
|
||||
val outer_arb = Module(new ClientTileLinkIOArbiter(outerList.size)
|
||||
(p.alterPartial({ case TLId => p(OuterTLId) })))
|
||||
outer_arb.io.in <> outerList
|
||||
io.outer <> outer_arb.io.out
|
||||
|
||||
// Handle acquire transaction initiation
|
||||
val irel_vs_iacq_conflict =
|
||||
io.inner.acquire.valid &&
|
||||
io.inner.release.valid &&
|
||||
io.irel().conflicts(io.iacq())
|
||||
|
||||
doInputRoutingWithAllocation(
|
||||
in = io.inner.acquire,
|
||||
outs = trackerList.map(_.io.inner.acquire),
|
||||
allocs = trackerList.map(_.io.alloc_iacq),
|
||||
allocOverride = !irel_vs_iacq_conflict)
|
||||
io.outer.acquire.bits.data := io.inner.acquire.bits.data
|
||||
io.outer.acquire.bits.addr_beat := io.inner.acquire.bits.addr_beat
|
||||
|
||||
// Handle releases, which might be voluntary and might have data
|
||||
doInputRoutingWithAllocation(
|
||||
in = io.inner.release,
|
||||
outs = trackerList.map(_.io.inner.release),
|
||||
allocs = trackerList.map(_.io.alloc_irel))
|
||||
io.outer.release.bits.data := io.inner.release.bits.data
|
||||
io.outer.release.bits.addr_beat := io.inner.release.bits.addr_beat
|
||||
|
||||
// Wire probe requests and grant reply to clients, finish acks from clients
|
||||
doOutputArbitration(io.inner.probe, trackerList.map(_.io.inner.probe))
|
||||
|
||||
doOutputArbitration(io.inner.grant, trackerList.map(_.io.inner.grant))
|
||||
io.inner.grant.bits.data := io.outer.grant.bits.data
|
||||
io.inner.grant.bits.addr_beat := io.outer.grant.bits.addr_beat
|
||||
|
||||
doInputRouting(io.inner.finish, trackerList.map(_.io.inner.finish))
|
||||
}
|
||||
|
||||
class BufferlessBroadcastVoluntaryReleaseTracker(trackerId: Int)(implicit p: Parameters)
|
||||
extends BroadcastVoluntaryReleaseTracker(trackerId)(p) {
|
||||
|
||||
// Tell the parent if any incoming messages conflict with the ongoing transaction
|
||||
routeInParent()
|
||||
io.alloc_iacq.can := Bool(false)
|
||||
|
||||
// Start transaction by accepting inner release
|
||||
innerRelease(block_vol_ignt = pending_orel || vol_ognt_counter.pending)
|
||||
|
||||
// A release beat can be accepted if we are idle, if it's a mergeable transaction, or if it's a tail beat,
// and if the outer release path is clear
|
||||
val irel_could_accept = state === s_idle || irel_can_merge || irel_same_xact
|
||||
io.inner.release.ready := irel_could_accept &&
|
||||
(!io.irel().hasData() || io.outer.release.ready)
|
||||
|
||||
// Dispatch outer release
|
||||
outerRelease(coh = outer_coh.onHit(M_XWR))
|
||||
io.outer.grant.ready := state === s_busy && io.inner.grant.ready // bypass data
|
||||
|
||||
quiesce()
|
||||
}
|
||||
|
||||
class BufferlessBroadcastAcquireTracker(trackerId: Int)(implicit p: Parameters)
|
||||
extends BroadcastAcquireTracker(trackerId)(p) {
|
||||
|
||||
// Setup IOs used for routing in the parent
|
||||
routeInParent()
|
||||
io.alloc_irel.can := Bool(false)
|
||||
|
||||
// First, take care of accepting new acquires or secondary misses
|
||||
// Handling of primary and secondary misses' data and write mask merging
|
||||
innerAcquire(
|
||||
can_alloc = Bool(false),
|
||||
next = s_inner_probe)
|
||||
|
||||
val iacq_could_accept = state === s_outer_acquire || iacq_can_merge || iacq_same_xact
|
||||
io.inner.acquire.ready := iacq_could_accept &&
|
||||
(!io.iacq().hasData() || io.outer.acquire.fire())
|
||||
|
||||
// Track which clients still need to be probed and build the Probe message
|
||||
innerProbe(
|
||||
inner_coh.makeProbe(curr_probe_dst, xact_iacq, xact_addr_block),
|
||||
s_outer_acquire)
|
||||
|
||||
// Handle incoming releases from clients, which may reduce sharer counts
|
||||
// and/or write back dirty data, and may be unexpected voluntary releases
|
||||
def irel_can_merge = io.irel().conflicts(xact_addr_block) &&
|
||||
io.irel().isVoluntary() &&
|
||||
!vol_ignt_counter.pending &&
|
||||
(state =/= s_idle)
|
||||
|
||||
innerRelease(block_vol_ignt = vol_ognt_counter.pending)
|
||||
|
||||
val irel_could_accept = irel_can_merge || irel_same_xact
|
||||
io.inner.release.ready := irel_could_accept &&
|
||||
(!io.irel().hasData() || io.outer.release.ready)
|
||||
|
||||
// If there was a writeback, forward it outwards
|
||||
outerRelease(
|
||||
coh = outer_coh.onHit(M_XWR),
|
||||
buffering = Bool(false))
|
||||
|
||||
// Send outer request for miss
|
||||
outerAcquire(
|
||||
caching = !xact_iacq.isBuiltInType(),
|
||||
buffering = Bool(false),
|
||||
coh = outer_coh,
|
||||
next = s_busy)
|
||||
|
||||
// Handle the response from outer memory
|
||||
io.outer.grant.ready := state === s_busy && io.inner.grant.ready // bypass data
|
||||
|
||||
// Acknowledge or respond with data
|
||||
innerGrant(external_pending = pending_orel || ognt_counter.pending || vol_ognt_counter.pending)
|
||||
|
||||
when(iacq_is_allocating) { initializeProbes() }
|
||||
|
||||
// Wait for everything to quiesce
|
||||
quiesce()
|
||||
}
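As an aside on the bufferless design above (a plain-Scala sketch under invented names, not the real Chisel interfaces): the hub accepts an inner beat only when the outer port can take it in the same cycle, so the tracker holds no data buffer and the parent wires the data straight through (e.g. io.outer.release.bits.data := io.inner.release.bits.data).

object BufferlessHandshakeSketch {
  final case class Port(valid: Boolean, ready: Boolean) {
    def fire: Boolean = valid && ready
  }

  // inner.ready is derived directly from outer.ready; the data path is a
  // straight bypass in the parent, so the tracker stores nothing.
  def coupledBeat(innerValid: Boolean, outerReady: Boolean): (Port, Port) = {
    val inner = Port(valid = innerValid, ready = outerReady)
    val outer = Port(valid = innerValid, ready = outerReady)
    (inner, outer)
  }

  def main(args: Array[String]): Unit = {
    val (inner, outer) = coupledBeat(innerValid = true, outerReady = false)
    println(inner.fire) // false: the inner beat stalls until the outer port is ready
    println(outer.fire) // false: nothing is emitted outward either
  }
}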
|
File diff suppressed because it is too large
@@ -227,6 +227,17 @@ object ManagerTileLinkHeaderCreator {
|
||||
}
|
||||
}
|
||||
|
||||
class BeatCounterStatus extends Bundle {
|
||||
val idx = UInt()
|
||||
val done = Bool()
|
||||
}
|
||||
|
||||
class TwoWayBeatCounterStatus extends Bundle {
|
||||
val pending = Bool()
|
||||
val up = new BeatCounterStatus()
|
||||
val down = new BeatCounterStatus()
|
||||
}
|
||||
|
||||
/** Utility trait containing wiring functions to keep track of how many data beats have
|
||||
* been sent or received over a particular [[uncore.TileLinkChannel]] or pair of channels.
|
||||
*
|
||||
@@ -283,27 +294,31 @@ trait HasDataBeatCounters {
|
||||
/** Provides counters on two channels, as well a meta-counter that tracks how many
|
||||
* messages have been sent over the up channel but not yet responded to over the down channel
|
||||
*
|
||||
* @param max max number of outstanding ups with no down
|
||||
* @param status bundle of status of the counters
|
||||
* @param up outgoing channel
|
||||
* @param down incoming channel
|
||||
* @param max max number of outstanding ups with no down
|
||||
* @param beat overrides cnts on single-beat messages
|
||||
* @param track whether up's message should be tracked
|
||||
* @return a tuple containing whether there are outstanding messages, up's count,
|
||||
* up's done, down's count, down's done
|
||||
*/
|
||||
def connectTwoWayBeatCounter[T <: TileLinkChannel, S <: TileLinkChannel](
|
||||
max: Int,
|
||||
def connectTwoWayBeatCounters[T <: TileLinkChannel, S <: TileLinkChannel](
|
||||
status: TwoWayBeatCounterStatus,
|
||||
up: DecoupledIO[T],
|
||||
down: DecoupledIO[S],
|
||||
max: Int = 1,
|
||||
beat: UInt = UInt(0),
|
||||
trackUp: T => Bool = (t: T) => Bool(true),
|
||||
trackDown: S => Bool = (s: S) => Bool(true)): (Bool, UInt, Bool, UInt, Bool) = {
|
||||
val (up_idx, up_done) = connectDataBeatCounter(up.fire(), up.bits, beat)
|
||||
val (down_idx, down_done) = connectDataBeatCounter(down.fire(), down.bits, beat)
|
||||
val do_inc = up_done && trackUp(up.bits)
|
||||
val do_dec = down_done && trackDown(down.bits)
|
||||
val cnt = TwoWayCounter(do_inc, do_dec, max)
|
||||
(cnt > UInt(0), up_idx, up_done, down_idx, down_done)
|
||||
trackDown: S => Bool = (s: S) => Bool(true)) {
|
||||
val (up_idx, up_done) = connectDataBeatCounter(up.fire() && trackUp(up.bits), up.bits, beat)
|
||||
val (dn_idx, dn_done) = connectDataBeatCounter(down.fire() && trackDown(down.bits), down.bits, beat)
|
||||
val cnt = TwoWayCounter(up_done, dn_done, max)
|
||||
status.pending := cnt > UInt(0)
|
||||
status.up.idx := up_idx
|
||||
status.up.done := up_done
|
||||
status.down.idx := dn_idx
|
||||
status.down.done := dn_done
|
||||
}
|
||||
}
|
||||
|
||||
@@ -391,9 +406,10 @@ class ClientTileLinkIOUnwrapper(implicit p: Parameters) extends TLModule()(p) {
|
||||
addr_beat = ognt.addr_beat,
|
||||
data = ognt.data)
|
||||
|
||||
assert(!io.in.release.valid || io.in.release.bits.isVoluntary(), "Unwrapper can only process voluntary releases.")
|
||||
val rel_grant = Grant(
|
||||
is_builtin_type = Bool(true),
|
||||
g_type = Mux(gnt_voluntary, Grant.voluntaryAckType, ognt.g_type),
|
||||
g_type = Grant.voluntaryAckType, // We should only ever be working with voluntary releases
|
||||
client_xact_id = ognt.client_xact_id,
|
||||
manager_xact_id = ognt.manager_xact_id,
|
||||
addr_beat = ognt.addr_beat,
|
||||
|
@@ -47,13 +47,14 @@ class ClientMetadata(implicit p: Parameters) extends CoherenceMetadata()(p) {
|
||||
def makeAcquire(
|
||||
op_code: UInt,
|
||||
client_xact_id: UInt,
|
||||
addr_block: UInt): Acquire =
|
||||
addr_block: UInt): Acquire = {
|
||||
Acquire(
|
||||
is_builtin_type = Bool(false),
|
||||
a_type = co.getAcquireType(op_code, this),
|
||||
client_xact_id = client_xact_id,
|
||||
addr_block = addr_block,
|
||||
union = Cat(op_code, Bool(true)))(p)
|
||||
}
|
||||
|
||||
/** Constructs a Release message based on this metadata on cache control op
|
||||
*
|
||||
uncore/src/main/scala/sdq.scala (new file, 118 lines)
@@ -0,0 +1,118 @@
|
||||
// See LICENSE for license details.
|
||||
|
||||
package uncore
|
||||
import Chisel._
|
||||
import cde.{Parameters, Field}
|
||||
|
||||
case object L2StoreDataQueueDepth extends Field[Int]
|
||||
|
||||
trait HasStoreDataQueueParameters extends HasCoherenceAgentParameters {
|
||||
val sdqDepth = p(L2StoreDataQueueDepth)*innerDataBeats
|
||||
val dqIdxBits = math.max(log2Up(nReleaseTransactors) + 1, log2Up(sdqDepth))
|
||||
val nDataQueueLocations = 3 //Stores, VoluntaryWBs, Releases
|
||||
}
|
||||
|
||||
class DataQueueLocation(implicit p: Parameters) extends CoherenceAgentBundle()(p)
|
||||
with HasStoreDataQueueParameters {
|
||||
val idx = UInt(width = dqIdxBits)
|
||||
val loc = UInt(width = log2Ceil(nDataQueueLocations))
|
||||
}
|
||||
|
||||
object DataQueueLocation {
|
||||
def apply(idx: UInt, loc: UInt)(implicit p: Parameters) = {
|
||||
val d = Wire(new DataQueueLocation)
|
||||
d.idx := idx
|
||||
d.loc := loc
|
||||
d
|
||||
}
|
||||
}
|
||||
|
||||
trait HasStoreDataQueue extends HasStoreDataQueueParameters {
|
||||
val io: HierarchicalTLIO
|
||||
val trackerIOsList: Seq[HierarchicalXactTrackerIO]
|
||||
|
||||
val internalDataBits = new DataQueueLocation().getWidth
|
||||
val inStoreQueue :: inVolWBQueue :: inClientReleaseQueue :: Nil = Enum(UInt(), nDataQueueLocations)
|
||||
|
||||
val usingStoreDataQueue = p.alterPartial({
|
||||
case TLKey(`innerTLId`) => innerTLParams.copy(overrideDataBitsPerBeat = Some(internalDataBits))
|
||||
case TLKey(`outerTLId`) => outerTLParams.copy(overrideDataBitsPerBeat = Some(internalDataBits))
|
||||
})
|
||||
|
||||
// Queue to store impending Put data
|
||||
lazy val sdq = Reg(Vec(sdqDepth, io.iacq().data))
|
||||
lazy val sdq_val = Reg(init=Bits(0, sdqDepth))
|
||||
lazy val sdq_alloc_id = PriorityEncoder(~sdq_val)
|
||||
lazy val sdq_rdy = !sdq_val.andR
|
||||
lazy val sdq_enq = trackerIOsList.map( t =>
|
||||
(t.alloc_iacq.should || t.alloc_iacq.matches) &&
|
||||
t.inner.acquire.fire() &&
|
||||
t.iacq().hasData()
|
||||
).reduce(_||_)
|
||||
|
||||
lazy val sdqLoc = List.fill(nTransactors) {
|
||||
DataQueueLocation(sdq_alloc_id, inStoreQueue).toBits
|
||||
}
|
||||
|
||||
/*
|
||||
doInputRoutingWithAllocation(
|
||||
in = io.inner.acquire,
|
||||
outs = trackerList.map(_.io.inner.acquire),
|
||||
allocs = trackerList.map(_.io.alloc._iacq),
|
||||
dataOverride = Some(sdqLoc),
|
||||
allocOverride = Some(sdq_rdy && !irel_vs_iacq_conflict))
|
||||
*/
|
||||
|
||||
// Queue to store impending Voluntary Release data
|
||||
lazy val voluntary = io.irel().isVoluntary()
|
||||
lazy val vwbdq_enq = io.inner.release.fire() && voluntary && io.irel().hasData()
|
||||
lazy val (rel_data_cnt, rel_data_done) = Counter(vwbdq_enq, innerDataBeats) //TODO Zero width
|
||||
lazy val vwbdq = Reg(Vec(innerDataBeats, io.irel().data)) //TODO Assumes nReleaseTransactors == 1
|
||||
|
||||
|
||||
lazy val vwbqLoc = (0 until nTransactors).map(i =>
|
||||
(DataQueueLocation(rel_data_cnt,
|
||||
(if(i < nReleaseTransactors) inVolWBQueue
|
||||
else inClientReleaseQueue)).toBits))
|
||||
/*
|
||||
doInputRoutingWithAllocation(
|
||||
io.inner.release,
|
||||
trackerList.map(_.io.inner.release),
|
||||
trackerList.map(_.io.matches.irel),
|
||||
trackerList.map(_.io.alloc.irel),
|
||||
Some(vwbqLoc))
|
||||
*/
|
||||
|
||||
val outer_arb: ClientTileLinkIOArbiter
|
||||
lazy val outer_data_ptr = new DataQueueLocation().fromBits(outer_arb.io.out.acquire.bits.data)
|
||||
/*
|
||||
val outer_arb = Module(new ClientTileLinkIOArbiter(trackerList.size)
|
||||
(usingStoreDataQueue.alterPartial({ case TLId => p(OuterTLId) })))
|
||||
outer_arb.io.in <> trackerList
|
||||
*/
|
||||
// Get the pending data out of the store data queue
|
||||
lazy val is_in_sdq = outer_data_ptr.loc === inStoreQueue
|
||||
lazy val free_sdq = io.outer.acquire.fire() &&
|
||||
io.outer.acquire.bits.hasData() &&
|
||||
outer_data_ptr.loc === inStoreQueue
|
||||
/*
|
||||
io.outer <> outer_arb.io.out
|
||||
io.outer.acquire.bits.data := MuxLookup(outer_data_ptr.loc, io.irel().data, Array(
|
||||
inStoreQueue -> sdq(outer_data_ptr.idx),
|
||||
inVolWBQueue -> vwbdq(outer_data_ptr.idx)))
|
||||
*/
|
||||
|
||||
// Enqueue SDQ data
|
||||
def sdqEnqueue() {
|
||||
when (sdq_enq) { sdq(sdq_alloc_id) := io.iacq().data }
|
||||
when(vwbdq_enq) { vwbdq(rel_data_cnt) := io.irel().data }
|
||||
}
|
||||
|
||||
// Update SDQ valid bits
|
||||
def sdqUpdate() {
|
||||
when (io.outer.acquire.valid || sdq_enq) {
|
||||
sdq_val := sdq_val & ~(UIntToOH(outer_data_ptr.idx) & Fill(sdqDepth, free_sdq)) |
|
||||
PriorityEncoderOH(~sdq_val(sdqDepth-1,0)) & Fill(sdqDepth, sdq_enq)
|
||||
}
|
||||
}
|
||||
}
|
@@ -132,8 +132,8 @@ trait HasTileLinkData extends HasTileLinkBeatId {
|
||||
|
||||
def hasData(dummy: Int = 0): Bool
|
||||
def hasMultibeatData(dummy: Int = 0): Bool
|
||||
def first(dummy: Int = 0): Bool = Mux(hasMultibeatData(), addr_beat === UInt(0), Bool(true))
|
||||
def last(dummy: Int = 0): Bool = Mux(hasMultibeatData(), addr_beat === UInt(tlDataBeats-1), Bool(true))
|
||||
def first(dummy: Int = 0): Bool = !hasMultibeatData() || addr_beat === UInt(0)
|
||||
def last(dummy: Int = 0): Bool = !hasMultibeatData() || addr_beat === UInt(tlDataBeats-1)
|
||||
}
|
||||
|
||||
/** An entire cache block of data */
|
||||
@@ -186,6 +186,10 @@ trait HasAcquireUnion extends HasTileLinkParameters {
|
||||
}
|
||||
/** Full, beat-sized writemask */
|
||||
def full_wmask(dummy: Int = 0) = FillInterleaved(8, wmask())
|
||||
|
||||
/** Does this message have a partial writemask? */
|
||||
def hasPartialWritemask(dummy: Int = 0): Bool = wmask() =/= Acquire.fullWriteMask
|
||||
|
||||
}
|
||||
|
||||
trait HasAcquireType extends HasTileLinkParameters {
|
||||
@@ -207,6 +211,12 @@ trait HasAcquireType extends HasTileLinkParameters {
|
||||
def isPrefetch(dummy: Int = 0): Bool = isBuiltInType() &&
|
||||
(is(Acquire.getPrefetchType) || is(Acquire.putPrefetchType))
|
||||
|
||||
/** Is this message a built-in atomic message */
|
||||
def isAtomic(dummy: Int = 0): Bool = isBuiltInType() && is(Acquire.putAtomicType)
|
||||
|
||||
/** Is this message a built-in read message */
|
||||
def isGet(dummy: Int = 0): Bool = isBuiltInType() && (is(Acquire.getType) || is(Acquire.getBlockType))
|
||||
|
||||
/** Does this message contain data? Assumes that no custom message types have data. */
|
||||
def hasData(dummy: Int = 0): Bool = isBuiltInType() && Acquire.typesWithData.contains(a_type)
|
||||
|
||||
@@ -214,11 +224,6 @@ trait HasAcquireType extends HasTileLinkParameters {
|
||||
def hasMultibeatData(dummy: Int = 0): Bool = Bool(tlDataBeats > 1) && isBuiltInType() &&
|
||||
Acquire.typesWithMultibeatData.contains(a_type)
|
||||
|
||||
/** Does this message require the manager to probe the very client that sent it?
|
||||
* Needed if multiple caches are attached to the same port.
|
||||
*/
|
||||
def requiresSelfProbe(dummy: Int = 0) = Bool(false)
|
||||
|
||||
/** Mapping between each built-in Acquire type and a built-in Grant type. */
|
||||
def getBuiltInGrantType(dummy: Int = 0): UInt = Acquire.getBuiltInGrantType(this.a_type)
|
||||
}
|
||||
uncore/src/main/scala/trackers.scala (new file, 546 lines)
@@ -0,0 +1,546 @@
|
||||
// See LICENSE for license details.
|
||||
|
||||
package uncore
|
||||
import Chisel._
|
||||
import cde.{Parameters, Field}
|
||||
|
||||
class TrackerAllocation extends Bundle {
|
||||
val matches = Bool(OUTPUT)
|
||||
val can = Bool(OUTPUT)
|
||||
val should = Bool(INPUT)
|
||||
}
|
||||
|
||||
trait HasTrackerAllocationIO extends Bundle {
|
||||
val alloc_iacq = new TrackerAllocation
|
||||
val alloc_irel = new TrackerAllocation
|
||||
val alloc_oprb = new TrackerAllocation
|
||||
}
|
||||
|
||||
class ManagerXactTrackerIO(implicit p: Parameters) extends ManagerTLIO()(p)
|
||||
with HasTrackerAllocationIO
|
||||
|
||||
class HierarchicalXactTrackerIO(implicit p: Parameters) extends HierarchicalTLIO()(p)
|
||||
with HasTrackerAllocationIO
|
||||
|
||||
abstract class XactTracker(implicit p: Parameters) extends CoherenceAgentModule()(p)
|
||||
with HasXactTrackerStates
|
||||
with HasPendingBitHelpers {
|
||||
override val s_idle :: s_meta_read :: s_meta_resp :: s_wb_req :: s_wb_resp :: s_inner_probe :: s_outer_acquire :: s_busy :: s_meta_write :: Nil = Enum(UInt(), 9)
|
||||
val state = Reg(init=s_idle)
|
||||
|
||||
def quiesce(next: UInt = s_idle) {
|
||||
all_pending_done := !scoreboard.foldLeft(Bool(false))(_||_)
|
||||
when(state === s_busy && all_pending_done) { state := next }
|
||||
}
|
||||
|
||||
def pinAllReadyValidLow[T <: Data](b: Bundle) {
|
||||
b.elements.foreach {
|
||||
_._2 match {
|
||||
case d: DecoupledIO[_] =>
|
||||
if(d.ready.dir == OUTPUT) d.ready := Bool(false)
|
||||
else if(d.valid.dir == OUTPUT) d.valid := Bool(false)
|
||||
case v: ValidIO[_] => if(v.valid.dir == OUTPUT) v.valid := Bool(false)
|
||||
case b: Bundle => pinAllReadyValidLow(b)
|
||||
case _ =>
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trait HasXactTrackerStates {
|
||||
def state: UInt
|
||||
def s_idle: UInt = UInt(0)
|
||||
def s_meta_read: UInt
|
||||
def s_meta_resp: UInt
|
||||
def s_wb_req: UInt
|
||||
def s_wb_resp: UInt
|
||||
def s_inner_probe: UInt
|
||||
def s_outer_acquire: UInt
|
||||
def s_busy: UInt
|
||||
def s_meta_write: UInt
|
||||
}
|
||||
|
||||
trait HasPendingBitHelpers extends HasDataBeatCounters {
|
||||
val scoreboard = scala.collection.mutable.ListBuffer.empty[Bool]
|
||||
val all_pending_done = Wire(Bool())
|
||||
|
||||
def addPendingBitWhenBeat[T <: HasBeat](inc: Bool, in: T): UInt =
|
||||
Fill(in.tlDataBeats, inc) & UIntToOH(in.addr_beat)
|
||||
|
||||
def dropPendingBitWhenBeat[T <: HasBeat](dec: Bool, in: T): UInt =
|
||||
~Fill(in.tlDataBeats, dec) | ~UIntToOH(in.addr_beat)
|
||||
|
||||
def addPendingBitWhenId[T <: HasClientId](inc: Bool, in: T): UInt =
|
||||
Fill(in.tlNCachingClients, inc) & UIntToOH(in.client_id)
|
||||
|
||||
def dropPendingBitWhenId[T <: HasClientId](dec: Bool, in: T): UInt =
|
||||
~Fill(in.tlNCachingClients, dec) | ~UIntToOH(in.client_id)
|
||||
|
||||
def addPendingBitWhenBeatHasData[T <: HasBeat](in: DecoupledIO[T], inc: Bool = Bool(true)): UInt =
|
||||
addPendingBitWhenBeat(in.fire() && in.bits.hasData() && inc, in.bits)
|
||||
|
||||
def addPendingBitWhenBeatHasDataAndAllocs(in: DecoupledIO[AcquireFromSrc]): UInt =
|
||||
addPendingBitWhenBeatHasData(in, in.bits.allocate())
|
||||
|
||||
def addPendingBitWhenBeatNeedsRead(in: DecoupledIO[AcquireFromSrc], inc: Bool = Bool(true)): UInt = {
|
||||
val a = in.bits
|
||||
val needs_read = (a.isGet() || a.isAtomic() || a.hasPartialWritemask()) || inc
|
||||
addPendingBitWhenBeat(in.fire() && needs_read, a)
|
||||
}
|
||||
|
||||
def addPendingBitWhenBeatHasPartialWritemask(in: DecoupledIO[AcquireFromSrc]): UInt =
|
||||
addPendingBitWhenBeat(in.fire() && in.bits.hasPartialWritemask(), in.bits)
|
||||
|
||||
def addPendingBitsFromAcquire(a: SecondaryMissInfo): UInt =
|
||||
Mux(a.hasMultibeatData(), Fill(a.tlDataBeats, UInt(1, 1)), UIntToOH(a.addr_beat))
|
||||
|
||||
def dropPendingBitWhenBeatHasData[T <: HasBeat](in: DecoupledIO[T]): UInt =
|
||||
dropPendingBitWhenBeat(in.fire() && in.bits.hasData(), in.bits)
|
||||
|
||||
def dropPendingBitAtDest[T <: HasId](in: DecoupledIO[T]): UInt =
|
||||
dropPendingBitWhenId(in.fire(), in.bits)
|
||||
|
||||
def dropPendingBitAtDestWhenVoluntary[T <: HasId with MightBeVoluntary](in: DecoupledIO[T]): UInt =
|
||||
dropPendingBitWhenId(in.fire() && in.bits.isVoluntary(), in.bits)
|
||||
|
||||
def addPendingBitAtSrc[T <: HasId](in: DecoupledIO[T]): UInt =
|
||||
addPendingBitWhenId(in.fire(), in.bits)
|
||||
|
||||
def addPendingBitAtSrcWhenVoluntary[T <: HasId with MightBeVoluntary](in: DecoupledIO[T]): UInt =
|
||||
addPendingBitWhenId(in.fire() && in.bits.isVoluntary(), in.bits)
|
||||
|
||||
def addOtherBits(en: Bool, nBits: Int): UInt =
|
||||
Mux(en, Cat(Fill(nBits - 1, UInt(1, 1)), UInt(0, 1)), UInt(0, nBits))
|
||||
|
||||
def addPendingBitsOnFirstBeat(in: DecoupledIO[Acquire]): UInt =
|
||||
addOtherBits(in.fire() &&
|
||||
in.bits.hasMultibeatData() &&
|
||||
in.bits.addr_beat === UInt(0),
|
||||
in.bits.tlDataBeats)
|
||||
|
||||
def dropPendingBitsOnFirstBeat(in: DecoupledIO[Acquire]): UInt =
|
||||
~addPendingBitsOnFirstBeat(in)
|
||||
}
|
||||
|
||||
trait HasDataBuffer extends HasCoherenceAgentParameters {
|
||||
val data_buffer = Reg(init=Vec.fill(innerDataBeats)(UInt(0, width = innerDataBits)))
|
||||
|
||||
type TLDataBundle = TLBundle with HasTileLinkData with HasTileLinkBeatId
|
||||
|
||||
def initDataInner[T <: Acquire](in: DecoupledIO[T]) {
|
||||
when(in.fire() && in.bits.hasData()) {
|
||||
data_buffer(in.bits.addr_beat) := in.bits.data
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: provide func for accessing when innerDataBeats =/= outerDataBeats or internalDataBeats
|
||||
def mergeData(dataBits: Int)(beat: UInt, incoming: UInt) {
|
||||
data_buffer(beat) := incoming
|
||||
}
|
||||
|
||||
def mergeDataInner[T <: TLDataBundle](in: DecoupledIO[T]) {
|
||||
when(in.fire() && in.bits.hasData()) {
|
||||
mergeData(innerDataBits)(in.bits.addr_beat, in.bits.data)
|
||||
}
|
||||
}
|
||||
|
||||
def mergeDataOuter[T <: TLDataBundle](in: DecoupledIO[T]) {
|
||||
when(in.fire() && in.bits.hasData()) {
|
||||
mergeData(outerDataBits)(in.bits.addr_beat, in.bits.data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trait HasByteWriteMaskBuffer extends HasDataBuffer {
|
||||
val wmask_buffer = Reg(init=Vec.fill(innerDataBeats)(UInt(0, width = innerWriteMaskBits)))
|
||||
|
||||
override def initDataInner[T <: Acquire](in: DecoupledIO[T]) {
|
||||
when(in.fire() && in.bits.hasData()) {
|
||||
val beat = in.bits.addr_beat
|
||||
val full = FillInterleaved(8, in.bits.wmask())
|
||||
data_buffer(beat) := (~full & data_buffer(beat)) | (full & in.bits.data)
|
||||
wmask_buffer(beat) := in.bits.wmask() | wmask_buffer(beat) // assumes wmask_buffer is zeroed
|
||||
}
|
||||
}
|
||||
|
||||
override def mergeData(dataBits: Int)(beat: UInt, incoming: UInt) {
|
||||
val old_data = incoming // Refilled, written back, or de-cached data
|
||||
val new_data = data_buffer(beat) // Newly Put data is already in the buffer
|
||||
val wmask = FillInterleaved(8, wmask_buffer(beat))
|
||||
data_buffer(beat) := ~wmask & old_data | wmask & new_data
|
||||
}
|
||||
}
|
||||
|
||||
trait HasBlockAddressBuffer extends HasCoherenceAgentParameters {
|
||||
val xact_addr_block = Reg(init = UInt(0, width = blockAddrBits))
|
||||
}
|
||||
|
||||
|
||||
trait HasAcquireMetadataBuffer extends HasBlockAddressBuffer {
|
||||
val xact_allocate = Reg{ Bool() }
|
||||
val xact_amo_shift_bytes = Reg{ UInt() }
|
||||
val xact_op_code = Reg{ UInt() }
|
||||
val xact_addr_byte = Reg{ UInt() }
|
||||
val xact_op_size = Reg{ UInt() }
|
||||
val xact_addr_beat = Wire(UInt())
|
||||
val xact_iacq = Wire(new SecondaryMissInfo)
|
||||
}
|
||||
|
||||
trait HasVoluntaryReleaseMetadataBuffer extends HasBlockAddressBuffer
|
||||
with HasPendingBitHelpers
|
||||
with HasXactTrackerStates {
|
||||
def io: HierarchicalXactTrackerIO
|
||||
|
||||
val xact_vol_ir_r_type = Reg{ UInt() }
|
||||
val xact_vol_ir_src = Reg{ UInt() }
|
||||
val xact_vol_ir_client_xact_id = Reg{ UInt() }
|
||||
|
||||
def xact_vol_irel = Release(
|
||||
src = xact_vol_ir_src,
|
||||
voluntary = Bool(true),
|
||||
r_type = xact_vol_ir_r_type,
|
||||
client_xact_id = xact_vol_ir_client_xact_id,
|
||||
addr_block = xact_addr_block)
|
||||
(p.alterPartial({ case TLId => p(InnerTLId) }))
|
||||
}
|
||||
|
||||
trait AcceptsVoluntaryReleases extends HasVoluntaryReleaseMetadataBuffer {
|
||||
def inner_coh: ManagerMetadata
|
||||
|
||||
val pending_irel_data = Reg(init=Bits(0, width = innerDataBeats))
|
||||
val vol_ignt_counter = Wire(new TwoWayBeatCounterStatus)
|
||||
|
||||
def irel_can_merge: Bool
|
||||
def irel_same_xact: Bool
|
||||
def irel_is_allocating: Bool = state === s_idle && io.alloc_irel.should && io.inner.release.valid
|
||||
def irel_is_merging: Bool = (irel_can_merge || irel_same_xact) && io.inner.release.valid
|
||||
|
||||
def innerRelease(block_vol_ignt: Bool = Bool(false), next: UInt = s_busy) {
|
||||
connectTwoWayBeatCounters(
|
||||
status = vol_ignt_counter,
|
||||
up = io.inner.release,
|
||||
down = io.inner.grant,
|
||||
trackUp = (r: Release) => {
|
||||
Mux(state === s_idle, io.alloc_irel.should, io.alloc_irel.matches) && r.isVoluntary() && r.requiresAck()
|
||||
},
|
||||
trackDown = (g: Grant) => (state =/= s_idle) && g.isVoluntary())
|
||||
|
||||
pending_irel_data := (pending_irel_data & dropPendingBitWhenBeatHasData(io.inner.release))
|
||||
|
||||
when(irel_is_allocating) {
|
||||
xact_addr_block := io.irel().addr_block
|
||||
state := next
|
||||
}
|
||||
|
||||
when(io.inner.release.fire()) {
|
||||
when(io.alloc_irel.should || (irel_can_merge && io.irel().first())) {
|
||||
xact_vol_ir_r_type := io.irel().r_type
|
||||
xact_vol_ir_src := io.irel().client_id
|
||||
xact_vol_ir_client_xact_id := io.irel().client_xact_id
|
||||
pending_irel_data := Mux(io.irel().hasMultibeatData(),
|
||||
dropPendingBitWhenBeatHasData(io.inner.release),
|
||||
UInt(0))
|
||||
}
|
||||
}
|
||||
|
||||
io.inner.grant.valid := Vec(s_wb_req, s_wb_resp, s_inner_probe, s_busy).contains(state) &&
|
||||
vol_ignt_counter.pending &&
|
||||
!(pending_irel_data.orR || block_vol_ignt)
|
||||
|
||||
io.inner.grant.bits := inner_coh.makeGrant(xact_vol_irel)
|
||||
|
||||
scoreboard += (pending_irel_data.orR, vol_ignt_counter.pending)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
trait EmitsVoluntaryReleases extends HasVoluntaryReleaseMetadataBuffer {
|
||||
val pending_orel = Reg(init=Bool(false))
|
||||
val pending_orel_data = Reg(init=Bits(0, width = innerDataBeats))
|
||||
val vol_ognt_counter = Wire(new TwoWayBeatCounterStatus)
|
||||
|
||||
def outerRelease(
|
||||
coh: ClientMetadata,
|
||||
buffering: Bool = Bool(true),
|
||||
data: UInt = io.irel().data,
|
||||
add_pending_bit: UInt = UInt(0)) {
|
||||
pending_orel_data := (pending_orel_data & dropPendingBitWhenBeatHasData(io.outer.release)) |
|
||||
addPendingBitWhenBeatHasData(io.inner.release) |
|
||||
add_pending_bit
|
||||
|
||||
connectTwoWayBeatCounters(
|
||||
status = vol_ognt_counter,
|
||||
up = io.outer.release,
|
||||
down = io.outer.grant,
|
||||
trackUp = (r: Release) => r.isVoluntary() && r.requiresAck(),
|
||||
trackDown = (g: Grant) => g.isVoluntary())
|
||||
|
||||
io.outer.release.valid := state === s_busy &&
|
||||
Mux(io.orel().hasData(),
|
||||
Mux(buffering,
|
||||
pending_orel_data(vol_ognt_counter.up.idx),
|
||||
io.inner.release.valid),
|
||||
pending_orel)
|
||||
|
||||
|
||||
io.outer.release.bits := coh.makeVoluntaryWriteback(
|
||||
client_xact_id = UInt(0), // TODO was tracker id, but not needed?
|
||||
addr_block = xact_addr_block,
|
||||
addr_beat = vol_ognt_counter.up.idx,
|
||||
data = data)
|
||||
|
||||
when(pending_orel_data.orR) { pending_orel := Bool(true) }
|
||||
when(vol_ognt_counter.up.done) { pending_orel := Bool(false) }
|
||||
|
||||
io.outer.grant.ready := state === s_busy
|
||||
|
||||
scoreboard += (pending_orel, vol_ognt_counter.pending)
|
||||
}
|
||||
}
|
||||
|
||||
trait EmitsInnerProbes extends HasBlockAddressBuffer
|
||||
with HasXactTrackerStates
|
||||
with HasPendingBitHelpers {
|
||||
def io: HierarchicalXactTrackerIO
|
||||
|
||||
val pending_iprbs = Reg(UInt(width = innerNCachingClients))
|
||||
val curr_probe_dst = PriorityEncoder(pending_iprbs)
|
||||
val irel_counter = Wire(new TwoWayBeatCounterStatus)
|
||||
|
||||
def full_representation: UInt
|
||||
def initializeProbes() { pending_iprbs := full_representation & ~io.incoherent.toBits }
|
||||
def irel_same_xact = io.irel().conflicts(xact_addr_block) &&
|
||||
!io.irel().isVoluntary() &&
|
||||
state === s_inner_probe
|
||||
|
||||
def innerProbe(prb: Probe, next: UInt) {
|
||||
pending_iprbs := pending_iprbs & dropPendingBitAtDest(io.inner.probe)
|
||||
io.inner.probe.valid := state === s_inner_probe && pending_iprbs.orR
|
||||
io.inner.probe.bits := prb
|
||||
|
||||
connectTwoWayBeatCounters(
|
||||
status = irel_counter,
|
||||
up = io.inner.probe,
|
||||
down = io.inner.release,
|
||||
max = innerNCachingClients,
|
||||
trackDown = (r: Release) => (state =/= s_idle) && !r.isVoluntary())
|
||||
|
||||
when(state === s_inner_probe && !(pending_iprbs.orR || irel_counter.pending)) {
|
||||
state := next
|
||||
}
|
||||
|
||||
// N.B. no pending bits are added to the scoreboard because everything is handled in s_inner_probe
|
||||
}
|
||||
}
|
||||
|
||||
trait RoutesInParent extends HasBlockAddressBuffer
|
||||
with HasXactTrackerStates {
|
||||
def io: HierarchicalXactTrackerIO
|
||||
type AddrComparison = HasCacheBlockAddress => Bool
|
||||
def exactAddrMatch(a: HasCacheBlockAddress): Bool = a.conflicts(xact_addr_block)
|
||||
def routeInParent(iacqMatches: AddrComparison = exactAddrMatch,
|
||||
irelMatches: AddrComparison = exactAddrMatch,
|
||||
oprbMatches: AddrComparison = exactAddrMatch) {
|
||||
io.alloc_iacq.matches := (state =/= s_idle) && iacqMatches(io.iacq())
|
||||
io.alloc_irel.matches := (state =/= s_idle) && irelMatches(io.irel())
|
||||
io.alloc_oprb.matches := (state =/= s_idle) && oprbMatches(io.oprb())
|
||||
io.alloc_iacq.can := state === s_idle
|
||||
io.alloc_irel.can := state === s_idle
|
||||
io.alloc_oprb.can := Bool(false)
|
||||
}
|
||||
}

trait AcceptsInnerAcquires extends HasAcquireMetadataBuffer
    with AcceptsVoluntaryReleases
    with HasXactTrackerStates
    with HasPendingBitHelpers {
  def io: HierarchicalXactTrackerIO
  def nSecondaryMisses: Int
  def alwaysWriteFullBeat: Boolean
  def inner_coh: ManagerMetadata
  def trackerId: Int

  // Secondary miss queue holds transaction metadata used to make grants
  lazy val ignt_q = Module(new Queue(
    new SecondaryMissInfo()(p.alterPartial({ case TLId => p(InnerTLId) })),
    1 + nSecondaryMisses))

  val pending_ignt = Wire(Bool())
  val ignt_data_idx = Wire(UInt())
  val ignt_data_done = Wire(Bool())
  val ifin_counter = Wire(new TwoWayBeatCounterStatus)
  val pending_put_data = Reg(init=Bits(0, width = innerDataBeats))
  val pending_ignt_data = Reg(init=Bits(0, width = innerDataBeats))

  def iacq_same_xact: Bool = {
    (xact_iacq.client_xact_id === io.iacq().client_xact_id) &&
      xact_iacq.hasMultibeatData() &&
      pending_ignt &&
      pending_put_data(io.iacq().addr_beat)
  }
  def iacq_can_merge: Bool
  def iacq_is_allocating: Bool = state === s_idle && io.alloc_iacq.should && io.inner.acquire.valid
  def iacq_is_merging: Bool = (iacq_can_merge || iacq_same_xact) && io.inner.acquire.valid

  def innerAcquire(can_alloc: Bool, next: UInt) {

    // Enqueue some metadata information that we'll use to make coherence updates with later
    ignt_q.io.enq.valid := iacq_is_allocating || (iacq_is_merging && !iacq_same_xact)
    ignt_q.io.enq.bits := io.iacq()

    // Use the outputs of the queue to make further messages
    xact_iacq := Mux(ignt_q.io.deq.valid, ignt_q.io.deq.bits, ignt_q.io.enq.bits)
    xact_addr_beat := xact_iacq.addr_beat
    pending_ignt := ignt_q.io.count > UInt(0)

    // Track whether any beats are missing from a PutBlock
    pending_put_data := (pending_put_data &
                          dropPendingBitWhenBeatHasData(io.inner.acquire)) |
                          addPendingBitsOnFirstBeat(io.inner.acquire)

    // Initialize transaction metadata for accepted Acquire
    when(iacq_is_allocating) {
      xact_addr_block := io.iacq().addr_block
      xact_allocate := io.iacq().allocate() && can_alloc
      xact_amo_shift_bytes := io.iacq().amo_shift_bytes()
      xact_op_code := io.iacq().op_code()
      xact_addr_byte := io.iacq().addr_byte()
      xact_op_size := io.iacq().op_size()
      // Make sure to collect all data from a PutBlock
      pending_put_data := Mux(
        io.iacq().isBuiltInType(Acquire.putBlockType),
        dropPendingBitWhenBeatHasData(io.inner.acquire),
        UInt(0))
      pending_ignt_data := UInt(0)
      state := next
    }

    scoreboard += (pending_put_data.orR)
  }

  def innerGrant(
      data: UInt = io.ognt().data,
      external_pending: Bool = Bool(false),
      add: UInt = UInt(0)) {
    // Track the number of outstanding inner.finishes
    connectTwoWayBeatCounters(
      status = ifin_counter,
      up = io.inner.grant,
      down = io.inner.finish,
      max = nSecondaryMisses,
      trackUp = (g: Grant) => g.requiresAck())

    // Track which beats are ready for response
    when(!iacq_is_allocating) {
      pending_ignt_data := (pending_ignt_data & dropPendingBitWhenBeatHasData(io.inner.grant)) |
                             addPendingBitWhenBeatHasData(io.inner.release) |
                             addPendingBitWhenBeatHasData(io.outer.grant) |
                             add
    }

    // We can issue a grant for a pending write once all data is
    // received and committed to the data array or outer memory
    val ignt_ack_ready = !(state === s_idle ||
                           state === s_meta_read ||
                           pending_put_data.orR)

    val ignt_from_iacq = inner_coh.makeGrant(
      sec = ignt_q.io.deq.bits,
      manager_xact_id = UInt(trackerId),
      data = data)

    // Make the Grant message using the data stored in the secondary miss queue
    val (cnt, done) = connectOutgoingDataBeatCounter(io.inner.grant, ignt_q.io.deq.bits.addr_beat)
    ignt_data_idx := cnt
    ignt_data_done := done
    ignt_q.io.deq.ready := Bool(false)
    when(!vol_ignt_counter.pending) {
      ignt_q.io.deq.ready := ignt_data_done
      io.inner.grant.bits := ignt_from_iacq
      io.inner.grant.bits.addr_beat := ignt_data_idx // override based on outgoing counter
      when (state === s_busy && pending_ignt) {
        io.inner.grant.valid := !external_pending &&
          Mux(io.ignt().hasData(), pending_ignt_data(ignt_data_idx), ignt_ack_ready)
      }
    }

    // We must wait for as many Finishes as we sent Grants
    io.inner.finish.ready := state === s_busy

    scoreboard += (pending_ignt, ifin_counter.pending)
  }

}
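
For a PutBlock, AcceptsInnerAcquires seeds pending_put_data with every beat except those carried by the allocating Acquire, clears a bit as each later beat arrives, and holds off the ack Grant while any bit is still set. A pure-Scala model of that mask arithmetic (the 4-beat block size is an assumption; illustration only):

object PutBlockCollectionModel {
  val innerDataBeats = 4
  val allBeats = (1 << innerDataBeats) - 1

  def main(args: Array[String]): Unit = {
    // Allocation: the first Acquire beat (beat 0) arrives with data, so every
    // other beat of the PutBlock is still pending.
    var pendingPut = allBeats & ~(1 << 0)
    assert(pendingPut == 0xE)

    // Later beats arrive on inner.acquire and drop their pending bits.
    for (beat <- 1 until innerDataBeats) pendingPut &= ~(1 << beat)

    // ignt_ack_ready analogue: the ack Grant may only go out once no put data is pending.
    val igntAckReady = pendingPut == 0
    assert(igntAckReady)
  }
}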

trait EmitsOuterAcquires extends AcceptsInnerAcquires {
  val ognt_counter = Wire(new TwoWayBeatCounterStatus)

  // Handle misses or coherence permission upgrades by initiating a new transaction in the outer memory:
  //
  // If we're allocating in this cache, we can use the current metadata
  // to make an appropriate custom Acquire, otherwise we copy over the
  // built-in Acquire from the inner TL to the outer TL
  def outerAcquire(
      caching: Bool,
      coh: ClientMetadata,
      buffering: Bool = Bool(true),
      data: UInt = io.iacq().data,
      wmask: UInt = io.iacq().wmask(),
      next: UInt = s_busy) {

    // Tracks outstanding Acquires, waiting for their matching Grant.
    connectTwoWayBeatCounters(
      status = ognt_counter,
      up = io.outer.acquire,
      down = io.outer.grant,
      beat = xact_addr_beat,
      trackDown = (g: Grant) => !g.isVoluntary())

    io.outer.acquire.valid := state === s_outer_acquire &&
                                (xact_allocate ||
                                  Mux(buffering,
                                    !pending_put_data(ognt_counter.up.idx),
                                    io.inner.acquire.valid))

    io.outer.acquire.bits :=
      Mux(caching,
        coh.makeAcquire(
          op_code = xact_op_code,
          client_xact_id = UInt(0),
          addr_block = xact_addr_block),
        BuiltInAcquireBuilder(
          a_type = xact_iacq.a_type,
          client_xact_id = UInt(0),
          addr_block = xact_addr_block,
          addr_beat = ognt_counter.up.idx,
          data = data,
          addr_byte = xact_addr_byte,
          operand_size = xact_op_size,
          opcode = xact_op_code,
          wmask = wmask,
          alloc = Bool(false))
          (p.alterPartial({ case TLId => p(OuterTLId)})))

    when(state === s_outer_acquire && ognt_counter.up.done) { state := next }

    io.outer.grant.ready := state === s_busy

    scoreboard += ognt_counter.pending
  }
}
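
The valid condition on io.outer.acquire above has three cases: allocating transactions always stream their beats, buffered non-allocating ones wait until the beat has been collected (its pending_put_data bit cleared), and bufferless ones simply forward io.inner.acquire.valid. A pure-Scala restatement of that Mux; illustration only:

object OuterAcquireValidModel {
  // Mirrors: xact_allocate || Mux(buffering, !pending_put_data(beat), io.inner.acquire.valid)
  def outerAcqValid(allocating: Boolean, buffering: Boolean,
                    beatStillPending: Boolean, innerAcqValid: Boolean): Boolean =
    allocating || (if (buffering) !beatStillPending else innerAcqValid)

  def main(args: Array[String]): Unit = {
    // Allocating: send regardless of buffering state.
    assert(outerAcqValid(allocating = true, buffering = true, beatStillPending = true, innerAcqValid = false))
    // Buffered, beat not yet collected: hold the outer Acquire back.
    assert(!outerAcqValid(allocating = false, buffering = true, beatStillPending = true, innerAcqValid = true))
    // Bufferless: pass the inner valid straight through.
    assert(outerAcqValid(allocating = false, buffering = false, beatStillPending = true, innerAcqValid = true))
  }
}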

abstract class VoluntaryReleaseTracker(val trackerId: Int)(implicit p: Parameters) extends XactTracker()(p)
    with AcceptsVoluntaryReleases
    with RoutesInParent {
  def irel_can_merge = Bool(false)
  def irel_same_xact = io.irel().conflicts(xact_addr_block) &&
                         io.irel().isVoluntary() &&
                         pending_irel_data.orR
}
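
A voluntary Release is matched back to its tracker by irel_same_xact: the block addresses must conflict, the Release must be voluntary, and data beats must still be outstanding (pending_irel_data non-zero); probe responses are handled by the acquire-side trackers instead. A pure-Scala restatement; illustration only:

object VolReleaseMatchModel {
  // Mirrors irel_same_xact in VoluntaryReleaseTracker.
  def irelSameXact(sameBlock: Boolean, voluntary: Boolean, pendingIrelData: Int): Boolean =
    sameBlock && voluntary && pendingIrelData != 0

  def main(args: Array[String]): Unit = {
    assert(irelSameXact(sameBlock = true, voluntary = true, pendingIrelData = 0x6))   // later beats of the same writeback
    assert(!irelSameXact(sameBlock = true, voluntary = true, pendingIrelData = 0))    // all beats already collected
    assert(!irelSameXact(sameBlock = true, voluntary = false, pendingIrelData = 0x6)) // probe responses are not merged here
  }
}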

abstract class AcquireTracker(val trackerId: Int)(implicit p: Parameters) extends XactTracker()(p)
    with AcceptsInnerAcquires
    with EmitsOuterAcquires
    with EmitsInnerProbes
    with RoutesInParent {
}
@ -3,6 +3,7 @@
package uncore
import Chisel._
import cde.{Parameters, Field}
import junctions._

case object NReleaseTransactors extends Field[Int]
case object NProbeTransactors extends Field[Int]
@ -18,6 +19,7 @@ trait HasCoherenceAgentParameters {
  val nReleaseTransactors = 1
  val nAcquireTransactors = p(NAcquireTransactors)
  val nTransactors = nReleaseTransactors + nAcquireTransactors
  val blockAddrBits = p(PAddrBits) - p(CacheBlockOffsetBits)
  val outerTLId = p(OuterTLId)
  val outerTLParams = p(TLKey(outerTLId))
  val outerDataBeats = outerTLParams.dataBeats
@ -32,6 +34,7 @@ trait HasCoherenceAgentParameters {
  val innerWriteMaskBits = innerTLParams.writeMaskBits
  val innerBeatAddrBits = log2Up(innerDataBeats)
  val innerByteAddrBits = log2Up(innerDataBits/8)
  val innerNCachingClients = innerTLParams.nCachingClients
  val maxManagerXacts = innerTLParams.maxManagerXacts
  require(outerDataBeats == innerDataBeats) //TODO: fix all xact_data Vecs to remove this requirement
}
@ -66,31 +69,33 @@ trait HasCoherenceAgentWiringHelpers {
   *
   * When a match is reported, if ready is high the new transaction
   * is merged; when ready is low the transaction is being blocked.
   * When no match is reported, any high readys are presumed to be
   * When no match is reported, any high idles are presumed to be
   * from trackers that are available for allocation, and one is
   * assigned via alloc based on priority; f no readys are high then
   * all trackers are busy with other transactions.
   * assigned via alloc based on priority; if no idles are high then
   * all trackers are busy with other transactions. If idle is high
   * but ready is low, the tracker will be allocated but does not
   * have sufficient buffering for the data.
   */
  def doInputRoutingWithAllocation[T <: TileLinkChannel with HasTileLinkData](
      in: DecoupledIO[T],
      outs: Seq[DecoupledIO[T]],
      matches: Seq[Bool],
      allocs: Seq[Bool],
      allocs: Seq[TrackerAllocation],
      dataOverrides: Option[Seq[UInt]] = None,
      allocOverride: Option[Bool] = None,
      matchOverride: Option[Bool] = None) {
    val ready_bits = Vec(outs.map(_.ready)).toBits
    val alloc_bits = PriorityEncoderOH(ready_bits)
    val match_bits = Vec(matches).toBits
    val can_alloc_bits = Vec(allocs.map(_.can)).toBits
    val should_alloc_bits = PriorityEncoderOH(can_alloc_bits)
    val match_bits = Vec(allocs.map(_.matches)).toBits
    val no_matches = !match_bits.orR
    val alloc_ok = allocOverride.getOrElse(Bool(true))
    val match_ok = matchOverride.getOrElse(Bool(true))
    in.ready := Mux(no_matches, ready_bits.orR, (match_bits & ready_bits).orR) && alloc_ok && match_ok
    outs.zip(allocs).zipWithIndex.foreach { case((out, a), i) =>
    in.ready := (Mux(no_matches, can_alloc_bits, match_bits) & ready_bits).orR && alloc_ok && match_ok
    outs.zip(allocs).zipWithIndex.foreach { case((out, alloc), i) =>
      out.valid := in.valid && match_ok && alloc_ok
      out.bits := in.bits
      dataOverrides foreach { d => out.bits.data := d(i) }
      a := alloc_bits(i) & no_matches & alloc_ok
      alloc.should := should_alloc_bits(i) && no_matches && alloc_ok
    }
  }
}
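
The updated allocator separates three per-tracker signals: matches (an in-flight transaction claims the message), can (the tracker is idle and may be allocated), and ready (it has buffer space this cycle). Matches always win over allocation; otherwise the lowest-numbered idle tracker gets should, and in.ready additionally requires a ready tracker. A pure-Scala model of that selection; illustration only, not part of the commit:

object InputRoutingModel {
  case class Tracker(matches: Boolean, canAlloc: Boolean, ready: Boolean)

  // should-alloc analogue: only when nothing matches, pick the lowest-indexed idle tracker
  def shouldAlloc(ts: Seq[Tracker]): Option[Int] =
    if (ts.exists(_.matches)) None
    else ts.indexWhere(_.canAlloc) match { case -1 => None; case i => Some(i) }

  // in.ready analogue: a matching (or, failing that, an allocatable) tracker must also be ready
  def inReady(ts: Seq[Tracker]): Boolean =
    if (ts.exists(_.matches)) ts.exists(t => t.matches && t.ready)
    else                      ts.exists(t => t.canAlloc && t.ready)

  def main(args: Array[String]): Unit = {
    val hit  = Seq(Tracker(matches = true,  canAlloc = false, ready = true))
    val idle = Seq(Tracker(matches = false, canAlloc = true,  ready = false))
    assert(shouldAlloc(hit).isEmpty && inReady(hit))        // merged into the existing transaction
    assert(shouldAlloc(idle) == Some(0) && !inReady(idle))  // allocated, but blocked until buffering frees up
  }
}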
@ -141,87 +146,10 @@ class HierarchicalTLIO(implicit p: Parameters) extends CoherenceAgentBundle()(p)
    with HasInnerTLIO
    with HasCachedOuterTLIO

abstract class HierarchicalCoherenceAgent(implicit p: Parameters) extends CoherenceAgent()(p) {
abstract class HierarchicalCoherenceAgent(implicit p: Parameters) extends CoherenceAgent()(p)
    with HasCoherenceAgentWiringHelpers {
  val io = new HierarchicalTLIO
  def innerTL = io.inner
  def outerTL = io.outer
  def incoherent = io.incoherent
}

trait HasTrackerAllocationIO extends Bundle {
  val matches = new Bundle {
    val iacq = Bool(OUTPUT)
    val irel = Bool(OUTPUT)
    val oprb = Bool(OUTPUT)
  }
  val alloc = new Bundle {
    val iacq = Bool(INPUT)
    val irel = Bool(INPUT)
    val oprb = Bool(INPUT)
  }
}

class ManagerXactTrackerIO(implicit p: Parameters) extends ManagerTLIO()(p)
  with HasTrackerAllocationIO

class HierarchicalXactTrackerIO(implicit p: Parameters) extends HierarchicalTLIO()(p)
  with HasTrackerAllocationIO

abstract class XactTracker(implicit p: Parameters) extends CoherenceAgentModule()(p)
    with HasDataBeatCounters {
  def addPendingBitWhenBeat[T <: HasBeat](inc: Bool, in: T): UInt =
    Fill(in.tlDataBeats, inc) & UIntToOH(in.addr_beat)

  def dropPendingBitWhenBeat[T <: HasBeat](dec: Bool, in: T): UInt =
    ~Fill(in.tlDataBeats, dec) | ~UIntToOH(in.addr_beat)

  def addPendingBitWhenId[T <: HasClientId](inc: Bool, in: T): UInt =
    Fill(in.tlNCachingClients, inc) & UIntToOH(in.client_id)

  def dropPendingBitWhenId[T <: HasClientId](dec: Bool, in: T): UInt =
    ~Fill(in.tlNCachingClients, dec) | ~UIntToOH(in.client_id)

  def addPendingBitWhenBeatHasData[T <: HasBeat](in: DecoupledIO[T], inc: Bool = Bool(true)): UInt =
    addPendingBitWhenBeat(in.fire() && in.bits.hasData() && inc, in.bits)

  def addPendingBitWhenBeatHasDataAndAllocs(in: DecoupledIO[AcquireFromSrc]): UInt =
    addPendingBitWhenBeatHasData(in, in.bits.allocate())

  def addPendingBitWhenBeatIsGetOrAtomic(in: DecoupledIO[AcquireFromSrc]): UInt = {
    val a = in.bits
    val isGetOrAtomic = a.isBuiltInType() &&
      (Vec(Acquire.getType, Acquire.getBlockType, Acquire.putAtomicType).contains(a.a_type))
    addPendingBitWhenBeat(in.fire() && isGetOrAtomic, a)
  }

  def addPendingBitsFromAcquire(a: SecondaryMissInfo): UInt =
    Mux(a.hasMultibeatData(), Fill(a.tlDataBeats, UInt(1, 1)), UIntToOH(a.addr_beat))

  def dropPendingBitWhenBeatHasData[T <: HasBeat](in: DecoupledIO[T]): UInt =
    dropPendingBitWhenBeat(in.fire() && in.bits.hasData(), in.bits)

  def dropPendingBitAtDest[T <: HasId](in: DecoupledIO[T]): UInt =
    dropPendingBitWhenId(in.fire(), in.bits)

  def dropPendingBitAtDestWhenVoluntary[T <: HasId with MightBeVoluntary](in: DecoupledIO[T]): UInt =
    dropPendingBitWhenId(in.fire() && in.bits.isVoluntary(), in.bits)

  def addPendingBitAtSrc[T <: HasId](in: DecoupledIO[T]): UInt =
    addPendingBitWhenId(in.fire(), in.bits)

  def addPendingBitAtSrcWhenVoluntary[T <: HasId with MightBeVoluntary](in: DecoupledIO[T]): UInt =
    addPendingBitWhenId(in.fire() && in.bits.isVoluntary(), in.bits)

  def pinAllReadyValidLow[T <: Data](b: Bundle) {
    b.elements.foreach {
      _._2 match {
        case d: DecoupledIO[_] =>
          if(d.ready.dir == OUTPUT) d.ready := Bool(false)
          else if(d.valid.dir == OUTPUT) d.valid := Bool(false)
        case v: ValidIO[_] => if(v.valid.dir == OUTPUT) v.valid := Bool(false)
        case b: Bundle => pinAllReadyValidLow(b)
        case _ =>
      }
    }
  }
}
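
The pending-bit helpers above are just mask generators: the add variants produce a one-hot of the relevant beat or client (gated by the firing condition), while the drop variants produce an almost-all-ones mask meant to be AND-ed into the register. A pure-Scala model of the beat versions, using a hypothetical 4-beat block; illustration only:

object PendingBitHelperModel {
  val dataBeats = 4
  val allOnes   = (1 << dataBeats) - 1

  // addPendingBitWhenBeat analogue: Fill(dataBeats, inc) & UIntToOH(beat)
  def addMask(inc: Boolean, beat: Int): Int =
    (if (inc) allOnes else 0) & (1 << beat)

  // dropPendingBitWhenBeat analogue: ~Fill(dataBeats, dec) | ~UIntToOH(beat), truncated to dataBeats bits
  def dropMask(dec: Boolean, beat: Int): Int =
    (~(if (dec) allOnes else 0) | ~(1 << beat)) & allOnes

  def main(args: Array[String]): Unit = {
    var pending = 0
    pending |= addMask(inc = true, beat = 2)    // beat 2 fires with data: set its bit
    assert(pending == 0x4)
    pending &= dropMask(dec = true, beat = 2)   // beat 2 consumed: clear only its bit
    assert(pending == 0x0)
    pending &= dropMask(dec = false, beat = 1)  // nothing fired: mask is all ones, no change
    assert(pending == 0x0)
  }
}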