Merge pull request #432 from ucb-bar/tl2-address-filtering
TL2 address filtering
commit ea602790a8
@@ -18,6 +18,8 @@ import util._
case object NMemoryChannels extends Field[Int]
/** Number of banks per memory channel */
case object NBanksPerMemoryChannel extends Field[Int]
/** Number of trackers per bank */
case object NTrackersPerBank extends Field[Int]
/** Least significant bit of address used for bank partitioning */
case object BankIdLSB extends Field[Int]
/** Function for building some kind of coherence manager agent */
@@ -39,6 +41,7 @@ trait HasCoreplexParameters {
  lazy val nSlaves = p(rocketchip.NCoreplexExtClients)
  lazy val nMemChannels = p(NMemoryChannels)
  lazy val hasSupervisor = p(rocket.UseVM)
  lazy val nTrackersPerBank = p(NTrackersPerBank)
}

case class CoreplexParameters(implicit val p: Parameters) extends HasCoreplexParameters
@@ -93,6 +96,45 @@ trait CoreplexNetworkModule extends HasCoreplexParameters {
  implicit val p = outer.p
}

trait BankedL2 {
  this: CoreplexNetwork =>
  require (isPow2(nBanksPerMemChannel))
  require (isPow2(l1tol2_beatBytes))

  def l2ManagerFactory(): (TLInwardNode, TLOutwardNode)

  val l2Channels = Seq.fill(nMemChannels) {
    val bankBar = LazyModule(new TLXbar)
    val output = TLOutputNode()

    output := bankBar.node
    val mask = ~BigInt((nBanksPerMemChannel-1) * l1tol2_lineBytes)
    for (i <- 0 until nBanksPerMemChannel) {
      val (in, out) = l2ManagerFactory()
      in := TLFilter(AddressSet(i * l1tol2_lineBytes, mask))(l1tol2.node)
      bankBar.node := out
    }

    output
  }
}

trait BankedL2Bundle {
  this: CoreplexNetworkBundle {
    val outer: BankedL2
  } =>

  require (nMemChannels == 1, "Seq in Chisel Bundle needed to support > 1") // !!!
  val mem = outer.l2Channels.map(_.bundleOut).toList.head // .head should be removed !!!
}

trait BankedL2Module {
  this: CoreplexNetworkModule {
    val outer: BankedL2
    val io: BankedL2Bundle
  } =>
}

trait CoreplexRISCVPlatform {
  this: CoreplexNetwork =>
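Note: the AddressSet(i * l1tol2_lineBytes, mask) filter above selects, for bank i, every cache line whose bank-index bits equal i, so consecutive lines interleave across banks. A standalone sketch of that arithmetic, using assumed example values (2 banks per channel, 64-byte lines); the object and helper names are illustrative, not part of the diff:

object BankSelectSketch extends App {
  val lineBytes = 64                 // assumed l1tol2_lineBytes
  val banksPerChannel = 2            // assumed nBanksPerMemChannel
  // mask marks the "don't care" bits: everything except the bank-index bits
  val mask = ~BigInt((banksPerChannel - 1) * lineBytes)
  // An address lands in bank i iff (addr & ~mask) == i * lineBytes
  def bankOf(addr: BigInt): Int = ((addr & ~mask) / lineBytes).toInt
  println(bankOf(BigInt("80000000", 16)))  // first line of the channel -> bank 0
  println(bankOf(BigInt("80000040", 16)))  // next 64-byte line -> bank 1
  println(bankOf(BigInt("80000080", 16)))  // lines interleave -> bank 0 again
}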
@@ -155,6 +155,7 @@ class BaseCoreplexConfig extends Config (
      case BootROMFile => "./bootrom/bootrom.img"
      case NTiles => 1
      case NBanksPerMemoryChannel => Knob("NBANKS_PER_MEM_CHANNEL")
      case NTrackersPerBank => Knob("NTRACKERS_PER_BANK")
      case BankIdLSB => 0
      case CacheBlockBytes => Dump("CACHE_BLOCK_BYTES", 64)
      case CacheBlockOffsetBits => log2Up(here(CacheBlockBytes))
@@ -163,6 +164,7 @@ class BaseCoreplexConfig extends Config (
    }},
  knobValues = {
    case "NBANKS_PER_MEM_CHANNEL" => 1
    case "NTRACKERS_PER_BANK" => 4
    case "L1D_MSHRS" => 2
    case "L1D_SETS" => 64
    case "L1D_WAYS" => 4
@@ -183,6 +185,12 @@ class WithNBanksPerMemChannel(n: Int) extends Config(
    case _ => throw new CDEMatchError
  })

class WithNTrackersPerBank(n: Int) extends Config(
  knobValues = {
    case "NTRACKERS_PER_BANK" => n
    case _ => throw new CDEMatchError
  })

class WithDataScratchpad(n: Int) extends Config(
  (pname,site,here) => pname match {
    case DataScratchpadSize => n
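Note: config fragments like these are normally stacked with cde-style ++ composition, with overrides to the left taking precedence over defaults further right. A hypothetical composition sketch (the class name is made up; it assumes the class sits alongside BaseCoreplexConfig in the same package):

class DualBankCoreplexConfig extends Config(
  new WithNBanksPerMemChannel(2) ++
  new WithNTrackersPerBank(2) ++
  new BaseCoreplexConfig)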
@@ -10,6 +10,16 @@ import uncore.util._
import util._
import rocket._

trait BroadcastL2 {
  this: CoreplexNetwork =>
  def l2ManagerFactory() = {
    val bh = LazyModule(new TLBroadcast(l1tol2_beatBytes, nTrackersPerBank))
    (bh.node, bh.node)
  }
}

/////

trait DirectConnection {
  this: CoreplexNetwork with CoreplexRISCVPlatform =>
  lazyTiles.map(_.slave).flatten.foreach { scratch => scratch := cbus.node }
@@ -104,6 +104,17 @@ case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet]
  // Widen the match function to ignore all bits in imask
  def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask)

  // Return an AddressSet that only contains the addresses both sets contain
  def intersect(x: AddressSet): Option[AddressSet] = {
    if (!overlaps(x)) {
      None
    } else {
      val r_mask = mask & x.mask
      val r_base = base | x.base
      Some(AddressSet(r_base, r_mask))
    }
  }

  // AddressSets have one natural Ordering (the containment order, if contiguous)
  def compare(x: AddressSet) = {
    val primary = (this.base - x.base).signum // smallest address first
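Note: in this base/mask convention an address belongs to the set when its non-mask bits equal base, and intersect keeps only the don't-care bits both sets share. A minimal plain-Scala sketch of that semantics (a simplified stand-in with assumed example values, not the diplomacy class itself):

object AddressSetSketch extends App {
  // Simplified model: mask bits are "don't care", all other bits must equal base.
  case class ASet(base: BigInt, mask: BigInt) {
    def contains(a: BigInt): Boolean = ((a ^ base) & ~mask) == 0
    def overlaps(x: ASet): Boolean   = ((base ^ x.base) & ~(mask | x.mask)) == 0
    def intersect(x: ASet): Option[ASet] =
      if (!overlaps(x)) None else Some(ASet(base | x.base, mask & x.mask))
  }
  val dram  = ASet(BigInt("80000000", 16), BigInt("0FFFFFFF", 16)) // assumed 256 MiB window
  val bank0 = ASet(0, ~BigInt(0x40))                               // even 64-byte lines
  dram.intersect(bank0).foreach { r =>
    // prints base=0x80000000 mask=0xfffffbf: the even lines of the DRAM window
    println(s"base=0x${r.base.toString(16)} mask=0x${r.mask.toString(16)}")
  }
}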
@@ -133,5 +144,24 @@ object AddressSet
        misaligned(base+step, size-step, AddressSet(base, step-1) +: tail)
      }
    }
  }

  def unify(seq: Seq[AddressSet]): Seq[AddressSet] = {
    val n = seq.size
    val array = Array(seq:_*)
    var filter = Array.fill(n) { false }
    for (i <- 0 until n-1) { if (!filter(i)) {
      for (j <- i+1 until n) { if (!filter(j)) {
        val a = array(i)
        val b = array(j)
        if (a.mask == b.mask && isPow2(a.base ^ b.base)) {
          val c_base = a.base & ~(a.base ^ b.base)
          val c_mask = a.mask | (a.base ^ b.base)
          filter.update(j, true)
          array.update(i, AddressSet(c_base, c_mask))
        }
      }}
    }}
    val out = (array zip filter) flatMap { case (a, f) => if (f) None else Some(a) }
    if (out.size != n) unify(out) else out.toList
  }
}
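Note: unify repeatedly merges two sets that have the same mask and differ in exactly one address bit, which is what turns the per-bank filtered ranges back into one contiguous region. An illustrative, standalone rendering of that pairwise merge rule with assumed values (two adjacent 64-byte sets):

object UnifySketch extends App {
  def isPow2(x: BigInt): Boolean = x > 0 && (x & (x - 1)) == 0
  val (aBase, aMask) = (BigInt(0x00), BigInt(0x3f)) // covers 0x00..0x3f
  val (bBase, bMask) = (BigInt(0x40), BigInt(0x3f)) // covers 0x40..0x7f
  if (aMask == bMask && isPow2(aBase ^ bBase)) {
    val cBase = aBase & ~(aBase ^ bBase) // 0x00
    val cMask = aMask |  (aBase ^ bBase) // 0x7f
    // prints merged: base=0x0 mask=0x7f, i.e. one set covering 0x00..0x7f
    println(s"merged: base=0x${cBase.toString(16)} mask=0x${cMask.toString(16)}")
  }
}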
@@ -67,7 +67,7 @@ class TLBroadcast(lineBytes: Int, numTrackers: Int = 4) extends LazyModule
    require (lineBytes >= edgeOut.manager.beatBytes)
    // For the probe walker, we need to identify all the caches
    val caches = clients.filter(_.supportsProbe).map(_.sourceId)
-   val cache_targets = Vec(caches.map(c => UInt(c.start)))
+   val cache_targets = caches.map(c => UInt(c.start))

    // Create the request tracker queues
    val trackers = Seq.tabulate(numTrackers) { id =>
@@ -81,7 +81,7 @@ class TLBroadcast(lineBytes: Int, numTrackers: Int = 4) extends LazyModule
    }

    // Depending on the high source bits, we might transform D
-   val d_high = 1 << log2Ceil(edgeIn.client.endSourceId)
+   val d_high = log2Ceil(edgeIn.client.endSourceId)
    val d_what = out.d.bits.source(d_high+1, d_high)
    val d_drop = d_what === DROP
    val d_hasData = edgeOut.hasData(out.d.bits)
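Note: the fix matters because d_high is used as a bit index. The hub widens each inbound source id by two high bits that say how to treat the response, and source(d_high+1, d_high) must land exactly on those tag bits, so d_high has to be log2Ceil(endSourceId), not a power of two. A plain-Scala sketch with assumed values (not the Chisel code itself):

object SourceTagSketch extends App {
  val endSourceId = 5                              // assumed number of inbound source ids
  val dHigh = BigInt(endSourceId - 1).bitLength    // log2Ceil(endSourceId) = 3
  val DROP = 1                                     // matches TLBroadcastConstants below
  val src = 2                                      // an original inbound source id
  val outSource = (DROP << dHigh) | src            // id carried on the outer channel
  println((outSource >> dHigh) & 0x3)              // 1: the recovered tag bits (DROP)
  println(outSource & ((1 << dHigh) - 1))          // 2: the original source id
}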
@@ -143,20 +143,22 @@ class TLBroadcast(lineBytes: Int, numTrackers: Int = 4) extends LazyModule
      trackers.map { t => (edgeOut.numBeats1(t.out_a.bits), t.out_a) }):_*)

    // The Probe FSM walks all caches and probes them
-   val probe_todo = RegInit(UInt(0, width = caches.size))
+   val probe_todo = RegInit(UInt(0, width = max(1, caches.size)))
    val probe_line = Reg(UInt())
    val probe_perms = Reg(UInt(width = 2))
    val probe_next = probe_todo & ~(leftOR(probe_todo) << 1)
    val probe_busy = probe_todo.orR()
-   val probe_target = Mux1H(probe_next, cache_targets)
+   val probe_target = if (caches.size == 0) UInt(0) else Mux1H(probe_next, cache_targets)

    // Probe whatever the FSM wants to do next
    in.b.valid := probe_busy
-   in.b.bits := edgeIn.Probe(probe_line << lineShift, probe_target, UInt(lineShift), probe_perms)._2
+   if (caches.size != 0) {
+     in.b.bits := edgeIn.Probe(probe_line << lineShift, probe_target, UInt(lineShift), probe_perms)._2
+   }
    when (in.b.fire()) { probe_todo := probe_todo & ~probe_next }

    // Which cache does a request come from?
-   val a_cache = Vec(caches.map(_.contains(in.a.bits.source))).asUInt
+   val a_cache = if (caches.size == 0) UInt(1) else Vec(caches.map(_.contains(in.a.bits.source))).asUInt
    val (a_first, _, _) = edgeIn.firstlast(in.a)

    // To accept a request from A, the probe FSM must be idle and there must be a matching tracker
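Note: probe_next isolates the lowest set bit of probe_todo, i.e. the next cache still to be probed. Assuming leftOR is the usual prefix-OR from the LSB upward (a rocket-chip util helper), the expression reduces to x & -x. A plain-Scala illustration, with assumed width and values:

object ProbeNextSketch extends App {
  val width = 4
  // Prefix OR from the LSB upward: output bit i is the OR of input bits 0..i.
  def leftOR(x: BigInt): BigInt =
    (0 until width).map { i =>
      if (((x >> i) & 1) == 1) ((BigInt(1) << (width - i)) - 1) << i else BigInt(0)
    }.foldLeft(BigInt(0))(_ | _)
  val todo = BigInt("1010", 2)            // caches 1 and 3 still pending
  val next = todo & ~(leftOR(todo) << 1)
  println(next.toString(2))               // "10": probe cache 1 first
  println((todo & -todo).toString(2))     // "10": same result via x & -x
}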
@@ -173,7 +175,7 @@ class TLBroadcast(lineBytes: Int, numTrackers: Int = 4) extends LazyModule
      t.in_a.valid := in.a.valid && select && (!a_first || !probe_busy)
      t.in_a.bits := in.a.bits
      t.in_a_first := a_first
-     t.probe := Mux(a_cache.orR(), UInt(caches.size-1), UInt(caches.size))
+     t.probe := (if (caches.size == 0) UInt(0) else Mux(a_cache.orR(), UInt(caches.size-1), UInt(caches.size)))
    }

    when (in.a.fire() && a_first) {
@@ -256,12 +258,7 @@ class TLBroadcastTracker(id: Int, lineBytes: Int, probeCountBits: Int, edgeIn: T
  io.source := source
  io.line := address >> lineShift

-  class Data extends Bundle {
-    val mask = io.in_a.bits.mask.cloneType
-    val data = io.in_a.bits.data.cloneType
-  }
-
-  val i_data = Wire(Decoupled(new Data))
+  val i_data = Wire(Decoupled(new TLBroadcastData(edgeIn.bundle)))
  val o_data = Queue(i_data, lineBytes / edgeIn.manager.beatBytes)

  io.in_a.ready := (idle || !io.in_a_first) && i_data.ready
@@ -295,3 +292,9 @@ object TLBroadcastConstants
  val DROP = UInt(1)
  val PASS = UInt(0)
}

class TLBroadcastData(params: TLBundleParameters) extends TLBundleBase(params)
{
  val mask = UInt(width = params.dataBits/8)
  val data = UInt(width = params.dataBits)
}
src/main/scala/uncore/tilelink2/Filter.scala (new file, 49 lines)
@@ -0,0 +1,49 @@
// See LICENSE for license details.

package uncore.tilelink2

import Chisel._
import chisel3.internal.sourceinfo.SourceInfo
import diplomacy._
import scala.math.{min,max}

class TLFilter(select: AddressSet) extends LazyModule
{
  val node = TLAdapterNode(
    clientFn  = { case Seq(cp) => cp },
    managerFn = { case Seq(mp) =>
      mp.copy(managers = mp.managers.map { m =>
        val filtered = m.address.map(_.intersect(select)).flatten
        val cap = TransferSizes(1, select.alignment.toInt)
        if (filtered.isEmpty) { None } else {
          Some(m.copy(
            address            = filtered,
            supportsAcquire    = m.supportsAcquire   .intersect(cap),
            supportsArithmetic = m.supportsArithmetic.intersect(cap),
            supportsLogical    = m.supportsLogical   .intersect(cap),
            supportsGet        = m.supportsGet       .intersect(cap),
            supportsPutFull    = m.supportsPutFull   .intersect(cap),
            supportsPutPartial = m.supportsPutPartial.intersect(cap),
            supportsHint       = m.supportsHint      .intersect(cap)))
        }
      }.flatten)
    })

  lazy val module = new LazyModuleImp(this) {
    val io = new Bundle {
      val in  = node.bundleIn
      val out = node.bundleOut
    }
    io.out <> io.in
  }
}

object TLFilter
{
  // applied to the TL source node; y.node := TLFilter(select)(x.node)
  def apply(select: AddressSet)(x: TLOutwardNode)(implicit sourceInfo: SourceInfo): TLOutwardNode = {
    val filter = LazyModule(new TLFilter(select))
    filter.node := x
    filter.node
  }
}
@@ -23,7 +23,9 @@ case class TLManagerParameters(
  fifoId: Option[Int] = None,
  customDTS: Option[String] = None)
{
  require (!address.isEmpty)
  address.foreach { a => require (a.finite) }

  address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y)) }
  require (supportsPutFull.contains(supportsPutPartial))
@@ -285,3 +287,45 @@ case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: T
{
  val bundle = TLAsyncBundleParameters(manager.depth, TLBundleParameters(client.base, manager.base))
}

object ManagerUnification
{
  def apply(managers: Seq[TLManagerParameters]) = {
    // To be unified, devices must agree on all of these terms
    case class TLManagerKey(
      regionType:         RegionType.T,
      executable:         Boolean,
      lastNode:           BaseNode,
      supportsAcquire:    TransferSizes,
      supportsArithmetic: TransferSizes,
      supportsLogical:    TransferSizes,
      supportsGet:        TransferSizes,
      supportsPutFull:    TransferSizes,
      supportsPutPartial: TransferSizes,
      supportsHint:       TransferSizes)
    def key(x: TLManagerParameters) = TLManagerKey(
      regionType         = x.regionType,
      executable         = x.executable,
      lastNode           = x.nodePath.last,
      supportsAcquire    = x.supportsAcquire,
      supportsArithmetic = x.supportsArithmetic,
      supportsLogical    = x.supportsLogical,
      supportsGet        = x.supportsGet,
      supportsPutFull    = x.supportsPutFull,
      supportsPutPartial = x.supportsPutPartial,
      supportsHint       = x.supportsHint)
    val map = scala.collection.mutable.HashMap[TLManagerKey, TLManagerParameters]()
    managers.foreach { m =>
      val k = key(m)
      map.get(k) match {
        case None => map.update(k, m)
        case Some(n) => {
          map.update(k, m.copy(
            address = m.address ++ n.address,
            fifoId = None)) // Merging means it's not FIFO anymore!
        }
      }
    }
    map.values.map(m => m.copy(address = AddressSet.unify(m.address))).toList
  }
}
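Note: the effect is that the per-bank copies of the same device (same capabilities, same terminal node) reappear in the crossbar's manager list as a single manager whose address sets are concatenated and then re-unified. A standalone sketch of that grouping idea on simplified, assumed types (not the TileLink parameter classes):

object UnificationSketch extends App {
  case class Mgr(name: String, key: String, address: Seq[String], fifoId: Option[Int])
  // Group managers that agree on every capability (modelled here as one key string),
  // concatenate their address sets, and drop FIFO ordering for merged entries.
  def unify(managers: Seq[Mgr]): Seq[Mgr] =
    managers.groupBy(_.key).values.map { ms =>
      if (ms.size == 1) ms.head
      else ms.head.copy(address = ms.flatMap(_.address), fifoId = None)
    }.toSeq
  val banks = Seq(
    Mgr("bank0", "broadcast-hub", Seq("even 64B lines"), Some(0)),
    Mgr("bank1", "broadcast-hub", Seq("odd 64B lines"),  Some(1)))
  unify(banks).foreach(println) // one manager covering both halves, fifoId = None
}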
@@ -54,13 +54,13 @@ class TLXbar(policy: TLArbiter.Policy = TLArbiter.lowestIndexFirst) extends Lazy
      seq(0).copy(
        minLatency = seq.map(_.minLatency).min,
        endSinkId = outputIdRanges.map(_.end).max,
-       managers = (outputIdRanges zip seq) flatMap { case (range, port) =>
+       managers = ManagerUnification(seq.flatMap { port =>
          require (port.beatBytes == seq(0).beatBytes)
          val fifoIdMapper = fifoIdFactory()
          port.managers map { manager => manager.copy(
            fifoId = manager.fifoId.map(fifoIdMapper(_))
          )}
-       }
+       })
      )
    })