fix TileLink arbiters and add memory interconnect and memory selector
commit 3083bbca21 (parent cf363b1fe4)
@@ -126,3 +126,101 @@ class TileLinkRecursiveInterconnect(
     }
   }
 }
+
+class TileLinkMemoryInterconnect(
+    nBanksPerChannel: Int, nChannels: Int)
+    (implicit p: Parameters) extends TileLinkInterconnect()(p) {
+
+  val nBanks = nBanksPerChannel * nChannels
+  val nInner = nBanks
+  val nOuter = nChannels
+
+  def connectChannel(outer: ClientUncachedTileLinkIO, inner: ClientUncachedTileLinkIO) {
+    outer <> inner
+    outer.acquire.bits.addr_block := inner.acquire.bits.addr_block >> UInt(log2Ceil(nChannels))
+  }
+
+  for (i <- 0 until nChannels) {
+    /* Bank assignments to channels are strided so that consecutive banks
+     * map to different channels. That way, consecutive cache lines also
+     * map to different channels */
+    val banks = (i until nBanks by nChannels).map(j => io.in(j))
+
+    val channelArb = Module(new ClientUncachedTileLinkIOArbiter(nBanksPerChannel))
+    channelArb.io.in <> banks
+    connectChannel(io.out(i), channelArb.io.out)
+  }
+}
+
+/** Allows users to switch between various memory configurations. Note that
+  * this is a dangerous operation: not only does switching the select input to
+  * this module violate TileLink, it also causes the memory of the machine to
+  * become garbled. It's expected that select only changes at boot time, as
+  * part of the memory controller configuration. */
+class TileLinkMemorySelectorIO(val nBanks: Int, val maxMemChannels: Int, nConfigs: Int)
+    (implicit p: Parameters)
+    extends TileLinkInterconnectIO(nBanks, maxMemChannels) {
+  val select = UInt(INPUT, width = log2Up(nConfigs))
+  override def cloneType =
+    new TileLinkMemorySelectorIO(nBanks, maxMemChannels, nConfigs).asInstanceOf[this.type]
+}
+
+class TileLinkMemorySelector(nBanks: Int, maxMemChannels: Int, configs: Seq[Int])
+    (implicit p: Parameters)
+    extends TileLinkInterconnect()(p) {
+  val nInner = nBanks
+  val nOuter = maxMemChannels
+  val nConfigs = configs.size
+
+  override lazy val io = new TileLinkMemorySelectorIO(nBanks, maxMemChannels, nConfigs)
+
+  def muxOnSelect[T <: Data](up: DecoupledIO[T], dn: DecoupledIO[T], active: Bool): Unit = {
+    when (active) { dn.bits := up.bits }
+    when (active) { up.ready := dn.ready }
+    when (active) { dn.valid := up.valid }
+  }
+
+  def muxOnSelect(up: ClientUncachedTileLinkIO, dn: ClientUncachedTileLinkIO, active: Bool): Unit = {
+    muxOnSelect(up.acquire, dn.acquire, active)
+    muxOnSelect(dn.grant, up.grant, active)
+  }
+
+  def muxOnSelect(up: Vec[ClientUncachedTileLinkIO], dn: Vec[ClientUncachedTileLinkIO], active: Bool): Unit = {
+    for (i <- 0 until up.size)
+      muxOnSelect(up(i), dn(i), active)
+  }
+
+  /* Disconnects a vector of TileLink ports, which involves setting them to
+   * invalid. Due to Chisel reasons, we need to also set the bits to 0 (since
+   * there can't be any unconnected inputs). */
+  def disconnectOuter(outer: Vec[ClientUncachedTileLinkIO]) = {
+    outer.foreach { m =>
+      m.acquire.valid := Bool(false)
+      m.acquire.bits := m.acquire.bits.fromBits(UInt(0))
+      m.grant.ready := Bool(false)
+    }
+  }
+
+  def disconnectInner(inner: Vec[ClientUncachedTileLinkIO]) = {
+    inner.foreach { m =>
+      m.grant.valid := Bool(false)
+      m.grant.bits := m.grant.bits.fromBits(UInt(0))
+      m.acquire.ready := Bool(false)
+    }
+  }
+
+  /* Provides default wires on all our outputs. */
+  disconnectOuter(io.out)
+  disconnectInner(io.in)
+
+  /* Constructs an interconnect for each of the layouts suggested by the
+   * configuration and switches between them based on the select input. */
+  configs.zipWithIndex.foreach { case (nChannels, select) =>
+    val nBanksPerChannel = nBanks / nChannels
+    val ic = Module(new TileLinkMemoryInterconnect(nBanksPerChannel, nChannels))
+    disconnectInner(ic.io.out)
+    disconnectOuter(ic.io.in)
+    muxOnSelect(io.in, ic.io.in, io.select === UInt(select))
+    muxOnSelect(ic.io.out, io.out, io.select === UInt(select))
+  }
+}
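To make the strided bank assignment and the addr_block shift in connectChannel concrete, here is a small self-contained Scala sketch. It is not part of the commit: the bank and channel counts are invented, and it assumes (as rocket-chip's banking convention does) that the bank index occupies the low-order bits of addr_block, which is what makes the right-shift yield a dense per-channel address space.

object StridedMappingDemo extends App {
  val nChannels = 2   // hypothetical: two memory channels
  val nBanks = 4      // hypothetical: two banks per channel
  // Strided assignment: bank j goes to channel j % nChannels, so consecutive
  // banks (and hence consecutive cache blocks) land on different channels.
  for (i <- 0 until nChannels) {
    val banks = i until nBanks by nChannels
    println(s"channel $i serves banks ${banks.mkString(", ")}")
  }
  // Each channel sees addr_block >> log2(nChannels): a dense local block space.
  val log2Channels = Integer.numberOfTrailingZeros(nChannels) // nChannels is a power of two
  for (addrBlock <- 0 until 8) {
    val channel = addrBlock % nChannels     // low bits pick the bank, hence the channel
    val local   = addrBlock >> log2Channels // block address the channel observes
    println(s"global block $addrBlock -> channel $channel, local block $local")
  }
}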
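For context, a hedged sketch of how the new selector might be wired up; the instance names (bankIOs, io.mem) and parameter values are hypothetical, not from this commit. With configs = Seq(1, 2), driving select with 0 picks the single-channel layout and 1 picks the two-channel layout; per the warning in the doc comment above, select should only change at boot.

// Hypothetical usage sketch (Chisel 2 style, matching this codebase):
// 8 banks, at most 2 physical channels, two selectable layouts.
val selector = Module(new TileLinkMemorySelector(
  nBanks = 8, maxMemChannels = 2, configs = Seq(1, 2)))
selector.io.select := UInt(1)   // boot-time choice: the two-channel layout
selector.io.in <> bankIOs       // bankIOs: Vec of 8 ClientUncachedTileLinkIO (assumed)
io.mem <> selector.io.out       // only the first configs(select) outputs carry traffic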
@@ -1150,8 +1150,10 @@ class ClientUncachedTileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters)
     val in = Vec(arbN, new ClientUncachedTileLinkIO).flip
     val out = new ClientUncachedTileLinkIO
   }
 
-  hookupClientSourceHeaderless(io.in.map(_.acquire), io.out.acquire)
-  hookupManagerSourceHeaderlessWithId(io.in.map(_.grant), io.out.grant)
+  if (arbN > 1) {
+    hookupClientSourceHeaderless(io.in.map(_.acquire), io.out.acquire)
+    hookupManagerSourceHeaderlessWithId(io.in.map(_.grant), io.out.grant)
+  } else { io.out <> io.in.head }
 }
 
 /** Concrete client-side arbiter that appends the arbiter's port id to client_xact_id */
@@ -1160,8 +1162,10 @@ class ClientTileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters) extends
     val in = Vec(arbN, new ClientTileLinkIO).flip
     val out = new ClientTileLinkIO
   }
 
-  hookupClientSourceHeaderless(io.in.map(_.acquire), io.out.acquire)
-  hookupClientSourceHeaderless(io.in.map(_.release), io.out.release)
-  hookupManagerSourceBroadcast(io.in.map(_.probe), io.out.probe)
-  hookupManagerSourceHeaderlessWithId(io.in.map(_.grant), io.out.grant)
+  if (arbN > 1) {
+    hookupClientSourceHeaderless(io.in.map(_.acquire), io.out.acquire)
+    hookupClientSourceHeaderless(io.in.map(_.release), io.out.release)
+    hookupManagerSourceBroadcast(io.in.map(_.probe), io.out.probe)
+    hookupManagerSourceHeaderlessWithId(io.in.map(_.grant), io.out.grant)
+  } else { io.out <> io.in.head }
 }
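The arbiter fix in the two hunks above is the if (arbN > 1) guard: with a single client there is nothing to arbitrate, presumably no port-id bits to append to client_xact_id (log2Up(1) is zero), so the ports are now connected directly via io.out <> io.in.head. Below is a minimal sketch of the same guard pattern on a plain Decoupled arbiter; the module and its names are hypothetical, not code from this commit.

// Sketch of the degenerate-arbiter guard (Chisel 2 style, assumes RRArbiter):
class GuardedArbiter[T <: Data](gen: T, n: Int) extends Module {
  val io = new Bundle {
    val in = Vec(n, Decoupled(gen)).flip
    val out = Decoupled(gen)
  }
  if (n > 1) {
    // Real arbitration only when there are multiple requesters.
    val arb = Module(new RRArbiter(gen, n))
    arb.io.in <> io.in
    io.out <> arb.io.out
  } else {
    // One client: bypass arbitration entirely, mirroring the fixed
    // TileLink arbiters' `io.out <> io.in.head`.
    io.out <> io.in.head
  }
}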