reorganize moving non-submodule packages into src/main/scala
src/main/scala/uncore/tilelink/Arbiters.scala (Normal file, 196 lines)
@@ -0,0 +1,196 @@
package uncore.tilelink
import Chisel._
import junctions._
import cde.{Parameters, Field}

/** Utility functions for constructing TileLinkIO arbiters */
trait TileLinkArbiterLike extends HasTileLinkParameters {
  // Some shorthand type variables
  type ManagerSourcedWithId = ManagerToClientChannel with HasClientTransactionId
  type ClientSourcedWithId = ClientToManagerChannel with HasClientTransactionId
  type ClientSourcedWithIdAndData = ClientToManagerChannel with HasClientTransactionId with HasTileLinkData

  val arbN: Int // The number of ports on the client side

  // These abstract funcs are filled in depending on whether the arbiter mucks with the
  // outgoing client ids to track sourcing and then needs to revert them on the way back
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int): Bits
  def managerSourcedClientXactId(in: ManagerSourcedWithId): Bits
  def arbIdx(in: ManagerSourcedWithId): UInt

  // The following functions are all wiring helpers for each of the different types of TileLink channels

  def hookupClientSource[M <: ClientSourcedWithIdAndData](
      clts: Seq[DecoupledIO[LogicalNetworkIO[M]]],
      mngr: DecoupledIO[LogicalNetworkIO[M]]) {
    def hasData(m: LogicalNetworkIO[M]) = m.payload.hasMultibeatData()
    val arb = Module(new LockingRRArbiter(mngr.bits, arbN, tlDataBeats, Some(hasData _)))
    clts.zipWithIndex.zip(arb.io.in).map{ case ((req, id), arb) => {
      arb.valid := req.valid
      arb.bits := req.bits
      arb.bits.payload.client_xact_id := clientSourcedClientXactId(req.bits.payload, id)
      req.ready := arb.ready
    }}
    mngr <> arb.io.out
  }

  def hookupClientSourceHeaderless[M <: ClientSourcedWithIdAndData](
      clts: Seq[DecoupledIO[M]],
      mngr: DecoupledIO[M]) {
    def hasData(m: M) = m.hasMultibeatData()
    val arb = Module(new LockingRRArbiter(mngr.bits, arbN, tlDataBeats, Some(hasData _)))
    clts.zipWithIndex.zip(arb.io.in).map{ case ((req, id), arb) => {
      arb.valid := req.valid
      arb.bits := req.bits
      arb.bits.client_xact_id := clientSourcedClientXactId(req.bits, id)
      req.ready := arb.ready
    }}
    mngr <> arb.io.out
  }

  def hookupManagerSourceWithHeader[M <: ManagerToClientChannel](
      clts: Seq[DecoupledIO[LogicalNetworkIO[M]]],
      mngr: DecoupledIO[LogicalNetworkIO[M]]) {
    mngr.ready := Bool(false)
    for (i <- 0 until arbN) {
      clts(i).valid := Bool(false)
      when (mngr.bits.header.dst === UInt(i)) {
        clts(i).valid := mngr.valid
        mngr.ready := clts(i).ready
      }
      clts(i).bits := mngr.bits
    }
  }

  def hookupManagerSourceWithId[M <: ManagerSourcedWithId](
      clts: Seq[DecoupledIO[LogicalNetworkIO[M]]],
      mngr: DecoupledIO[LogicalNetworkIO[M]]) {
    mngr.ready := Bool(false)
    for (i <- 0 until arbN) {
      clts(i).valid := Bool(false)
      when (arbIdx(mngr.bits.payload) === UInt(i)) {
        clts(i).valid := mngr.valid
        mngr.ready := clts(i).ready
      }
      clts(i).bits := mngr.bits
      clts(i).bits.payload.client_xact_id := managerSourcedClientXactId(mngr.bits.payload)
    }
  }

  def hookupManagerSourceHeaderlessWithId[M <: ManagerSourcedWithId](
      clts: Seq[DecoupledIO[M]],
      mngr: DecoupledIO[M]) {
    mngr.ready := Bool(false)
    for (i <- 0 until arbN) {
      clts(i).valid := Bool(false)
      when (arbIdx(mngr.bits) === UInt(i)) {
        clts(i).valid := mngr.valid
        mngr.ready := clts(i).ready
      }
      clts(i).bits := mngr.bits
      clts(i).bits.client_xact_id := managerSourcedClientXactId(mngr.bits)
    }
  }

  def hookupManagerSourceBroadcast[M <: Data](clts: Seq[DecoupledIO[M]], mngr: DecoupledIO[M]) {
    clts.map{ _.valid := mngr.valid }
    clts.map{ _.bits := mngr.bits }
    mngr.ready := clts.map(_.ready).reduce(_&&_)
  }

  def hookupFinish[M <: LogicalNetworkIO[Finish]](clts: Seq[DecoupledIO[M]], mngr: DecoupledIO[M]) {
    val arb = Module(new RRArbiter(mngr.bits, arbN))
    arb.io.in <> clts
    mngr <> arb.io.out
  }
}

/** Abstract base class for any Arbiters that have UncachedTileLinkIOs */
abstract class UncachedTileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters) extends Module
    with TileLinkArbiterLike {
  val io = new Bundle {
    val in = Vec(arbN, new UncachedTileLinkIO).flip
    val out = new UncachedTileLinkIO
  }
  hookupClientSource(io.in.map(_.acquire), io.out.acquire)
  hookupFinish(io.in.map(_.finish), io.out.finish)
  hookupManagerSourceWithId(io.in.map(_.grant), io.out.grant)
}

/** Abstract base class for any Arbiters that have cached TileLinkIOs */
abstract class TileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters) extends Module
    with TileLinkArbiterLike {
  val io = new Bundle {
    val in = Vec(arbN, new TileLinkIO).flip
    val out = new TileLinkIO
  }
  hookupClientSource(io.in.map(_.acquire), io.out.acquire)
  hookupClientSource(io.in.map(_.release), io.out.release)
  hookupFinish(io.in.map(_.finish), io.out.finish)
  hookupManagerSourceBroadcast(io.in.map(_.probe), io.out.probe)
  hookupManagerSourceWithId(io.in.map(_.grant), io.out.grant)
}

/** Appends the port index of the arbiter to the client_xact_id */
trait AppendsArbiterId extends TileLinkArbiterLike {
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int) =
    Cat(in.client_xact_id, UInt(id, log2Up(arbN)))
  def managerSourcedClientXactId(in: ManagerSourcedWithId) = {
    /* This shouldn't be necessary, but Chisel3 doesn't emit correct Verilog
     * when right shifting by too many bits. See
     * https://github.com/ucb-bar/firrtl/issues/69 */
    if (in.client_xact_id.getWidth > log2Up(arbN))
      in.client_xact_id >> log2Up(arbN)
    else
      UInt(0)
  }
  def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id(log2Up(arbN)-1,0)
}
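
// Worked example: with arbN = 4 (two port-id bits), a request from port 2
// carrying client_xact_id = 5 leaves the arbiter as Cat(5, 2) = (5 << 2) | 2 = 22.
// When the matching Grant returns, arbIdx extracts 22(1,0) = 2 to route it back
// to port 2, and managerSourcedClientXactId restores 22 >> 2 = 5.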

/** Uses the client_xact_id as is (assumes it has been set to port index) */
trait PassesId extends TileLinkArbiterLike {
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int) = in.client_xact_id
  def managerSourcedClientXactId(in: ManagerSourcedWithId) = in.client_xact_id
  def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id
}

/** Overwrites some default client_xact_id with the port idx */
trait UsesNewId extends TileLinkArbiterLike {
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int) = UInt(id, log2Up(arbN))
  def managerSourcedClientXactId(in: ManagerSourcedWithId) = UInt(0)
  def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id
}

// Now we can mix in the various id-generation traits to make concrete arbiter classes
class UncachedTileLinkIOArbiterThatAppendsArbiterId(val n: Int)(implicit p: Parameters) extends UncachedTileLinkIOArbiter(n)(p) with AppendsArbiterId
class UncachedTileLinkIOArbiterThatPassesId(val n: Int)(implicit p: Parameters) extends UncachedTileLinkIOArbiter(n)(p) with PassesId
class UncachedTileLinkIOArbiterThatUsesNewId(val n: Int)(implicit p: Parameters) extends UncachedTileLinkIOArbiter(n)(p) with UsesNewId
class TileLinkIOArbiterThatAppendsArbiterId(val n: Int)(implicit p: Parameters) extends TileLinkIOArbiter(n)(p) with AppendsArbiterId
class TileLinkIOArbiterThatPassesId(val n: Int)(implicit p: Parameters) extends TileLinkIOArbiter(n)(p) with PassesId
class TileLinkIOArbiterThatUsesNewId(val n: Int)(implicit p: Parameters) extends TileLinkIOArbiter(n)(p) with UsesNewId

/** Concrete uncached client-side arbiter that appends the arbiter's port id to client_xact_id */
class ClientUncachedTileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters) extends Module with TileLinkArbiterLike with AppendsArbiterId {
  val io = new Bundle {
    val in = Vec(arbN, new ClientUncachedTileLinkIO).flip
    val out = new ClientUncachedTileLinkIO
  }
  if (arbN > 1) {
    hookupClientSourceHeaderless(io.in.map(_.acquire), io.out.acquire)
    hookupManagerSourceHeaderlessWithId(io.in.map(_.grant), io.out.grant)
  } else { io.out <> io.in.head }
}

/** Concrete client-side arbiter that appends the arbiter's port id to client_xact_id */
class ClientTileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters) extends Module with TileLinkArbiterLike with AppendsArbiterId {
  val io = new Bundle {
    val in = Vec(arbN, new ClientTileLinkIO).flip
    val out = new ClientTileLinkIO
  }
  if (arbN > 1) {
    hookupClientSourceHeaderless(io.in.map(_.acquire), io.out.acquire)
    hookupClientSourceHeaderless(io.in.map(_.release), io.out.release)
    hookupManagerSourceBroadcast(io.in.map(_.probe), io.out.probe)
    hookupManagerSourceHeaderlessWithId(io.in.map(_.grant), io.out.grant)
  } else { io.out <> io.in.head }
}
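
// Usage sketch (illustrative; `icache` and `dcache` are hypothetical client
// modules exposing ClientUncachedTileLinkIO ports, with an implicit Parameters
// instance in scope):
//
//   val arb = Module(new ClientUncachedTileLinkIOArbiter(2))
//   arb.io.in(0) <> icache.io.mem
//   arb.io.in(1) <> dcache.io.mem
//   outermost <> arb.io.out
//
// Because AppendsArbiterId widens the outgoing client_xact_id by
// log2Up(2) = 1 bit, the downstream manager must be sized with
// maxClientsPerPort >= 2.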
src/main/scala/uncore/tilelink/Definitions.scala (Normal file, 971 lines)
@@ -0,0 +1,971 @@
// See LICENSE for license details.

package uncore.tilelink
import Chisel._
import junctions._
import uncore.coherence.CoherencePolicy
import uncore.util._
import scala.math.max
import uncore.constants._
import cde.{Parameters, Field}

case object CacheBlockOffsetBits extends Field[Int]
case object AmoAluOperandBits extends Field[Int]

case object TLId extends Field[String]
case class TLKey(id: String) extends Field[TileLinkParameters]

/** Parameters exposed to the top-level design, set based on
 * external requirements or design space exploration
 *
 * Coherency policy used to define custom message types
 * Number of manager agents
 * Number of client agents that cache data and use custom [[uncore.Acquire]] types
 * Number of client agents that do not cache data and use built-in [[uncore.Acquire]] types
 * Maximum number of unique outstanding transactions per client
 * Maximum number of clients multiplexed onto a single port
 * Maximum number of unique outstanding transactions per manager
 * Width of cache block addresses
 * Total amount of data per cache block
 * Number of data beats per cache block
 **/
case class TileLinkParameters(
    coherencePolicy: CoherencePolicy,
    nManagers: Int,
    nCachingClients: Int,
    nCachelessClients: Int,
    maxClientXacts: Int,
    maxClientsPerPort: Int,
    maxManagerXacts: Int,
    dataBits: Int,
    dataBeats: Int = 4,
    overrideDataBitsPerBeat: Option[Int] = None
  ) {
  val nClients = nCachingClients + nCachelessClients
  val writeMaskBits: Int = ((dataBits / dataBeats) - 1) / 8 + 1
  val dataBitsPerBeat: Int = overrideDataBitsPerBeat.getOrElse(dataBits / dataBeats)
}
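
// Worked example: a 64-byte block (dataBits = 512) split into dataBeats = 4
// gives dataBitsPerBeat = 512/4 = 128 bits per beat, and
// writeMaskBits = ((512/4) - 1)/8 + 1 = 16, i.e. one mask bit per byte of a
// beat. nClients is simply nCachingClients + nCachelessClients.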


/** Utility trait for building Modules and Bundles that use TileLink parameters */
trait HasTileLinkParameters {
  implicit val p: Parameters
  val tlExternal = p(TLKey(p(TLId)))
  val tlCoh = tlExternal.coherencePolicy
  val tlNManagers = tlExternal.nManagers
  val tlNCachingClients = tlExternal.nCachingClients
  val tlNCachelessClients = tlExternal.nCachelessClients
  val tlNClients = tlExternal.nClients
  val tlClientIdBits = log2Up(tlNClients)
  val tlManagerIdBits = log2Up(tlNManagers)
  val tlMaxClientXacts = tlExternal.maxClientXacts
  val tlMaxClientsPerPort = tlExternal.maxClientsPerPort
  val tlMaxManagerXacts = tlExternal.maxManagerXacts
  val tlClientXactIdBits = log2Up(tlMaxClientXacts*tlMaxClientsPerPort)
  val tlManagerXactIdBits = log2Up(tlMaxManagerXacts)
  val tlBlockAddrBits = p(PAddrBits) - p(CacheBlockOffsetBits)
  val tlDataBeats = tlExternal.dataBeats
  val tlDataBits = tlExternal.dataBitsPerBeat
  val tlDataBytes = tlDataBits/8
  val tlWriteMaskBits = tlExternal.writeMaskBits
  val tlBeatAddrBits = log2Up(tlDataBeats)
  val tlByteAddrBits = log2Up(tlWriteMaskBits)
  val tlMemoryOpcodeBits = M_SZ
  val tlMemoryOperandSizeBits = log2Ceil(log2Ceil(tlWriteMaskBits) + 1)
  val tlAcquireTypeBits = max(log2Up(Acquire.nBuiltInTypes),
                              tlCoh.acquireTypeWidth)
  val tlAcquireUnionBits = max(tlWriteMaskBits,
                               (tlByteAddrBits +
                                 tlMemoryOperandSizeBits +
                                 tlMemoryOpcodeBits)) + 1
  val tlGrantTypeBits = max(log2Up(Grant.nBuiltInTypes),
                            tlCoh.grantTypeWidth) + 1
  /** Whether the underlying physical network preserves point-to-point ordering of messages */
  val tlNetworkPreservesPointToPointOrdering = false
  val tlNetworkDoesNotInterleaveBeats = true
  val amoAluOperandBits = p(AmoAluOperandBits)
  val amoAluOperandBytes = amoAluOperandBits/8
}

abstract class TLModule(implicit val p: Parameters) extends Module
  with HasTileLinkParameters
abstract class TLBundle(implicit val p: Parameters) extends junctions.ParameterizedBundle()(p)
  with HasTileLinkParameters
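
// Minimal sketch of picking up the derived constants by extending TLModule
// (illustrative; assumes an implicit Parameters with TLId/TLKey bindings):
//
//   class BeatCounter(implicit p: Parameters) extends TLModule {
//     val io = new Bundle { val beat = UInt(OUTPUT, width = tlBeatAddrBits) }
//     val (cnt, _) = Counter(Bool(true), tlDataBeats)
//     io.beat := cnt
//   }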

/** Base trait for all TileLink channels */
abstract class TileLinkChannel(implicit p: Parameters) extends TLBundle()(p) {
  def hasData(dummy: Int = 0): Bool
  def hasMultibeatData(dummy: Int = 0): Bool
}
/** Directionality of message channel. Used to hook up logical network ports to physical network ports */
abstract class ClientToManagerChannel(implicit p: Parameters) extends TileLinkChannel()(p)
/** Directionality of message channel. Used to hook up logical network ports to physical network ports */
abstract class ManagerToClientChannel(implicit p: Parameters) extends TileLinkChannel()(p)
/** Directionality of message channel. Used to hook up logical network ports to physical network ports */
abstract class ClientToClientChannel(implicit p: Parameters) extends TileLinkChannel()(p) // Unused for now

/** Common signals that are used in multiple channels.
 * These traits are useful for type parameterizing bundle wiring functions.
 */

/** Address of a cache block. */
trait HasCacheBlockAddress extends HasTileLinkParameters {
  val addr_block = UInt(width = tlBlockAddrBits)

  def conflicts(that: HasCacheBlockAddress) = this.addr_block === that.addr_block
  def conflicts(addr: UInt) = this.addr_block === addr
}

/** Sub-block address or beat id of multi-beat data */
trait HasTileLinkBeatId extends HasTileLinkParameters {
  val addr_beat = UInt(width = tlBeatAddrBits)
}

/** Client-side transaction id. Usually Miss Status Handling Register File index */
trait HasClientTransactionId extends HasTileLinkParameters {
  val client_xact_id = Bits(width = tlClientXactIdBits)
}

/** Manager-side transaction id. Usually Transaction Status Handling Register File index. */
trait HasManagerTransactionId extends HasTileLinkParameters {
  val manager_xact_id = Bits(width = tlManagerXactIdBits)
}

/** A single beat of cache block data */
trait HasTileLinkData extends HasTileLinkBeatId {
  val data = UInt(width = tlDataBits)

  def hasData(dummy: Int = 0): Bool
  def hasMultibeatData(dummy: Int = 0): Bool
  def first(dummy: Int = 0): Bool = !hasMultibeatData() || addr_beat === UInt(0)
  def last(dummy: Int = 0): Bool = !hasMultibeatData() || addr_beat === UInt(tlDataBeats-1)
}

/** An entire cache block of data */
trait HasTileLinkBlock extends HasTileLinkParameters {
  val data_buffer = Vec(tlDataBeats, UInt(width = tlDataBits))
  val wmask_buffer = Vec(tlDataBeats, UInt(width = tlWriteMaskBits))
}

/** The id of a client source or destination. Used in managers. */
trait HasClientId extends HasTileLinkParameters {
  val client_id = UInt(width = tlClientIdBits)
}

trait HasManagerId extends HasTileLinkParameters {
  val manager_id = UInt(width = tlManagerIdBits)
}

trait HasAcquireUnion extends HasTileLinkParameters {
  val union = Bits(width = tlAcquireUnionBits)

  // Utility funcs for accessing subblock union:
  def isBuiltInType(t: UInt): Bool
  val opCodeOff = 1
  val opSizeOff = tlMemoryOpcodeBits + opCodeOff
  val addrByteOff = tlMemoryOperandSizeBits + opSizeOff
  val addrByteMSB = tlByteAddrBits + addrByteOff
  /** Hint whether to allocate the block in any intervening caches */
  def allocate(dummy: Int = 0) = union(0)
  /** Op code for [[uncore.PutAtomic]] operations */
  def op_code(dummy: Int = 0) = Mux(
    isBuiltInType(Acquire.putType) || isBuiltInType(Acquire.putBlockType),
    M_XWR, union(opSizeOff-1, opCodeOff))
  /** Operand size for [[uncore.PutAtomic]] */
  def op_size(dummy: Int = 0) = union(addrByteOff-1, opSizeOff)
  /** Byte address for [[uncore.PutAtomic]] operand */
  def addr_byte(dummy: Int = 0) = union(addrByteMSB-1, addrByteOff)
  def amo_offset(dummy: Int = 0) =
    if (tlByteAddrBits > log2Up(amoAluOperandBytes)) addr_byte()(tlByteAddrBits-1, log2Up(amoAluOperandBytes))
    else UInt(0)
  /** Bit offset of [[uncore.PutAtomic]] operand */
  def amo_shift_bytes(dummy: Int = 0) = UInt(amoAluOperandBytes)*amo_offset()
  /** Write mask for [[uncore.Put]], [[uncore.PutBlock]], [[uncore.PutAtomic]] */
  def wmask(dummy: Int = 0): UInt = {
    val is_amo = isBuiltInType(Acquire.putAtomicType)
    val amo_mask = if (tlByteAddrBits > log2Up(amoAluOperandBytes))
      FillInterleaved(amoAluOperandBytes, UIntToOH(amo_offset()))
    else Acquire.fullWriteMask
    val is_put = isBuiltInType(Acquire.putBlockType) || isBuiltInType(Acquire.putType)
    val put_mask = union(tlWriteMaskBits, 1)
    Mux(is_amo, amo_mask, Mux(is_put, put_mask, UInt(0)))
  }
  /** Full, beat-sized writemask */
  def full_wmask(dummy: Int = 0) = FillInterleaved(8, wmask())

  /** Does this message carry only a partial (non-full) writemask */
  def hasPartialWritemask(dummy: Int = 0): Bool = wmask() =/= Acquire.fullWriteMask

}

trait HasAcquireType extends HasTileLinkParameters {
  val is_builtin_type = Bool()
  val a_type = UInt(width = tlAcquireTypeBits)

  /** Message type equality */
  def is(t: UInt) = a_type === t //TODO: make this more opaque; def ===?

  /** Is this message a built-in or custom type */
  def isBuiltInType(dummy: Int = 0): Bool = is_builtin_type
  /** Is this message a particular built-in type */
  def isBuiltInType(t: UInt): Bool = is_builtin_type && a_type === t

  /** Does this message refer to subblock operands using info in the Acquire.union subbundle */
  def isSubBlockType(dummy: Int = 0): Bool = isBuiltInType() && a_type.isOneOf(Acquire.typesOnSubBlocks)

  /** Is this message a built-in prefetch message */
  def isPrefetch(dummy: Int = 0): Bool = isBuiltInType() &&
    (is(Acquire.getPrefetchType) || is(Acquire.putPrefetchType))

  /** Is this message a built-in atomic message */
  def isAtomic(dummy: Int = 0): Bool = isBuiltInType() && is(Acquire.putAtomicType)

  /** Is this message a built-in read message */
  def isGet(dummy: Int = 0): Bool = isBuiltInType() && (is(Acquire.getType) || is(Acquire.getBlockType))

  /** Does this message contain data? Assumes that no custom message types have data. */
  def hasData(dummy: Int = 0): Bool = isBuiltInType() && a_type.isOneOf(Acquire.typesWithData)

  /** Does this message contain multiple beats of data? Assumes that no custom message types have data. */
  def hasMultibeatData(dummy: Int = 0): Bool = Bool(tlDataBeats > 1) && isBuiltInType() &&
    a_type.isOneOf(Acquire.typesWithMultibeatData)

  /** Mapping between each built-in Acquire type and a built-in Grant type. */
  def getBuiltInGrantType(dummy: Int = 0): UInt = Acquire.getBuiltInGrantType(this.a_type)
}

trait HasProbeType extends HasTileLinkParameters {
  val p_type = UInt(width = tlCoh.probeTypeWidth)

  def is(t: UInt) = p_type === t
  def hasData(dummy: Int = 0) = Bool(false)
  def hasMultibeatData(dummy: Int = 0) = Bool(false)
}

trait MightBeVoluntary {
  def isVoluntary(dummy: Int = 0): Bool
}

trait HasReleaseType extends HasTileLinkParameters with MightBeVoluntary {
  val voluntary = Bool()
  val r_type = UInt(width = tlCoh.releaseTypeWidth)

  def is(t: UInt) = r_type === t
  def hasData(dummy: Int = 0) = r_type.isOneOf(tlCoh.releaseTypesWithData)
  def hasMultibeatData(dummy: Int = 0) = Bool(tlDataBeats > 1) &&
    r_type.isOneOf(tlCoh.releaseTypesWithData)
  def isVoluntary(dummy: Int = 0) = voluntary
  def requiresAck(dummy: Int = 0) = !Bool(tlNetworkPreservesPointToPointOrdering)
}

trait HasGrantType extends HasTileLinkParameters with MightBeVoluntary {
  val is_builtin_type = Bool()
  val g_type = UInt(width = tlGrantTypeBits)

  // Helper funcs
  def isBuiltInType(dummy: Int = 0): Bool = is_builtin_type
  def isBuiltInType(t: UInt): Bool = is_builtin_type && g_type === t
  def is(t: UInt): Bool = g_type === t
  def hasData(dummy: Int = 0): Bool = Mux(isBuiltInType(),
    g_type.isOneOf(Grant.typesWithData),
    g_type.isOneOf(tlCoh.grantTypesWithData))
  def hasMultibeatData(dummy: Int = 0): Bool =
    Bool(tlDataBeats > 1) && Mux(isBuiltInType(),
      g_type.isOneOf(Grant.typesWithMultibeatData),
      g_type.isOneOf(tlCoh.grantTypesWithData))
  def isVoluntary(dummy: Int = 0): Bool = isBuiltInType() && (g_type === Grant.voluntaryAckType)
  def requiresAck(dummy: Int = 0): Bool = !Bool(tlNetworkPreservesPointToPointOrdering) && !isVoluntary()
}

/** TileLink channel bundle definitions */

/** The Acquire channel is used to initiate coherence protocol transactions in
 * order to gain access to a cache block's data with certain permissions
 * enabled. Messages sent over this channel may be custom types defined by
 * a [[uncore.CoherencePolicy]] for cached data accesses or may be built-in types
 * used for uncached data accesses. Acquires may contain data for Put or
 * PutAtomic built-in types. After sending an Acquire, clients must
 * wait for a manager to send them a [[uncore.Grant]] message in response.
 */
class AcquireMetadata(implicit p: Parameters) extends ClientToManagerChannel
    with HasCacheBlockAddress
    with HasClientTransactionId
    with HasTileLinkBeatId
    with HasAcquireType
    with HasAcquireUnion {
  /** Complete physical address for block, beat or operand */
  def full_addr(dummy: Int = 0) =
    Cat(this.addr_block, this.addr_beat,
        Mux(isBuiltInType() && this.a_type.isOneOf(Acquire.typesWithAddrByte),
            this.addr_byte(), UInt(0, tlByteAddrBits)))
}

/** [[uncore.AcquireMetadata]] with an extra field containing the data beat */
class Acquire(implicit p: Parameters) extends AcquireMetadata
  with HasTileLinkData

/** [[uncore.AcquireMetadata]] with an extra field containing the entire cache block */
class BufferedAcquire(implicit p: Parameters) extends AcquireMetadata
  with HasTileLinkBlock

/** [[uncore.Acquire]] with an extra field stating its source id */
class AcquireFromSrc(implicit p: Parameters) extends Acquire
  with HasClientId

/** [[uncore.BufferedAcquire]] with an extra field stating its source id */
class BufferedAcquireFromSrc(implicit p: Parameters) extends BufferedAcquire
  with HasClientId

/** Used to track metadata for transactions where multiple secondary misses have been merged
 * and handled by a single transaction tracker.
 */
class SecondaryMissInfo(implicit p: Parameters) extends TLBundle
  with HasClientTransactionId
  with HasTileLinkBeatId
  with HasClientId
  with HasAcquireType

/** Contains definitions of the built-in Acquire types and a factory
 * for [[uncore.Acquire]]
 *
 * In general you should avoid using this factory directly and use
 * [[uncore.ClientMetadata.makeAcquire]] for custom cached Acquires and
 * [[uncore.Get]], [[uncore.Put]], etc. for built-in uncached Acquires.
 *
 * @param is_builtin_type built-in or custom type message?
 * @param a_type built-in type enum or custom type enum
 * @param client_xact_id client's transaction id
 * @param addr_block address of the cache block
 * @param addr_beat sub-block address (which beat)
 * @param data data being put outwards
 * @param union additional fields used for uncached types
 */
object Acquire {
  val nBuiltInTypes = 7
  //TODO: Use Enum
  def getType = UInt("b000") // Get a single beat of data
  def getBlockType = UInt("b001") // Get a whole block of data
  def putType = UInt("b010") // Put a single beat of data
  def putBlockType = UInt("b011") // Put a whole block of data
  def putAtomicType = UInt("b100") // Perform an atomic memory op
  def getPrefetchType = UInt("b101") // Prefetch a whole block of data
  def putPrefetchType = UInt("b110") // Prefetch a whole block of data, with intent to write
  def typesWithData = Vec(putType, putBlockType, putAtomicType)
  def typesWithMultibeatData = Vec(putBlockType)
  def typesOnSubBlocks = Vec(putType, getType, putAtomicType)
  def typesWithAddrByte = Vec(getType, putAtomicType)

  /** Mapping between each built-in Acquire type and a built-in Grant type. */
  def getBuiltInGrantType(a_type: UInt): UInt = {
    MuxLookup(a_type, Grant.putAckType, Array(
      Acquire.getType -> Grant.getDataBeatType,
      Acquire.getBlockType -> Grant.getDataBlockType,
      Acquire.putType -> Grant.putAckType,
      Acquire.putBlockType -> Grant.putAckType,
      Acquire.putAtomicType -> Grant.getDataBeatType,
      Acquire.getPrefetchType -> Grant.prefetchAckType,
      Acquire.putPrefetchType -> Grant.prefetchAckType))
  }

  def makeUnion(
      a_type: UInt,
      addr_byte: UInt,
      operand_size: UInt,
      opcode: UInt,
      wmask: UInt,
      alloc: Bool)
      (implicit p: Parameters): UInt = {

    val tlExternal = p(TLKey(p(TLId)))
    val tlWriteMaskBits = tlExternal.writeMaskBits
    val tlByteAddrBits = log2Up(tlWriteMaskBits)
    val tlMemoryOperandSizeBits = log2Ceil(log2Ceil(tlWriteMaskBits) + 1)

    // These had better be the right size when we cat them together!
    val my_addr_byte = (UInt(0, tlByteAddrBits) | addr_byte)(tlByteAddrBits-1, 0)
    val my_operand_size = (UInt(0, tlMemoryOperandSizeBits) | operand_size)(tlMemoryOperandSizeBits-1, 0)
    val my_opcode = (UInt(0, M_SZ) | opcode)(M_SZ-1, 0)
    val my_wmask = (UInt(0, tlWriteMaskBits) | wmask)(tlWriteMaskBits-1, 0)

    MuxLookup(a_type, UInt(0), Array(
      Acquire.getType -> Cat(my_addr_byte, my_operand_size, my_opcode, alloc),
      Acquire.getBlockType -> Cat(my_operand_size, my_opcode, alloc),
      Acquire.putType -> Cat(my_wmask, alloc),
      Acquire.putBlockType -> Cat(my_wmask, alloc),
      Acquire.putAtomicType -> Cat(my_addr_byte, my_operand_size, my_opcode, alloc),
      Acquire.getPrefetchType -> Cat(M_XRD, alloc),
      Acquire.putPrefetchType -> Cat(M_XWR, alloc)))
  }

  def fullWriteMask(implicit p: Parameters) = SInt(-1, width = p(TLKey(p(TLId))).writeMaskBits).asUInt
  def fullOperandSize(implicit p: Parameters) = {
    val dataBits = p(TLKey(p(TLId))).dataBitsPerBeat
    UInt(log2Ceil(dataBits / 8))
  }

  // Most generic constructor
  def apply(
      is_builtin_type: Bool,
      a_type: Bits,
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0),
      union: UInt = UInt(0))
      (implicit p: Parameters): Acquire = {
    val acq = Wire(new Acquire)
    acq.is_builtin_type := is_builtin_type
    acq.a_type := a_type
    acq.client_xact_id := client_xact_id
    acq.addr_block := addr_block
    acq.addr_beat := addr_beat
    acq.data := data
    acq.union := union
    acq
  }

  // Copy constructor
  def apply(a: Acquire): Acquire = {
    val acq = Wire(new Acquire()(a.p))
    acq := a
    acq
  }
}

object BuiltInAcquireBuilder {
  def apply(
      a_type: UInt,
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0),
      addr_byte: UInt = UInt(0),
      operand_size: UInt = UInt(0),
      opcode: UInt = UInt(0),
      wmask: UInt = UInt(0),
      alloc: Bool = Bool(true))
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = a_type,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      data = data,
      union = Acquire.makeUnion(a_type, addr_byte, operand_size, opcode, wmask, alloc))
  }
}

/** Get a single beat of data from the outer memory hierarchy
 *
 * The client can hint whether the block containing this beat should be
 * allocated in the intervening levels of the hierarchy.
 *
 * @param client_xact_id client's transaction id
 * @param addr_block address of the cache block
 * @param addr_beat sub-block address (which beat)
 * @param addr_byte sub-block address (which byte)
 * @param operand_size {byte, half, word, double} from [[uncore.MemoryOpConstants]]
 * @param alloc hint whether the block should be allocated in intervening caches
 */
object Get {
  def apply(
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      alloc: Bool = Bool(true))
      (implicit p: Parameters): Acquire = {
    BuiltInAcquireBuilder(
      a_type = Acquire.getType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      operand_size = Acquire.fullOperandSize,
      opcode = M_XRD,
      alloc = alloc)
  }
  def apply(
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      addr_byte: UInt,
      operand_size: UInt,
      alloc: Bool)
      (implicit p: Parameters): Acquire = {
    BuiltInAcquireBuilder(
      a_type = Acquire.getType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      addr_byte = addr_byte,
      operand_size = operand_size,
      opcode = M_XRD,
      alloc = alloc)
  }
}
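
// Usage sketch (illustrative; assumes an implicit Parameters instance and a
// ClientUncachedTileLinkIO port named io.mem). Read beat 1 of a hypothetical
// block address:
//
//   io.mem.acquire.valid := Bool(true)
//   io.mem.acquire.bits := Get(
//     client_xact_id = UInt(0),
//     addr_block = UInt(0x100),
//     addr_beat = UInt(1))
//
// The response arrives on io.mem.grant with g_type === Grant.getDataBeatType
// and the same client_xact_id.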

/** Get a whole cache block of data from the outer memory hierarchy
 *
 * The client can hint whether the block should be allocated in the
 * intervening levels of the hierarchy.
 *
 * @param client_xact_id client's transaction id
 * @param addr_block address of the cache block
 * @param alloc hint whether the block should be allocated in intervening caches
 */
object GetBlock {
  def apply(
      client_xact_id: UInt = UInt(0),
      addr_block: UInt,
      alloc: Bool = Bool(true))
      (implicit p: Parameters): Acquire = {
    BuiltInAcquireBuilder(
      a_type = Acquire.getBlockType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      operand_size = Acquire.fullOperandSize,
      opcode = M_XRD,
      alloc = alloc)
  }
}

/** Prefetch a cache block into the next-outermost level of the memory hierarchy
 * with read permissions.
 *
 * @param client_xact_id client's transaction id
 * @param addr_block address of the cache block
 */
object GetPrefetch {
  def apply(
      client_xact_id: UInt,
      addr_block: UInt)
      (implicit p: Parameters): Acquire = {
    BuiltInAcquireBuilder(
      a_type = Acquire.getPrefetchType,
      client_xact_id = client_xact_id,
      addr_block = addr_block)
  }
}

/** Put a single beat of data into the outer memory hierarchy
 *
 * The block will be allocated in the next-outermost level of the hierarchy.
 *
 * @param client_xact_id client's transaction id
 * @param addr_block address of the cache block
 * @param addr_beat sub-block address (which beat)
 * @param data data being refilled to the original requestor
 * @param wmask per-byte write mask for this beat
 * @param alloc hint whether the block should be allocated in intervening caches
 */
object Put {
  def apply(
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      data: UInt,
      wmask: Option[UInt] = None,
      alloc: Bool = Bool(true))
      (implicit p: Parameters): Acquire = {
    BuiltInAcquireBuilder(
      a_type = Acquire.putType,
      addr_block = addr_block,
      addr_beat = addr_beat,
      client_xact_id = client_xact_id,
      data = data,
      wmask = wmask.getOrElse(Acquire.fullWriteMask),
      alloc = alloc)
  }
}
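
// Usage sketch (illustrative; same assumptions as the Get example above).
// Write only the low four bytes of beat 0 by supplying a partial write mask:
//
//   io.mem.acquire.bits := Put(
//     client_xact_id = UInt(1),
//     addr_block = UInt(0x100),
//     addr_beat = UInt(0),
//     data = UInt("hdeadbeef"),
//     wmask = Some(UInt("b00001111")))
//
// Completion is signalled by a putAckType Grant carrying no data.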

/** Put a whole cache block of data into the outer memory hierarchy
 *
 * If the write mask is not full, the block will be allocated in the
 * next-outermost level of the hierarchy. If the write mask is full, the
 * client can hint whether the block should be allocated or not.
 *
 * @param client_xact_id client's transaction id
 * @param addr_block address of the cache block
 * @param addr_beat sub-block address (which beat of several)
 * @param data data being refilled to the original requestor
 * @param wmask per-byte write mask for this beat
 * @param alloc hint whether the block should be allocated in intervening caches
 */
object PutBlock {
  def apply(
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      data: UInt,
      wmask: Option[UInt] = None,
      alloc: Bool = Bool(true))
      (implicit p: Parameters): Acquire = {
    BuiltInAcquireBuilder(
      a_type = Acquire.putBlockType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      data = data,
      wmask = wmask.getOrElse(Acquire.fullWriteMask),
      alloc = alloc)
  }
}

/** Prefetch a cache block into the next-outermost level of the memory hierarchy
 * with write permissions.
 *
 * @param client_xact_id client's transaction id
 * @param addr_block address of the cache block
 */
object PutPrefetch {
  def apply(
      client_xact_id: UInt,
      addr_block: UInt)
      (implicit p: Parameters): Acquire = {
    BuiltInAcquireBuilder(
      a_type = Acquire.putPrefetchType,
      client_xact_id = client_xact_id,
      addr_block = addr_block)
  }
}

/** Perform an atomic memory operation in the next-outermost level of the memory hierarchy
 *
 * @param client_xact_id client's transaction id
 * @param addr_block address of the cache block
 * @param addr_beat sub-block address (within which beat)
 * @param addr_byte sub-block address (which byte)
 * @param atomic_opcode {swap, add, xor, and, min, max, minu, maxu} from [[uncore.MemoryOpConstants]]
 * @param operand_size {byte, half, word, double} from [[uncore.MemoryOpConstants]]
 * @param data source operand data
 */
object PutAtomic {
  def apply(
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      addr_byte: UInt,
      atomic_opcode: UInt,
      operand_size: UInt,
      data: UInt)
      (implicit p: Parameters): Acquire = {
    BuiltInAcquireBuilder(
      a_type = Acquire.putAtomicType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      data = data,
      addr_byte = addr_byte,
      operand_size = operand_size,
      opcode = atomic_opcode)
  }
}
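
// Usage sketch (illustrative; same assumptions as above). Atomically add 1 to
// the doubleword at byte offset 8 of beat 0, with opcode and operand-size
// constants from uncore.constants:
//
//   io.mem.acquire.bits := PutAtomic(
//     client_xact_id = UInt(2),
//     addr_block = UInt(0x100),
//     addr_beat = UInt(0),
//     addr_byte = UInt(8),
//     atomic_opcode = M_XA_ADD,
//     operand_size = MT_D,
//     data = UInt(1))
//
// The pre-operation memory value is returned on a getDataBeatType Grant.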

/** The Probe channel is used to force clients to release data or cede permissions
 * on a cache block. Clients respond to Probes with [[uncore.Release]] messages.
 * The available types of Probes are customized by a particular
 * [[uncore.CoherencePolicy]].
 */
class Probe(implicit p: Parameters) extends ManagerToClientChannel
  with HasCacheBlockAddress
  with HasProbeType

/** [[uncore.Probe]] with an extra field stating its destination id */
class ProbeToDst(implicit p: Parameters) extends Probe()(p) with HasClientId

/** Contains factories for [[uncore.Probe]] and [[uncore.ProbeToDst]]
 *
 * In general you should avoid using these factories directly and use
 * [[uncore.ManagerMetadata.makeProbe(UInt,Acquire)* makeProbe]] instead.
 *
 * @param dst id of client to which probe should be sent
 * @param p_type custom probe type
 * @param addr_block address of the cache block
 */
object Probe {
  def apply(p_type: UInt, addr_block: UInt)(implicit p: Parameters): Probe = {
    val prb = Wire(new Probe)
    prb.p_type := p_type
    prb.addr_block := addr_block
    prb
  }
  def apply(dst: UInt, p_type: UInt, addr_block: UInt)(implicit p: Parameters): ProbeToDst = {
    val prb = Wire(new ProbeToDst)
    prb.client_id := dst
    prb.p_type := p_type
    prb.addr_block := addr_block
    prb
  }
}
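
// Usage sketch (illustrative; `probeInvalidate` stands in for whatever probe
// type the active CoherencePolicy defines, and `addr` for a block address).
// A manager probing client 3 directly:
//
//   io.probe.valid := Bool(true)
//   io.probe.bits := Probe(dst = UInt(3), p_type = probeInvalidate, addr_block = addr)
//
// As noted above, ManagerMetadata.makeProbe is the preferred entry point in
// coherence-aware code.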

/** The Release channel is used to release data or permission back to the manager
 * in response to [[uncore.Probe]] messages. It can also be used to voluntarily
 * write back data, for example in the event that dirty data must be evicted on
 * a cache miss. The available types of Release messages are always customized by
 * a particular [[uncore.CoherencePolicy]]. Releases may contain data or may be
 * simple acknowledgements. Voluntary Releases are acknowledged with [[uncore.Grant Grants]].
 */
class ReleaseMetadata(implicit p: Parameters) extends ClientToManagerChannel
    with HasTileLinkBeatId
    with HasCacheBlockAddress
    with HasClientTransactionId
    with HasReleaseType {
  def full_addr(dummy: Int = 0) = Cat(this.addr_block, this.addr_beat, UInt(0, width = tlByteAddrBits))
}

/** [[uncore.ReleaseMetadata]] with an extra field containing the data beat */
class Release(implicit p: Parameters) extends ReleaseMetadata
  with HasTileLinkData

/** [[uncore.ReleaseMetadata]] with an extra field containing the entire cache block */
class BufferedRelease(implicit p: Parameters) extends ReleaseMetadata
  with HasTileLinkBlock

/** [[uncore.Release]] with an extra field stating its source id */
class ReleaseFromSrc(implicit p: Parameters) extends Release
  with HasClientId

/** [[uncore.BufferedRelease]] with an extra field stating its source id */
class BufferedReleaseFromSrc(implicit p: Parameters) extends BufferedRelease
  with HasClientId

/** Contains a [[uncore.Release]] factory
 *
 * In general you should avoid using this factory directly and use
 * [[uncore.ClientMetadata.makeRelease]] instead.
 *
 * @param voluntary is this a voluntary writeback
 * @param r_type type enum defined by coherence protocol
 * @param client_xact_id client's transaction id
 * @param addr_block address of the cache block
 * @param addr_beat beat id of the data
 * @param data data being written back
 */
object Release {
  def apply(
      voluntary: Bool,
      r_type: UInt,
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt,
      data: UInt)
      (implicit p: Parameters): Release = {
    val rel = Wire(new Release)
    rel.r_type := r_type
    rel.client_xact_id := client_xact_id
    rel.addr_block := addr_block
    rel.addr_beat := addr_beat
    rel.data := data
    rel.voluntary := voluntary
    rel
  }

  def apply(
      src: UInt,
      voluntary: Bool,
      r_type: UInt,
      client_xact_id: UInt,
      addr_block: UInt,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0))
      (implicit p: Parameters): ReleaseFromSrc = {
    val rel = Wire(new ReleaseFromSrc)
    rel.client_id := src
    rel.voluntary := voluntary
    rel.r_type := r_type
    rel.client_xact_id := client_xact_id
    rel.addr_block := addr_block
    rel.addr_beat := addr_beat
    rel.data := data
    rel
  }
}

/** The Grant channel is used to refill data or grant permissions requested of the
 * manager agent via an [[uncore.Acquire]] message. It is also used to acknowledge
 * the receipt of voluntary writeback from clients in the form of [[uncore.Release]]
 * messages. There are built-in Grant messages used for Gets and Puts, and
 * coherence policies may also define custom Grant types. Grants may contain data
 * or may be simple acknowledgements. Grants are responded to with [[uncore.Finish]].
 */
class GrantMetadata(implicit p: Parameters) extends ManagerToClientChannel
    with HasTileLinkBeatId
    with HasClientTransactionId
    with HasManagerTransactionId
    with HasGrantType {
  def makeFinish(dummy: Int = 0): Finish = {
    val f = Wire(new Finish)
    f.manager_xact_id := this.manager_xact_id
    f
  }
}

/** [[uncore.GrantMetadata]] with an extra field containing a single beat of data */
class Grant(implicit p: Parameters) extends GrantMetadata
  with HasTileLinkData

/** [[uncore.Grant]] with an extra field stating its destination */
class GrantToDst(implicit p: Parameters) extends Grant
  with HasClientId

/** [[uncore.Grant]] with an extra field stating its source */
class GrantFromSrc(implicit p: Parameters) extends Grant
    with HasManagerId {
  override def makeFinish(dummy: Int = 0): FinishToDst = {
    val f = Wire(new FinishToDst)
    f.manager_xact_id := this.manager_xact_id
    f.manager_id := this.manager_id
    f
  }
}

/** [[uncore.GrantMetadata]] with an extra field containing an entire cache block */
class BufferedGrant(implicit p: Parameters) extends GrantMetadata
  with HasTileLinkBlock

/** [[uncore.BufferedGrant]] with an extra field stating its destination */
class BufferedGrantToDst(implicit p: Parameters) extends BufferedGrant
  with HasClientId

/** Contains definitions of the built-in grant types and factories
 * for [[uncore.Grant]] and [[uncore.GrantToDst]]
 *
 * In general you should avoid using these factories directly and use
 * [[uncore.ManagerMetadata.makeGrant(uncore.AcquireFromSrc* makeGrant]] instead.
 *
 * @param dst id of client to which grant should be sent
 * @param is_builtin_type built-in or custom type message?
 * @param g_type built-in type enum or custom type enum
 * @param client_xact_id client's transaction id
 * @param manager_xact_id manager's transaction id
 * @param addr_beat beat id of the data
 * @param data data being refilled to the original requestor
 */
object Grant {
  val nBuiltInTypes = 5
  def voluntaryAckType = UInt("b000") // For acking Releases
  def prefetchAckType = UInt("b001") // For acking any kind of Prefetch
  def putAckType = UInt("b011") // For acking any kind of non-prefetch Put
  def getDataBeatType = UInt("b100") // Supplying a single beat of Get
  def getDataBlockType = UInt("b101") // Supplying all beats of a GetBlock
  def typesWithData = Vec(getDataBlockType, getDataBeatType)
  def typesWithMultibeatData = Vec(getDataBlockType)

  def apply(
      is_builtin_type: Bool,
      g_type: UInt,
      client_xact_id: UInt,
      manager_xact_id: UInt,
      addr_beat: UInt,
      data: UInt)
      (implicit p: Parameters): Grant = {
    val gnt = Wire(new Grant)
    gnt.is_builtin_type := is_builtin_type
    gnt.g_type := g_type
    gnt.client_xact_id := client_xact_id
    gnt.manager_xact_id := manager_xact_id
    gnt.addr_beat := addr_beat
    gnt.data := data
    gnt
  }

  def apply(
      dst: UInt,
      is_builtin_type: Bool,
      g_type: UInt,
      client_xact_id: UInt,
      manager_xact_id: UInt,
      addr_beat: UInt = UInt(0),
      data: UInt = UInt(0))
      (implicit p: Parameters): GrantToDst = {
    val gnt = Wire(new GrantToDst)
    gnt.client_id := dst
    gnt.is_builtin_type := is_builtin_type
    gnt.g_type := g_type
    gnt.client_xact_id := client_xact_id
    gnt.manager_xact_id := manager_xact_id
    gnt.addr_beat := addr_beat
    gnt.data := data
    gnt
  }
}

/** The Finish channel is used to provide a global ordering of transactions
 * in networks that do not guarantee point-to-point ordering of messages.
 * A Finish message is sent as acknowledgement of receipt of a [[uncore.Grant]].
 * When a Finish message is received, a manager knows it is safe to begin
 * processing other transactions that touch the same cache block.
 */
class Finish(implicit p: Parameters) extends ClientToManagerChannel()(p)
    with HasManagerTransactionId {
  def hasData(dummy: Int = 0) = Bool(false)
  def hasMultibeatData(dummy: Int = 0) = Bool(false)
}
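
// Flow sketch (illustrative; `tl` is a hypothetical ClientTileLinkIO). A
// received Grant that requires an ack is answered by echoing its
// manager_xact_id back on the finish channel:
//
//   tl.grant.ready := tl.finish.ready
//   tl.finish.valid := tl.grant.valid && tl.grant.bits.requiresAck()
//   tl.finish.bits := tl.grant.bits.makeFinish()
//
// Production clients usually delegate this bookkeeping to a FinishUnit rather
// than wiring it combinationally.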

/** [[uncore.Finish]] with an extra field stating its destination */
class FinishToDst(implicit p: Parameters) extends Finish
  with HasManagerId

/** Complete IO definition for incoherent TileLink, including networking headers */
class UncachedTileLinkIO(implicit p: Parameters) extends TLBundle()(p) {
  val acquire = new DecoupledIO(new LogicalNetworkIO(new Acquire))
  val grant = new DecoupledIO(new LogicalNetworkIO(new Grant)).flip
  val finish = new DecoupledIO(new LogicalNetworkIO(new Finish))
}

/** Complete IO definition for coherent TileLink, including networking headers */
class TileLinkIO(implicit p: Parameters) extends UncachedTileLinkIO()(p) {
  val probe = new DecoupledIO(new LogicalNetworkIO(new Probe)).flip
  val release = new DecoupledIO(new LogicalNetworkIO(new Release))
}

/** This version of UncachedTileLinkIO does not contain network headers.
 * It is intended for use within client agents.
 *
 * Headers are provided in the top-level that instantiates the clients and network,
 * probably using a [[uncore.ClientTileLinkNetworkPort]] module.
 * By eliding the header subbundles within the clients we can enable
 * hierarchical P-and-R while minimizing unconnected port errors in GDS.
 *
 * Secondly, this version of the interface elides [[uncore.Finish]] messages, with the
 * assumption that a [[uncore.FinishUnit]] has been coupled to the TileLinkIO port
 * to deal with acking received [[uncore.Grant Grants]].
 */
class ClientUncachedTileLinkIO(implicit p: Parameters) extends TLBundle()(p) {
  val acquire = new DecoupledIO(new Acquire)
  val grant = new DecoupledIO(new Grant).flip
}

/** This version of TileLinkIO does not contain network headers.
 * It is intended for use within client agents.
 */
class ClientTileLinkIO(implicit p: Parameters) extends TLBundle()(p) {
  val acquire = new DecoupledIO(new Acquire)
  val probe = new DecoupledIO(new Probe).flip
  val release = new DecoupledIO(new Release)
  val grant = new DecoupledIO(new GrantFromSrc).flip
  val finish = new DecoupledIO(new FinishToDst)
}

/** This version of TileLinkIO does not contain network headers, but
 * every channel does include an extra client_id subbundle.
 * It is intended for use within manager agents.
 *
 * Managers need to track where [[uncore.Acquire]] and [[uncore.Release]] messages
 * originated so that they can send a [[uncore.Grant]] to the right place.
 * Similarly, they must be able to issue Probes to particular clients.
 * However, we'd still prefer to have [[uncore.ManagerTileLinkNetworkPort]] fill in
 * the header.src to enable hierarchical p-and-r of the managers. Additionally,
 * coherent clients might be mapped to random network port ids, and we'll leave it to the
 * [[uncore.ManagerTileLinkNetworkPort]] to apply the correct mapping. Managers do need to
 * see [[uncore.Finish]] messages so they know when to allow new transactions on a cache
 * block to proceed.
 */
class ManagerTileLinkIO(implicit p: Parameters) extends TLBundle()(p) {
  val acquire = new DecoupledIO(new AcquireFromSrc).flip
  val grant = new DecoupledIO(new GrantToDst)
  val finish = new DecoupledIO(new Finish).flip
  val probe = new DecoupledIO(new ProbeToDst)
  val release = new DecoupledIO(new ReleaseFromSrc).flip
}
src/main/scala/uncore/tilelink/Interconnect.scala (Normal file, 386 lines)
@@ -0,0 +1,386 @@
package uncore.tilelink

import Chisel._
import junctions._
import scala.collection.mutable.ArraySeq
import uncore.util._
import cde.{Parameters, Field}


/** PortedTileLinkNetworks combine a TileLink protocol with a particular physical
 * network implementation.
 *
 * Specifically, they provide mappings between ClientTileLinkIO/
 * ManagerTileLinkIO channels and LogicalNetwork ports (i.e. generic
 * TileLinkIO with networking headers). Channels coming into the network have
 * appropriate networking headers appended and outgoing channels have their
 * headers stripped.
 *
 * @constructor base class constructor for Ported TileLink NoC
 * @param addrToManagerId a mapping from a physical address to the network
 *        id of a coherence manager
 * @param sharerToClientId a mapping from the id of a particular coherent
 *        client (as determined by e.g. the directory) to the network id
 *        of that client
 * @param clientDepths the depths of the queue that should be used to buffer
 *        each channel on the client side of the network
 * @param managerDepths the depths of the queue that should be used to buffer
 *        each channel on the manager side of the network
 */
abstract class PortedTileLinkNetwork(
    addrToManagerId: UInt => UInt,
    sharerToClientId: UInt => UInt,
    clientDepths: TileLinkDepths,
    managerDepths: TileLinkDepths)
    (implicit p: Parameters) extends TLModule()(p) {
  val nClients = tlNClients
  val nManagers = tlNManagers
  val io = new Bundle {
    val clients_cached = Vec(tlNCachingClients, new ClientTileLinkIO).flip
    val clients_uncached = Vec(tlNCachelessClients, new ClientUncachedTileLinkIO).flip
    val managers = Vec(nManagers, new ManagerTileLinkIO).flip
  }

  val clients = (io.clients_cached ++ io.clients_uncached).zipWithIndex.map {
    case (io, idx) => {
      val qs = Module(new TileLinkEnqueuer(clientDepths))
      io match {
        case c: ClientTileLinkIO => {
          val port = Module(new ClientTileLinkNetworkPort(idx, addrToManagerId))
          port.io.client <> c
          qs.io.client <> port.io.network
          qs.io.manager
        }
        case u: ClientUncachedTileLinkIO => {
          val port = Module(new ClientUncachedTileLinkNetworkPort(idx, addrToManagerId))
          port.io.client <> u
          qs.io.client <> port.io.network
          qs.io.manager
        }
      }
    }
  }

  val managers = io.managers.zipWithIndex.map {
    case (m, i) => {
      val port = Module(new ManagerTileLinkNetworkPort(i, sharerToClientId))
      val qs = Module(new TileLinkEnqueuer(managerDepths))
      port.io.manager <> m
      port.io.network <> qs.io.manager
      qs.io.client
    }
  }
}

/** A simple arbiter for each channel that also deals with header-based routing.
 * Assumes a single manager agent. */
class PortedTileLinkArbiter(
    sharerToClientId: UInt => UInt = (u: UInt) => u,
    clientDepths: TileLinkDepths = TileLinkDepths(0,0,0,0,0),
    managerDepths: TileLinkDepths = TileLinkDepths(0,0,0,0,0))
    (implicit p: Parameters)
    extends PortedTileLinkNetwork(u => UInt(0), sharerToClientId, clientDepths, managerDepths)(p)
    with TileLinkArbiterLike
    with PassesId {
  val arbN = nClients
  require(nManagers == 1)
  if (arbN > 1) {
    hookupClientSource(clients.map(_.acquire), managers.head.acquire)
    hookupClientSource(clients.map(_.release), managers.head.release)
    hookupFinish(clients.map(_.finish), managers.head.finish)
    hookupManagerSourceWithHeader(clients.map(_.probe), managers.head.probe)
    hookupManagerSourceWithHeader(clients.map(_.grant), managers.head.grant)
  } else {
    managers.head <> clients.head
  }
}
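
// Usage sketch (illustrative; assumes a Parameters instance describing one
// manager, a hypothetical outer agent `l2`, and tiles exposing `cached` and
// `uncached` TileLink ports):
//
//   val network = Module(new PortedTileLinkArbiter(sharerToClientId = u => u))
//   (network.io.clients_cached zip tiles).foreach { case (n, t) => n <> t.io.cached }
//   (network.io.clients_uncached zip tiles).foreach { case (n, t) => n <> t.io.uncached }
//   l2.io.inner <> network.io.managers.head
//
// Swapping in PortedTileLinkCrossbar (below) changes only the physical
// interconnect; the port-level contract is unchanged.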

/** Provides a separate physical crossbar for each channel. Assumes multiple
  * manager agents. Clients are assigned to higher physical network port ids
  * than managers, and translations between logical network id and physical
  * crossbar port id are done automatically.
  */
class PortedTileLinkCrossbar(
    addrToManagerId: UInt => UInt = u => UInt(0),
    sharerToClientId: UInt => UInt = u => u,
    clientDepths: TileLinkDepths = TileLinkDepths(0,0,0,0,0),
    managerDepths: TileLinkDepths = TileLinkDepths(0,0,0,0,0))
    (implicit p: Parameters)
  extends PortedTileLinkNetwork(addrToManagerId, sharerToClientId, clientDepths, managerDepths)(p) {
  val n = p(LNEndpoints)
  val phyHdrWidth = log2Up(n)
  val count = tlDataBeats
  // Actually instantiate the particular networks required for TileLink
  val acqNet = Module(new BasicBus(CrossbarConfig(n, new Acquire, count, Some((a: PhysicalNetworkIO[Acquire]) => a.payload.hasMultibeatData()))))
  val relNet = Module(new BasicBus(CrossbarConfig(n, new Release, count, Some((r: PhysicalNetworkIO[Release]) => r.payload.hasMultibeatData()))))
  val prbNet = Module(new BasicBus(CrossbarConfig(n, new Probe)))
  val gntNet = Module(new BasicBus(CrossbarConfig(n, new Grant, count, Some((g: PhysicalNetworkIO[Grant]) => g.payload.hasMultibeatData()))))
  val ackNet = Module(new BasicBus(CrossbarConfig(n, new Finish)))

  // Aliases for the various network IO bundle types
  type PNIO[T <: Data] = DecoupledIO[PhysicalNetworkIO[T]]
  type LNIO[T <: Data] = DecoupledIO[LogicalNetworkIO[T]]
  type FromCrossbar[T <: Data] = PNIO[T] => LNIO[T]
  type ToCrossbar[T <: Data] = LNIO[T] => PNIO[T]

  // Shims for converting between logical network IOs and physical network IOs
  def crossbarToManagerShim[T <: Data](in: PNIO[T]): LNIO[T] = {
    val out = DefaultFromPhysicalShim(in)
    out.bits.header.src := in.bits.header.src - UInt(nManagers)
    out
  }
  def crossbarToClientShim[T <: Data](in: PNIO[T]): LNIO[T] = {
    val out = DefaultFromPhysicalShim(in)
    out.bits.header.dst := in.bits.header.dst - UInt(nManagers)
    out
  }
  def managerToCrossbarShim[T <: Data](in: LNIO[T]): PNIO[T] = {
    val out = DefaultToPhysicalShim(n, in)
    out.bits.header.dst := in.bits.header.dst + UInt(nManagers, phyHdrWidth)
    out
  }
  def clientToCrossbarShim[T <: Data](in: LNIO[T]): PNIO[T] = {
    val out = DefaultToPhysicalShim(n, in)
    out.bits.header.src := in.bits.header.src + UInt(nManagers, phyHdrWidth)
    out
  }
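
  /* Worked example of the id translation above (illustrative): with
   * nManagers = 2 and nClients = 3, physical ports 0-1 are managers and
   * ports 2-4 are clients. A message from logical client 0 to manager 1
   * enters the crossbar with header.src = 0 + nManagers = 2 and
   * header.dst = 1; the manager-side shim then recovers src = 2 - 2 = 0.
   */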

  // Make an individual connection between virtual and physical ports using
  // a particular shim. Also pin the unused Decoupled control signal low.
  def doDecoupledInputHookup[T <: Data](phys_in: PNIO[T], phys_out: PNIO[T], log_io: LNIO[T], shim: ToCrossbar[T]) = {
    val s = shim(log_io)
    phys_in.valid := s.valid
    phys_in.bits := s.bits
    s.ready := phys_in.ready
    phys_out.ready := Bool(false)
  }

  def doDecoupledOutputHookup[T <: Data](phys_in: PNIO[T], phys_out: PNIO[T], log_io: LNIO[T], shim: FromCrossbar[T]) = {
    val s = shim(phys_out)
    log_io.valid := s.valid
    log_io.bits := s.bits
    s.ready := log_io.ready
    phys_in.valid := Bool(false)
  }

  // Hookup all instances of a particular subbundle of TileLink
  def doDecoupledHookups[T <: Data](physIO: BasicCrossbarIO[T], getLogIO: TileLinkIO => LNIO[T]) = {
    physIO.in.head.bits.payload match {
      case c: ClientToManagerChannel => {
        managers.zipWithIndex.map { case (i, id) =>
          doDecoupledOutputHookup(physIO.in(id), physIO.out(id), getLogIO(i), crossbarToManagerShim[T])
        }
        clients.zipWithIndex.map { case (i, id) =>
          doDecoupledInputHookup(physIO.in(id+nManagers), physIO.out(id+nManagers), getLogIO(i), clientToCrossbarShim[T])
        }
      }
      case m: ManagerToClientChannel => {
        managers.zipWithIndex.map { case (i, id) =>
          doDecoupledInputHookup(physIO.in(id), physIO.out(id), getLogIO(i), managerToCrossbarShim[T])
        }
        clients.zipWithIndex.map { case (i, id) =>
          doDecoupledOutputHookup(physIO.in(id+nManagers), physIO.out(id+nManagers), getLogIO(i), crossbarToClientShim[T])
        }
      }
    }
  }

  doDecoupledHookups(acqNet.io, (tl: TileLinkIO) => tl.acquire)
  doDecoupledHookups(relNet.io, (tl: TileLinkIO) => tl.release)
  doDecoupledHookups(prbNet.io, (tl: TileLinkIO) => tl.probe)
  doDecoupledHookups(gntNet.io, (tl: TileLinkIO) => tl.grant)
  doDecoupledHookups(ackNet.io, (tl: TileLinkIO) => tl.finish)
}
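
/* Usage sketch (illustrative only): interleave blocks across two managers
 * by a hypothetical bank-select bit of the block address.
 *
 *   val net = Module(new PortedTileLinkCrossbar(
 *     addrToManagerId = (addr: UInt) => addr(0)))
 *   net.io.clients_cached <> tileIOs          // `tileIOs` is hypothetical
 *   banks.zip(net.io.managers).foreach { case (b, m) => b.io.inner <> m }
 */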

class ClientUncachedTileLinkIORouter(
    nOuter: Int, routeSel: UInt => UInt)(implicit p: Parameters)
    extends TLModule {

  val io = new Bundle {
    val in = (new ClientUncachedTileLinkIO).flip
    val out = Vec(nOuter, new ClientUncachedTileLinkIO)
  }

  val acq_route = routeSel(io.in.acquire.bits.full_addr())

  io.in.acquire.ready := Bool(false)

  io.out.zipWithIndex.foreach { case (out, i) =>
    out.acquire.valid := io.in.acquire.valid && acq_route(i)
    out.acquire.bits := io.in.acquire.bits
    when (acq_route(i)) { io.in.acquire.ready := out.acquire.ready }
  }

  val gnt_arb = Module(new LockingRRArbiter(
    new Grant, nOuter, tlDataBeats, Some((gnt: Grant) => gnt.hasMultibeatData())))
  gnt_arb.io.in <> io.out.map(_.grant)
  io.in.grant <> gnt_arb.io.out

  assert(!io.in.acquire.valid || acq_route.orR, "No valid route")
}
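
/* routeSel sketch (illustrative; the address split is hypothetical): route
 * addresses below 0x80000000 to port 0 and the rest to port 1. routeSel must
 * return a one-hot UInt with one bit per output port, bit i enabling out(i).
 *
 *   val routeSel = (addr: UInt) => {
 *     val isLow = addr < UInt("h80000000")
 *     Cat(!isLow, isLow)  // bit 0 -> port 0, bit 1 -> port 1
 *   }
 *   val router = Module(new ClientUncachedTileLinkIORouter(2, routeSel))
 */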

class TileLinkInterconnectIO(val nInner: Int, val nOuter: Int)
    (implicit p: Parameters) extends Bundle {
  val in = Vec(nInner, new ClientUncachedTileLinkIO).flip
  val out = Vec(nOuter, new ClientUncachedTileLinkIO)
}

class ClientUncachedTileLinkIOCrossbar(
    nInner: Int, nOuter: Int, routeSel: UInt => UInt)
    (implicit p: Parameters) extends TLModule {

  val io = new TileLinkInterconnectIO(nInner, nOuter)

  if (nInner == 1) {
    val router = Module(new ClientUncachedTileLinkIORouter(nOuter, routeSel))
    router.io.in <> io.in.head
    io.out <> router.io.out
  } else {
    val routers = List.fill(nInner) {
      Module(new ClientUncachedTileLinkIORouter(nOuter, routeSel)) }
    val arbiters = List.fill(nOuter) {
      Module(new ClientUncachedTileLinkIOArbiter(nInner)) }

    for (i <- 0 until nInner) {
      routers(i).io.in <> io.in(i)
    }

    for (i <- 0 until nOuter) {
      arbiters(i).io.in <> routers.map(r => r.io.out(i))
      io.out(i) <> arbiters(i).io.out
    }
  }
}
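
/* Topology note with a small worked example (illustrative): for nInner = 2
 * and nOuter = 2 the crossbar instantiates 2 routers and 2 arbiters. Each
 * router fans its input out to every output column, and arbiter i merges
 * routers(0).io.out(i) with routers(1).io.out(i) onto io.out(i), so any
 * input can reach any output with per-output round-robin arbitration.
 */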

abstract class TileLinkInterconnect(implicit p: Parameters) extends TLModule()(p) {
  val nInner: Int
  val nOuter: Int

  lazy val io = new TileLinkInterconnectIO(nInner, nOuter)
}

class TileLinkRecursiveInterconnect(val nInner: Int, addrMap: AddrMap)
    (implicit p: Parameters) extends TileLinkInterconnect()(p) {
  def port(name: String) = io.out(addrMap.port(name))
  val nOuter = addrMap.numSlaves
  val routeSel = (addr: UInt) =>
    Cat(addrMap.entries.map(e => addrMap(e.name).containsAddress(addr)).reverse)

  val xbar = Module(new ClientUncachedTileLinkIOCrossbar(nInner, addrMap.length, routeSel))
  xbar.io.in <> io.in

  io.out <> addrMap.entries.zip(xbar.io.out).flatMap {
    case (entry, xbarOut) => {
      entry.region match {
        case submap: AddrMap if submap.isEmpty =>
          xbarOut.acquire.ready := Bool(false)
          xbarOut.grant.valid := Bool(false)
          None
        case submap: AddrMap if !submap.collapse =>
          val ic = Module(new TileLinkRecursiveInterconnect(1, submap))
          ic.io.in.head <> xbarOut
          ic.io.out
        case _ =>
          Some(xbarOut)
      }
    }
  }
}
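
/* Usage sketch (illustrative; `someAddrMap`, `master`, `uart`, and the entry
 * name "uart" are all hypothetical):
 *
 *   val ic = Module(new TileLinkRecursiveInterconnect(1, someAddrMap))
 *   ic.io.in.head <> master
 *   uart.io.tl <> ic.port("uart")
 *
 * Empty submaps are pruned, non-collapsed submaps recurse into a nested
 * interconnect, and leaf regions map straight onto crossbar outputs, so
 * slaves can be attached by name rather than by positional index.
 */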

class TileLinkMemoryInterconnect(
    nBanksPerChannel: Int, nChannels: Int)
    (implicit p: Parameters) extends TileLinkInterconnect()(p) {

  val nBanks = nBanksPerChannel * nChannels
  val nInner = nBanks
  val nOuter = nChannels

  def connectChannel(outer: ClientUncachedTileLinkIO, inner: ClientUncachedTileLinkIO) {
    outer <> inner
    outer.acquire.bits.addr_block := inner.acquire.bits.addr_block >> UInt(log2Ceil(nChannels))
  }

  for (i <- 0 until nChannels) {
    /* Bank assignments to channels are strided so that consecutive banks
     * map to different channels. That way, consecutive cache lines also
     * map to different channels */
    val banks = (i until nBanks by nChannels).map(j => io.in(j))

    val channelArb = Module(new ClientUncachedTileLinkIOArbiter(nBanksPerChannel))
    channelArb.io.in <> banks
    connectChannel(io.out(i), channelArb.io.out)
  }
}
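
/* Worked example of the striding (illustrative): with nBanks = 4 and
 * nChannels = 2, channel 0 serves banks {0, 2} and channel 1 serves banks
 * {1, 3}. connectChannel then shifts the low log2Ceil(nChannels) bits of
 * addr_block (which selected the channel) off the block address, so each
 * channel sees a dense block address space of its own.
 */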

/** Allows users to switch between various memory configurations. Note that
  * this is a dangerous operation: not only does switching the select input to
  * this module violate TileLink, it also causes the memory of the machine to
  * become garbled. It's expected that select only changes at boot time, as
  * part of the memory controller configuration. */
class TileLinkMemorySelectorIO(val nBanks: Int, val maxMemChannels: Int, nConfigs: Int)
    (implicit p: Parameters)
    extends TileLinkInterconnectIO(nBanks, maxMemChannels) {
  val select = UInt(INPUT, width = log2Up(nConfigs))
  override def cloneType =
    new TileLinkMemorySelectorIO(nBanks, maxMemChannels, nConfigs).asInstanceOf[this.type]
}

class TileLinkMemorySelector(nBanks: Int, maxMemChannels: Int, configs: Seq[Int])
    (implicit p: Parameters)
    extends TileLinkInterconnect()(p) {
  val nInner = nBanks
  val nOuter = maxMemChannels
  val nConfigs = configs.size

  override lazy val io = new TileLinkMemorySelectorIO(nBanks, maxMemChannels, nConfigs)

  def muxOnSelect[T <: Data](up: DecoupledIO[T], dn: DecoupledIO[T], active: Bool): Unit = {
    when (active) { dn.bits := up.bits }
    when (active) { up.ready := dn.ready }
    when (active) { dn.valid := up.valid }
  }

  def muxOnSelect(up: ClientUncachedTileLinkIO, dn: ClientUncachedTileLinkIO, active: Bool): Unit = {
    muxOnSelect(up.acquire, dn.acquire, active)
    muxOnSelect(dn.grant, up.grant, active)
  }

  def muxOnSelect(up: Vec[ClientUncachedTileLinkIO], dn: Vec[ClientUncachedTileLinkIO], active: Bool) : Unit = {
    for (i <- 0 until up.size)
      muxOnSelect(up(i), dn(i), active)
  }

  /* Disconnects a vector of TileLink ports, which involves setting them to
   * invalid. Chisel does not allow unconnected inputs, so we also have to
   * drive the bits to 0. */
  def disconnectOuter(outer: Vec[ClientUncachedTileLinkIO]) = {
    outer.foreach{ m =>
      m.acquire.valid := Bool(false)
      m.acquire.bits := m.acquire.bits.fromBits(UInt(0))
      m.grant.ready := Bool(false)
    }
  }

  def disconnectInner(inner: Vec[ClientUncachedTileLinkIO]) = {
    inner.foreach { m =>
      m.grant.valid := Bool(false)
      m.grant.bits := m.grant.bits.fromBits(UInt(0))
      m.acquire.ready := Bool(false)
    }
  }

  /* Provides default wires on all our outputs. */
  disconnectOuter(io.out)
  disconnectInner(io.in)

  /* Constructs interconnects for each of the layouts suggested by the
   * configuration and switches between them based on the select input. */
  configs.zipWithIndex.foreach{ case (nChannels, select) =>
    val nBanksPerChannel = nBanks / nChannels
    val ic = Module(new TileLinkMemoryInterconnect(nBanksPerChannel, nChannels))
    disconnectInner(ic.io.out)
    disconnectOuter(ic.io.in)
    muxOnSelect(io.in, ic.io.in, io.select === UInt(select))
    muxOnSelect(ic.io.out, io.out, io.select === UInt(select))
  }
}
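
/* Usage sketch (illustrative): support 1-, 2-, and 4-channel layouts over
 * 4 banks, selected at boot by a hypothetical configuration register.
 *
 *   val sel = Module(new TileLinkMemorySelector(
 *     nBanks = 4, maxMemChannels = 4, configs = Seq(1, 2, 4)))
 *   sel.io.select := configReg         // `configReg` is hypothetical
 *   sel.io.in <> bankIOs
 *   channels.zip(sel.io.out).foreach { case (c, o) => c <> o }
 */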
308
src/main/scala/uncore/tilelink/Network.scala
Normal file
@ -0,0 +1,308 @@
// See LICENSE for license details.

package uncore.tilelink

import Chisel._
import uncore.util._
import cde.{Parameters, Field}

case object LNEndpoints extends Field[Int]
case object LNHeaderBits extends Field[Int]

class PhysicalHeader(n: Int) extends Bundle {
  val src = UInt(width = log2Up(n))
  val dst = UInt(width = log2Up(n))
}

class PhysicalNetworkIO[T <: Data](n: Int, dType: T) extends Bundle {
  val header = new PhysicalHeader(n)
  val payload = dType.cloneType
  override def cloneType = new PhysicalNetworkIO(n,dType).asInstanceOf[this.type]
}

class BasicCrossbarIO[T <: Data](n: Int, dType: T) extends Bundle {
  val in = Vec(n, Decoupled(new PhysicalNetworkIO(n,dType))).flip
  val out = Vec(n, Decoupled(new PhysicalNetworkIO(n,dType)))
}

abstract class PhysicalNetwork extends Module

case class CrossbarConfig[T <: Data](n: Int, dType: T, count: Int = 1, needsLock: Option[PhysicalNetworkIO[T] => Bool] = None)

abstract class AbstractCrossbar[T <: Data](conf: CrossbarConfig[T]) extends PhysicalNetwork {
  val io = new BasicCrossbarIO(conf.n, conf.dType)
}

class BasicBus[T <: Data](conf: CrossbarConfig[T]) extends AbstractCrossbar(conf) {
  val arb = Module(new LockingRRArbiter(io.in(0).bits, conf.n, conf.count, conf.needsLock))
  arb.io.in <> io.in

  arb.io.out.ready := io.out(arb.io.out.bits.header.dst).ready
  for ((out, i) <- io.out zipWithIndex) {
    out.valid := arb.io.out.valid && arb.io.out.bits.header.dst === UInt(i)
    out.bits := arb.io.out.bits
  }
}

class BasicCrossbar[T <: Data](conf: CrossbarConfig[T]) extends AbstractCrossbar(conf) {
  io.in.foreach { _.ready := Bool(false) }

  io.out.zipWithIndex.map{ case (out, i) => {
    val rrarb = Module(new LockingRRArbiter(io.in(0).bits, conf.n, conf.count, conf.needsLock))
    (rrarb.io.in, io.in).zipped.map{ case (arb, in) => {
      val destined = in.bits.header.dst === UInt(i)
      arb.valid := in.valid && destined
      arb.bits := in.bits
      when (arb.ready && destined) { in.ready := Bool(true) }
    }}
    out <> rrarb.io.out
  }}
}
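
/* Instantiation sketch (illustrative): a 4-port crossbar carrying Grants,
 * locking each round-robin arbiter for tlDataBeats beats whenever a
 * multibeat Grant wins arbitration (mirroring the gntNet construction above).
 *
 *   val net = Module(new BasicCrossbar(CrossbarConfig(
 *     n = 4, dType = new Grant, count = tlDataBeats,
 *     needsLock = Some((g: PhysicalNetworkIO[Grant]) => g.payload.hasMultibeatData()))))
 */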

abstract class LogicalNetwork extends Module

class LogicalHeader(implicit p: Parameters) extends junctions.ParameterizedBundle()(p) {
  val src = UInt(width = p(LNHeaderBits))
  val dst = UInt(width = p(LNHeaderBits))
}

class LogicalNetworkIO[T <: Data](dType: T)(implicit p: Parameters) extends Bundle {
  val header = new LogicalHeader
  val payload = dType.cloneType
  override def cloneType = new LogicalNetworkIO(dType)(p).asInstanceOf[this.type]
}

object DecoupledLogicalNetworkIOWrapper {
  def apply[T <: Data](
      in: DecoupledIO[T],
      src: UInt = UInt(0),
      dst: UInt = UInt(0))
      (implicit p: Parameters): DecoupledIO[LogicalNetworkIO[T]] = {
    val out = Wire(Decoupled(new LogicalNetworkIO(in.bits)))
    out.valid := in.valid
    out.bits.payload := in.bits
    out.bits.header.dst := dst
    out.bits.header.src := src
    in.ready := out.ready
    out
  }
}
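
/* Usage sketch (illustrative; `client` and `clientId` are hypothetical):
 * attach a fixed header to a headerless Decoupled channel, here pinning a
 * client's traffic to manager port 0.
 *
 *   val wrapped = DecoupledLogicalNetworkIOWrapper(
 *     client.acquire, src = UInt(clientId), dst = UInt(0))
 *   // and the inverse, dropping the header again:
 *   val unwrapped = DecoupledLogicalNetworkIOUnwrapper(wrapped)
 */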

object DecoupledLogicalNetworkIOUnwrapper {
  def apply[T <: Data](in: DecoupledIO[LogicalNetworkIO[T]])
      (implicit p: Parameters): DecoupledIO[T] = {
    val out = Wire(Decoupled(in.bits.payload))
    out.valid := in.valid
    out.bits := in.bits.payload
    in.ready := out.ready
    out
  }
}

object DefaultFromPhysicalShim {
  def apply[T <: Data](in: DecoupledIO[PhysicalNetworkIO[T]])
      (implicit p: Parameters): DecoupledIO[LogicalNetworkIO[T]] = {
    val out = Wire(Decoupled(new LogicalNetworkIO(in.bits.payload)))
    out.bits.header := in.bits.header
    out.bits.payload := in.bits.payload
    out.valid := in.valid
    in.ready := out.ready
    out
  }
}

object DefaultToPhysicalShim {
  def apply[T <: Data](n: Int, in: DecoupledIO[LogicalNetworkIO[T]])
      (implicit p: Parameters): DecoupledIO[PhysicalNetworkIO[T]] = {
    val out = Wire(Decoupled(new PhysicalNetworkIO(n, in.bits.payload)))
    out.bits.header := in.bits.header
    out.bits.payload := in.bits.payload
    out.valid := in.valid
    in.ready := out.ready
    out
  }
}

/** A helper module that automatically issues [[uncore.Finish]] messages in response
  * to [[uncore.Grant]] that it receives from a manager and forwards to a client
  */
class FinishUnit(srcId: Int = 0, outstanding: Int = 2)(implicit p: Parameters) extends TLModule()(p)
    with HasDataBeatCounters {
  val io = new Bundle {
    val grant = Decoupled(new LogicalNetworkIO(new Grant)).flip
    val refill = Decoupled(new Grant)
    val finish = Decoupled(new LogicalNetworkIO(new Finish))
    val ready = Bool(OUTPUT)
  }

  val g = io.grant.bits.payload

  if(tlNetworkPreservesPointToPointOrdering) {
    io.finish.valid := Bool(false)
    io.refill.valid := io.grant.valid
    io.refill.bits := g
    io.grant.ready := io.refill.ready
    io.ready := Bool(true)
  } else {
    // We only want to send Finishes after we have collected all beats of
    // a multibeat Grant. But Grants from multiple managers or transactions may
    // get interleaved, so we may need a counter for each.
    val done = if(tlNetworkDoesNotInterleaveBeats) {
      connectIncomingDataBeatCounterWithHeader(io.grant)
    } else {
      val entries = 1 << tlClientXactIdBits
      def getId(g: LogicalNetworkIO[Grant]) = g.payload.client_xact_id
      assert(getId(io.grant.bits) <= UInt(entries), "Not enough grant beat counters, only " + entries + " entries.")
      connectIncomingDataBeatCountersWithHeader(io.grant, entries, getId).reduce(_||_)
    }
    val q = Module(new FinishQueue(outstanding))
    q.io.enq.valid := io.grant.fire() && g.requiresAck() && (!g.hasMultibeatData() || done)
    q.io.enq.bits := g.makeFinish()
    q.io.enq.bits.manager_id := io.grant.bits.header.src

    io.finish.bits.header.src := UInt(srcId)
    io.finish.bits.header.dst := q.io.deq.bits.manager_id
    io.finish.bits.payload := q.io.deq.bits
    io.finish.valid := q.io.deq.valid
    q.io.deq.ready := io.finish.ready

    io.refill.valid := (q.io.enq.ready || !g.requiresAck()) && io.grant.valid
    io.refill.bits := g
    io.grant.ready := (q.io.enq.ready || !g.requiresAck()) && io.refill.ready
    io.ready := q.io.enq.ready
  }
}
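
/* Wiring sketch (mirrors ClientUncachedTileLinkNetworkPort below; `net` and
 * `client` are hypothetical):
 *
 *   val finisher = Module(new FinishUnit(clientId))
 *   finisher.io.grant <> net.grant
 *   net.finish <> finisher.io.finish
 *   client.grant <> finisher.io.refill  // headerless Grants, auto-acked
 */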

class FinishQueue(entries: Int)(implicit p: Parameters) extends Queue(new FinishToDst()(p), entries)

/** A port to convert [[uncore.ClientTileLinkIO]].flip into [[uncore.TileLinkIO]]
  *
  * Creates network headers for [[uncore.Acquire]] and [[uncore.Release]] messages,
  * calculating header.dst and filling in header.src.
  * Strips headers from [[uncore.Probe Probes]].
  * Passes [[uncore.GrantFromSrc]] and accepts [[uncore.FinishFromDst]] in response,
  * setting up the headers for each.
  *
  * @param clientId network port id of this agent
  * @param addrConvert how a physical address maps to a destination manager port id
  */
class ClientTileLinkNetworkPort(clientId: Int, addrConvert: UInt => UInt)
    (implicit p: Parameters) extends TLModule()(p) {
  val io = new Bundle {
    val client = new ClientTileLinkIO().flip
    val network = new TileLinkIO
  }

  val acq_with_header = ClientTileLinkHeaderCreator(io.client.acquire, clientId, addrConvert)
  val rel_with_header = ClientTileLinkHeaderCreator(io.client.release, clientId, addrConvert)
  val fin_with_header = ClientTileLinkHeaderCreator(io.client.finish, clientId)
  val prb_without_header = DecoupledLogicalNetworkIOUnwrapper(io.network.probe)
  val gnt_without_header = DecoupledLogicalNetworkIOUnwrapper(io.network.grant)

  io.network.acquire <> acq_with_header
  io.network.release <> rel_with_header
  io.network.finish <> fin_with_header
  io.client.probe <> prb_without_header
  io.client.grant.bits.manager_id := io.network.grant.bits.header.src
  io.client.grant <> gnt_without_header
}

/** A port to convert [[uncore.ClientUncachedTileLinkIO]].flip into [[uncore.TileLinkIO]]
  *
  * Creates network headers for [[uncore.Acquire]] and [[uncore.Release]] messages,
  * calculating header.dst and filling in header.src.
  * Responds to [[uncore.Grant]] by automatically issuing [[uncore.Finish]] to the granting managers.
  *
  * @param clientId network port id of this agent
  * @param addrConvert how a physical address maps to a destination manager port id
  */
class ClientUncachedTileLinkNetworkPort(clientId: Int, addrConvert: UInt => UInt)
    (implicit p: Parameters) extends TLModule()(p) {
  val io = new Bundle {
    val client = new ClientUncachedTileLinkIO().flip
    val network = new TileLinkIO
  }

  val finisher = Module(new FinishUnit(clientId))
  finisher.io.grant <> io.network.grant
  io.network.finish <> finisher.io.finish

  val acq_with_header = ClientTileLinkHeaderCreator(io.client.acquire, clientId, addrConvert)
  val gnt_without_header = finisher.io.refill

  io.network.acquire.bits := acq_with_header.bits
  io.network.acquire.valid := acq_with_header.valid && finisher.io.ready
  acq_with_header.ready := io.network.acquire.ready && finisher.io.ready
  io.client.grant <> gnt_without_header
  io.network.probe.ready := Bool(false)
  io.network.release.valid := Bool(false)
}

object ClientTileLinkHeaderCreator {
  def apply[T <: ClientToManagerChannel with HasManagerId](
      in: DecoupledIO[T],
      clientId: Int)
      (implicit p: Parameters): DecoupledIO[LogicalNetworkIO[T]] = {
    val out = Wire(new DecoupledIO(new LogicalNetworkIO(in.bits)))
    out.bits.payload := in.bits
    out.bits.header.src := UInt(clientId)
    out.bits.header.dst := in.bits.manager_id
    out.valid := in.valid
    in.ready := out.ready
    out
  }
  def apply[T <: ClientToManagerChannel with HasCacheBlockAddress](
      in: DecoupledIO[T],
      clientId: Int,
      addrConvert: UInt => UInt)
      (implicit p: Parameters): DecoupledIO[LogicalNetworkIO[T]] = {
    val out = Wire(new DecoupledIO(new LogicalNetworkIO(in.bits)))
    out.bits.payload := in.bits
    out.bits.header.src := UInt(clientId)
    out.bits.header.dst := addrConvert(in.bits.addr_block)
    out.valid := in.valid
    in.ready := out.ready
    out
  }
}
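
/* Usage sketch (illustrative; `client` is a hypothetical ClientTileLinkIO):
 * the address-based overload computes header.dst from the block address,
 * here sending everything to manager port 0.
 *
 *   val acq = ClientTileLinkHeaderCreator(
 *     client.acquire, clientId = 0, addrConvert = (a: UInt) => UInt(0))
 */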

/** A port to convert [[uncore.ManagerTileLinkIO]].flip into [[uncore.TileLinkIO]].flip
  *
  * Creates network headers for [[uncore.Probe]] and [[uncore.Grant]] messages,
  * calculating header.dst and filling in header.src.
  * Strips headers from [[uncore.Acquire]], [[uncore.Release]] and [[uncore.Finish]],
  * but supplies client_id instead.
  *
  * @param managerId the network port id of this agent
  * @param idConvert how a sharer id maps to a destination client port id
  */
class ManagerTileLinkNetworkPort(managerId: Int, idConvert: UInt => UInt)
    (implicit p: Parameters) extends TLModule()(p) {
  val io = new Bundle {
    val manager = new ManagerTileLinkIO().flip
    val network = new TileLinkIO().flip
  }
  io.network.grant <> ManagerTileLinkHeaderCreator(io.manager.grant, managerId, (u: UInt) => u)
  io.network.probe <> ManagerTileLinkHeaderCreator(io.manager.probe, managerId, idConvert)
  io.manager.acquire <> DecoupledLogicalNetworkIOUnwrapper(io.network.acquire)
  io.manager.acquire.bits.client_id := io.network.acquire.bits.header.src
  io.manager.release <> DecoupledLogicalNetworkIOUnwrapper(io.network.release)
  io.manager.release.bits.client_id := io.network.release.bits.header.src
  io.manager.finish <> DecoupledLogicalNetworkIOUnwrapper(io.network.finish)
}

object ManagerTileLinkHeaderCreator {
  def apply[T <: ManagerToClientChannel with HasClientId](
      in: DecoupledIO[T],
      managerId: Int,
      idConvert: UInt => UInt)
      (implicit p: Parameters): DecoupledIO[LogicalNetworkIO[T]] = {
    val out = Wire(new DecoupledIO(new LogicalNetworkIO(in.bits)))
    out.bits.payload := in.bits
    out.bits.header.src := UInt(managerId)
    out.bits.header.dst := idConvert(in.bits.client_id)
    out.valid := in.valid
    in.ready := out.ready
    out
  }
}