// See LICENSE for license details.

package uncore
import Chisel._
import junctions._
import scala.math.max
import cde.{Parameters, Field}

case object TLId extends Field[String]
case class TLKey(id: String) extends Field[TileLinkParameters]

/** Parameters exposed to the top-level design, set based on
  * external requirements or design space exploration
  *
  * Coherency policy used to define custom message types
  * Number of manager agents
  * Number of client agents that cache data and use custom [[uncore.Acquire]] types
  * Number of client agents that do not cache data and use built-in [[uncore.Acquire]] types
  * Maximum number of unique outstanding transactions per client
  * Maximum number of clients multiplexed onto a single port
  * Maximum number of unique outstanding transactions per manager
  * Width of cache block addresses
  * Total amount of data per cache block
  * Number of data beats per cache block
  **/

case class TileLinkParameters(
    coherencePolicy: CoherencePolicy,
    nManagers: Int,
    nCachingClients: Int,
    nCachelessClients: Int,
    maxClientXacts: Int,
    maxClientsPerPort: Int,
    maxManagerXacts: Int,
    dataBits: Int,
    dataBeats: Int = 4,
    overrideDataBitsPerBeat: Option[Int] = None
    ) {
  val nClients = nCachingClients + nCachelessClients
  val writeMaskBits: Int = ((dataBits / dataBeats) - 1) / 8 + 1
  val dataBitsPerBeat: Int = overrideDataBitsPerBeat.getOrElse(dataBits / dataBeats)
}

/** Utility trait for building Modules and Bundles that use TileLink parameters */
trait HasTileLinkParameters {
  implicit val p: Parameters
  val tlExternal = p(TLKey(p(TLId)))
  val tlCoh = tlExternal.coherencePolicy
  val tlNManagers = tlExternal.nManagers
  val tlNCachingClients = tlExternal.nCachingClients
  val tlNCachelessClients = tlExternal.nCachelessClients
  val tlNClients = tlExternal.nClients
  val tlClientIdBits = log2Up(tlNClients)
  val tlManagerIdBits = log2Up(tlNManagers)
  val tlMaxClientXacts = tlExternal.maxClientXacts
  val tlMaxClientsPerPort = tlExternal.maxClientsPerPort
  val tlMaxManagerXacts = tlExternal.maxManagerXacts
  val tlClientXactIdBits = log2Up(tlMaxClientXacts*tlMaxClientsPerPort)
  val tlManagerXactIdBits = log2Up(tlMaxManagerXacts)
  val tlBlockAddrBits = p(PAddrBits) - p(CacheBlockOffsetBits)
  val tlDataBeats = tlExternal.dataBeats
  val tlDataBits = tlExternal.dataBitsPerBeat
  val tlDataBytes = tlDataBits/8
  val tlWriteMaskBits = tlExternal.writeMaskBits
  val tlBeatAddrBits = log2Up(tlDataBeats)
  val tlByteAddrBits = log2Up(tlWriteMaskBits)
  val tlMemoryOpcodeBits = M_SZ
  val tlMemoryOperandSizeBits = MT_SZ
  val tlAcquireTypeBits = max(log2Up(Acquire.nBuiltInTypes),
                              tlCoh.acquireTypeWidth)
  val tlAcquireUnionBits = max(tlWriteMaskBits,
                               (tlByteAddrBits +
                                 tlMemoryOperandSizeBits +
                                 tlMemoryOpcodeBits)) + 1
  val tlGrantTypeBits = max(log2Up(Grant.nBuiltInTypes),
                            tlCoh.grantTypeWidth) + 1
  /** Whether the underlying physical network preserves point-to-point ordering of messages */
  val tlNetworkPreservesPointToPointOrdering = false
  val tlNetworkDoesNotInterleaveBeats = true
  val amoAluOperandBits = p(AmoAluOperandBits)
}

abstract class TLModule(implicit val p: Parameters) extends Module
  with HasTileLinkParameters
abstract class TLBundle(implicit val p: Parameters) extends junctions.ParameterizedBundle()(p)
  with HasTileLinkParameters

/** Base trait for all TileLink channels */
abstract class TileLinkChannel(implicit p: Parameters) extends TLBundle()(p) {
  def hasData(dummy: Int = 0): Bool
  def hasMultibeatData(dummy: Int = 0): Bool
}
/** Directionality of message channel. Used to hook up logical network ports to physical network ports */
abstract class ClientToManagerChannel(implicit p: Parameters) extends TileLinkChannel()(p)
/** Directionality of message channel. Used to hook up logical network ports to physical network ports */
abstract class ManagerToClientChannel(implicit p: Parameters) extends TileLinkChannel()(p)
/** Directionality of message channel. Used to hook up logical network ports to physical network ports */
abstract class ClientToClientChannel(implicit p: Parameters) extends TileLinkChannel()(p) // Unused for now

/** Common signals that are used in multiple channels.
  * These traits are useful for type parameterizing bundle wiring functions.
  */

/** Address of a cache block. */
trait HasCacheBlockAddress extends HasTileLinkParameters {
  val addr_block = UInt(width = tlBlockAddrBits)

  def conflicts(that: HasCacheBlockAddress) = this.addr_block === that.addr_block
  def conflicts(addr: UInt) = this.addr_block === addr
}

/** Sub-block address or beat id of multi-beat data */
trait HasTileLinkBeatId extends HasTileLinkParameters {
  val addr_beat = UInt(width = tlBeatAddrBits)
}

/* Client-side transaction id. Usually Miss Status Handling Register File index */
trait HasClientTransactionId extends HasTileLinkParameters {
  val client_xact_id = Bits(width = tlClientXactIdBits)
}

/** Manager-side transaction id. Usually Transaction Status Handling Register File index. */
trait HasManagerTransactionId extends HasTileLinkParameters {
  val manager_xact_id = Bits(width = tlManagerXactIdBits)
}

/** A single beat of cache block data */
trait HasTileLinkData extends HasTileLinkBeatId {
  val data = UInt(width = tlDataBits)

  def hasData(dummy: Int = 0): Bool
  def hasMultibeatData(dummy: Int = 0): Bool
}

/** An entire cache block of data */
trait HasTileLinkBlock extends HasTileLinkParameters {
  val data_buffer = Vec(tlDataBeats, UInt(width = tlDataBits))
}

/** The id of a client source or destination. Used in managers. */
trait HasClientId extends HasTileLinkParameters {
  val client_id = UInt(width = tlClientIdBits)
}

/** TileLink channel bundle definitions */

/** The Acquire channel is used to initiate coherence protocol transactions in
  * order to gain access to a cache block's data with certain permissions
  * enabled. Messages sent over this channel may be custom types defined by
  * a [[uncore.CoherencePolicy]] for cached data accesses or may be built-in types
  * used for uncached data accesses. Acquires may contain data for Put or
  * PutAtomic built-in types. After sending an Acquire, clients must
  * wait for a manager to send them a [[uncore.Grant]] message in response.
  */
class AcquireMetadata(implicit p: Parameters) extends ClientToManagerChannel()(p)
    with HasCacheBlockAddress
    with HasClientTransactionId
    with HasTileLinkBeatId {
  // Actual bundle fields:
  val is_builtin_type = Bool()
  val a_type = UInt(width = tlAcquireTypeBits)
  val union = Bits(width = tlAcquireUnionBits)

  // Utility funcs for accessing subblock union:
  val opCodeOff = 1
  val opSizeOff = tlMemoryOpcodeBits + opCodeOff
  val addrByteOff = tlMemoryOperandSizeBits + opSizeOff
  val addrByteMSB = tlByteAddrBits + addrByteOff
  /** Hint whether to allocate the block in any intervening caches */
  def allocate(dummy: Int = 0) = union(0)
  /** Op code for [[uncore.PutAtomic]] operations */
  def op_code(dummy: Int = 0) = Mux(
    isBuiltInType(Acquire.putType) || isBuiltInType(Acquire.putBlockType),
    M_XWR, union(opSizeOff-1, opCodeOff))
  /** Operand size for [[uncore.PutAtomic]] */
  def op_size(dummy: Int = 0) = union(addrByteOff-1, opSizeOff)
  /** Byte address for [[uncore.PutAtomic]] operand */
  def addr_byte(dummy: Int = 0) = union(addrByteMSB-1, addrByteOff)
  private def amo_offset(dummy: Int = 0) = addr_byte()(tlByteAddrBits-1, log2Up(amoAluOperandBits/8))
  /** Bit offset of [[uncore.PutAtomic]] operand */
  def amo_shift_bits(dummy: Int = 0) = UInt(amoAluOperandBits)*amo_offset()
  /** Write mask for [[uncore.Put]], [[uncore.PutBlock]], [[uncore.PutAtomic]] */
  def wmask(dummy: Int = 0) =
    Mux(isBuiltInType(Acquire.putAtomicType),
      FillInterleaved(amoAluOperandBits/8, UIntToOH(amo_offset())),
      Mux(isBuiltInType(Acquire.putBlockType) || isBuiltInType(Acquire.putType),
        union(tlWriteMaskBits, 1),
        UInt(0, width = tlWriteMaskBits)))
  /** Full, beat-sized writemask */
  def full_wmask(dummy: Int = 0) = FillInterleaved(8, wmask())
  /** Complete physical address for block, beat or operand */
  def full_addr(dummy: Int = 0) = Cat(this.addr_block, this.addr_beat, this.addr_byte())

  // Other helper functions:
  /** Message type equality */
  def is(t: UInt) = a_type === t //TODO: make this more opaque; def ===?

  /** Is this message a built-in or custom type */
  def isBuiltInType(dummy: Int = 0): Bool = is_builtin_type
  /** Is this message a particular built-in type */
  def isBuiltInType(t: UInt): Bool = is_builtin_type && a_type === t

  /** Does this message refer to subblock operands using info in the Acquire.union subbundle */
  def isSubBlockType(dummy: Int = 0): Bool = isBuiltInType() && Acquire.typesOnSubBlocks.contains(a_type)

  /** Is this message a built-in prefetch message */
  def isPrefetch(dummy: Int = 0): Bool = isBuiltInType() && is(Acquire.prefetchType)

  /** Does this message contain data? Assumes that no custom message types have data. */
  def hasData(dummy: Int = 0): Bool = isBuiltInType() && Acquire.typesWithData.contains(a_type)

  /** Does this message contain multiple beats of data? Assumes that no custom message types have data. */
  def hasMultibeatData(dummy: Int = 0): Bool = Bool(tlDataBeats > 1) && isBuiltInType() &&
                                                 Acquire.typesWithMultibeatData.contains(a_type)

  /** Does this message require the manager to probe the very client that sent it?
    * Needed if multiple caches are attached to the same port.
    */
  def requiresSelfProbe(dummy: Int = 0) = Bool(false)

  /** Mapping between each built-in Acquire type (defined in companion object)
    * and a built-in Grant type.
    */
  def getBuiltInGrantType(dummy: Int = 0): UInt = {
    MuxLookup(this.a_type, Grant.putAckType, Array(
      Acquire.getType       -> Grant.getDataBeatType,
      Acquire.getBlockType  -> Grant.getDataBlockType,
      Acquire.putType       -> Grant.putAckType,
      Acquire.putBlockType  -> Grant.putAckType,
      Acquire.putAtomicType -> Grant.getDataBeatType,
      Acquire.prefetchType  -> Grant.prefetchAckType))
  }
}
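
// Summary of the `union` field layout, read off the accessors defined above
// (an illustrative reading, not an additional specification). Bit 0 is always
// the allocation hint; the built-in types pack different fields above it:
//
//   Get / GetBlock / PutAtomic:  | addr_byte | operand_size | op_code | alloc |
//   Put / PutBlock:              |      wmask (one bit per byte)      | alloc |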

/** [[uncore.AcquireMetadata]] with an extra field containing the data beat */
class Acquire(implicit p: Parameters) extends AcquireMetadata()(p) with HasTileLinkData

/** [[uncore.AcquireMetadata]] with an extra field containing the entire cache block */
class BufferedAcquire(implicit p: Parameters) extends AcquireMetadata()(p) with HasTileLinkBlock

/** [[uncore.Acquire]] with an extra field stating its source id */
class AcquireFromSrc(implicit p: Parameters) extends Acquire()(p) with HasClientId

/** [[uncore.BufferedAcquire]] with an extra field stating its source id */
class BufferedAcquireFromSrc(implicit p: Parameters) extends BufferedAcquire()(p) with HasClientId

/** Contains definitions of the built-in Acquire types and a factory
  * for [[uncore.Acquire]]
  *
  * In general you should avoid using this factory directly and use
  * [[uncore.ClientMetadata.makeAcquire]] for custom cached Acquires and
  * [[uncore.Get]], [[uncore.Put]], etc. for built-in uncached Acquires.
  *
  * @param is_builtin_type built-in or custom type message?
  * @param a_type built-in type enum or custom type enum
  * @param client_xact_id client's transaction id
  * @param addr_block address of the cache block
  * @param addr_beat sub-block address (which beat)
  * @param data data being put outwards
  * @param union additional fields used for uncached types
  */
object Acquire {
  val nBuiltInTypes = 5
  //TODO: Use Enum
  def getType       = UInt("b000") // Get a single beat of data
  def getBlockType  = UInt("b001") // Get a whole block of data
  def putType       = UInt("b010") // Put a single beat of data
  def putBlockType  = UInt("b011") // Put a whole block of data
  def putAtomicType = UInt("b100") // Perform an atomic memory op
  def prefetchType  = UInt("b101") // Prefetch a whole block of data
  def typesWithData = Vec(putType, putBlockType, putAtomicType)
  def typesWithMultibeatData = Vec(putBlockType)
  def typesOnSubBlocks = Vec(putType, getType, putAtomicType)

  def fullWriteMask(implicit p: Parameters) = SInt(-1, width = p(TLKey(p(TLId))).writeMaskBits).toUInt

  // Most generic constructor
  def apply(
        is_builtin_type: Bool,
        a_type: Bits,
        client_xact_id: UInt,
        addr_block: UInt,
        addr_beat: UInt = UInt(0),
        data: UInt = UInt(0),
        union: UInt = UInt(0))
      (implicit p: Parameters): Acquire = {
    val acq = Wire(new Acquire)
    acq.is_builtin_type := is_builtin_type
    acq.a_type := a_type
    acq.client_xact_id := client_xact_id
    acq.addr_block := addr_block
    acq.addr_beat := addr_beat
    acq.data := data
    acq.union := union
    acq
  }
  // Copy constructor
  def apply(a: Acquire): Acquire = {
    val acq = Wire(new Acquire()(a.p))
    acq := a
    acq
  }
}

/** Get a single beat of data from the outer memory hierarchy
  *
  * The client can hint whether the block containing this beat should be
  * allocated in the intervening levels of the hierarchy.
  *
  * @param client_xact_id client's transaction id
  * @param addr_block address of the cache block
  * @param addr_beat sub-block address (which beat)
  * @param addr_byte sub-block address (which byte)
  * @param operand_size {byte, half, word, double} from [[uncore.MemoryOpConstants]]
  * @param alloc hint whether the block should be allocated in intervening caches
  */
object Get {
  def apply(
        client_xact_id: UInt,
        addr_block: UInt,
        addr_beat: UInt,
        alloc: Bool = Bool(true))
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = Acquire.getType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      union = Cat(MT_Q, M_XRD, alloc))
  }
  def apply(
        client_xact_id: UInt,
        addr_block: UInt,
        addr_beat: UInt,
        addr_byte: UInt,
        operand_size: UInt,
        alloc: Bool)
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = Acquire.getType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      union = Cat(addr_byte, operand_size, M_XRD, alloc))
  }
}
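
// Illustrative client-side use of the factory above (a sketch: `io.mem`,
// `xactId`, and `blockAddr` are hypothetical names, and an implicit Parameters
// instance is assumed to be in scope):
//
//   io.mem.acquire.valid := Bool(true)
//   io.mem.acquire.bits := Get(
//     client_xact_id = xactId,    // e.g. the MSHR index tracking this request
//     addr_block     = blockAddr, // cache-block address, tlBlockAddrBits wide
//     addr_beat      = UInt(0))   // which beat of the block to read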

/** Get a whole cache block of data from the outer memory hierarchy
  *
  * The client can hint whether the block should be allocated in the
  * intervening levels of the hierarchy.
  *
  * @param client_xact_id client's transaction id
  * @param addr_block address of the cache block
  * @param alloc hint whether the block should be allocated in intervening caches
  */
object GetBlock {
  def apply(
        client_xact_id: UInt = UInt(0),
        addr_block: UInt,
        alloc: Bool = Bool(true))
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = Acquire.getBlockType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      union = Cat(MT_Q, M_XRD, alloc))
  }
}

/** Prefetch a cache block into the next-outermost level of the memory hierarchy
  * with read permissions.
  *
  * @param client_xact_id client's transaction id
  * @param addr_block address of the cache block
  */
object GetPrefetch {
  def apply(
        client_xact_id: UInt,
        addr_block: UInt)
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = Acquire.prefetchType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = UInt(0),
      union = Cat(MT_Q, M_XRD, Bool(true)))
  }
}

/** Put a single beat of data into the outer memory hierarchy
  *
  * The block will be allocated in the next-outermost level of the hierarchy.
  *
  * @param client_xact_id client's transaction id
  * @param addr_block address of the cache block
  * @param addr_beat sub-block address (which beat)
  * @param data data being refilled to the original requestor
  * @param wmask per-byte write mask for this beat
  */
object Put {
  def apply(
        client_xact_id: UInt,
        addr_block: UInt,
        addr_beat: UInt,
        data: UInt,
        wmask: Option[UInt] = None)
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = Acquire.putType,
      addr_block = addr_block,
      addr_beat = addr_beat,
      client_xact_id = client_xact_id,
      data = data,
      union = Cat(wmask.getOrElse(Acquire.fullWriteMask), Bool(true)))
  }
}
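
// Illustrative use (a sketch: `io.mem`, `xactId`, `blockAddr`, `beat`, and
// `storeData` are hypothetical names, implicit Parameters assumed). Writing
// only the lowest two bytes of a beat by supplying a partial write mask:
//
//   io.mem.acquire.bits := Put(
//     client_xact_id = xactId,
//     addr_block     = blockAddr,
//     addr_beat      = beat,
//     data           = storeData,
//     wmask          = Some(UInt("b11", width = tlWriteMaskBits))) // set bits select the bytes written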

/** Put a whole cache block of data into the outer memory hierarchy
  *
  * If the write mask is not full, the block will be allocated in the
  * next-outermost level of the hierarchy. If the write mask is full, the
  * client can hint whether the block should be allocated or not.
  *
  * @param client_xact_id client's transaction id
  * @param addr_block address of the cache block
  * @param addr_beat sub-block address (which beat of several)
  * @param data data being refilled to the original requestor
  * @param wmask per-byte write mask for this beat
  * @param alloc hint whether the block should be allocated in intervening caches
  */
object PutBlock {
  def apply(
        client_xact_id: UInt,
        addr_block: UInt,
        addr_beat: UInt,
        data: UInt,
        wmask: UInt)
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = Acquire.putBlockType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      data = data,
      union = Cat(wmask, (wmask != Acquire.fullWriteMask)))
  }
  def apply(
        client_xact_id: UInt,
        addr_block: UInt,
        addr_beat: UInt,
        data: UInt,
        alloc: Bool = Bool(true))
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = Acquire.putBlockType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      data = data,
      union = Cat(Acquire.fullWriteMask, alloc))
  }
}

/** Prefetch a cache block into the next-outermost level of the memory hierarchy
  * with write permissions.
  *
  * @param client_xact_id client's transaction id
  * @param addr_block address of the cache block
  */
object PutPrefetch {
  def apply(
        client_xact_id: UInt,
        addr_block: UInt)
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = Acquire.prefetchType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = UInt(0),
      union = Cat(M_XWR, Bool(true)))
  }
}

/** Perform an atomic memory operation in the next-outermost level of the memory hierarchy
  *
  * @param client_xact_id client's transaction id
  * @param addr_block address of the cache block
  * @param addr_beat sub-block address (within which beat)
  * @param addr_byte sub-block address (which byte)
  * @param atomic_opcode {swap, add, xor, and, min, max, minu, maxu} from [[uncore.MemoryOpConstants]]
  * @param operand_size {byte, half, word, double} from [[uncore.MemoryOpConstants]]
  * @param data source operand data
  */
object PutAtomic {
  def apply(
        client_xact_id: UInt,
        addr_block: UInt,
        addr_beat: UInt,
        addr_byte: UInt,
        atomic_opcode: UInt,
        operand_size: UInt,
        data: UInt)
      (implicit p: Parameters): Acquire = {
    Acquire(
      is_builtin_type = Bool(true),
      a_type = Acquire.putAtomicType,
      client_xact_id = client_xact_id,
      addr_block = addr_block,
      addr_beat = addr_beat,
      data = data,
      union = Cat(addr_byte, operand_size, atomic_opcode, Bool(true)))
  }
}
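
// Illustrative use (a sketch: all names other than the factory parameters are
// hypothetical, and the opcode/size constants are the MemoryOpConstants values
// referenced in the doc comment above; implicit Parameters assumed). An atomic
// add on a doubleword operand:
//
//   io.mem.acquire.bits := PutAtomic(
//     client_xact_id = xactId,
//     addr_block     = blockAddr,
//     addr_beat      = beat,
//     addr_byte      = byteOffset, // which operand within the beat
//     atomic_opcode  = M_XA_ADD,   // "add" from MemoryOpConstants
//     operand_size   = MT_D,       // doubleword from MemoryOpConstants
//     data           = addend)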

/** The Probe channel is used to force clients to release data or cede permissions
  * on a cache block. Clients respond to Probes with [[uncore.Release]] messages.
  * The available types of Probes are customized by a particular
  * [[uncore.CoherencePolicy]].
  */
class Probe(implicit p: Parameters) extends ManagerToClientChannel()(p)
    with HasCacheBlockAddress {
  val p_type = UInt(width = tlCoh.probeTypeWidth)

  def is(t: UInt) = p_type === t
  def hasData(dummy: Int = 0) = Bool(false)
  def hasMultibeatData(dummy: Int = 0) = Bool(false)
}

/** [[uncore.Probe]] with an extra field stating its destination id */
class ProbeToDst(implicit p: Parameters) extends Probe()(p) with HasClientId

/** Contains factories for [[uncore.Probe]] and [[uncore.ProbeToDst]]
  *
  * In general you should avoid using these factories directly and use
  * [[uncore.ManagerMetadata.makeProbe(UInt,Acquire)* makeProbe]] instead.
  *
  * @param dst id of client to which probe should be sent
  * @param p_type custom probe type
  * @param addr_block address of the cache block
  */
object Probe {
  def apply(p_type: UInt, addr_block: UInt)(implicit p: Parameters): Probe = {
    val prb = Wire(new Probe)
    prb.p_type := p_type
    prb.addr_block := addr_block
    prb
  }
  def apply(dst: UInt, p_type: UInt, addr_block: UInt)(implicit p: Parameters): ProbeToDst = {
    val prb = Wire(new ProbeToDst)
    prb.client_id := dst
    prb.p_type := p_type
    prb.addr_block := addr_block
    prb
  }
}

/** The Release channel is used to release data or permission back to the manager
  * in response to [[uncore.Probe]] messages. It can also be used to voluntarily
  * write back data, for example in the event that dirty data must be evicted on
  * a cache miss. The available types of Release messages are always customized by
  * a particular [[uncore.CoherencePolicy]]. Releases may contain data or may be
  * simple acknowledgements. Voluntary Releases are acknowledged with [[uncore.Grant Grants]].
  */
class ReleaseMetadata(implicit p: Parameters) extends ClientToManagerChannel()(p)
    with HasTileLinkBeatId
    with HasCacheBlockAddress
    with HasClientTransactionId {
  val r_type = UInt(width = tlCoh.releaseTypeWidth)
  val voluntary = Bool()

  // Helper funcs
  def is(t: UInt) = r_type === t
  def hasData(dummy: Int = 0) = tlCoh.releaseTypesWithData.contains(r_type)
  //TODO: Assumes all releases write back full cache blocks:
  def hasMultibeatData(dummy: Int = 0) = Bool(tlDataBeats > 1) && tlCoh.releaseTypesWithData.contains(r_type)
  def isVoluntary(dummy: Int = 0) = voluntary
  def requiresAck(dummy: Int = 0) = !Bool(tlNetworkPreservesPointToPointOrdering)
  def full_addr(dummy: Int = 0) = Cat(this.addr_block, this.addr_beat, UInt(0, width = tlByteAddrBits))
}

/** [[uncore.ReleaseMetadata]] with an extra field containing the data beat */
class Release(implicit p: Parameters) extends ReleaseMetadata()(p) with HasTileLinkData

/** [[uncore.ReleaseMetadata]] with an extra field containing the entire cache block */
class BufferedRelease(implicit p: Parameters) extends ReleaseMetadata()(p) with HasTileLinkBlock

/** [[uncore.Release]] with an extra field stating its source id */
class ReleaseFromSrc(implicit p: Parameters) extends Release()(p) with HasClientId

/** [[uncore.BufferedRelease]] with an extra field stating its source id */
class BufferedReleaseFromSrc(implicit p: Parameters) extends BufferedRelease()(p) with HasClientId

/** Contains a [[uncore.Release]] factory
  *
  * In general you should avoid using this factory directly and use
  * [[uncore.ClientMetadata.makeRelease]] instead.
  *
  * @param voluntary is this a voluntary writeback
  * @param r_type type enum defined by coherence protocol
  * @param client_xact_id client's transaction id
  * @param addr_block address of the cache block
  * @param addr_beat beat id of the data
  * @param data data being written back
  */
object Release {
  def apply(
        voluntary: Bool,
        r_type: UInt,
        client_xact_id: UInt,
        addr_block: UInt,
        addr_beat: UInt = UInt(0),
        data: UInt = UInt(0))
      (implicit p: Parameters): Release = {
    val rel = Wire(new Release)
    rel.r_type := r_type
    rel.client_xact_id := client_xact_id
    rel.addr_block := addr_block
    rel.addr_beat := addr_beat
    rel.data := data
    rel.voluntary := voluntary
    rel
  }
}
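
// Illustrative use (a sketch: `relType`, `xactId`, `blockAddr`, `beat`, and
// `wbData` are hypothetical names, implicit Parameters assumed). A voluntary
// writeback of one beat of dirty data, with the type enum supplied by the
// coherence policy:
//
//   io.cache.release.bits := Release(
//     voluntary      = Bool(true),
//     r_type         = relType,
//     client_xact_id = xactId,
//     addr_block     = blockAddr,
//     addr_beat      = beat,
//     data           = wbData)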

/** The Grant channel is used to refill data or grant permissions requested of the
  * manager agent via an [[uncore.Acquire]] message. It is also used to acknowledge
  * the receipt of voluntary writeback from clients in the form of [[uncore.Release]]
  * messages. There are built-in Grant messages used for Gets and Puts, and
  * coherence policies may also define custom Grant types. Grants may contain data
  * or may be simple acknowledgements. Grants are responded to with [[uncore.Finish]].
  */
class GrantMetadata(implicit p: Parameters) extends ManagerToClientChannel()(p)
    with HasTileLinkBeatId
    with HasClientTransactionId
    with HasManagerTransactionId {
  val is_builtin_type = Bool()
  val g_type = UInt(width = tlGrantTypeBits)

  // Helper funcs
  def isBuiltInType(dummy: Int = 0): Bool = is_builtin_type
  def isBuiltInType(t: UInt): Bool = is_builtin_type && g_type === t
  def is(t: UInt): Bool = g_type === t
  def hasData(dummy: Int = 0): Bool = Mux(isBuiltInType(),
                                        Grant.typesWithData.contains(g_type),
                                        tlCoh.grantTypesWithData.contains(g_type))
  def hasMultibeatData(dummy: Int = 0): Bool =
    Bool(tlDataBeats > 1) && Mux(isBuiltInType(),
                               Grant.typesWithMultibeatData.contains(g_type),
                               tlCoh.grantTypesWithData.contains(g_type))
  def isVoluntary(dummy: Int = 0): Bool = isBuiltInType() && (g_type === Grant.voluntaryAckType)
  def requiresAck(dummy: Int = 0): Bool = !Bool(tlNetworkPreservesPointToPointOrdering) && !isVoluntary()
  def makeFinish(dummy: Int = 0): Finish = {
    val f = Wire(new Finish)
    f.manager_xact_id := this.manager_xact_id
    f
  }
}

/** [[uncore.GrantMetadata]] with an extra field containing a single beat of data */
class Grant(implicit p: Parameters) extends GrantMetadata()(p) with HasTileLinkData

/** [[uncore.Grant]] with an extra field stating its destination */
class GrantToDst(implicit p: Parameters) extends Grant()(p) with HasClientId

/** [[uncore.GrantMetadata]] with an extra field containing an entire cache block */
class BufferedGrant(implicit p: Parameters) extends GrantMetadata()(p) with HasTileLinkBlock

/** [[uncore.BufferedGrant]] with an extra field stating its destination */
class BufferedGrantToDst(implicit p: Parameters) extends BufferedGrant()(p) with HasClientId

/** Contains definitions of the built-in grant types and factories
  * for [[uncore.Grant]] and [[uncore.GrantToDst]]
  *
  * In general you should avoid using these factories directly and use
  * [[uncore.ManagerMetadata.makeGrant(uncore.AcquireFromSrc* makeGrant]] instead.
  *
  * @param dst id of client to which grant should be sent
  * @param is_builtin_type built-in or custom type message?
  * @param g_type built-in type enum or custom type enum
  * @param client_xact_id client's transaction id
  * @param manager_xact_id manager's transaction id
  * @param addr_beat beat id of the data
  * @param data data being refilled to the original requestor
  */
object Grant {
  val nBuiltInTypes = 5
  def voluntaryAckType = UInt("b000") // For acking Releases
  def prefetchAckType  = UInt("b001") // For acking any kind of Prefetch
  def putAckType       = UInt("b011") // For acking any kind of non-prefetch Put
  def getDataBeatType  = UInt("b100") // Supplying a single beat of Get
  def getDataBlockType = UInt("b101") // Supplying all beats of a GetBlock
  def typesWithData = Vec(getDataBlockType, getDataBeatType)
  def typesWithMultibeatData = Vec(getDataBlockType)

  def apply(
        is_builtin_type: Bool,
        g_type: UInt,
        client_xact_id: UInt,
        manager_xact_id: UInt,
        addr_beat: UInt,
        data: UInt)
      (implicit p: Parameters): Grant = {
    val gnt = Wire(new Grant)
    gnt.is_builtin_type := is_builtin_type
    gnt.g_type := g_type
    gnt.client_xact_id := client_xact_id
    gnt.manager_xact_id := manager_xact_id
    gnt.addr_beat := addr_beat
    gnt.data := data
    gnt
  }

  def apply(
        dst: UInt,
        is_builtin_type: Bool,
        g_type: UInt,
        client_xact_id: UInt,
        manager_xact_id: UInt,
        addr_beat: UInt = UInt(0),
        data: UInt = UInt(0))
      (implicit p: Parameters): GrantToDst = {
    val gnt = Wire(new GrantToDst)
    gnt.client_id := dst
    gnt.is_builtin_type := is_builtin_type
    gnt.g_type := g_type
    gnt.client_xact_id := client_xact_id
    gnt.manager_xact_id := manager_xact_id
    gnt.addr_beat := addr_beat
    gnt.data := data
    gnt
  }
}
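
// Illustrative manager-side use (a sketch: `io.inner`, `srcClient`,
// `acqClientXactId`, and `trackerId` are hypothetical names, implicit
// Parameters assumed). Acknowledging a single-beat Put with a built-in Grant
// routed back to its sender:
//
//   io.inner.grant.bits := Grant(
//     dst             = srcClient,       // client_id captured from the incoming AcquireFromSrc
//     is_builtin_type = Bool(true),
//     g_type          = Grant.putAckType,
//     client_xact_id  = acqClientXactId, // echoed back to the requesting client
//     manager_xact_id = UInt(trackerId)) // this tracker's own transaction id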

/** The Finish channel is used to provide a global ordering of transactions
  * in networks that do not guarantee point-to-point ordering of messages.
  * A Finish message is sent as acknowledgement of receipt of a [[uncore.Grant]].
  * When a Finish message is received, a manager knows it is safe to begin
  * processing other transactions that touch the same cache block.
  */
class Finish(implicit p: Parameters) extends ClientToManagerChannel()(p)
    with HasManagerTransactionId {
  def hasData(dummy: Int = 0) = Bool(false)
  def hasMultibeatData(dummy: Int = 0) = Bool(false)
}

/** Complete IO definition for incoherent TileLink, including networking headers */
class UncachedTileLinkIO(implicit p: Parameters) extends TLBundle()(p) {
  val acquire = new DecoupledIO(new LogicalNetworkIO(new Acquire))
  val grant   = new DecoupledIO(new LogicalNetworkIO(new Grant)).flip
  val finish  = new DecoupledIO(new LogicalNetworkIO(new Finish))
}

/** Complete IO definition for coherent TileLink, including networking headers */
class TileLinkIO(implicit p: Parameters) extends UncachedTileLinkIO()(p) {
  val probe   = new DecoupledIO(new LogicalNetworkIO(new Probe)).flip
  val release = new DecoupledIO(new LogicalNetworkIO(new Release))
}

/** This version of UncachedTileLinkIO does not contain network headers.
  * It is intended for use within client agents.
  *
  * Headers are provided in the top-level that instantiates the clients and network,
  * probably using a [[uncore.ClientTileLinkNetworkPort]] module.
  * By eliding the header subbundles within the clients we can enable
  * hierarchical P-and-R while minimizing unconnected port errors in GDS.
  *
  * Secondly, this version of the interface elides [[uncore.Finish]] messages, with the
  * assumption that a [[uncore.FinishUnit]] has been coupled to the TileLinkIO port
  * to deal with acking received [[uncore.Grant Grants]].
  */
class ClientUncachedTileLinkIO(implicit p: Parameters) extends TLBundle()(p) {
  val acquire = new DecoupledIO(new Acquire)
  val grant   = new DecoupledIO(new Grant).flip
}

/** This version of TileLinkIO does not contain network headers.
  * It is intended for use within client agents.
  */
class ClientTileLinkIO(implicit p: Parameters) extends ClientUncachedTileLinkIO()(p) {
  val probe   = new DecoupledIO(new Probe).flip
  val release = new DecoupledIO(new Release)
}

/** This version of TileLinkIO does not contain network headers, but
  * every channel does include an extra client_id subbundle.
  * It is intended for use within Management agents.
  *
  * Managers need to track where [[uncore.Acquire]] and [[uncore.Release]] messages
  * originated so that they can send a [[uncore.Grant]] to the right place.
  * Similarly they must be able to issue Probes to particular clients.
  * However, we'd still prefer to have [[uncore.ManagerTileLinkNetworkPort]] fill in
  * the header.src to enable hierarchical p-and-r of the managers. Additionally,
  * coherent clients might be mapped to random network port ids, and we'll leave it to the
  * [[uncore.ManagerTileLinkNetworkPort]] to apply the correct mapping. Managers do need to
  * see [[uncore.Finish]] messages so they know when to allow new transactions on a cache
  * block to proceed.
  */
class ManagerTileLinkIO(implicit p: Parameters) extends TLBundle()(p) {
  val acquire = new DecoupledIO(new AcquireFromSrc).flip
  val grant   = new DecoupledIO(new GrantToDst)
  val finish  = new DecoupledIO(new Finish).flip
  val probe   = new DecoupledIO(new ProbeToDst)
  val release = new DecoupledIO(new ReleaseFromSrc).flip
}

/** Utilities for safely wrapping a *UncachedTileLink by pinning probe.ready and release.valid low */
object TileLinkIOWrapper {
  def apply(tl: ClientUncachedTileLinkIO)(implicit p: Parameters): ClientTileLinkIO = {
    val conv = Module(new ClientTileLinkIOWrapper)
    conv.io.in <> tl
    conv.io.out
  }
  def apply(tl: UncachedTileLinkIO)(implicit p: Parameters): TileLinkIO = {
    val conv = Module(new TileLinkIOWrapper)
    conv.io.in <> tl
    conv.io.out
  }
  def apply(tl: ClientTileLinkIO): ClientTileLinkIO = tl
  def apply(tl: TileLinkIO): TileLinkIO = tl
}
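
// Illustrative use (a sketch: `uncachedPort` is a hypothetical
// ClientUncachedTileLinkIO and an implicit Parameters instance is assumed).
// The wrapper lets a cacheless client drive a port that expects the full
// coherent interface, with probe/release tied off safely:
//
//   val coherentPort: ClientTileLinkIO = TileLinkIOWrapper(uncachedPort)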

class TileLinkIOWrapper(implicit p: Parameters) extends TLModule()(p) {
  val io = new Bundle {
    val in = new UncachedTileLinkIO().flip
    val out = new TileLinkIO
  }
  io.out.acquire <> io.in.acquire
  io.in.grant <> io.out.grant
  io.out.finish <> io.in.finish
  io.out.probe.ready := Bool(true)
  io.out.release.valid := Bool(false)
}

class ClientTileLinkIOWrapper(implicit p: Parameters) extends TLModule()(p) {
  val io = new Bundle {
    val in = new ClientUncachedTileLinkIO().flip
    val out = new ClientTileLinkIO
  }
  io.out.acquire <> io.in.acquire
  io.in.grant <> io.out.grant
  io.out.probe.ready := Bool(true)
  io.out.release.valid := Bool(false)
}

/** Used to track metadata for transactions where multiple secondary misses have been merged
  * and handled by a single transaction tracker.
  */
class SecondaryMissInfo(implicit p: Parameters) extends TLBundle()(p)
    with HasTileLinkBeatId
    with HasClientTransactionId
// TODO: add a_type to merge e.g. Get+GetBlocks, and/or HasClientId

/** A helper module that automatically issues [[uncore.Finish]] messages in response
  * to [[uncore.Grant]] messages that it receives from a manager and forwards to a client
  */
class FinishUnit(srcId: Int = 0, outstanding: Int = 2)(implicit p: Parameters) extends TLModule()(p)
    with HasDataBeatCounters {
  val io = new Bundle {
    val grant = Decoupled(new LogicalNetworkIO(new Grant)).flip
    val refill = Decoupled(new Grant)
    val finish = Decoupled(new LogicalNetworkIO(new Finish))
    val ready = Bool(OUTPUT)
  }

  val g = io.grant.bits.payload

  if(tlNetworkPreservesPointToPointOrdering) {
    io.finish.valid := Bool(false)
    io.refill.valid := io.grant.valid
    io.refill.bits := g
    io.grant.ready := io.refill.ready
    io.ready := Bool(true)
  } else {
    // We only want to send Finishes after we have collected all beats of
    // a multibeat Grant. But Grants from multiple managers or transactions may
    // get interleaved, so we could need a counter for each.
    val done = if(tlNetworkDoesNotInterleaveBeats) {
      connectIncomingDataBeatCounterWithHeader(io.grant)
    } else {
      val entries = 1 << tlClientXactIdBits
      def getId(g: LogicalNetworkIO[Grant]) = g.payload.client_xact_id
      assert(getId(io.grant.bits) <= UInt(entries), "Not enough grant beat counters, only " + entries + " entries.")
      connectIncomingDataBeatCountersWithHeader(io.grant, entries, getId).reduce(_||_)
    }
    val q = Module(new FinishQueue(outstanding))
    q.io.enq.valid := io.grant.fire() && g.requiresAck() && (!g.hasMultibeatData() || done)
    q.io.enq.bits.fin := g.makeFinish()
    q.io.enq.bits.dst := io.grant.bits.header.src

    io.finish.bits.header.src := UInt(srcId)
    io.finish.bits.header.dst := q.io.deq.bits.dst
    io.finish.bits.payload := q.io.deq.bits.fin
    io.finish.valid := q.io.deq.valid
    q.io.deq.ready := io.finish.ready

    io.refill.valid := io.grant.valid
    io.refill.bits := g
    io.grant.ready := (q.io.enq.ready || !g.requiresAck()) && io.refill.ready
    io.ready := q.io.enq.ready
  }
}

class FinishQueueEntry(implicit p: Parameters) extends TLBundle()(p) {
  val fin = new Finish
  val dst = UInt(width = log2Up(p(LNEndpoints)))
}

class FinishQueue(entries: Int)(implicit p: Parameters) extends Queue(new FinishQueueEntry()(p), entries)

/** A port to convert [[uncore.ClientTileLinkIO]].flip into [[uncore.TileLinkIO]]
  *
  * Creates network headers for [[uncore.Acquire]] and [[uncore.Release]] messages,
  * calculating header.dst and filling in header.src.
  * Strips headers from [[uncore.Probe Probes]].
  * Responds to [[uncore.Grant]] by automatically issuing [[uncore.Finish]] to the granting managers.
  *
  * @param clientId network port id of this agent
  * @param addrConvert how a physical address maps to a destination manager port id
  */
class ClientTileLinkNetworkPort(clientId: Int, addrConvert: UInt => UInt)
    (implicit p: Parameters) extends TLModule()(p) {
  val io = new Bundle {
    val client = new ClientTileLinkIO().flip
    val network = new TileLinkIO
  }

  val finisher = Module(new FinishUnit(clientId))
  finisher.io.grant <> io.network.grant
  io.network.finish <> finisher.io.finish

  val acq_with_header = ClientTileLinkHeaderCreator(io.client.acquire, clientId, addrConvert)
  val rel_with_header = ClientTileLinkHeaderCreator(io.client.release, clientId, addrConvert)
  val prb_without_header = DecoupledLogicalNetworkIOUnwrapper(io.network.probe)
  val gnt_without_header = finisher.io.refill

  io.network.acquire.bits := acq_with_header.bits
  io.network.acquire.valid := acq_with_header.valid && finisher.io.ready
  acq_with_header.ready := io.network.acquire.ready && finisher.io.ready
  io.network.release <> rel_with_header
  io.client.probe <> prb_without_header
  io.client.grant <> gnt_without_header
}

object ClientTileLinkHeaderCreator {
  def apply[T <: ClientToManagerChannel with HasCacheBlockAddress](
        in: DecoupledIO[T],
        clientId: Int,
        addrConvert: UInt => UInt)
      (implicit p: Parameters): DecoupledIO[LogicalNetworkIO[T]] = {
    val out = Wire(new DecoupledIO(new LogicalNetworkIO(in.bits)))
    out.bits.payload := in.bits
    out.bits.header.src := UInt(clientId)
    out.bits.header.dst := addrConvert(in.bits.addr_block)
    out.valid := in.valid
    in.ready := out.ready
    out
  }
}

/** A port to convert [[uncore.ManagerTileLinkIO]].flip into [[uncore.TileLinkIO]].flip
  *
  * Creates network headers for [[uncore.Probe]] and [[uncore.Grant]] messages,
  * calculating header.dst and filling in header.src.
  * Strips headers from [[uncore.Acquire]], [[uncore.Release]] and [[uncore.Finish]],
  * but supplies client_id instead.
  *
  * @param managerId the network port id of this agent
  * @param idConvert how a sharer id maps to a destination client port id
  */
class ManagerTileLinkNetworkPort(managerId: Int, idConvert: UInt => UInt)
    (implicit p: Parameters) extends TLModule()(p) {
  val io = new Bundle {
    val manager = new ManagerTileLinkIO().flip
    val network = new TileLinkIO().flip
  }
  io.network.grant <> ManagerTileLinkHeaderCreator(io.manager.grant, managerId, (u: UInt) => u)
  io.network.probe <> ManagerTileLinkHeaderCreator(io.manager.probe, managerId, idConvert)
  io.manager.acquire.bits.client_id := io.network.acquire.bits.header.src
  io.manager.acquire <> DecoupledLogicalNetworkIOUnwrapper(io.network.acquire)
  io.manager.release.bits.client_id := io.network.release.bits.header.src
  io.manager.release <> DecoupledLogicalNetworkIOUnwrapper(io.network.release)
  io.manager.finish <> DecoupledLogicalNetworkIOUnwrapper(io.network.finish)
}

object ManagerTileLinkHeaderCreator {
  def apply[T <: ManagerToClientChannel with HasClientId](
        in: DecoupledIO[T],
        managerId: Int,
        idConvert: UInt => UInt)
      (implicit p: Parameters): DecoupledIO[LogicalNetworkIO[T]] = {
    val out = Wire(new DecoupledIO(new LogicalNetworkIO(in.bits)))
    out.bits.payload := in.bits
    out.bits.header.src := UInt(managerId)
    out.bits.header.dst := idConvert(in.bits.client_id)
    out.valid := in.valid
    in.ready := out.ready
    out
  }
}

/** Struct for describing per-channel queue depths */
case class TileLinkDepths(acq: Int, prb: Int, rel: Int, gnt: Int, fin: Int)

/** Optionally enqueues each [[uncore.TileLinkChannel]] individually */
class TileLinkEnqueuer(depths: TileLinkDepths)(implicit p: Parameters) extends Module {
  val io = new Bundle {
    val client = new TileLinkIO().flip
    val manager = new TileLinkIO
  }
  io.manager.acquire <> (if(depths.acq > 0) Queue(io.client.acquire, depths.acq) else io.client.acquire)
  io.client.probe    <> (if(depths.prb > 0) Queue(io.manager.probe,  depths.prb) else io.manager.probe)
  io.manager.release <> (if(depths.rel > 0) Queue(io.client.release, depths.rel) else io.client.release)
  io.client.grant    <> (if(depths.gnt > 0) Queue(io.manager.grant,  depths.gnt) else io.manager.grant)
  io.manager.finish  <> (if(depths.fin > 0) Queue(io.client.finish,  depths.fin) else io.client.finish)
}

object TileLinkEnqueuer {
  def apply(in: TileLinkIO, depths: TileLinkDepths)(implicit p: Parameters): TileLinkIO = {
    val t = Module(new TileLinkEnqueuer(depths))
    t.io.client <> in
    t.io.manager
  }
  def apply(in: TileLinkIO, depth: Int)(implicit p: Parameters): TileLinkIO = {
    apply(in, TileLinkDepths(depth, depth, depth, depth, depth))
  }
}
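
// Illustrative use (a sketch: `clientSide` is a hypothetical TileLinkIO and an
// implicit Parameters instance is assumed). Adding a 2-entry queue on every
// channel, e.g. to break a long wiring path:
//
//   val buffered: TileLinkIO = TileLinkEnqueuer(clientSide, 2)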

/** Utility functions for constructing TileLinkIO arbiters */
trait TileLinkArbiterLike extends HasTileLinkParameters {
  // Some shorthand type variables
  type ManagerSourcedWithId = ManagerToClientChannel with HasClientTransactionId
  type ClientSourcedWithId = ClientToManagerChannel with HasClientTransactionId
  type ClientSourcedWithIdAndData = ClientToManagerChannel with HasClientTransactionId with HasTileLinkData

  val arbN: Int // The number of ports on the client side

  // These abstract funcs are filled in depending on whether the arbiter mucks with the
  // outgoing client ids to track sourcing and then needs to revert them on the way back
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int): Bits
  def managerSourcedClientXactId(in: ManagerSourcedWithId): Bits
  def arbIdx(in: ManagerSourcedWithId): UInt

  // The following functions are all wiring helpers for each of the different types of TileLink channels

  def hookupClientSource[M <: ClientSourcedWithIdAndData](
      clts: Seq[DecoupledIO[LogicalNetworkIO[M]]],
      mngr: DecoupledIO[LogicalNetworkIO[M]]) {
    def hasData(m: LogicalNetworkIO[M]) = m.payload.hasMultibeatData()
    val arb = Module(new LockingRRArbiter(mngr.bits, arbN, tlDataBeats, Some(hasData _)))
    clts.zipWithIndex.zip(arb.io.in).map{ case ((req, id), arb) => {
      arb.valid := req.valid
      arb.bits := req.bits
      arb.bits.payload.client_xact_id := clientSourcedClientXactId(req.bits.payload, id)
      req.ready := arb.ready
    }}
    mngr <> arb.io.out
  }

  def hookupClientSourceHeaderless[M <: ClientSourcedWithIdAndData](
      clts: Seq[DecoupledIO[M]],
      mngr: DecoupledIO[M]) {
    def hasData(m: M) = m.hasMultibeatData()
    val arb = Module(new LockingRRArbiter(mngr.bits, arbN, tlDataBeats, Some(hasData _)))
    clts.zipWithIndex.zip(arb.io.in).map{ case ((req, id), arb) => {
      arb.valid := req.valid
      arb.bits := req.bits
      arb.bits.client_xact_id := clientSourcedClientXactId(req.bits, id)
      req.ready := arb.ready
    }}
    mngr <> arb.io.out
  }

  def hookupManagerSourceWithHeader[M <: ManagerToClientChannel](
      clts: Seq[DecoupledIO[LogicalNetworkIO[M]]],
      mngr: DecoupledIO[LogicalNetworkIO[M]]) {
    mngr.ready := Bool(false)
    for (i <- 0 until arbN) {
      clts(i).valid := Bool(false)
      when (mngr.bits.header.dst === UInt(i)) {
        clts(i).valid := mngr.valid
        mngr.ready := clts(i).ready
      }
      clts(i).bits := mngr.bits
    }
  }

  def hookupManagerSourceWithId[M <: ManagerSourcedWithId](
      clts: Seq[DecoupledIO[LogicalNetworkIO[M]]],
      mngr: DecoupledIO[LogicalNetworkIO[M]]) {
    mngr.ready := Bool(false)
    for (i <- 0 until arbN) {
      clts(i).valid := Bool(false)
      when (arbIdx(mngr.bits.payload) === UInt(i)) {
        clts(i).valid := mngr.valid
        mngr.ready := clts(i).ready
      }
      clts(i).bits := mngr.bits
      clts(i).bits.payload.client_xact_id := managerSourcedClientXactId(mngr.bits.payload)
    }
  }

  def hookupManagerSourceHeaderlessWithId[M <: ManagerSourcedWithId](
      clts: Seq[DecoupledIO[M]],
      mngr: DecoupledIO[M]) {
    mngr.ready := Bool(false)
    for (i <- 0 until arbN) {
      clts(i).valid := Bool(false)
      when (arbIdx(mngr.bits) === UInt(i)) {
        clts(i).valid := mngr.valid
        mngr.ready := clts(i).ready
      }
      clts(i).bits := mngr.bits
      clts(i).bits.client_xact_id := managerSourcedClientXactId(mngr.bits)
    }
  }

  def hookupManagerSourceBroadcast[M <: Data](clts: Seq[DecoupledIO[M]], mngr: DecoupledIO[M]) {
    clts.map{ _.valid := mngr.valid }
    clts.map{ _.bits := mngr.bits }
    mngr.ready := clts.map(_.ready).reduce(_&&_)
  }

  def hookupFinish[M <: LogicalNetworkIO[Finish]](clts: Seq[DecoupledIO[M]], mngr: DecoupledIO[M]) {
    val arb = Module(new RRArbiter(mngr.bits, arbN))
    arb.io.in <> clts
    mngr <> arb.io.out
  }
}

/** Abstract base class for any Arbiters that have UncachedTileLinkIOs */
abstract class UncachedTileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters) extends Module
    with TileLinkArbiterLike {
  val io = new Bundle {
    val in = Vec(new UncachedTileLinkIO, arbN).flip
    val out = new UncachedTileLinkIO
  }
  hookupClientSource(io.in.map(_.acquire), io.out.acquire)
  hookupFinish(io.in.map(_.finish), io.out.finish)
  hookupManagerSourceWithId(io.in.map(_.grant), io.out.grant)
}

/** Abstract base class for any Arbiters that have cached TileLinkIOs */
abstract class TileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters) extends Module
    with TileLinkArbiterLike {
  val io = new Bundle {
    val in = Vec(new TileLinkIO, arbN).flip
    val out = new TileLinkIO
  }
  hookupClientSource(io.in.map(_.acquire), io.out.acquire)
  hookupClientSource(io.in.map(_.release), io.out.release)
  hookupFinish(io.in.map(_.finish), io.out.finish)
  hookupManagerSourceBroadcast(io.in.map(_.probe), io.out.probe)
  hookupManagerSourceWithId(io.in.map(_.grant), io.out.grant)
}

/** Appends the port index of the arbiter to the client_xact_id */
trait AppendsArbiterId extends TileLinkArbiterLike {
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int) =
    Cat(in.client_xact_id, UInt(id, log2Up(arbN)))
  def managerSourcedClientXactId(in: ManagerSourcedWithId) =
    in.client_xact_id >> log2Up(arbN)
  def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id(log2Up(arbN)-1,0).toUInt
}
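
// Worked example of the id transformation above (illustration only, not part of the design):
// with arbN = 4 (so log2Up(arbN) = 2), a request from port 2 carrying client_xact_id 5 leaves
// the arbiter tagged Cat(5, 2) = 0b101_10 = 22. On the returning grant,
// managerSourcedClientXactId recovers 22 >> 2 = 5 and arbIdx recovers 22(1,0) = 2,
// which is the port the grant is routed back to.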

/** Uses the client_xact_id as is (assumes it has been set to port index) */
trait PassesId extends TileLinkArbiterLike {
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int) = in.client_xact_id
  def managerSourcedClientXactId(in: ManagerSourcedWithId) = in.client_xact_id
  def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id
}

/** Overwrites some default client_xact_id with the port idx */
trait UsesNewId extends TileLinkArbiterLike {
  def clientSourcedClientXactId(in: ClientSourcedWithId, id: Int) = UInt(id, log2Up(arbN))
  def managerSourcedClientXactId(in: ManagerSourcedWithId) = UInt(0)
  def arbIdx(in: ManagerSourcedWithId) = in.client_xact_id
}

// Now we can mix in the various id-generation traits to make concrete arbiter classes
class UncachedTileLinkIOArbiterThatAppendsArbiterId(val n: Int)(implicit p: Parameters) extends UncachedTileLinkIOArbiter(n)(p) with AppendsArbiterId
class UncachedTileLinkIOArbiterThatPassesId(val n: Int)(implicit p: Parameters) extends UncachedTileLinkIOArbiter(n)(p) with PassesId
class UncachedTileLinkIOArbiterThatUsesNewId(val n: Int)(implicit p: Parameters) extends UncachedTileLinkIOArbiter(n)(p) with UsesNewId
class TileLinkIOArbiterThatAppendsArbiterId(val n: Int)(implicit p: Parameters) extends TileLinkIOArbiter(n)(p) with AppendsArbiterId
class TileLinkIOArbiterThatPassesId(val n: Int)(implicit p: Parameters) extends TileLinkIOArbiter(n)(p) with PassesId
class TileLinkIOArbiterThatUsesNewId(val n: Int)(implicit p: Parameters) extends TileLinkIOArbiter(n)(p) with UsesNewId
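
// A minimal instantiation sketch (hypothetical parent module; `core` and `rocc` are example
// client names, not identifiers defined in this file):
//
//   val arb = Module(new TileLinkIOArbiterThatAppendsArbiterId(2))
//   arb.io.in(0) <> core.io.cached
//   arb.io.in(1) <> rocc.io.cached
//   outerHub.io.client <> arb.io.out
//
// AppendsArbiterId is the natural choice when the downstream manager must be able to route
// grants back to the correct input port using nothing but the transaction id.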

/** Concrete uncached client-side arbiter that appends the arbiter's port id to client_xact_id */
class ClientUncachedTileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters) extends Module with TileLinkArbiterLike with AppendsArbiterId {
  val io = new Bundle {
    val in = Vec(new ClientUncachedTileLinkIO, arbN).flip
    val out = new ClientUncachedTileLinkIO
  }
  hookupClientSourceHeaderless(io.in.map(_.acquire), io.out.acquire)
  hookupManagerSourceHeaderlessWithId(io.in.map(_.grant), io.out.grant)
}

/** Concrete client-side arbiter that appends the arbiter's port id to client_xact_id */
class ClientTileLinkIOArbiter(val arbN: Int)(implicit val p: Parameters) extends Module with TileLinkArbiterLike with AppendsArbiterId {
  val io = new Bundle {
    val in = Vec(new ClientTileLinkIO, arbN).flip
    val out = new ClientTileLinkIO
  }
  hookupClientSourceHeaderless(io.in.map(_.acquire), io.out.acquire)
  hookupClientSourceHeaderless(io.in.map(_.release), io.out.release)
  hookupManagerSourceBroadcast(io.in.map(_.probe), io.out.probe)
  hookupManagerSourceHeaderlessWithId(io.in.map(_.grant), io.out.grant)
}

/** Utility trait containing wiring functions to keep track of how many data beats have
  * been sent or received over a particular [[uncore.TileLinkChannel]] or pair of channels.
  *
  * Won't count message types that don't have data.
  * Used in [[uncore.XactTracker]] and [[uncore.FinishUnit]].
  */
trait HasDataBeatCounters {
  type HasBeat = TileLinkChannel with HasTileLinkBeatId

  /** Returns the current count on this channel and when a message is done
    * @param inc increment the counter (usually .valid or .fire())
    * @param data the actual channel data
    * @param beat count to return for single-beat messages
    */
  def connectDataBeatCounter[S <: TileLinkChannel](inc: Bool, data: S, beat: UInt) = {
    val multi = data.hasMultibeatData()
    val (multi_cnt, multi_done) = Counter(inc && multi, data.tlDataBeats)
    val cnt = Mux(multi, multi_cnt, beat)
    val done = Mux(multi, multi_done, inc)
    (cnt, done)
  }

  /** Counter for beats on outgoing [[chisel.DecoupledIO]] */
  def connectOutgoingDataBeatCounter[T <: TileLinkChannel](in: DecoupledIO[T], beat: UInt = UInt(0)): (UInt, Bool) =
    connectDataBeatCounter(in.fire(), in.bits, beat)

  /** Returns done but not cnt. Use the addr_beat subbundle instead of cnt for beats on
    * incoming channels in case of network reordering.
    */
  def connectIncomingDataBeatCounter[T <: TileLinkChannel](in: DecoupledIO[T]): Bool =
    connectDataBeatCounter(in.fire(), in.bits, UInt(0))._2

  /** Counter for beats on incoming DecoupledIO[LogicalNetworkIO[]]s returns done */
  def connectIncomingDataBeatCounterWithHeader[T <: TileLinkChannel](in: DecoupledIO[LogicalNetworkIO[T]]): Bool =
    connectDataBeatCounter(in.fire(), in.bits.payload, UInt(0))._2

  /** If the network might interleave beats from different messages, we need a Vec of counters,
    * one for every outstanding message id that might be interleaved.
    *
    * @param getId mapping from Message to counter id
    */
  def connectIncomingDataBeatCountersWithHeader[T <: TileLinkChannel with HasClientTransactionId](
      in: DecoupledIO[LogicalNetworkIO[T]],
      entries: Int,
      getId: LogicalNetworkIO[T] => UInt): Vec[Bool] = {
    Vec((0 until entries).map { i =>
      connectDataBeatCounter(in.fire() && getId(in.bits) === UInt(i), in.bits.payload, UInt(0))._2
    })
  }

  /** Provides counters on two channels, as well as a meta-counter that tracks how many
    * messages have been sent over the up channel but not yet responded to over the down channel
    *
    * @param max max number of outstanding ups with no down
    * @param up outgoing channel
    * @param down incoming channel
    * @param beat overrides cnts on single-beat messages
    * @param track whether up's message should be tracked
    * @return a tuple containing whether there are outstanding messages, up's count,
    *         up's done, down's count, down's done
    */
  def connectTwoWayBeatCounter[T <: TileLinkChannel, S <: TileLinkChannel](
      max: Int,
      up: DecoupledIO[T],
      down: DecoupledIO[S],
      beat: UInt = UInt(0),
      track: T => Bool = (t: T) => Bool(true)): (Bool, UInt, Bool, UInt, Bool) = {
    val cnt = Reg(init = UInt(0, width = log2Up(max+1)))
    val (up_idx, up_done) = connectDataBeatCounter(up.fire(), up.bits, beat)
    val (down_idx, down_done) = connectDataBeatCounter(down.fire(), down.bits, beat)
    val do_inc = up_done && track(up.bits)
    val do_dec = down_done
    cnt := Mux(do_dec,
      Mux(do_inc, cnt, cnt - UInt(1)),
      Mux(do_inc, cnt + UInt(1), cnt))
    (cnt > UInt(0), up_idx, up_done, down_idx, down_done)
  }
}
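
// A minimal usage sketch for the two-way counter (hypothetical transaction tracker mixing in
// HasDataBeatCounters; `io.outer.acquire` and `io.outer.grant` are example port names only):
//
//   val (pending, _, acq_done, _, gnt_done) =
//     connectTwoWayBeatCounter(max = tlMaxClientXacts, up = io.outer.acquire, down = io.outer.grant)
//
// `pending` stays high while at least one Acquire has been sent whose Grant has not yet
// completed; `acq_done` and `gnt_done` pulse on the last beat of their respective messages.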

class ClientTileLinkIOUnwrapperInfo extends Bundle {
  val voluntary = Bool()
  val builtin = Bool()
}

class ClientTileLinkIOUnwrapper(implicit p: Parameters) extends TLModule()(p) {
  val io = new Bundle {
    val in = new ClientTileLinkIO().flip
    val out = new ClientUncachedTileLinkIO
  }

  def needsRoqEnq(channel: HasTileLinkData): Bool =
    !channel.hasMultibeatData() || channel.addr_beat === UInt(0)

  def needsRoqDeq(channel: HasTileLinkData): Bool =
    !channel.hasMultibeatData() || channel.addr_beat === UInt(tlDataBeats - 1)

  val acqArb = Module(new LockingRRArbiter(new Acquire, 2, tlDataBeats,
    Some((acq: Acquire) => acq.hasMultibeatData())))

  val acqRoq = Module(new ReorderQueue(
    Bool(), tlClientXactIdBits, tlMaxClientsPerPort))

  val relRoq = Module(new ReorderQueue(
    Bool(), tlClientXactIdBits, tlMaxClientsPerPort))

  val iacq = io.in.acquire.bits
  val irel = io.in.release.bits
  val ognt = io.out.grant.bits

  val acq_roq_enq = needsRoqEnq(iacq)
  val rel_roq_enq = needsRoqEnq(irel)

  val acq_roq_ready = !acq_roq_enq || acqRoq.io.enq.ready
  val rel_roq_ready = !rel_roq_enq || relRoq.io.enq.ready

  val acq_helper = DecoupledHelper(
    io.in.acquire.valid,
    acq_roq_ready,
    acqArb.io.in(0).ready)

  val rel_helper = DecoupledHelper(
    io.in.release.valid,
    rel_roq_ready,
    acqArb.io.in(1).ready)

  acqRoq.io.enq.valid := acq_helper.fire(acq_roq_ready, acq_roq_enq)
  acqRoq.io.enq.bits.data := iacq.isBuiltInType()
  acqRoq.io.enq.bits.tag := iacq.client_xact_id

  acqArb.io.in(0).valid := acq_helper.fire(acqArb.io.in(0).ready)
  acqArb.io.in(0).bits := Acquire(
    is_builtin_type = Bool(true),
    a_type = Mux(iacq.isBuiltInType(),
      iacq.a_type, Acquire.getBlockType),
    client_xact_id = iacq.client_xact_id,
    addr_block = iacq.addr_block,
    addr_beat = iacq.addr_beat,
    data = iacq.data,
    union = Mux(iacq.isBuiltInType(),
      iacq.union, Cat(MT_Q, M_XRD, Bool(true))))
  io.in.acquire.ready := acq_helper.fire(io.in.acquire.valid)

  relRoq.io.enq.valid := rel_helper.fire(rel_roq_ready, rel_roq_enq)
  relRoq.io.enq.bits.data := irel.isVoluntary()
  relRoq.io.enq.bits.tag := irel.client_xact_id

  acqArb.io.in(1).valid := rel_helper.fire(acqArb.io.in(1).ready)
  acqArb.io.in(1).bits := PutBlock(
    client_xact_id = irel.client_xact_id,
    addr_block = irel.addr_block,
    addr_beat = irel.addr_beat,
    data = irel.data,
    wmask = Acquire.fullWriteMask)
  io.in.release.ready := rel_helper.fire(io.in.release.valid)

  io.out.acquire <> acqArb.io.out

  acqRoq.io.deq.valid := io.out.grant.fire() && needsRoqDeq(ognt)
  acqRoq.io.deq.tag := ognt.client_xact_id

  relRoq.io.deq.valid := io.out.grant.fire() && needsRoqDeq(ognt)
  relRoq.io.deq.tag := ognt.client_xact_id

  val gnt_builtin = acqRoq.io.deq.data
  val gnt_voluntary = relRoq.io.deq.data

  val acq_grant = Grant(
    is_builtin_type = gnt_builtin,
    g_type = Mux(gnt_builtin, ognt.g_type, tlCoh.getExclusiveGrantType),
    client_xact_id = ognt.client_xact_id,
    manager_xact_id = ognt.manager_xact_id,
    addr_beat = ognt.addr_beat,
    data = ognt.data)

  val rel_grant = Grant(
    is_builtin_type = Bool(true),
    g_type = Mux(gnt_voluntary, Grant.voluntaryAckType, ognt.g_type),
    client_xact_id = ognt.client_xact_id,
    manager_xact_id = ognt.manager_xact_id,
    addr_beat = ognt.addr_beat,
    data = ognt.data)

  io.in.grant.valid := io.out.grant.valid
  io.in.grant.bits := Mux(acqRoq.io.deq.matches, acq_grant, rel_grant)
  io.out.grant.ready := io.in.grant.ready

  io.in.probe.valid := Bool(false)
}
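
// A minimal usage sketch (hypothetical names; `cache` is an example cached client and
// `membus` an example uncached crossbar port):
//
//   val unwrap = Module(new ClientTileLinkIOUnwrapper)
//   unwrap.io.in <> cache.io.tl
//   membus <> unwrap.io.out
//
// Releases are turned into PutBlock acquires and the returning grants are re-labelled on the
// way back, so a coherent client can sit behind an interconnect that only understands the
// built-in uncached message types.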

class NastiIOTileLinkIOConverterInfo(implicit p: Parameters) extends TLBundle()(p) {
  val byteOff = UInt(width = tlByteAddrBits)
  val subblock = Bool()
}

class NastiIOTileLinkIOConverter(implicit p: Parameters) extends TLModule()(p)
    with HasNastiParameters {
  val io = new Bundle {
    val tl = new ClientUncachedTileLinkIO().flip
    val nasti = new NastiIO
  }

  private def opSizeToXSize(ops: UInt) = MuxLookup(ops, UInt("b111"), Seq(
    MT_B  -> UInt(0),
    MT_BU -> UInt(0),
    MT_H  -> UInt(1),
    MT_HU -> UInt(1),
    MT_W  -> UInt(2),
    MT_D  -> UInt(3),
    MT_Q  -> UInt(log2Up(tlDataBytes))))
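
  // For illustration: the returned value is used as the AXI-style size field, i.e. log2 of the
  // number of bytes per beat, so a word-sized op (MT_W) maps to size 2 (4 bytes per beat) and a
  // full-beat op (MT_Q) maps to log2Up(tlDataBytes), i.e. one whole TileLink beat per AXI beat.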

  val dataBits = tlDataBits*tlDataBeats
  val dstIdBits = p(LNHeaderBits)
  require(tlDataBits == nastiXDataBits, "Data sizes between LLC and MC don't agree") // TODO: remove this restriction
  require(tlDataBeats < (1 << nastiXLenBits), "Can't have that many beats")
  require(dstIdBits + tlClientXactIdBits < nastiXIdBits, "NastiIO converter is going to truncate tags: " + dstIdBits + " + " + tlClientXactIdBits + " >= " + nastiXIdBits)

  val dst_off = dstIdBits + tlClientXactIdBits
  val has_data = io.tl.acquire.bits.hasData()

  val is_subblock = io.tl.acquire.bits.isSubBlockType()
  val is_multibeat = io.tl.acquire.bits.hasMultibeatData()
  val (tl_cnt_out, tl_wrap_out) = Counter(
    io.tl.acquire.fire() && is_multibeat, tlDataBeats)

  val get_valid = io.tl.acquire.valid && !has_data
  val put_valid = io.tl.acquire.valid && has_data

  // Reorder queue saves extra information needed to send correct
  // grant back to TL client
  val roq = Module(new ReorderQueue(
    new NastiIOTileLinkIOConverterInfo,
    nastiRIdBits, tlMaxClientsPerPort))

  // For Get/GetBlock, make sure Reorder queue can accept new entry
  val get_helper = DecoupledHelper(
    get_valid,
    roq.io.enq.ready,
    io.nasti.ar.ready)

  val w_inflight = Reg(init = Bool(false))

  // For Put/PutBlock, make sure aw and w channel are both ready before
  // we send the first beat
  val aw_ready = w_inflight || io.nasti.aw.ready
  val put_helper = DecoupledHelper(
    put_valid,
    aw_ready,
    io.nasti.w.ready)

  val (nasti_cnt_out, nasti_wrap_out) = Counter(
    io.nasti.r.fire() && !roq.io.deq.data.subblock, tlDataBeats)

  roq.io.enq.valid := get_helper.fire(roq.io.enq.ready)
  roq.io.enq.bits.tag := io.nasti.ar.bits.id
  roq.io.enq.bits.data.byteOff := io.tl.acquire.bits.addr_byte()
  roq.io.enq.bits.data.subblock := is_subblock
  roq.io.deq.valid := io.nasti.r.fire() && (nasti_wrap_out || roq.io.deq.data.subblock)
  roq.io.deq.tag := io.nasti.r.bits.id

  // Decompose outgoing TL Acquires into Nasti address and data channels
  io.nasti.ar.valid := get_helper.fire(io.nasti.ar.ready)
  io.nasti.ar.bits := NastiReadAddressChannel(
    id = io.tl.acquire.bits.client_xact_id,
    addr = io.tl.acquire.bits.full_addr(),
    size = Mux(is_subblock,
      opSizeToXSize(io.tl.acquire.bits.op_size()),
      UInt(log2Ceil(tlDataBytes))),
    len = Mux(is_subblock, UInt(0), UInt(tlDataBeats - 1)))

  io.nasti.aw.valid := put_helper.fire(aw_ready, !w_inflight)
  io.nasti.aw.bits := NastiWriteAddressChannel(
    id = io.tl.acquire.bits.client_xact_id,
    addr = io.tl.acquire.bits.full_addr(),
    size = UInt(log2Ceil(tlDataBytes)),
    len = Mux(is_multibeat, UInt(tlDataBeats - 1), UInt(0)))

  io.nasti.w.valid := put_helper.fire(io.nasti.w.ready)
  io.nasti.w.bits := NastiWriteDataChannel(
    data = io.tl.acquire.bits.data,
    strb = io.tl.acquire.bits.wmask(),
    last = tl_wrap_out || (io.tl.acquire.fire() && is_subblock))

  io.tl.acquire.ready := Mux(has_data,
    put_helper.fire(put_valid),
    get_helper.fire(get_valid))

  when (!w_inflight && io.tl.acquire.fire() && is_multibeat) {
    w_inflight := Bool(true)
  }

  when (w_inflight) {
    when (tl_wrap_out) { w_inflight := Bool(false) }
  }

  // Aggregate incoming NASTI responses into TL Grants
  val (tl_cnt_in, tl_wrap_in) = Counter(
    io.tl.grant.fire() && io.tl.grant.bits.hasMultibeatData(), tlDataBeats)
  val gnt_arb = Module(new Arbiter(new GrantToDst, 2))
  io.tl.grant <> gnt_arb.io.out

  val r_aligned_data = Mux(roq.io.deq.data.subblock,
    io.nasti.r.bits.data << Cat(roq.io.deq.data.byteOff, UInt(0, 3)),
    io.nasti.r.bits.data)

  gnt_arb.io.in(0).valid := io.nasti.r.valid
  io.nasti.r.ready := gnt_arb.io.in(0).ready
  gnt_arb.io.in(0).bits := Grant(
    is_builtin_type = Bool(true),
    g_type = Mux(roq.io.deq.data.subblock,
      Grant.getDataBeatType, Grant.getDataBlockType),
    client_xact_id = io.nasti.r.bits.id,
    manager_xact_id = UInt(0),
    addr_beat = tl_cnt_in,
    data = r_aligned_data)

  gnt_arb.io.in(1).valid := io.nasti.b.valid
  io.nasti.b.ready := gnt_arb.io.in(1).ready
  gnt_arb.io.in(1).bits := Grant(
    is_builtin_type = Bool(true),
    g_type = Grant.putAckType,
    client_xact_id = io.nasti.b.bits.id,
    manager_xact_id = UInt(0),
    addr_beat = UInt(0),
    data = Bits(0))
}
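
// A minimal usage sketch (hypothetical names; `memArb` is an example ClientUncachedTileLinkIO
// source and `io.mem_axi` an example NastiIO port on the enclosing module; the "Outermost"
// TLId binding is only an illustration, not a key defined in this file):
//
//   val conv = Module(new NastiIOTileLinkIOConverter()(p.alterPartial({ case TLId => "Outermost" })))
//   conv.io.tl <> memArb.io.out
//   io.mem_axi <> conv.io.nasti
//
// The converter simply picks up whatever TileLink parameters are in scope via the implicit
// Parameters, so the alterPartial call is only needed when a different TLId should apply.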

class TileLinkIONarrower(innerTLId: String, outerTLId: String)(implicit p: Parameters) extends Module {
  val innerParams = p(TLKey(innerTLId))
  val outerParams = p(TLKey(outerTLId))
  val innerDataBeats = innerParams.dataBeats
  val innerDataBits = innerParams.dataBitsPerBeat
  val innerWriteMaskBits = innerParams.writeMaskBits
  val outerDataBeats = outerParams.dataBeats
  val outerDataBits = outerParams.dataBitsPerBeat
  val outerWriteMaskBits = outerParams.writeMaskBits

  require(outerDataBeats >= innerDataBeats)
  require(outerDataBeats % innerDataBeats == 0)
  require(outerDataBits <= innerDataBits)
  require(outerDataBits * outerDataBeats == innerDataBits * innerDataBeats)

  val factor = outerDataBeats / innerDataBeats
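
  // Worked example (numbers are illustrative only): an inner link with 4 beats of 128 bits and
  // an outer link with 8 beats of 64 bits carries the same 512-bit block, so factor = 8 / 4 = 2
  // and every inner beat is split into two outer beats by the logic below.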

  val io = new Bundle {
    val in = new ClientUncachedTileLinkIO()(p.alterPartial({case TLId => innerTLId})).flip
    val out = new ClientUncachedTileLinkIO()(p.alterPartial({case TLId => outerTLId}))
  }

  if (factor > 1) {
    val iacq = io.in.acquire.bits
    val ognt = io.out.grant.bits

    val stretch = iacq.a_type === Acquire.putBlockType
    val shrink = iacq.a_type === Acquire.getBlockType

    val acq_data_buffer = Reg(UInt(width = innerDataBits))
    val acq_wmask_buffer = Reg(UInt(width = innerWriteMaskBits))
    val acq_client_id = Reg(iacq.client_xact_id)
    val acq_addr_block = Reg(iacq.addr_block)
    val acq_addr_beat = Reg(iacq.addr_beat)
    val oacq_ctr = Counter(factor)

    val outerConfig = p.alterPartial({ case TLId => outerTLId })
    val innerConfig = p.alterPartial({ case TLId => innerTLId })

    val get_block_acquire = GetBlock(
      client_xact_id = iacq.client_xact_id,
      addr_block = iacq.addr_block,
      alloc = iacq.allocate())(outerConfig)

    val put_block_acquire = PutBlock(
      client_xact_id = acq_client_id,
      addr_block = acq_addr_block,
      addr_beat = if (factor > 1)
        Cat(acq_addr_beat, oacq_ctr.value)
      else acq_addr_beat,
      data = acq_data_buffer(outerDataBits - 1, 0),
      wmask = acq_wmask_buffer(outerWriteMaskBits - 1, 0))(outerConfig)

    val sending_put = Reg(init = Bool(false))

    io.out.acquire.bits := MuxBundle(iacq, Seq(
      (sending_put, put_block_acquire),
      (shrink, get_block_acquire)))
    io.out.acquire.valid := sending_put || (io.in.acquire.valid && !stretch)
    io.in.acquire.ready := !sending_put && (stretch || io.out.acquire.ready)

    when (io.in.acquire.fire() && stretch) {
      acq_data_buffer := iacq.data
      acq_wmask_buffer := iacq.wmask()
      acq_client_id := iacq.client_xact_id
      acq_addr_block := iacq.addr_block
      acq_addr_beat := iacq.addr_beat
      sending_put := Bool(true)
    }

    when (sending_put && io.out.acquire.ready) {
      acq_data_buffer := acq_data_buffer >> outerDataBits
      acq_wmask_buffer := acq_wmask_buffer >> outerWriteMaskBits
      when (oacq_ctr.inc()) { sending_put := Bool(false) }
    }

    val ognt_block = ognt.hasMultibeatData()
    val gnt_data_buffer = Reg(Vec(factor, UInt(width = outerDataBits)))
    val gnt_client_id = Reg(ognt.client_xact_id)
    val gnt_manager_id = Reg(ognt.manager_xact_id)

    val ignt_ctr = Counter(innerDataBeats)
    val ognt_ctr = Counter(factor)
    val sending_get = Reg(init = Bool(false))

    val get_block_grant = Grant(
      is_builtin_type = Bool(true),
      g_type = Grant.getDataBlockType,
      client_xact_id = gnt_client_id,
      manager_xact_id = gnt_manager_id,
      addr_beat = ignt_ctr.value,
      data = gnt_data_buffer.toBits)(innerConfig)

    io.in.grant.valid := sending_get || (io.out.grant.valid && !ognt_block)
    io.out.grant.ready := !sending_get && (ognt_block || io.in.grant.ready)
    io.in.grant.bits := Mux(sending_get, get_block_grant, ognt)

    when (io.out.grant.valid && ognt_block && !sending_get) {
      gnt_data_buffer(ognt_ctr.value) := ognt.data
      when (ognt_ctr.inc()) {
        gnt_client_id := ognt.client_xact_id
        gnt_manager_id := ognt.manager_xact_id
        sending_get := Bool(true)
      }
    }

    when (io.in.grant.ready && sending_get) {
      ignt_ctr.inc()
      sending_get := Bool(false)
    }
  } else { io.out <> io.in }
}
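
// A minimal usage sketch (hypothetical TLKey ids; "L2toMMIO" and "MMIONarrow" are example
// names, not keys defined in this file):
//
//   val narrower = Module(new TileLinkIONarrower("L2toMMIO", "MMIONarrow"))
//   narrower.io.in <> wideClient.io.tl
//   narrowManager.io.tl <> narrower.io.out
//
// Both ids must be bound to TLKey entries whose parameters satisfy the require() checks above
// (same total block size, outer beat count a multiple of the inner beat count).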