Partial conversion to params. Compiles but does not elaborate. Rocket and uncore conversion complete. FPGA and VLSI configs are identical. HwachaConfig and MemoryControllerConfig not yet removed.
parent 08d81d0892
commit 63bd0b9d2a
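Note on the params idiom this commit moves to (a reading aid, not part of the diff): configuration keys are declared as typed Field objects, the config maps each key to a value (using site/here to look up other keys, and Alter({...}) to group overrides under one key), and modules read the resolved values through params(...). The sketch below is assembled only from lines that appear in the hunks; the enclosing ChiselConfig boilerplate sits above the first hunk and is elided, so treat it as a fragment rather than a compilable unit.

// Sketch of the Field/ChiselConfig/params pattern (fragment only).
// 1. Keys are declared as typed Fields:
case object NTiles extends Field[Int]
case object BankIdLSB extends Field[Int]
// 2. Inside a config's match (enclosing declaration elided, as in the hunk):
//      case BankIdLSB => 5
//      case NTiles    => here[Int]("NTILES")
// 3. Hardware reads the values through the implicit parameter space:
//      val nTiles = params(NTiles)
//      addr(params(BankIdLSB) + log2Up(params(NBanks)) - 1, params(BankIdLSB))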
rocket
@@ -1 +1 @@
Subproject commit 1b01778c1743d779966829e0edfb904528ac472f
Subproject commit 6beea1debbdd8115f45d02318210df624e67e9f8
@@ -21,58 +21,196 @@ class DefaultConfig extends ChiselConfig {
    case "NL2_REL_XACTS" => 1
    case "NL2_ACQ_XACTS" => 7
    case "NMSHRS" => 2
    //FPUConstants
    case HasFPU => true
    //Coherence
    case Coherence => {
      val dir = new FullRepresentation(site[Int]("NTILES")+1)
      if(site[Boolean]("ENABLE_SHARING")) {
        if(site[Boolean]("ENABLE_CLEAN_EXCLUSIVE")) new MESICoherence(dir)
        else new MSICoherence(dir)
      } else {
        if(site[Boolean]("ENABLE_CLEAN_EXCLUSIVE")) new MEICoherence(dir)
        else new MICoherence(dir)
      }
    }
    //Rocket Constants
    // Number of ports into D$: 1 from core, 1 from PTW, maybe 1 from RoCC
    case NDCachePorts => 2 + (if(site(BuildRoCC).isEmpty) 0 else 1)
    // Number of ports to outer memory system from tile: 1 from I$, 1 from D$, maybe 1 from Rocc
    case NTilePorts => 2 + (if(site(BuildRoCC).isEmpty) 0 else 1)
    case BuildRoCC => None
    case RetireWidth => 1
    case UseVM => true
    case FastLoadWord => true
    case FastLoadByte => false
    case FastMulDiv => true
    case DcacheReqTagBits => 7 + log2Up(here(NDCachePorts))
    case XprLen => 64
    case NXpr => 32
    case NXprBits => log2Up(here(NXpr))
    case BuildFPU => Some(() => new FPU)
    case FPUParams => Alter({
      case SFMALatency => 2
      case DFMALatency => 3
    })
    case RocketDCacheParams => Alter({
      //L1 Specific
      case StoreDataQueueDepth => 17
      case ReplayQueueDepth => 16
      case NMSHRs => site[Int]("NMSHRS")
      case NTLBEntries => 8
      case CoreReqTagBits => site(DcacheReqTagBits)
      case CoreDataBits => site(XprLen)
      case RowWords => 2
      case ECCCode => new IdentityCode
      //From uncore/cache.scala
      case NSets => 128
      case NWays => 4
      case IsDM => here(NWays) == 1
      case OffBits => log2Up(site(TLDataBits))
      case IdxBits => log2Up(here(NSets))
      case UntagBits => here(OffBits) + here(IdxBits)
      case TagBits => here(PAddrBits) - here(UntagBits)
      case WayBits => log2Up(here(NWays))
      case Replacer => () => new RandomReplacement(site(NWays))
      case RowBits => here(RowWords)*here(CoreDataBits)
      case WordBits => here(CoreDataBits)
      case RefillCycles => site(TLDataBits)/here(RowBits)
      //Derived
      case MaxAddrBits => math.max(site(PPNBits),site(VPNBits)+1) + site(PgIdxBits)
      case CoreDataBytes => here(CoreDataBits)/8
      case WordOffBits => log2Up(here(CoreDataBytes))
      case RowBytes => here(RowWords)*here(CoreDataBytes)
      case RowOffBits => log2Up(here(RowBytes))
      case DoNarrowRead => here(CoreDataBits)*here(NWays) % here(RowBits) == 0
      case EncDataBits => here(ECCCode).width(here(CoreDataBits))
      case EncRowBits => here(RowWords)*here(EncDataBits)
      case LRSCCycles => 32
    })
    case RocketFrontendParams => Alter({
      case InstBytes => 4
      case RowBytes => 16
      case NTLBEntries => 8
      case ECCCode => new IdentityCode
      //From uncore/cache.scala
      case NSets => 128
      case NWays => 2
      case IsDM => here(NWays) == 1
      case OffBits => log2Up(site(TLDataBits)/8)
      case IdxBits => log2Up(here(NSets))
      case UntagBits => here(OffBits) + here(IdxBits)
      case TagBits => here(PAddrBits) - here(UntagBits)
      case WayBits => log2Up(here(NWays))
      case Replacer => () => new RandomReplacement(site(NWays))
      case RowBits => here(RowBytes)*8
      case RefillCycles => site(TLDataBits)/here(RowBits)
      case RowOffBits => log2Up(here(RowBytes))
    })
    case CoreBTBParams => Alter({
      case Entries => 62
      case NRAS => 2
      case MatchBits => site(PgIdxBits)
      case Pages => ((1 max(log2Up(here(Entries))))+1)/2*2
      // control logic assumes 2 divides pages
      case OpaqueBits => log2Up(here(Entries))
      case NBHT => 1 << log2Up(here(Entries)*2)
    })
    //MemoryConstants
    case "CACHE_DATA_SIZE_IN_BYTES" => 1 << 6 //TODO: How configurable is this really?
    case "CACHE_DATA_SIZE_IN_BYTES" => 1 << 6
    case "OFFSET_BITS" => log2Up(here[Int]("CACHE_DATA_SIZE_IN_BYTES"))
    case "PADDR_BITS" => 32
    case "VADDR_BITS" => 43
    case "PGIDX_BITS" => 13
    case "ASID_BITS" => 7
    case "PERM_BITS" => 6
    case "MEM_TAG_BITS" => 5
    case "MEM_DATA_BITS" => 128
    case "MEM_ADDR_BITS" => here[Int]("PADDR_BITS") - here[Int]("OFFSET_BITS")
    case "MEM_DATA_BEATS" => 4
    //TileLinkSizeConstants
    case "WRITE_MASK_BITS" => 6
    case "SUBWORD_ADDR_BITS" => 3
    case "ATOMIC_OP_BITS" => 4
    case PAddrBits => 32
    case VAddrBits => 43
    case PgIdxBits => 13
    case ASIdBits => 7
    case PermBits => 6
    case PPNBits => here(PAddrBits) - here(PgIdxBits)
    case VPNBits => here(VAddrBits) - here(PgIdxBits)
    case MIFTagBits => 5
    case MIFDataBits => 128
    case MIFAddrBits => here(PAddrBits) - here[Int]("OFFSET_BITS")
    case MIFDataBeats => 4
    //Uncore Constants
    case TileLinkL1Params => Alter({
      case LNMasters => site[Int]("NBANKS")
      case LNClients => site[Int]("NTILES")+1
      case LNEndpoints => here(LNMasters) + here(LNClients)
      case TLCoherence => site(Coherence)
      case TLAddrBits => site[Int]("PADDR_BITS") - site[Int]("OFFSET_BITS")
      case TLMasterXactIdBits => log2Up(site[Int]("NL2_REL_XACTS")+site[Int]("NL2_ACQ_XACTS"))
      case TLClientXactIdBits => 2*log2Up(site[Int]("NMSHRS")*site[Int]("NTILES")+1)
      case TLDataBits => site[Int]("CACHE_DATA_SIZE_IN_BYTES")*8
      case TLWriteMaskBits => 6
      case TLWordAddrBits => 3
      case TLAtomicOpBits => 4
    })
    case L2HellaCacheParams => Alter({
      case NReleaseTransactors => site[Int]("NL2_REL_XACTS")
      case NAcquireTransactors => site[Int]("NL2_ACQ_XACTS")
      case NTransactors => here(NReleaseTransactors) + here(NAcquireTransactors)
      case NClients => site[Int]("NTILES") + 1
      case NSets => 512
      case NWays => 8
      case IsDM => here(NWays) == 1
      case OffBits => 0
      case IdxBits => log2Up(here(NSets))
      case UntagBits => here(OffBits) + here(IdxBits)
      case TagBits => here(PAddrBits) - here(UntagBits)
      case WayBits => log2Up(here(NWays))
      case Replacer => () => new RandomReplacement(site(NWays))
      case RowBits => site(TLDataBits)
      case WordBits => 64
      case RefillCycles => site(TLDataBits)/here(RowBits)
    })
    case NTiles => here[Int]("NTILES")
    case NBanks => here[Int]("NBANKS")
    case BankIdLSB => 5
    case BuildDRAMSideLLC => () => {
      val refill = site(TLDataBits)/site(MIFDataBits)
      if(site[Boolean]("USE_DRAMSIDE_LLC")) {
        val tag = Mem(Bits(width = 152), 512, seqRead = true)
        val data = Mem(Bits(width = 64), 4096, seqRead = true)
        Module(new DRAMSideLLC(sets=512, ways=8, outstanding=16,
          refill_cycles=refill, tagLeaf=tag, dataLeaf=data))
      } else { Module(new DRAMSideLLCNull(16, refill)) }
    }
    case BuildCoherentMaster => (id: Int) => {
      if(!site[Boolean]("USE_DRAMSIDE_LLC")) {
        Module(new L2CoherenceAgent(id), here(L2HellaCacheParams))
      } else {
        Module(new L2HellaCache(id), here(L2HellaCacheParams))
      }
    }
    //HTIF Constants
    case HTIFWidth => 16
    case HTIFNSCR => 64
    case HTIFOffsetBits => here[Int]("OFFSET_BITS")
    case HTIFNCores => here[Int]("NTILES")
    }
  }
}

case object NTiles extends Field[Int]
case object NBanks extends Field[Int]
case object BankIdLSB extends Field[Int]
case object TileLinkL1Params extends Field[PF]
case object L2HellaCacheParams extends Field[PF]
case object BuildDRAMSideLLC extends Field[() => DRAMSideLLCLike]
case object BuildCoherentMaster extends Field[Int => CoherenceAgent]
case object Coherence extends Field[CoherencePolicyWithUncached]

class OuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module
class OuterMemorySystem extends Module
{
  implicit val (tl, ln, l2, mif) = (conf.tl, conf.tl.ln, conf.l2, conf.mif)
  val io = new Bundle {
    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
    val tiles = Vec.fill(params(NTiles)){new TileLinkIO}.flip
    val htif = (new TileLinkIO).flip
    val incoherent = Vec.fill(ln.nClients){Bool()}.asInput
    val incoherent = Vec.fill(params(LNClients)){Bool()}.asInput
    val mem = new MemIO
    val mem_backup = new MemSerializedIO(htif_width)
    val mem_backup = new MemSerializedIO(params(HTIFWidth))
    val mem_backup_en = Bool(INPUT)
  }

  val refill_cycles = tl.dataBits/mif.dataBits
  val (llc, masterEndpoints) = if(conf.useDRAMSideLLC) {
    val llc_tag_leaf = Mem(Bits(width = 152), 512, seqRead = true)
    val llc_data_leaf = Mem(Bits(width = 64), 4096, seqRead = true)
    val llc = Module(new DRAMSideLLC(sets=512, ways=8, outstanding=16,
      refill_cycles=refill_cycles, tagLeaf=llc_tag_leaf, dataLeaf=llc_data_leaf))
    val mes = (0 until ln.nMasters).map(i => Module(new L2CoherenceAgent(i)))
    (llc, mes)
  } else {
    val llc = Module(new DRAMSideLLCNull(16, refill_cycles))
    val mes = (0 until ln.nMasters).map(i => Module(new L2HellaCache(i)))
    (llc, mes)
  }
  val refill_cycles = params(TLDataBits)/params(MIFDataBits)
  val llc = params(BuildDRAMSideLLC)()
  val masterEndpoints = (0 until params(NBanks)).map(params(BuildCoherentMaster))

  val net = Module(new ReferenceChipCrossbarNetwork)
  net.io.clients zip (io.tiles :+ io.htif) map { case (net, end) => net <> end }
@@ -80,8 +218,8 @@ class OuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration) ext
  masterEndpoints.map{ _.io.incoherent zip io.incoherent map { case (m, c) => m := c } }

  val conv = Module(new MemIOUncachedTileLinkIOConverter(2))
  if(ln.nMasters > 1) {
    val arb = Module(new UncachedTileLinkIOArbiterThatAppendsArbiterId(ln.nMasters))
  if(params(NBanks) > 1) {
    val arb = Module(new UncachedTileLinkIOArbiterThatAppendsArbiterId(params(NBanks)))
    arb.io.in zip masterEndpoints.map(_.io.outer) map { case (arb, cache) => arb <> cache }
    conv.io.uncached <> arb.io.out
  } else {
@@ -92,7 +230,7 @@ class OuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration) ext
  conv.io.mem.resp <> llc.io.cpu.resp

  // mux between main and backup memory ports
  val mem_serdes = Module(new MemSerdes(htif_width))
  val mem_serdes = Module(new MemSerdes(params(HTIFWidth)))
  val mem_cmdq = Module(new Queue(new MemReqCmd, 2))
  mem_cmdq.io.enq <> llc.io.mem.req_cmd
  mem_cmdq.io.deq.ready := Mux(io.mem_backup_en, mem_serdes.io.wide.req_cmd.ready, io.mem.req_cmd.ready)
@@ -116,22 +254,22 @@ class OuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration) ext
  io.mem_backup <> mem_serdes.io.narrow
}

case class UncoreConfiguration(l2: L2CacheConfig, tl: TileLinkConfiguration, mif: MemoryIFConfiguration, nTiles: Int, nBanks: Int, bankIdLsb: Int, nSCR: Int, offsetBits: Int, useDRAMSideLLC: Boolean)

class Uncore(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module
class Uncore extends Module
{
  implicit val (tl, mif) = (conf.tl, conf.mif)
  require(params(BankIdLSB) + log2Up(params(NBanks)) < params(MIFAddrBits))
  val htif_width = params(HTIFWidth)
  val io = new Bundle {
    val host = new HostIO(htif_width)
    val mem = new MemIO
    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
    val htif = Vec.fill(conf.nTiles){new HTIFIO(conf.nTiles)}.flip
    val incoherent = Vec.fill(conf.nTiles){Bool()}.asInput
    val tiles = Vec.fill(params(NTiles)){new TileLinkIO}.flip
    val htif = Vec.fill(params(NTiles)){new HTIFIO}.flip
    val incoherent = Vec.fill(params(NTiles)){Bool()}.asInput
    val mem_backup = new MemSerializedIO(htif_width)
    val mem_backup_en = Bool(INPUT)
  }
  val htif = Module(new HTIF(htif_width, CSRs.reset, conf.nSCR, conf.offsetBits))
  val outmemsys = Module(new OuterMemorySystem(htif_width))
  val htif = Module(new HTIF(CSRs.reset))
  val outmemsys = Module(new OuterMemorySystem)
  val incoherentWithHtif = (io.incoherent :+ Bool(true).asInput)
  outmemsys.io.incoherent := incoherentWithHtif
  htif.io.cpu <> io.htif
@@ -140,21 +278,20 @@ class Uncore(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module

  // Add networking headers and endpoint queues
  def convertAddrToBank(addr: Bits): UInt = {
    require(conf.bankIdLsb + log2Up(conf.nBanks) < conf.mif.addrBits, {println("Invalid bits for bank multiplexing.")})
    addr(conf.bankIdLsb + log2Up(conf.nBanks) - 1, conf.bankIdLsb)
    addr(params(BankIdLSB) + log2Up(params(NBanks)) - 1, params(BankIdLSB))
  }

  (outmemsys.io.tiles :+ outmemsys.io.htif).zip(io.tiles :+ htif.io.mem).zipWithIndex.map {
    case ((outer, client), i) =>
      outer.acquire <> Queue(TileLinkHeaderOverwriter(client.acquire, i, conf.nBanks, convertAddrToBank _))
      outer.release <> Queue(TileLinkHeaderOverwriter(client.release, i, conf.nBanks, convertAddrToBank _))
      outer.acquire <> Queue(TileLinkHeaderOverwriter(client.acquire, i, params(NBanks), convertAddrToBank _))
      outer.release <> Queue(TileLinkHeaderOverwriter(client.release, i, params(NBanks), convertAddrToBank _))
      outer.finish <> Queue(TileLinkHeaderOverwriter(client.finish, i, true))
      client.grant <> Queue(outer.grant, 1, pipe = true)
      client.probe <> Queue(outer.probe)
  }

  // pad out the HTIF using a divided clock
  val hio = Module((new SlowIO(512)) { Bits(width = htif_width+1) })
  val hio = Module((new SlowIO(512)) { Bits(width = params(HTIFWidth)+1) })
  hio.io.set_divisor.valid := htif.io.scr.wen && htif.io.scr.waddr === 63
  hio.io.set_divisor.bits := htif.io.scr.wdata
  htif.io.scr.rdata(63) := hio.io.divisor
@@ -183,12 +320,12 @@ class Uncore(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module
  io.host.debug_stats_pcr := htif.io.host.debug_stats_pcr
}

class TopIO(htifWidth: Int)(implicit conf: MemoryIFConfiguration) extends Bundle {
  val host = new HostIO(htifWidth)
class TopIO extends Bundle {
  val host = new HostIO(params(HTIFWidth))
  val mem = new MemIO
}

class VLSITopIO(htifWidth: Int)(implicit conf: MemoryIFConfiguration) extends TopIO(htifWidth)(conf) {
class VLSITopIO extends TopIO {
  val mem_backup_en = Bool(INPUT)
  val in_mem_ready = Bool(OUTPUT)
  val in_mem_valid = Bool(INPUT)
@@ -196,58 +333,27 @@ class VLSITopIO(htifWidth: Int)(implicit conf: MemoryIFConfiguration) extends To
  val out_mem_valid = Bool(OUTPUT)
}

class MemDessert extends Module {
  implicit val mif = MemoryIFConfiguration(params[Int]("MEM_ADDR_BITS"), params[Int]("MEM_DATA_BITS"), params[Int]("MEM_TAG_BITS"), params[Int]("MEM_DATA_BEATS"))
  val io = new MemDesserIO(params[Int]("HTIF_WIDTH"))
  val x = Module(new MemDesser(params[Int]("HTIF_WIDTH")))
  val io = new MemDesserIO(params(HTIFWidth))
  val x = Module(new MemDesser(params(HTIFWidth)))
  io.narrow <> x.io.narrow
  io.wide <> x.io.wide
}

class Top extends Module {
  val dir = new FullRepresentation(params[Int]("NTILES")+1)
  val co = if(params[Boolean]("ENABLE_SHARING")) {
    if(params[Boolean]("ENABLE_CLEAN_EXCLUSIVE")) new MESICoherence(dir)
    else new MSICoherence(dir)
  } else {
    if(params[Boolean]("ENABLE_CLEAN_EXCLUSIVE")) new MEICoherence(dir)
    else new MICoherence(dir)
  }

  implicit val ln = LogicalNetworkConfiguration(log2Up(params[Int]("NTILES"))+1, params[Int]("NBANKS"), params[Int]("NTILES")+1)
  implicit val as = AddressSpaceConfiguration(params[Int]("PADDR_BITS"), params[Int]("VADDR_BITS"), params[Int]("PGIDX_BITS"), params[Int]("ASID_BITS"), params[Int]("PERM_BITS"))
  implicit val tl = TileLinkConfiguration(co = co, ln = ln,
    addrBits = as.paddrBits-params[Int]("OFFSET_BITS"),
    clientXactIdBits = log2Up(params[Int]("NL2_REL_XACTS")+params[Int]("NL2_ACQ_XACTS")),
    masterXactIdBits = 2*log2Up(params[Int]("NMSHRS")*params[Int]("NTILES")+1),
    dataBits = params[Int]("CACHE_DATA_SIZE_IN_BYTES")*8,
    writeMaskBits = params[Int]("WRITE_MASK_BITS"),
    wordAddrBits = params[Int]("SUBWORD_ADDR_BITS"),
    atomicOpBits = params[Int]("ATOMIC_OP_BITS"))
  implicit val l2 = L2CacheConfig(512, 8, 1, 1, params[Int]("NL2_REL_XACTS"), params[Int]("NL2_ACQ_XACTS"), tl, as)
  implicit val mif = MemoryIFConfiguration(params[Int]("MEM_ADDR_BITS"), params[Int]("MEM_DATA_BITS"), params[Int]("MEM_TAG_BITS"), params[Int]("MEM_DATA_BEATS"))
  implicit val uc = UncoreConfiguration(l2, tl, mif, params[Int]("NTILES"), params[Int]("NBANKS"), bankIdLsb = 5, nSCR = 64, offsetBits = params[Int]("OFFSET_BITS"), useDRAMSideLLC = params[Boolean]("USE_DRAMSIDE_LLC"))
  //val vic = ICacheConfig(sets = 128, assoc = 1, tl = tl, as = as, btb = BTBConfig(as, 8))
  //val hc = hwacha.HwachaConfiguration(as, vic, dc, 8, 256, ndtlb = 8, nptlb = 2)

  val ic = ICacheConfig(sets = 128, assoc = 2, ntlb = 8, tl = tl, as = as, btb = BTBConfig(as, 64, 2))
  val dc = DCacheConfig(sets = 128, ways = 4,
    tl = tl, as = as,
    ntlb = 8, nmshr = params[Int]("NMSHRS"), nrpq = 16, nsdq = 17,
    reqtagbits = -1, databits = -1)
  val vic = ICacheConfig(sets = 128, assoc = 1, tl = tl, as = as, btb = BTBConfig(as, 8))
  val hc = hwacha.HwachaConfiguration(as, vic, dc, 8, 256, ndtlb = 8, nptlb = 2)
  val rc = RocketConfiguration(tl, as, ic, dc
    // rocc = (c: RocketConfiguration) => (new hwacha.Hwacha(hc, c))
  )
  val nTiles = params(NTiles)
  val io = new VLSITopIO

  val io = new VLSITopIO(params[Int]("HTIF_WIDTH"))
  params.alter(params(TileLinkL1Params))
  val resetSigs = Vec.fill(nTiles){Bool()}
  val tileList = (0 until nTiles).map(r => Module(new Tile(resetSignal = resetSigs(r))))
  val uncore = Module(new Uncore)

  val resetSigs = Vec.fill(uc.nTiles){Bool()}
  val tileList = (0 until uc.nTiles).map(r => Module(new Tile(resetSignal = resetSigs(r))(rc)))
  val uncore = Module(new Uncore(params[Int]("HTIF_WIDTH")))

  for (i <- 0 until uc.nTiles) {
  for (i <- 0 until nTiles) {
    val hl = uncore.io.htif(i)
    val tl = uncore.io.tiles(i)
    val il = uncore.io.incoherent(i)
@@ -6,17 +6,15 @@ import rocket._
import DRAMModel._
import DRAMModel.MemModelConstants._

class FPGAOuterMemorySystem(htif_width: Int)(implicit conf: FPGAUncoreConfiguration)
  extends Module {
  implicit val (tl, ln, l2, mif) = (conf.tl, conf.tl.ln, conf.l2, conf.mif)
class FPGAOuterMemorySystem extends Module {
  val io = new Bundle {
    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
    val tiles = Vec.fill(params(NTiles)){new TileLinkIO}.flip
    val htif = (new TileLinkIO).flip
    val incoherent = Vec.fill(ln.nClients){Bool()}.asInput
    val incoherent = Vec.fill(params(LNClients)){Bool()}.asInput
    val mem = new MemIO
  }

  val master = Module(new L2CoherenceAgent(0))
  val master = Module(new L2CoherenceAgent(0), params(L2HellaCacheParams))
  val net = Module(new ReferenceChipCrossbarNetwork)
  net.io.clients zip (io.tiles :+ io.htif) map { case (net, end) => net <> end }
  net.io.masters.head <> master.io.inner
@@ -25,24 +23,21 @@ class FPGAOuterMemorySystem(htif_width: Int)(implicit conf: FPGAUncoreConfigurat
  val conv = Module(new MemIOUncachedTileLinkIOConverter(2))
  conv.io.uncached <> master.io.outer
  io.mem.req_cmd <> Queue(conv.io.mem.req_cmd, 2)
  io.mem.req_data <> Queue(conv.io.mem.req_data, tl.dataBits/mif.dataBits)
  io.mem.req_data <> Queue(conv.io.mem.req_data, params(TLDataBits)/params(MIFDataBits))
  conv.io.mem.resp <> Queue(io.mem.resp)
}

case class FPGAUncoreConfiguration(l2: L2CoherenceAgentConfiguration, tl: TileLinkConfiguration, mif: MemoryIFConfiguration, nTiles: Int, nSCR: Int, offsetBits: Int)

class FPGAUncore(htif_width: Int)(implicit conf: FPGAUncoreConfiguration)
  extends Module {
  implicit val (tl, ln, mif) = (conf.tl, conf.tl.ln, conf.mif)
class FPGAUncore extends Module {
  val (htifw, nTiles) = (params(HTIFWidth),params(NTiles))
  val io = new Bundle {
    val host = new HostIO(htif_width)
    val host = new HostIO(htifw)
    val mem = new MemIO
    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
    val htif = Vec.fill(conf.nTiles){new HTIFIO(conf.nTiles)}.flip
    val incoherent = Vec.fill(conf.nTiles){Bool()}.asInput
    val tiles = Vec.fill(nTiles){new TileLinkIO}.flip
    val htif = Vec.fill(nTiles){new HTIFIO}.flip
    val incoherent = Vec.fill(nTiles){Bool()}.asInput
  }
  val htif = Module(new HTIF(htif_width, CSRs.reset, conf.nSCR, conf.offsetBits))
  val outmemsys = Module(new FPGAOuterMemorySystem(htif_width))
  val htif = Module(new HTIF(CSRs.reset))
  val outmemsys = Module(new FPGAOuterMemorySystem)
  val incoherentWithHtif = (io.incoherent :+ Bool(true).asInput)
  outmemsys.io.incoherent := incoherentWithHtif
  htif.io.cpu <> io.htif
@@ -62,13 +57,13 @@ class FPGAUncore(htif_width: Int)(implicit conf: FPGAUncoreConfiguration)
  htif.io.host.in <> io.host.in
}

class FPGATopIO(htifWidth: Int)(implicit conf: MemoryIFConfiguration) extends TopIO(htifWidth)(conf)
class FPGATopIO extends TopIO

class FPGATop extends Module {
  /*
  val ntiles = 1
  val nmshrs = 2
  val htif_width = 16

  val co = new MESICoherence(new FullRepresentation(ntiles+1))
  implicit val ln = LogicalNetworkConfiguration(log2Up(ntiles)+1, 1, ntiles+1)
  implicit val as = AddressSpaceConfiguration(params[Int]("PADDR_BITS"), params[Int]("VADDR_BITS"), params[Int]("PGIDX_BITS"), params[Int]("ASID_BITS"), params[Int]("PERM_BITS"))
@@ -88,14 +83,18 @@ class FPGATop extends Module {
  val dc = DCacheConfig(64, 1, ntlb = 4, nmshr = 2, nrpq = 16, nsdq = 17, tl = tl, as = as, reqtagbits = -1, databits = -1)
  val rc = RocketConfiguration(tl, as, ic, dc,
    fastMulDiv = false)
  */

  val io = new FPGATopIO(htif_width)
  val nTiles = params(NTiles)
  val io = new FPGATopIO

  val resetSigs = Vec.fill(uc.nTiles){Bool()}
  val tileList = (0 until uc.nTiles).map(r => Module(new Tile(resetSignal = resetSigs(r))(rc)))
  val uncore = Module(new FPGAUncore(htif_width))
  params.alter(params(TileLinkL1Params))

  for (i <- 0 until uc.nTiles) {
  val resetSigs = Vec.fill(nTiles){Bool()}
  val tileList = (0 until nTiles).map(r => Module(new Tile(resetSignal = resetSigs(r))))
  val uncore = Module(new FPGAUncore)

  for (i <- 0 until nTiles) {
    val hl = uncore.io.htif(i)
    val tl = uncore.io.tiles(i)
    val il = uncore.io.incoherent(i)
@@ -6,7 +6,7 @@ import scala.reflect._
import scala.reflect.runtime.universe._

object TileLinkHeaderOverwriter {
  def apply[T <: ClientSourcedMessage](in: DecoupledIO[LogicalNetworkIO[T]], clientId: Int, passThrough: Boolean)(implicit conf: TileLinkConfiguration): DecoupledIO[LogicalNetworkIO[T]] = {
  def apply[T <: ClientSourcedMessage](in: DecoupledIO[LogicalNetworkIO[T]], clientId: Int, passThrough: Boolean): DecoupledIO[LogicalNetworkIO[T]] = {
    val out = in.clone.asDirectionless
    out.bits.payload := in.bits.payload
    out.bits.header.src := UInt(clientId)
@@ -15,29 +15,26 @@ object TileLinkHeaderOverwriter {
    in.ready := out.ready
    out
  }
  def apply[T <: ClientSourcedMessage with HasPhysicalAddress](in: DecoupledIO[LogicalNetworkIO[T]], clientId: Int, nBanks: Int, addrConvert: UInt => UInt)(implicit conf: TileLinkConfiguration): DecoupledIO[LogicalNetworkIO[T]] = {
  def apply[T <: ClientSourcedMessage with HasPhysicalAddress](in: DecoupledIO[LogicalNetworkIO[T]], clientId: Int, nBanks: Int, addrConvert: UInt => UInt): DecoupledIO[LogicalNetworkIO[T]] = {
    val out: DecoupledIO[LogicalNetworkIO[T]] = apply(in, clientId, false)
    out.bits.header.dst := (if(nBanks > 1) addrConvert(in.bits.payload.addr) else UInt(0))
    out
  }
}

class ReferenceChipCrossbarNetwork(implicit conf: TileLinkConfiguration)
  extends LogicalNetwork[TileLinkIO]()(conf.ln) {
  implicit val (ln, co) = (conf.ln, conf.co)
class ReferenceChipCrossbarNetwork extends LogicalNetwork {
  val io = new Bundle {
    val clients = Vec.fill(ln.nClients){(new TileLinkIO).flip}
    val masters = Vec.fill(ln.nMasters){new TileLinkIO}
    val clients = Vec.fill(params(LNClients)){(new TileLinkIO).flip}
    val masters = Vec.fill(params(LNMasters)){new TileLinkIO}
  }

  implicit val pconf = new PhysicalNetworkConfiguration(ln.nEndpoints, ln.idBits) // Same config for all networks

  val n = params(LNEndpoints)
  // Actually instantiate the particular networks required for TileLink
  val acqNet = Module(new BasicCrossbar(new Acquire))
  val relNet = Module(new BasicCrossbar(new Release))
  val prbNet = Module(new BasicCrossbar(new Probe))
  val gntNet = Module(new BasicCrossbar(new Grant))
  val ackNet = Module(new BasicCrossbar(new Finish))
  val acqNet = Module(new BasicCrossbar(n, new Acquire))
  val relNet = Module(new BasicCrossbar(n, new Release))
  val prbNet = Module(new BasicCrossbar(n, new Probe))
  val gntNet = Module(new BasicCrossbar(n, new Grant))
  val ackNet = Module(new BasicCrossbar(n, new Finish))

  // Aliases for the various network IO bundle types
  type FBCIO[T <: Data] = DecoupledIO[PhysicalNetworkIO[T]]
@@ -57,16 +54,16 @@ class ReferenceChipCrossbarNetwork(implicit conf: TileLinkConfiguration)
  }
  def CrossbarToMasterShim[T <: Data](in: FBCIO[T]): FLNIO[T] = {
    val out = DefaultFromCrossbarShim(in)
    out.bits.header.src := in.bits.header.src - UInt(ln.nMasters)
    out.bits.header.src := in.bits.header.src - UInt(params(LNMasters))
    out
  }
  def CrossbarToClientShim[T <: Data](in: FBCIO[T]): FLNIO[T] = {
    val out = DefaultFromCrossbarShim(in)
    out.bits.header.dst := in.bits.header.dst - UInt(ln.nMasters)
    out.bits.header.dst := in.bits.header.dst - UInt(params(LNMasters))
    out
  }
  def DefaultToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = {
    val out = Decoupled(new PhysicalNetworkIO(in.bits.payload)).asDirectionless
    val out = Decoupled(new PhysicalNetworkIO(n,in.bits.payload)).asDirectionless
    out.bits.header := in.bits.header
    out.bits.payload := in.bits.payload
    out.valid := in.valid
@@ -75,12 +72,12 @@ class ReferenceChipCrossbarNetwork(implicit conf: TileLinkConfiguration)
  }
  def MasterToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = {
    val out = DefaultToCrossbarShim(in)
    out.bits.header.dst := in.bits.header.dst + UInt(ln.nMasters)
    out.bits.header.dst := in.bits.header.dst + UInt(params(LNMasters))
    out
  }
  def ClientToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = {
    val out = DefaultToCrossbarShim(in)
    out.bits.header.src := in.bits.header.src + UInt(ln.nMasters)
    out.bits.header.src := in.bits.header.src + UInt(params(LNMasters))
    out
  }
@@ -112,11 +109,11 @@ class ReferenceChipCrossbarNetwork(implicit conf: TileLinkConfiguration)
    typeTag[T].tpe match{
      case t if t <:< typeTag[ClientSourcedMessage].tpe => {
        io.masters.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](false, physIO.in(id), physIO.out(id), getLogIO(i), ClientToCrossbarShim, CrossbarToMasterShim) }
        io.clients.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](true, physIO.in(id+ln.nMasters), physIO.out(id+ln.nMasters), getLogIO(i), ClientToCrossbarShim, CrossbarToMasterShim) }
        io.clients.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](true, physIO.in(id+params(LNMasters)), physIO.out(id+params(LNMasters)), getLogIO(i), ClientToCrossbarShim, CrossbarToMasterShim) }
      }
      case t if t <:< typeTag[MasterSourcedMessage].tpe => {
        io.masters.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](true, physIO.in(id), physIO.out(id), getLogIO(i), MasterToCrossbarShim, CrossbarToClientShim) }
        io.clients.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](false, physIO.in(id+ln.nMasters), physIO.out(id+ln.nMasters), getLogIO(i), MasterToCrossbarShim, CrossbarToClientShim) }
        io.clients.zipWithIndex.map{ case (i, id) => doFIFOHookup[T](false, physIO.in(id+params(LNMasters)), physIO.out(id+params(LNMasters)), getLogIO(i), MasterToCrossbarShim, CrossbarToClientShim) }
      }
      case _ => require(false, "Unknown message sourcing.")
    }
uncore
@@ -1 +1 @@
Subproject commit ebe0f493a62641a71caec9f2959a4f57e2c16b4e
Subproject commit e2f3606041d97eedb10964e48e57b4b093ab73c6