Refactored uncore constants and TileLink data
@@ -5,11 +5,10 @@ import uncore._
import rocket._
import rocket.Util._
import ReferenceChipBackend._
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import DRAMModel._

object DummyTopLevelConstants {
object DesignSpaceConstants {
  val NTILES = 1
  val NBANKS = 1
  val HTIF_WIDTH = 16
@@ -21,7 +20,29 @@ object DummyTopLevelConstants {
  val NMSHRS = 2
}

import DummyTopLevelConstants._
object MemoryConstants {
  val CACHE_DATA_SIZE_IN_BYTES = 1 << 6 //TODO: How configurable is this really?
  val OFFSET_BITS = log2Up(CACHE_DATA_SIZE_IN_BYTES)
  val PADDR_BITS = 32
  val VADDR_BITS = 43
  val PGIDX_BITS = 13
  val ASID_BITS = 7
  val PERM_BITS = 6
  val MEM_TAG_BITS = 5
  val MEM_DATA_BITS = 128
  val MEM_ADDR_BITS = PADDR_BITS - OFFSET_BITS
  val MEM_DATA_BEATS = 4
}

object TileLinkSizeConstants {
  val WRITE_MASK_BITS = 6
  val SUBWORD_ADDR_BITS = 3
  val ATOMIC_OP_BITS = 4
}

import DesignSpaceConstants._
import MemoryConstants._
import TileLinkSizeConstants._

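The derived values above are mutually consistent: OFFSET_BITS is the byte offset within a 64-byte cache line, MEM_ADDR_BITS addresses the remaining block bits, and MEM_DATA_BEATS covers one line at MEM_DATA_BITS per beat. A minimal plain-Scala sketch of that arithmetic (log2Up is reimplemented here for self-containment; in the real code it comes from Chisel):

object ConstantSanityCheck extends App {
  // Same semantics as Chisel's log2Up: ceil(log2(n)), with log2Up(1) == 1.
  def log2Up(n: Int): Int = if (n <= 1) 1 else 32 - Integer.numberOfLeadingZeros(n - 1)

  val cacheBytes  = 1 << 6                 // CACHE_DATA_SIZE_IN_BYTES
  val offsetBits  = log2Up(cacheBytes)     // 6 bits of in-line offset
  val memAddrBits = 32 - offsetBits        // PADDR_BITS - OFFSET_BITS = 26
  val beats       = cacheBytes * 8 / 128   // line bits / MEM_DATA_BITS

  assert(beats == 4, "MEM_DATA_BEATS must equal beats per cache line")
  println(s"offsetBits=$offsetBits memAddrBits=$memAddrBits beats=$beats")
}
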
object ReferenceChipBackend {
  val initMap = new HashMap[Module, Bool]()
@@ -88,20 +109,21 @@ class Fame1ReferenceChipBackend extends ReferenceChipBackend with Fame1Transform

class OuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module
{
  implicit val (tl, ln, l2) = (conf.tl, conf.tl.ln, conf.l2)
  implicit val (tl, ln, l2, mif) = (conf.tl, conf.tl.ln, conf.l2, conf.mif)
  val io = new Bundle {
    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
    val htif = (new TileLinkIO).flip
    val incoherent = Vec.fill(ln.nClients){Bool()}.asInput
    val mem = new ioMem
    val mem_backup = new ioMemSerialized(htif_width)
    val mem = new MemIO
    val mem_backup = new MemSerializedIO(htif_width)
    val mem_backup_en = Bool(INPUT)
  }

  val refill_cycles = tl.dataBits/mif.dataBits
  val llc_tag_leaf = Mem(Bits(width = 152), 512, seqRead = true)
  val llc_data_leaf = Mem(Bits(width = 64), 4096, seqRead = true)
  val llc = Module(new DRAMSideLLC(sets=512, ways=8, outstanding=16, tagLeaf=llc_tag_leaf, dataLeaf=llc_data_leaf))
  //val llc = Module(new DRAMSideLLCNull(NL2_REL_XACTS+NL2_ACQ_XACTS, REFILL_CYCLES))
  val llc = Module(new DRAMSideLLC(sets=512, ways=8, outstanding=16, refill_cycles=refill_cycles, tagLeaf=llc_tag_leaf, dataLeaf=llc_data_leaf))
  //val llc = Module(new DRAMSideLLCNull(NL2_REL_XACTS+NL2_ACQ_XACTS, refill_cycles))
  val mem_serdes = Module(new MemSerdes(htif_width))

  val masterEndpoints = (0 until ln.nMasters).map(i => Module(new L2CoherenceAgent(i)))
@@ -119,7 +141,7 @@ class OuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration) ext
    conv.io.uncached <> masterEndpoints.head.io.master
  }
  llc.io.cpu.req_cmd <> Queue(conv.io.mem.req_cmd)
  llc.io.cpu.req_data <> Queue(conv.io.mem.req_data, REFILL_CYCLES)
  llc.io.cpu.req_data <> Queue(conv.io.mem.req_data, refill_cycles)
  conv.io.mem.resp <> llc.io.cpu.resp

  // mux between main and backup memory ports
@@ -131,7 +153,7 @@ class OuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration) ext
  mem_serdes.io.wide.req_cmd.valid := mem_cmdq.io.deq.valid && io.mem_backup_en
  mem_serdes.io.wide.req_cmd.bits := mem_cmdq.io.deq.bits

  val mem_dataq = Module(new Queue(new MemData, REFILL_CYCLES))
  val mem_dataq = Module(new Queue(new MemData, refill_cycles))
  mem_dataq.io.enq <> llc.io.mem.req_data
  mem_dataq.io.deq.ready := Mux(io.mem_backup_en, mem_serdes.io.wide.req_data.ready, io.mem.req_data.ready)
  io.mem.req_data.valid := mem_dataq.io.deq.valid && !io.mem_backup_en
@@ -146,21 +168,21 @@ class OuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration) ext
  io.mem_backup <> mem_serdes.io.narrow
}
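The key change in OuterMemorySystem is that queue and LLC sizing no longer uses the global REFILL_CYCLES; the beat count is derived locally from the two interface widths. With the constants defined above, the arithmetic works out as in this sketch (illustrative, not part of the diff):

// refill_cycles = TileLink data bits per message / MemIO data bits per beat.
val tlDataBits   = 64 * 8                  // CACHE_DATA_SIZE_IN_BYTES * 8 = 512
val mifDataBits  = 128                     // MEM_DATA_BITS
val refillCycles = tlDataBits / mifDataBits
assert(refillCycles == 4)                  // agrees with MEM_DATA_BEATS
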

case class UncoreConfiguration(l2: L2CoherenceAgentConfiguration, tl: TileLinkConfiguration, nTiles: Int, nBanks: Int, bankIdLsb: Int, nSCR: Int)
case class UncoreConfiguration(l2: L2CoherenceAgentConfiguration, tl: TileLinkConfiguration, mif: MemoryIFConfiguration, nTiles: Int, nBanks: Int, bankIdLsb: Int, nSCR: Int, offsetBits: Int)

class Uncore(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module
{
  implicit val tl = conf.tl
  implicit val (tl, mif) = (conf.tl, conf.mif)
  val io = new Bundle {
    val host = new HostIO(htif_width)
    val mem = new ioMem
    val mem = new MemIO
    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
    val htif = Vec.fill(conf.nTiles){new HTIFIO(conf.nTiles)}.flip
    val incoherent = Vec.fill(conf.nTiles){Bool()}.asInput
    val mem_backup = new ioMemSerialized(htif_width)
    val mem_backup = new MemSerializedIO(htif_width)
    val mem_backup_en = Bool(INPUT)
  }
  val htif = Module(new HTIF(htif_width, CSRs.reset, conf.nSCR))
  val htif = Module(new HTIF(htif_width, CSRs.reset, conf.nSCR, conf.offsetBits))
  val outmemsys = Module(new OuterMemorySystem(htif_width))
  val incoherentWithHtif = (io.incoherent :+ Bool(true).asInput)
  outmemsys.io.incoherent := incoherentWithHtif
@@ -170,21 +192,15 @@ class Uncore(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module

  // Add networking headers and endpoint queues
  def convertAddrToBank(addr: Bits): UInt = {
    require(conf.bankIdLsb + log2Up(conf.nBanks) < MEM_ADDR_BITS, {println("Invalid bits for bank multiplexing.")})
    require(conf.bankIdLsb + log2Up(conf.nBanks) < conf.mif.addrBits, {println("Invalid bits for bank multiplexing.")})
    addr(conf.bankIdLsb + log2Up(conf.nBanks) - 1, conf.bankIdLsb)
  }

  (outmemsys.io.tiles :+ outmemsys.io.htif).zip(io.tiles :+ htif.io.mem).zipWithIndex.map {
    case ((outer, client), i) =>
      outer.acquire <> TileLinkHeaderAppender(client.acquire, i, conf.nBanks, convertAddrToBank _)
      outer.release <> TileLinkHeaderAppender(client.release, i, conf.nBanks, convertAddrToBank _)

      val grant_ack_q = Queue(client.grant_ack)
      outer.grant_ack.valid := grant_ack_q.valid
      outer.grant_ack.bits := grant_ack_q.bits
      outer.grant_ack.bits.header.src := UInt(i)
      grant_ack_q.ready := outer.grant_ack.ready

      outer.acquire <> Queue(TileLinkHeaderOverwriter(client.acquire, i, conf.nBanks, convertAddrToBank _))
      outer.release <> Queue(TileLinkHeaderOverwriter(client.release, i, conf.nBanks, convertAddrToBank _))
      outer.grant_ack <> Queue(TileLinkHeaderOverwriter(client.grant_ack, i))
      client.grant <> Queue(outer.grant, 1, pipe = true)
      client.probe <> Queue(outer.probe)
  }
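convertAddrToBank picks the bank id out of the block address: bits [bankIdLsb + log2Up(nBanks) - 1, bankIdLsb] select the bank, and TileLinkHeaderOverwriter stamps that id into each outgoing header. A plain-Scala model of the selection (illustrative values; this top level actually has a single bank):

def bankOf(blockAddr: BigInt, bankIdLsb: Int, nBanks: Int): BigInt =
  (blockAddr >> bankIdLsb) & (nBanks - 1)   // extract log2Up(nBanks) bits

// e.g. with 2 hypothetical banks and bankIdLsb = 5, bit 5 routes:
assert(bankOf(0x20, bankIdLsb = 5, nBanks = 2) == 1)
assert(bankOf(0x40, bankIdLsb = 5, nBanks = 2) == 0)
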
@@ -219,12 +235,12 @@ class Uncore(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module
  io.host.debug_stats_pcr := htif.io.host.debug_stats_pcr
}

class TopIO(htifWidth: Int) extends Bundle {
class TopIO(htifWidth: Int)(implicit conf: MemoryIFConfiguration) extends Bundle {
  val host    = new HostIO(htifWidth)
  val mem     = new ioMem
  val mem     = new MemIO
}

class VLSITopIO(htifWidth: Int) extends TopIO(htifWidth) {
class VLSITopIO(htifWidth: Int)(implicit conf: MemoryIFConfiguration) extends TopIO(htifWidth)(conf) {
  val mem_backup_en = Bool(INPUT)
  val in_mem_ready = Bool(OUTPUT)
  val in_mem_valid = Bool(INPUT)
@@ -234,6 +250,7 @@ class VLSITopIO(htifWidth: Int) extends TopIO(htifWidth) {


class MemDessert extends Module {
  implicit val mif = MemoryIFConfiguration(MEM_ADDR_BITS, MEM_DATA_BITS, MEM_TAG_BITS, MEM_DATA_BEATS)
  val io = new MemDesserIO(HTIF_WIDTH)
  val x = Module(new MemDesser(HTIF_WIDTH))
  io.narrow <> x.io.narrow
@@ -251,18 +268,29 @@ class Top extends Module {
            }

  implicit val ln = LogicalNetworkConfiguration(log2Up(NTILES)+1, NBANKS, NTILES+1)
  implicit val tl = TileLinkConfiguration(co, ln, log2Up(NL2_REL_XACTS+NL2_ACQ_XACTS), 2*log2Up(NMSHRS*NTILES+1), MEM_DATA_BITS)
  implicit val as = AddressSpaceConfiguration(PADDR_BITS, VADDR_BITS, PGIDX_BITS, ASID_BITS, PERM_BITS)
  implicit val tl = TileLinkConfiguration(co = co, ln = ln,
                                          addrBits = as.paddrBits-OFFSET_BITS,
                                          clientXactIdBits = log2Up(NL2_REL_XACTS+NL2_ACQ_XACTS),
                                          masterXactIdBits = 2*log2Up(NMSHRS*NTILES+1),
                                          dataBits = CACHE_DATA_SIZE_IN_BYTES*8,
                                          writeMaskBits = WRITE_MASK_BITS,
                                          wordAddrBits = SUBWORD_ADDR_BITS,
                                          atomicOpBits = ATOMIC_OP_BITS)
  implicit val l2 = L2CoherenceAgentConfiguration(tl, NL2_REL_XACTS, NL2_ACQ_XACTS)
  implicit val uc = UncoreConfiguration(l2, tl, NTILES, NBANKS, bankIdLsb = 5, nSCR = 64)
  implicit val mif = MemoryIFConfiguration(MEM_ADDR_BITS, MEM_DATA_BITS, MEM_TAG_BITS, MEM_DATA_BEATS)
  implicit val uc = UncoreConfiguration(l2, tl, mif, NTILES, NBANKS, bankIdLsb = 5, nSCR = 64, offsetBits = OFFSET_BITS)
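Every TileLinkConfiguration field is now named and derived rather than positional. Taking NL2_REL_XACTS = 1 and NL2_ACQ_XACTS = 8 (the values the hard-coded log2Up(1+8) in ReferenceChip below implies), with NTILES = 1 and NMSHRS = 2, the widths work out as in this sketch:

val addrBits         = 32 - 6   // as.paddrBits - OFFSET_BITS = 26
val clientXactIdBits = 4        // log2Up(NL2_REL_XACTS + NL2_ACQ_XACTS) = log2Up(9)
val masterXactIdBits = 4        // 2 * log2Up(NMSHRS*NTILES + 1) = 2 * log2Up(3)
val dataBits         = 64 * 8   // CACHE_DATA_SIZE_IN_BYTES * 8 = 512
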

  val ic = ICacheConfig(128, 2, ntlb = 8, btb = BTBConfig(64, 2))
  val dc = DCacheConfig(128, 4, ntlb = 8,
                        nmshr = NMSHRS, nrpq = 16, nsdq = 17, states = co.nClientStates)
  val vic = ICacheConfig(128, 1)
  val hc = hwacha.HwachaConfiguration(vic, dc, 8, 256, ndtlb = 8, nptlb = 2)
  val ic = ICacheConfig(sets = 128, assoc = 2, ntlb = 8, tl = tl, as = as, btb = BTBConfig(as, 64, 2))
  val dc = DCacheConfig(sets = 128, ways = 4,
                        tl = tl, as = as,
                        ntlb = 8, nmshr = NMSHRS, nrpq = 16, nsdq = 17,
                        reqtagbits = -1, databits = -1)
  val vic = ICacheConfig(sets = 128, assoc = 1, tl = tl, as = as, btb = BTBConfig(as, 8))
  val hc = hwacha.HwachaConfiguration(as, vic, dc, 8, 256, ndtlb = 8, nptlb = 2)
  val fpu = if (HAS_FPU) Some(FPUConfig(sfmaLatency = 2, dfmaLatency = 3)) else None
  val rc = RocketConfiguration(tl, ic, dc, fpu
                               //,rocc = (c: RocketConfiguration) => (new hwacha.Hwacha(hc, c))
  val rc = RocketConfiguration(tl, as, ic, dc, fpu
  //                             rocc = (c: RocketConfiguration) => (new hwacha.Hwacha(hc, c))
                              )

  val io = new VLSITopIO(HTIF_WIDTH)

@@ -5,15 +5,16 @@ import Node._
import uncore._
import rocket._
import DRAMModel._
import DRAMModel.MemModelConstants._

class FPGAOuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module
{
  implicit val (tl, ln, l2) = (conf.tl, conf.tl.ln, conf.l2)
  implicit val (tl, ln, l2, mif) = (conf.tl, conf.tl.ln, conf.l2, conf.mif)
  val io = new Bundle {
    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
    val htif = (new TileLinkIO).flip
    val incoherent = Vec.fill(ln.nClients){Bool()}.asInput
    val mem = new ioMem
    val mem = new MemIO
  }

  val masterEndpoints = (0 until ln.nMasters).map(i => Module(new L2CoherenceAgent(i)))
@@ -32,21 +33,21 @@ class FPGAOuterMemorySystem(htif_width: Int)(implicit conf: UncoreConfiguration)
    conv.io.uncached <> masterEndpoints.head.io.master
  }
  io.mem.req_cmd <> Queue(conv.io.mem.req_cmd)
  io.mem.req_data <> Queue(conv.io.mem.req_data, REFILL_CYCLES)
  io.mem.req_data <> Queue(conv.io.mem.req_data, tl.dataBits/mif.dataBits)
  conv.io.mem.resp <> Queue(io.mem.resp)
}

class FPGAUncore(htif_width: Int)(implicit conf: UncoreConfiguration) extends Module
{
  implicit val (tl, ln) = (conf.tl, conf.tl.ln)
  implicit val (tl, ln, mif) = (conf.tl, conf.tl.ln, conf.mif)
  val io = new Bundle {
    val host = new HostIO(htif_width)
    val mem = new ioMem
    val mem = new MemIO
    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
    val htif = Vec.fill(conf.nTiles){new HTIFIO(conf.nTiles)}.flip
    val incoherent = Vec.fill(conf.nTiles){Bool()}.asInput
  }
  val htif = Module(new HTIF(htif_width, CSRs.reset, conf.nSCR))
  val htif = Module(new HTIF(htif_width, CSRs.reset, conf.nSCR, conf.offsetBits))
  val outmemsys = Module(new FPGAOuterMemorySystem(htif_width))
  val incoherentWithHtif = (io.incoherent :+ Bool(true).asInput)
  outmemsys.io.incoherent := incoherentWithHtif
@@ -55,21 +56,15 @@ class FPGAUncore(htif_width: Int)(implicit conf: UncoreConfiguration) extends Mo

  // Add networking headers and endpoint queues
  def convertAddrToBank(addr: Bits): UInt = {
    require(conf.bankIdLsb + log2Up(conf.nBanks) < MEM_ADDR_BITS, {println("Invalid bits for bank multiplexing.")})
    require(conf.bankIdLsb + log2Up(conf.nBanks) < conf.mif.addrBits, {println("Invalid bits for bank multiplexing.")})
    addr(conf.bankIdLsb + log2Up(conf.nBanks) - 1, conf.bankIdLsb)
  }

  (outmemsys.io.tiles :+ outmemsys.io.htif).zip(io.tiles :+ htif.io.mem).zipWithIndex.map {
    case ((outer, client), i) =>
      outer.acquire <> TileLinkHeaderAppender(client.acquire, i, conf.nBanks, convertAddrToBank _)
      outer.release <> TileLinkHeaderAppender(client.release, i, conf.nBanks, convertAddrToBank _)

      val grant_ack_q = Queue(client.grant_ack)
      outer.grant_ack.valid := grant_ack_q.valid
      outer.grant_ack.bits := grant_ack_q.bits
      outer.grant_ack.bits.header.src := UInt(i)
      grant_ack_q.ready := outer.grant_ack.ready

      outer.acquire <> Queue(TileLinkHeaderOverwriter(client.acquire, i, conf.nBanks, convertAddrToBank _))
      outer.release <> Queue(TileLinkHeaderOverwriter(client.release, i, conf.nBanks, convertAddrToBank _))
      outer.grant_ack <> Queue(TileLinkHeaderOverwriter(client.grant_ack, i))
      client.grant <> Queue(outer.grant, 1, pipe = true)
      client.probe <> Queue(outer.probe)
  }
@@ -78,14 +73,16 @@ class FPGAUncore(htif_width: Int)(implicit conf: UncoreConfiguration) extends Mo
  htif.io.host.in <> io.host.in
}

class ReferenceChip(htif_width: Int) extends Module {
import MemoryConstants._
import TileLinkSizeConstants._

class ReferenceChip(htif_width: Int)(implicit mif: MemoryIFConfiguration) extends Module {
  val io = new Bundle {
    val host_in = new DecoupledIO(new HostPacket(htif_width)).flip()
    val host_out = new DecoupledIO(new HostPacket(htif_width))
    val host_clk = Bool(OUTPUT)
    val host_clk_edge = Bool(OUTPUT)
    val host_debug_stats_pcr = Bool(OUTPUT)

    val mem_req_cmd = new DecoupledIO(new MemReqCmd())
    val mem_req_data = new DecoupledIO(new MemData())
    val mem_resp = (new DecoupledIO(new MemResp())).flip()
@@ -96,13 +93,21 @@ class ReferenceChip(htif_width: Int) extends Module {
  val nbanks = 1
  val nmshrs = 2
  implicit val ln = LogicalNetworkConfiguration(log2Up(ntiles)+1, nbanks, ntiles+1)
  implicit val tl = TileLinkConfiguration(co, ln, log2Up(1+8), 2*log2Up(nmshrs*ntiles+1), MEM_DATA_BITS)
  implicit val as = AddressSpaceConfiguration(PADDR_BITS, VADDR_BITS, PGIDX_BITS, ASID_BITS, PERM_BITS)
  implicit val tl = TileLinkConfiguration(co = co, ln = ln,
                                          addrBits = as.paddrBits-OFFSET_BITS,
                                          clientXactIdBits = log2Up(1+8),
                                          masterXactIdBits = 2*log2Up(2*1+1),
                                          dataBits = CACHE_DATA_SIZE_IN_BYTES*8,
                                          writeMaskBits = WRITE_MASK_BITS,
                                          wordAddrBits = SUBWORD_ADDR_BITS,
                                          atomicOpBits = ATOMIC_OP_BITS)
  implicit val l2 = L2CoherenceAgentConfiguration(tl, 1, 8)
  implicit val uc = UncoreConfiguration(l2, tl, ntiles, nbanks, bankIdLsb = 5, nSCR = 64)
  implicit val uc = UncoreConfiguration(l2, tl, mif, ntiles, nbanks, bankIdLsb = 5, nSCR = 64, offsetBits = OFFSET_BITS)

  val ic = ICacheConfig(64, 1, ntlb = 4, btb = BTBConfig(4))
  val dc = DCacheConfig(64, 1, ntlb = 4, nmshr = 2, nrpq = 16, nsdq = 17, states = co.nClientStates)
  val rc = RocketConfiguration(tl, ic, dc, fpu = None,
  val ic = ICacheConfig(64, 1, ntlb = 4, tl = tl, as = as, btb = BTBConfig(as, 8))
  val dc = DCacheConfig(64, 1, ntlb = 4, nmshr = 2, nrpq = 16, nsdq = 17, tl = tl, as = as, reqtagbits = -1, databits = -1)
  val rc = RocketConfiguration(tl, as, ic, dc, fpu = None,
                               fastMulDiv = false)

  val resetSigs = Vec.fill(uc.nTiles){Bool()}
@@ -144,11 +149,17 @@ class ReferenceChip(htif_width: Int) extends Module {
  io.mem_resp <> uncore.io.mem.resp
}

import MemoryConstants._

class FPGATopIO(htifWidth: Int) extends TopIO(htifWidth)
class FPGATopIO(htifWidth: Int)(implicit conf: MemoryIFConfiguration) extends TopIO(htifWidth)(conf)

class FPGATop extends Module {
  val htif_width = 16

  implicit val mif = MemoryIFConfiguration(MEM_ADDR_BITS, MEM_DATA_BITS, MEM_TAG_BITS, 4)
  val deviceWidth = ROW_WIDTH/mif.dataBits
  implicit val mc = MemoryControllerConfiguration(deviceWidth, (if(deviceWidth == 4) 0 else log2Up(deviceWidth/4)), mif)

  val io = new FPGATopIO(htif_width)

  val referenceChip = Module(new Fame1Wrapper(new ReferenceChip(htif_width)))
@@ -197,8 +208,8 @@ class FPGATop extends Module {

  //dram model to outside memory connections
  val host_mem_cmd_queue = Module(new Queue(new MemReqCmd, 2))
  val host_mem_data_queue = Module(new Queue(new MemData, REFILL_CYCLES))
  val host_mem_resp_queue = Module(new Queue(new MemResp, REFILL_CYCLES))
  val host_mem_data_queue = Module(new Queue(new MemData, mif.dataBeats))
  val host_mem_resp_queue = Module(new Queue(new MemResp, mif.dataBeats))

  host_mem_cmd_queue.io.enq <> dramModel.io.mem.req_cmd
  host_mem_cmd_queue.io.deq <> io.mem.req_cmd
@@ -261,7 +272,7 @@ class Slave extends AXISlave

  // write cr1 -> mem.resp (nonblocking)
  val in_count = Reg(init=UInt(0, log2Up(memw/dw)))
  val rf_count = Reg(init=UInt(0, log2Up(REFILL_CYCLES)))
  val rf_count = Reg(init=UInt(0, log2Up(CACHE_DATA_SIZE_IN_BYTES*8/memw)))
  require(memw % dw == 0 && isPow2(memw/dw))
  val in_reg = Reg(top.io.mem.resp.bits.data)
  top.io.mem.resp.bits.data := Cat(io.in.bits, in_reg(in_reg.getWidth-1,dw))
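rf_count is the beat counter for assembling one memory response; its width now derives from the cache line size and the slave's memory port width memw instead of the removed REFILL_CYCLES. A sketch of the sizing, with memw = 128 assumed for illustration (its actual value is defined outside this excerpt):

val memw = 128                        // assumed example width, not from this diff
val beatsPerLine = 64 * 8 / memw      // CACHE_DATA_SIZE_IN_BYTES*8 / memw = 4
val rfCountWidth =                    // log2Up(4) = 2-bit wrap-around counter
  32 - Integer.numberOfLeadingZeros(beatsPerLine - 1)
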

@@ -5,115 +5,42 @@ import uncore._
import scala.reflect._
import scala.reflect.runtime.universe._

object TileLinkHeaderAppender {
  def apply[T <: ClientSourcedMessage with HasPhysicalAddress, U <: ClientSourcedMessage with HasTileLinkData](in: PairedDataIO[LogicalNetworkIO[T],LogicalNetworkIO[U]], clientId: Int, nBanks: Int, addrConvert: Bits => UInt)(implicit conf: TileLinkConfiguration) = {
    val shim = Module(new TileLinkHeaderAppender(in.meta.bits.payload, in.data.bits.payload, clientId, nBanks, addrConvert))
    shim.io.in <> in
    shim.io.out
object TileLinkHeaderOverwriter {
  def apply[T <: ClientSourcedMessage](in: DecoupledIO[LogicalNetworkIO[T]], clientId: Int)(implicit conf: TileLinkConfiguration): DecoupledIO[LogicalNetworkIO[T]] = {
    val out = in.clone.asDirectionless
    out.bits.payload := in.bits.payload
    out.bits.header.src := UInt(clientId)
    out.bits.header.dst := in.bits.header.dst
    out.valid := in.valid
    in.ready := out.ready
    out
  }
  def apply[T <: ClientSourcedMessage with HasPhysicalAddress](in: DecoupledIO[LogicalNetworkIO[T]], clientId: Int, nBanks: Int, addrConvert: Bits => UInt)(implicit conf: TileLinkConfiguration) = {
    val shim = Module(new TileLinkHeaderAppender(in.bits.payload.clone, new AcquireData, clientId, nBanks, addrConvert))
    shim.io.in.meta <> in
    shim.io.out.meta
  def apply[T <: ClientSourcedMessage with HasPhysicalAddress](in: DecoupledIO[LogicalNetworkIO[T]], clientId: Int, nBanks: Int, addrConvert: UInt => UInt)(implicit conf: TileLinkConfiguration): DecoupledIO[LogicalNetworkIO[T]] = {
    val out: DecoupledIO[LogicalNetworkIO[T]] = apply(in, clientId)
    out.bits.header.dst := (if(nBanks > 1) addrConvert(in.bits.payload.addr) else UInt(0))
    out
  }
}
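Unlike the Module-based TileLinkHeaderAppender it replaces, TileLinkHeaderOverwriter is a purely combinational wiring helper: it clones the DecoupledIO, rewrites header.src (and optionally header.dst via the address-to-bank function), and passes ready/valid straight through. A hypothetical call site, mirroring the Uncore hookup earlier in this diff (i, client, outer, conf and convertAddrToBank as defined there):

outer.acquire <> Queue(
  TileLinkHeaderOverwriter(client.acquire, clientId = i,
                           nBanks = conf.nBanks, addrConvert = convertAddrToBank _))
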

class TileLinkHeaderAppender[T <: ClientSourcedMessage with HasPhysicalAddress, U <: ClientSourcedMessage with HasTileLinkData](mType: T, dType: U, clientId: Int, nBanks: Int, addrConvert: Bits => UInt)(implicit conf: TileLinkConfiguration) extends Module {
  implicit val ln = conf.ln
  val io = new Bundle {
    val in = new PairedDataIO(new LogicalNetworkIO(mType), new LogicalNetworkIO(dType)).flip
    val out = new PairedDataIO(new LogicalNetworkIO(mType), new LogicalNetworkIO(dType))
  }

  val meta_q = Queue(io.in.meta)
  val data_q = Queue(io.in.data)
  if(nBanks == 1) {
    io.out.meta.bits.payload := meta_q.bits.payload
    io.out.meta.bits.header.src := UInt(clientId)
    io.out.meta.bits.header.dst := UInt(0)
    io.out.meta.valid := meta_q.valid
    meta_q.ready := io.out.meta.ready
    io.out.data.bits.payload := data_q.bits.payload
    io.out.data.bits.header.src := UInt(clientId)
    io.out.data.bits.header.dst := UInt(0)
    io.out.data.valid := data_q.valid
    data_q.ready := io.out.data.ready
  } else {
    val meta_has_data = conf.co.messageHasData(meta_q.bits.payload)
    val addr_q = Module(new Queue(io.in.meta.bits.payload.addr.clone, 2, pipe = true, flow = true))
    val data_cnt = Reg(init=UInt(0, width = log2Up(REFILL_CYCLES)))
    val data_cnt_up = data_cnt + UInt(1)

    io.out.meta.bits.payload := meta_q.bits.payload
    io.out.meta.bits.header.src := UInt(clientId)
    io.out.meta.bits.header.dst := addrConvert(meta_q.bits.payload.addr)
    io.out.data.bits.payload := meta_q.bits.payload
    io.out.data.bits.header.src := UInt(clientId)
    io.out.data.bits.header.dst := addrConvert(addr_q.io.deq.bits)
    addr_q.io.enq.bits := meta_q.bits.payload.addr

    io.out.meta.valid := meta_q.valid && addr_q.io.enq.ready
    meta_q.ready := io.out.meta.ready && addr_q.io.enq.ready
    io.out.data.valid := data_q.valid && addr_q.io.deq.valid
    data_q.ready := io.out.data.ready && addr_q.io.deq.valid
    addr_q.io.enq.valid := meta_q.valid && io.out.meta.ready && meta_has_data
    addr_q.io.deq.ready := Bool(false)

    when(data_q.valid && data_q.ready) {
      data_cnt := data_cnt_up
      when(data_cnt_up === UInt(0)) {
        addr_q.io.deq.ready := Bool(true)
      }
    }
  }
}
// Adapter between an UncachedTileLinkIO and a memory controller MemIO
class MemIOUncachedTileLinkIOConverter(qDepth: Int)(implicit conf: TileLinkConfiguration) extends Module {
  val io = new Bundle {
    val uncached = new UncachedTileLinkIO().flip
    val mem = new ioMem
  }
  val mem_cmd_q = Module(new Queue(new MemReqCmd, qDepth))
  val mem_data_q = Module(new Queue(new MemData, qDepth))
  mem_cmd_q.io.enq.valid := io.uncached.acquire.meta.valid
  io.uncached.acquire.meta.ready := mem_cmd_q.io.enq.ready
  mem_cmd_q.io.enq.bits.rw := conf.co.needsOuterWrite(io.uncached.acquire.meta.bits.payload.a_type, UInt(0))
  mem_cmd_q.io.enq.bits.tag := io.uncached.acquire.meta.bits.payload.client_xact_id
  mem_cmd_q.io.enq.bits.addr := io.uncached.acquire.meta.bits.payload.addr
  mem_data_q.io.enq.valid := io.uncached.acquire.data.valid
  io.uncached.acquire.data.ready := mem_data_q.io.enq.ready
  mem_data_q.io.enq.bits.data := io.uncached.acquire.data.bits.payload.data
  io.uncached.grant.valid := io.mem.resp.valid
  io.mem.resp.ready := io.uncached.grant.ready
  io.uncached.grant.bits.payload.data := io.mem.resp.bits.data
  io.uncached.grant.bits.payload.client_xact_id := io.mem.resp.bits.tag
  io.uncached.grant.bits.payload.master_xact_id := UInt(0) // DNC
  io.uncached.grant.bits.payload.g_type := UInt(0) // DNC
  io.mem.req_cmd <> mem_cmd_q.io.deq
  io.mem.req_data <> mem_data_q.io.deq
}
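The converter flattens each uncached TileLink acquire into a MemReqCmd/MemData pair and turns memory responses back into grants (g_type and master_xact_id are left as don't-cares). A sketch of how the outer memory systems above instantiate it, with qDepth = 2 assumed for illustration:

val conv = Module(new MemIOUncachedTileLinkIOConverter(qDepth = 2))
conv.io.uncached <> masterEndpoints.head.io.master
io.mem.req_cmd  <> Queue(conv.io.mem.req_cmd)
io.mem.req_data <> Queue(conv.io.mem.req_data, tl.dataBits / mif.dataBits)
conv.io.mem.resp <> Queue(io.mem.resp)
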

class ReferenceChipCrossbarNetwork(implicit conf: UncoreConfiguration) extends LogicalNetwork[TileLinkIO]()(conf.tl.ln) {
  implicit val (tl, ln, co) = (conf.tl, conf.tl.ln, conf.tl.co)
  val io = new Bundle {
    val clients = Vec.fill(ln.nClients){(new TileLinkIO).flip}
    val masters = Vec.fill(ln.nMasters){new TileLinkIO}
  }

  implicit val pconf = new PhysicalNetworkConfiguration(ln.nEndpoints, ln.idBits) // Same config for all networks

  // Actually instantiate the particular networks required for TileLink
  val acqNet = Module(new PairedCrossbar(new Acquire, new AcquireData, REFILL_CYCLES, (acq: PhysicalNetworkIO[Acquire]) => co.messageHasData(acq.payload)))
  val relNet = Module(new PairedCrossbar(new Release, new ReleaseData, REFILL_CYCLES, (rel: PhysicalNetworkIO[Release]) => co.messageHasData(rel.payload)))
  val probeNet = Module(new BasicCrossbar(new Probe))
  val grantNet = Module(new BasicCrossbar(new Grant))
  val acqNet = Module(new BasicCrossbar(new Acquire))
  val relNet = Module(new BasicCrossbar(new Release))
  val prbNet = Module(new BasicCrossbar(new Probe))
  val gntNet = Module(new BasicCrossbar(new Grant))
  val ackNet = Module(new BasicCrossbar(new GrantAck))

  // Aliases for the various network IO bundle types
  type FBCIO[T <: Data] = DecoupledIO[PhysicalNetworkIO[T]]
  type FLNIO[T <: Data] = DecoupledIO[LogicalNetworkIO[T]]
  type PBCIO[M <: Data, D <: Data] = PairedDataIO[PhysicalNetworkIO[M], PhysicalNetworkIO[D]]
  type PLNIO[M <: Data, D <: Data] = PairedDataIO[LogicalNetworkIO[M], LogicalNetworkIO[D]]
  type FromCrossbar[T <: Data] = FBCIO[T] => FLNIO[T]
  type ToCrossbar[T <: Data] = FLNIO[T] => FBCIO[T]

@@ -176,10 +103,10 @@ class ReferenceChipCrossbarNetwork(implicit conf: UncoreConfiguration) extends L

  def doFIFOHookup[T <: Data](isEndpointSourceOfMessage: Boolean, physIn: FBCIO[T], physOut: FBCIO[T], logIO: FLNIO[T], inShim: ToCrossbar[T], outShim: FromCrossbar[T]) = {
    if(isEndpointSourceOfMessage) doFIFOInputHookup(physIn, physOut, logIO, inShim)
    else                 doFIFOOutputHookup(physIn, physOut, logIO, outShim)
    else doFIFOOutputHookup(physIn, physOut, logIO, outShim)
  }

  //Hookup all instances of a particular subbundle of
  //Hookup all instances of a particular subbundle of TileLink
  def doFIFOHookups[T <: Data: TypeTag](physIO: BasicCrossbarIO[T], getLogIO: TileLinkIO => FLNIO[T]) = {
    typeTag[T].tpe match{
      case t if t <:< typeTag[ClientSourcedMessage].tpe => {
@@ -194,33 +121,9 @@ class ReferenceChipCrossbarNetwork(implicit conf: UncoreConfiguration) extends L
    }
  }

  def doPairedDataHookup[T <: Data, R <: Data](isEndpointSourceOfMessage: Boolean, physIn: PBCIO[T,R], physOut: PBCIO[T,R], logIO: PLNIO[T,R], inShim: ToCrossbar[T], outShim: FromCrossbar[T], inShimD: ToCrossbar[R], outShimD: FromCrossbar[R]) = {
    if(isEndpointSourceOfMessage) {
      doFIFOInputHookup[T](physIn.meta, physOut.meta, logIO.meta, inShim)
      doFIFOInputHookup[R](physIn.data, physOut.data, logIO.data, inShimD)
    } else {
      doFIFOOutputHookup[T](physIn.meta, physOut.meta, logIO.meta, outShim)
      doFIFOOutputHookup[R](physIn.data, physOut.data, logIO.data, outShimD)
    }
  }

  def doPairedDataHookups[T <: Data: TypeTag, R <: Data](physIO: PairedCrossbarIO[T,R], getLogIO: TileLinkIO => PLNIO[T,R]) = {
    typeTag[T].tpe match{
      case t if t <:< typeTag[ClientSourcedMessage].tpe => {
        io.masters.zipWithIndex.map{ case (i, id) => doPairedDataHookup[T,R](false, physIO.in(id), physIO.out(id), getLogIO(i), ClientToCrossbarShim, CrossbarToMasterShim, ClientToCrossbarShim, CrossbarToMasterShim) }
        io.clients.zipWithIndex.map{ case (i, id) => doPairedDataHookup[T,R](true, physIO.in(id+ln.nMasters), physIO.out(id+ln.nMasters), getLogIO(i), ClientToCrossbarShim, CrossbarToMasterShim, ClientToCrossbarShim, CrossbarToMasterShim) }
      }
      case t if t <:< typeTag[MasterSourcedMessage].tpe => {
        io.masters.zipWithIndex.map{ case (i, id) => doPairedDataHookup[T,R](true, physIO.in(id), physIO.out(id), getLogIO(i), MasterToCrossbarShim, CrossbarToClientShim, MasterToCrossbarShim, CrossbarToClientShim) }
        io.clients.zipWithIndex.map{ case (i, id) => doPairedDataHookup[T,R](false, physIO.in(id+ln.nMasters), physIO.out(id+ln.nMasters), getLogIO(i), MasterToCrossbarShim, CrossbarToClientShim, MasterToCrossbarShim, CrossbarToClientShim) }
      }
      case _ => require(false, "Unknown message sourcing.")
    }
  }

  doPairedDataHookups(acqNet.io, (tl: TileLinkIO) => tl.acquire)
  doPairedDataHookups(relNet.io, (tl: TileLinkIO) => tl.release)
  doFIFOHookups(probeNet.io, (tl: TileLinkIO) => tl.probe)
  doFIFOHookups(grantNet.io, (tl: TileLinkIO) => tl.grant)
  doFIFOHookups(acqNet.io, (tl: TileLinkIO) => tl.acquire)
  doFIFOHookups(relNet.io, (tl: TileLinkIO) => tl.release)
  doFIFOHookups(prbNet.io, (tl: TileLinkIO) => tl.probe)
  doFIFOHookups(gntNet.io, (tl: TileLinkIO) => tl.grant)
  doFIFOHookups(ackNet.io, (tl: TileLinkIO) => tl.grant_ack)
}