From 11e131af47187d281094e2c3a83f182b46f72164 Mon Sep 17 00:00:00 2001 From: Henry Cook Date: Mon, 12 Aug 2013 10:46:22 -0700 Subject: [PATCH] initial attempt at upgrade --- chisel | 2 +- hardfloat | 2 +- riscv-rocket | 2 +- src/main/scala/RocketChip.scala | 83 +++++++++++++++++---------------- src/main/scala/fpga.scala | 74 ++++++++++++++--------------- src/main/scala/network.scala | 70 +++++++++++++-------------- uncore | 2 +- 7 files changed, 118 insertions(+), 117 deletions(-) diff --git a/chisel b/chisel index 11cb15ba..97ac8785 160000 --- a/chisel +++ b/chisel @@ -1 +1 @@ -Subproject commit 11cb15ba9a0f7dedf43a34e0d64708facd0ea619 +Subproject commit 97ac878580bdd9bf9d4cf05d33d64689dfc6627a diff --git a/hardfloat b/hardfloat index fc09bea8..76ac1cb9 160000 --- a/hardfloat +++ b/hardfloat @@ -1 +1 @@ -Subproject commit fc09bea89982c4d7d33b6201896aa3b40caba186 +Subproject commit 76ac1cb932949dc33a11dd85f9bf9cbc3a639eb4 diff --git a/riscv-rocket b/riscv-rocket index ee815dd3..2d3caa3e 160000 --- a/riscv-rocket +++ b/riscv-rocket @@ -1 +1 @@ -Subproject commit ee815dd3983f3b1a67fd3d810a513e23bdef97e4 +Subproject commit 2d3caa3e269f238b1a8ccaa28f0c348dc12acf61 diff --git a/src/main/scala/RocketChip.scala b/src/main/scala/RocketChip.scala index bd35cdda..d24f9881 100644 --- a/src/main/scala/RocketChip.scala +++ b/src/main/scala/RocketChip.scala @@ -9,19 +9,20 @@ import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.HashMap object DummyTopLevelConstants { - val NTILES = 2 + val NTILES = 1 val NBANKS = 1 val HTIF_WIDTH = 16 val ENABLE_SHARING = true val ENABLE_CLEAN_EXCLUSIVE = true val HAS_VEC = true + val HAS_FPU = true val NL2_REL_XACTS = 1 val NL2_ACQ_XACTS = 8 val NMSHRS = 2 } object ReferenceChipBackend { - val initMap = new HashMap[Component, Bool]() + val initMap = new HashMap[Module, Bool]() } class ReferenceChipBackend extends VerilogBackend @@ -35,15 +36,15 @@ class ReferenceChipBackend extends VerilogBackend (if (idx == 0) res.toString else "") + super.emitPortDef(m, idx) } - def addMemPin(c: Component) = { - for (node <- Component.nodes) { + def addMemPin(c: Module) = { + for (node <- Module.nodes) { if (node.isInstanceOf[Mem[ _ ]] && node.component != null && node.asInstanceOf[Mem[_]].seqRead) { connectMemPin(c, node.component, node) } } } - def connectMemPin(topC: Component, c: Component, p: Node): Unit = { + def connectMemPin(topC: Module, c: Module, p: Node): Unit = { var isNewPin = false val compInitPin = if (initMap.contains(c)) { @@ -64,7 +65,7 @@ class ReferenceChipBackend extends VerilogBackend } } - def addTopLevelPin(c: Component) = { + def addTopLevelPin(c: Module) = { val init = Bool(INPUT) init.setName("init") init.component = c @@ -72,37 +73,37 @@ class ReferenceChipBackend extends VerilogBackend initMap += (c -> init) } - transforms += ((c: Component) => addTopLevelPin(c)) - transforms += ((c: Component) => addMemPin(c)) + transforms += ((c: Module) => addTopLevelPin(c)) + transforms += ((c: Module) => addMemPin(c)) } -class OuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Component +class OuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Module { implicit val (tl, ln, l2) = (conf.tl, conf.tl.ln, conf.l2) val io = new Bundle { - val tiles = Vec(conf.nTiles) { new TileLinkIO }.flip + val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip val htif = (new TileLinkIO).flip - val incoherent = 
Vec(ln.nClients) { Bool() }.asInput + val incoherent = Vec.fill(ln.nClients){Bool()}.asInput val mem = new ioMem val mem_backup = new ioMemSerialized(htif_width) val mem_backup_en = Bool(INPUT) } - val llc_tag_leaf = Mem(512, seqRead = true) { Bits(width = 152) } - val llc_data_leaf = Mem(4096, seqRead = true) { Bits(width = 64) } - //val llc = new DRAMSideLLC(512, 8, 4, llc_tag_leaf, llc_data_leaf) - val llc = new DRAMSideLLCNull(8, REFILL_CYCLES) - val mem_serdes = new MemSerdes(htif_width) + val llc_tag_leaf = Mem(Bits(width = 152), 512, seqRead = true) + val llc_data_leaf = Mem(Bits(width = 64), 4096, seqRead = true) + val llc = Module(new DRAMSideLLC(512, 8, 4, llc_tag_leaf, llc_data_leaf)) + //val llc = Module(new DRAMSideLLCNull(8, REFILL_CYCLES)) + val mem_serdes = Module(new MemSerdes(htif_width)) require(clientEndpoints.length == ln.nClients) - val masterEndpoints = (0 until ln.nMasters).map(new L2CoherenceAgent(_)) - val net = new ReferenceChipCrossbarNetwork(masterEndpoints++clientEndpoints) + val masterEndpoints = (0 until ln.nMasters).map(i => Module(new L2CoherenceAgent(i))) + val net = Module(new ReferenceChipCrossbarNetwork(masterEndpoints++clientEndpoints)) net.io zip (masterEndpoints.map(_.io.client) ++ io.tiles :+ io.htif) map { case (net, end) => net <> end } masterEndpoints.map{ _.io.incoherent zip io.incoherent map { case (m, c) => m := c } } - val conv = new MemIOUncachedTileLinkIOConverter(2) + val conv = Module(new MemIOUncachedTileLinkIOConverter(2)) if(ln.nMasters > 1) { - val arb = new UncachedTileLinkIOArbiterThatAppendsArbiterId(ln.nMasters) + val arb = Module(new UncachedTileLinkIOArbiterThatAppendsArbiterId(ln.nMasters)) arb.io.in zip masterEndpoints.map(_.io.master) map { case (arb, cache) => arb <> cache } conv.io.uncached <> arb.io.out } else { @@ -113,7 +114,7 @@ class OuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAge conv.io.mem.resp <> llc.io.cpu.resp // mux between main and backup memory ports - val mem_cmdq = (new Queue(2)) { new MemReqCmd } + val mem_cmdq = Module(new Queue(new MemReqCmd, 2)) mem_cmdq.io.enq <> llc.io.mem.req_cmd mem_cmdq.io.deq.ready := Mux(io.mem_backup_en, mem_serdes.io.wide.req_cmd.ready, io.mem.req_cmd.ready) io.mem.req_cmd.valid := mem_cmdq.io.deq.valid && !io.mem_backup_en @@ -121,7 +122,7 @@ class OuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAge mem_serdes.io.wide.req_cmd.valid := mem_cmdq.io.deq.valid && io.mem_backup_en mem_serdes.io.wide.req_cmd.bits := mem_cmdq.io.deq.bits - val mem_dataq = (new Queue(REFILL_CYCLES)) { new MemData } + val mem_dataq = Module(new Queue(new MemData, REFILL_CYCLES)) mem_dataq.io.enq <> llc.io.mem.req_data mem_dataq.io.deq.ready := Mux(io.mem_backup_en, mem_serdes.io.wide.req_data.ready, io.mem.req_data.ready) io.mem.req_data.valid := mem_dataq.io.deq.valid && !io.mem_backup_en @@ -138,21 +139,21 @@ class OuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAge case class UncoreConfiguration(l2: L2CoherenceAgentConfiguration, tl: TileLinkConfiguration, nTiles: Int, nBanks: Int, bankIdLsb: Int) -class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Component +class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Module { implicit val tl = conf.tl val io = new Bundle { val debug = new DebugIO() val host = new HostIO(htif_width) val mem = new ioMem - val tiles = Vec(conf.nTiles) { new TileLinkIO }.flip - val htif = 
Vec(conf.nTiles) { new HTIFIO(conf.nTiles) }.flip - val incoherent = Vec(conf.nTiles) { Bool() }.asInput + val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip + val htif = Vec.fill(conf.nTiles){new HTIFIO(conf.nTiles)}.flip + val incoherent = Vec.fill(conf.nTiles){Bool()}.asInput val mem_backup = new ioMemSerialized(htif_width) val mem_backup_en = Bool(INPUT) } - val htif = new RocketHTIF(htif_width) - val outmemsys = new OuterMemorySystem(htif_width, tileList :+ htif) + val htif = Module(new RocketHTIF(htif_width)) + val outmemsys = Module(new OuterMemorySystem(htif_width, tileList :+ htif)) val incoherentWithHtif = (io.incoherent :+ Bool(true).asInput) outmemsys.io.incoherent := incoherentWithHtif htif.io.cpu <> io.htif @@ -160,7 +161,7 @@ class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf outmemsys.io.mem_backup_en <> io.mem_backup_en // Add networking headers and endpoint queues - def convertAddrToBank(addr: Bits): UFix = { + def convertAddrToBank(addr: Bits): UInt = { require(conf.bankIdLsb + log2Up(conf.nBanks) < MEM_ADDR_BITS, {println("Invalid bits for bank multiplexing.")}) addr(conf.bankIdLsb + log2Up(conf.nBanks) - 1, conf.bankIdLsb) } @@ -173,7 +174,7 @@ class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf val grant_ack_q = Queue(client.grant_ack) outer.grant_ack.valid := grant_ack_q.valid outer.grant_ack.bits := grant_ack_q.bits - outer.grant_ack.bits.header.src := UFix(i) + outer.grant_ack.bits.header.src := UInt(i) grant_ack_q.ready := outer.grant_ack.ready client.grant <> Queue(outer.grant, 1, pipe = true) @@ -181,7 +182,7 @@ class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf } // pad out the HTIF using a divided clock - val hio = (new SlowIO(512)) { Bits(width = htif_width+1) } + val hio = Module((new SlowIO(512)) { Bits(width = htif_width+1) }) hio.io.set_divisor.valid := htif.io.scr.wen && htif.io.scr.waddr === 63 hio.io.set_divisor.bits := htif.io.scr.wdata htif.io.scr.rdata(63) := hio.io.divisor @@ -205,7 +206,7 @@ class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf htif.io.host.in.bits := hio.io.in_fast.bits hio.io.in_fast.ready := Mux(hio.io.in_fast.bits(htif_width), Bool(true), htif.io.host.in.ready) io.host.clk := hio.io.clk_slow - io.host.clk_edge := Reg(io.host.clk && !Reg(io.host.clk)) + io.host.clk_edge := RegUpdate(io.host.clk && !RegUpdate(io.host.clk)) } class TopIO(htifWidth: Int) extends Bundle { @@ -224,14 +225,14 @@ class VLSITopIO(htifWidth: Int) extends TopIO(htifWidth) { import DummyTopLevelConstants._ -class MemDessert extends Component { +class MemDessert extends Module { val io = new MemDesserIO(HTIF_WIDTH) - val x = new MemDesser(HTIF_WIDTH) + val x = Module(new MemDesser(HTIF_WIDTH)) io.narrow <> x.io.narrow io.wide <> x.io.wide } -class Top extends Component { +class Top extends Module { val co = if(ENABLE_SHARING) { if(ENABLE_CLEAN_EXCLUSIVE) new MESICoherence else new MSICoherence @@ -249,13 +250,13 @@ class Top extends Component { val dc = DCacheConfig(128, 4, ntlb = 8, nmshr = NMSHRS, nrpq = 16, nsdq = 17, states = co.nClientStates) val rc = RocketConfiguration(tl, ic, dc, - fpu = true, vec = HAS_VEC) + fpu = HAS_FPU, vec = HAS_VEC) val io = new VLSITopIO(HTIF_WIDTH) - val resetSigs = Vec(uc.nTiles){Bool()} - val tileList = (0 until uc.nTiles).map(r => new Tile(resetSignal = resetSigs(r))(rc)) - val uncore = new Uncore(HTIF_WIDTH, tileList) + val resetSigs = Vec.fill(uc.nTiles){Bool()} + val tileList = (0 until 
uc.nTiles).map(r => Module(new Tile(resetSignal = resetSigs(r))(rc))) + val uncore = Module(new Uncore(HTIF_WIDTH, tileList)) var error_mode = Bool(false) for (i <- 0 until uc.nTiles) { @@ -267,12 +268,12 @@ class Top extends Component { val tile = tileList(i) tile.io.tilelink <> tl il := hl.reset - tile.io.host.reset := Reg(Reg(hl.reset)) + tile.io.host.reset := RegUpdate(RegUpdate(hl.reset)) tile.io.host.pcr_req <> Queue(hl.pcr_req) hl.pcr_rep <> Queue(tile.io.host.pcr_rep) hl.ipi_req <> Queue(tile.io.host.ipi_req) tile.io.host.ipi_rep <> Queue(hl.ipi_rep) - error_mode = error_mode || Reg(tile.io.host.debug.error_mode) + error_mode = error_mode || RegUpdate(tile.io.host.debug.error_mode) } io.host <> uncore.io.host diff --git a/src/main/scala/fpga.scala b/src/main/scala/fpga.scala index 7a471fc5..4a5a45ef 100644 --- a/src/main/scala/fpga.scala +++ b/src/main/scala/fpga.scala @@ -5,26 +5,26 @@ import Node._ import uncore._ import rocket._ -class FPGAOuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Component +class FPGAOuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Module { implicit val (tl, ln, l2) = (conf.tl, conf.tl.ln, conf.l2) val io = new Bundle { - val tiles = Vec(conf.nTiles) { new TileLinkIO }.flip + val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip val htif = (new TileLinkIO).flip - val incoherent = Vec(ln.nClients) { Bool() }.asInput + val incoherent = Vec.fill(ln.nClients){Bool()}.asInput val mem = new ioMem } require(clientEndpoints.length == ln.nClients) - val masterEndpoints = (0 until ln.nMasters).map(new L2CoherenceAgent(_)) + val masterEndpoints = (0 until ln.nMasters).map(i => Module(new L2CoherenceAgent(i))) - val net = new ReferenceChipCrossbarNetwork(masterEndpoints++clientEndpoints) + val net = Module(new ReferenceChipCrossbarNetwork(masterEndpoints++clientEndpoints)) net.io zip (masterEndpoints.map(_.io.client) ++ io.tiles :+ io.htif) map { case (net, end) => net <> end } masterEndpoints.map{ _.io.incoherent zip io.incoherent map { case (m, c) => m := c } } - val conv = new MemIOUncachedTileLinkIOConverter(2) + val conv = Module(new MemIOUncachedTileLinkIOConverter(2)) if(ln.nMasters > 1) { - val arb = new UncachedTileLinkIOArbiterThatAppendsArbiterId(ln.nMasters) + val arb = Module(new UncachedTileLinkIOArbiterThatAppendsArbiterId(ln.nMasters)) arb.io.in zip masterEndpoints.map(_.io.master) map { case (arb, cache) => arb <> cache } conv.io.uncached <> arb.io.out } else { @@ -35,24 +35,24 @@ class FPGAOuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenc conv.io.mem.resp <> Queue(io.mem.resp) } -class FPGAUncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Component +class FPGAUncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Module { implicit val (tl, ln) = (conf.tl, conf.tl.ln) val io = new Bundle { val debug = new DebugIO() val host = new HostIO(htif_width) val mem = new ioMem - val tiles = Vec(conf.nTiles) { new TileLinkIO }.flip - val htif = Vec(conf.nTiles) { new HTIFIO(conf.nTiles) }.flip - val incoherent = Vec(conf.nTiles) { Bool() }.asInput + val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip + val htif = Vec.fill(conf.nTiles){new HTIFIO(conf.nTiles)}.flip + val incoherent = Vec.fill(conf.nTiles){Bool()}.asInput } - val htif = new RocketHTIF(htif_width) - val outmemsys = new 
FPGAOuterMemorySystem(htif_width, tileList :+ htif) + val htif = Module(new RocketHTIF(htif_width)) + val outmemsys = Module(new FPGAOuterMemorySystem(htif_width, tileList :+ htif)) htif.io.cpu <> io.htif outmemsys.io.mem <> io.mem // Add networking headers and endpoint queues - def convertAddrToBank(addr: Bits): UFix = { + def convertAddrToBank(addr: Bits): UInt = { require(conf.bankIdLsb + log2Up(conf.nBanks) < MEM_ADDR_BITS, {println("Invalid bits for bank multiplexing.")}) addr(conf.bankIdLsb + log2Up(conf.nBanks) - 1, conf.bankIdLsb) } @@ -66,7 +66,7 @@ class FPGAUncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit val grant_ack_q = Queue(client.grant_ack) outer.grant_ack.valid := grant_ack_q.valid outer.grant_ack.bits := grant_ack_q.bits - outer.grant_ack.bits.header.src := UFix(i) + outer.grant_ack.bits.header.src := UInt(i) grant_ack_q.ready := outer.grant_ack.ready client.grant <> Queue(outer.grant, 1, pipe = true) @@ -79,7 +79,7 @@ class FPGAUncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit class FPGATopIO(htifWidth: Int) extends TopIO(htifWidth) -class FPGATop extends Component { +class FPGATop extends Module { val htif_width = 16 val co = new MESICoherence val ntiles = 1 @@ -98,9 +98,9 @@ class FPGATop extends Component { val io = new FPGATopIO(htif_width) - val resetSigs = Vec(uc.nTiles){Bool()} - val tileList = (0 until uc.nTiles).map(r => new Tile(resetSignal = resetSigs(r))(rc)) - val uncore = new FPGAUncore(htif_width, tileList) + val resetSigs = Vec.fill(uc.nTiles){Bool()} + val tileList = (0 until uc.nTiles).map(r => Module(new Tile(resetSignal = resetSigs(r))(rc))) + val uncore = Module(new FPGAUncore(htif_width, tileList)) io.debug.error_mode := Bool(false) for (i <- 0 until uc.nTiles) { @@ -113,7 +113,7 @@ class FPGATop extends Component { tile.io.tilelink <> tl il := hl.reset - tile.io.host.reset := Reg(Reg(hl.reset)) + tile.io.host.reset := RegUpdate(RegUpdate(hl.reset)) tile.io.host.pcr_req <> Queue(hl.pcr_req) hl.pcr_rep <> Queue(tile.io.host.pcr_rep) hl.ipi_req <> Queue(tile.io.host.ipi_req) @@ -126,12 +126,12 @@ class FPGATop extends Component { io.mem <> uncore.io.mem } -abstract class AXISlave extends Component { +abstract class AXISlave extends Module { val aw = 5 val dw = 32 val io = new Bundle { - val in = new FIFOIO()(Bits(width = dw)).flip - val out = new FIFOIO()(Bits(width = dw)) + val in = Decoupled(Bits(width = dw)).flip + val out = Decoupled(Bits(width = dw)) val addr = Bits(INPUT, aw) } } @@ -144,11 +144,11 @@ class Slave extends AXISlave val htifw = top.io.host.in.bits.getWidth val n = 4 // htif, mem req/read data, mem write data, error mode - def wen(i: Int) = io.in.valid && io.addr(log2Up(n)-1,0) === UFix(i) - def ren(i: Int) = io.out.ready && io.addr(log2Up(n)-1,0) === UFix(i) - val rdata = Vec(n){Bits(width = dw)} - val rvalid = Vec(n){Bool()} - val wready = Vec(n){Bool()} + def wen(i: Int) = io.in.valid && io.addr(log2Up(n)-1,0) === UInt(i) + def ren(i: Int) = io.out.ready && io.addr(log2Up(n)-1,0) === UInt(i) + val rdata = Vec.fill(n){Bits(width = dw)} + val rvalid = Vec.fill(n){Bool()} + val wready = Vec.fill(n){Bool()} io.in.ready := wready(io.addr) io.out.valid := rvalid(io.addr) @@ -167,7 +167,7 @@ class Slave extends AXISlave // read cr1 -> mem.req_cmd (nonblocking) // the memory system is FIFO from hereon out, so just remember the tags here - val tagq = new Queue(4)(top.io.mem.req_cmd.bits.tag.clone) + val tagq = Module(new Queue(top.io.mem.req_cmd.bits.tag, 4)) tagq.io.enq.bits := 
top.io.mem.req_cmd.bits.tag tagq.io.enq.valid := ren(1) && top.io.mem.req_cmd.valid && !top.io.mem.req_cmd.bits.rw top.io.mem.req_cmd.ready := ren(1) @@ -176,29 +176,29 @@ class Slave extends AXISlave require(dw >= top.io.mem.req_cmd.bits.addr.getWidth + 1 + 1) // write cr1 -> mem.resp (nonblocking) - val in_count = Reg(resetVal = UFix(0, log2Up(memw/dw))) - val rf_count = Reg(resetVal = UFix(0, log2Up(REFILL_CYCLES))) + val in_count = RegReset(UInt(0, log2Up(memw/dw))) + val rf_count = RegReset(UInt(0, log2Up(REFILL_CYCLES))) require(memw % dw == 0 && isPow2(memw/dw)) - val in_reg = Reg{top.io.mem.resp.bits.data.clone} + val in_reg = Reg(top.io.mem.resp.bits.data) top.io.mem.resp.bits.data := Cat(io.in.bits, in_reg(in_reg.getWidth-1,dw)) top.io.mem.resp.bits.tag := tagq.io.deq.bits top.io.mem.resp.valid := wen(1) && in_count.andR tagq.io.deq.ready := top.io.mem.resp.fire() && rf_count.andR wready(1) := top.io.mem.resp.ready when (wen(1) && wready(1)) { - in_count := in_count + UFix(1) + in_count := in_count + UInt(1) in_reg := top.io.mem.resp.bits.data } when (top.io.mem.resp.fire()) { - rf_count := rf_count + UFix(1) + rf_count := rf_count + UInt(1) } // read cr2 -> mem.req_data (blocking) - val out_count = Reg(resetVal = UFix(0, log2Up(memw/dw))) + val out_count = RegReset(UInt(0, log2Up(memw/dw))) top.io.mem.req_data.ready := ren(2) && out_count.andR - rdata(2) := top.io.mem.req_data.bits.data >> (out_count * UFix(dw)) + rdata(2) := top.io.mem.req_data.bits.data >> (out_count * UInt(dw)) rvalid(2) := top.io.mem.req_data.valid - when (ren(2) && rvalid(2)) { out_count := out_count + UFix(1) } + when (ren(2) && rvalid(2)) { out_count := out_count + UInt(1) } // read cr3 -> error mode (nonblocking) rdata(3) := Cat(top.io.mem.req_cmd.valid, tagq.io.enq.ready, top.io.debug.error_mode) diff --git a/src/main/scala/network.scala b/src/main/scala/network.scala index affbfb2e..c60b727b 100644 --- a/src/main/scala/network.scala +++ b/src/main/scala/network.scala @@ -5,19 +5,19 @@ import uncore._ import scala.reflect._ object TileLinkHeaderAppender { - def apply[T <: SourcedMessage with HasPhysicalAddress, U <: SourcedMessage with HasTileLinkData](in: ClientSourcedDataIO[LogicalNetworkIO[T],LogicalNetworkIO[U]], clientId: Int, nBanks: Int, addrConvert: Bits => UFix)(implicit conf: TileLinkConfiguration) = { - val shim = new TileLinkHeaderAppender(clientId, nBanks, addrConvert)(in.meta.bits.payload.clone, in.data.bits.payload.clone) + def apply[T <: SourcedMessage with HasPhysicalAddress, U <: SourcedMessage with HasTileLinkData](in: ClientSourcedDataIO[LogicalNetworkIO[T],LogicalNetworkIO[U]], clientId: Int, nBanks: Int, addrConvert: Bits => UInt)(implicit conf: TileLinkConfiguration) = { + val shim = Module(new TileLinkHeaderAppender(clientId, nBanks, addrConvert)(in.meta.bits.payload.clone, in.data.bits.payload.clone)) shim.io.in <> in shim.io.out } - def apply[T <: SourcedMessage with HasPhysicalAddress](in: ClientSourcedFIFOIO[LogicalNetworkIO[T]], clientId: Int, nBanks: Int, addrConvert: Bits => UFix)(implicit conf: TileLinkConfiguration) = { - val shim = new TileLinkHeaderAppender(clientId, nBanks, addrConvert)(in.bits.payload.clone, new AcquireData) + def apply[T <: SourcedMessage with HasPhysicalAddress](in: ClientSourcedFIFOIO[LogicalNetworkIO[T]], clientId: Int, nBanks: Int, addrConvert: Bits => UInt)(implicit conf: TileLinkConfiguration) = { + val shim = Module(new TileLinkHeaderAppender(clientId, nBanks, addrConvert)(in.bits.payload.clone, new AcquireData)) shim.io.in.meta <> in 
shim.io.out.meta } } -class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: SourcedMessage with HasTileLinkData](clientId: Int, nBanks: Int, addrConvert: Bits => UFix)(metadata: => T, data: => U)(implicit conf: TileLinkConfiguration) extends Component { +class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: SourcedMessage with HasTileLinkData](clientId: Int, nBanks: Int, addrConvert: Bits => UInt)(metadata: => T, data: => U)(implicit conf: TileLinkConfiguration) extends Module { implicit val ln = conf.ln val io = new Bundle { val in = new ClientSourcedDataIO()((new LogicalNetworkIO){ metadata }, (new LogicalNetworkIO){ data }).flip @@ -28,26 +28,26 @@ class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: S val data_q = Queue(io.in.data) if(nBanks == 1) { io.out.meta.bits.payload := meta_q.bits.payload - io.out.meta.bits.header.src := UFix(clientId) - io.out.meta.bits.header.dst := UFix(0) + io.out.meta.bits.header.src := UInt(clientId) + io.out.meta.bits.header.dst := UInt(0) io.out.meta.valid := meta_q.valid meta_q.ready := io.out.meta.ready io.out.data.bits.payload := data_q.bits.payload - io.out.data.bits.header.src := UFix(clientId) - io.out.data.bits.header.dst := UFix(0) + io.out.data.bits.header.src := UInt(clientId) + io.out.data.bits.header.dst := UInt(0) io.out.data.valid := data_q.valid data_q.ready := io.out.data.ready } else { val meta_has_data = conf.co.messageHasData(meta_q.bits.payload) - val addr_q = (new Queue(2, pipe = true, flow = true)){io.in.meta.bits.payload.addr.clone} - val data_cnt = Reg(resetVal = UFix(0, width = log2Up(REFILL_CYCLES))) - val data_cnt_up = data_cnt + UFix(1) + val addr_q = Module(new Queue(io.in.meta.bits.payload.addr.clone, 2, pipe = true, flow = true)) + val data_cnt = RegReset(UInt(0, width = log2Up(REFILL_CYCLES))) + val data_cnt_up = data_cnt + UInt(1) io.out.meta.bits.payload := meta_q.bits.payload - io.out.meta.bits.header.src := UFix(clientId) + io.out.meta.bits.header.src := UInt(clientId) io.out.meta.bits.header.dst := addrConvert(meta_q.bits.payload.addr) io.out.data.bits.payload := meta_q.bits.payload - io.out.data.bits.header.src := UFix(clientId) + io.out.data.bits.header.src := UInt(clientId) io.out.data.bits.header.dst := addrConvert(addr_q.io.deq.bits) addr_q.io.enq.bits := meta_q.bits.payload.addr @@ -60,7 +60,7 @@ class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: S when(data_q.valid && data_q.ready) { data_cnt := data_cnt_up - when(data_cnt_up === UFix(0)) { + when(data_cnt_up === UInt(0)) { addr_q.io.deq.ready := Bool(true) } } @@ -68,16 +68,16 @@ class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: S } //Adapter betweewn an UncachedTileLinkIO and a mem controller MemIO -class MemIOUncachedTileLinkIOConverter(qDepth: Int)(implicit conf: TileLinkConfiguration) extends Component { +class MemIOUncachedTileLinkIOConverter(qDepth: Int)(implicit conf: TileLinkConfiguration) extends Module { val io = new Bundle { val uncached = new UncachedTileLinkIO().flip val mem = new ioMem } - val mem_cmd_q = (new Queue(qDepth)){new MemReqCmd} - val mem_data_q = (new Queue(qDepth)){new MemData} + val mem_cmd_q = Module(new Queue(new MemReqCmd, qDepth)) + val mem_data_q = Module(new Queue(new MemData, qDepth)) mem_cmd_q.io.enq.valid := io.uncached.acquire.meta.valid io.uncached.acquire.meta.ready := mem_cmd_q.io.enq.ready - mem_cmd_q.io.enq.bits.rw := conf.co.needsOuterWrite(io.uncached.acquire.meta.bits.payload.a_type, 
UFix(0)) + mem_cmd_q.io.enq.bits.rw := conf.co.needsOuterWrite(io.uncached.acquire.meta.bits.payload.a_type, UInt(0)) mem_cmd_q.io.enq.bits.tag := io.uncached.acquire.meta.bits.payload.client_xact_id mem_cmd_q.io.enq.bits.addr := io.uncached.acquire.meta.bits.payload.addr mem_data_q.io.enq.valid := io.uncached.acquire.data.valid @@ -87,20 +87,20 @@ class MemIOUncachedTileLinkIOConverter(qDepth: Int)(implicit conf: TileLinkConfi io.mem.resp.ready := io.uncached.grant.ready io.uncached.grant.bits.payload.data := io.mem.resp.bits.data io.uncached.grant.bits.payload.client_xact_id := io.mem.resp.bits.tag - io.uncached.grant.bits.payload.master_xact_id := UFix(0) // DNC - io.uncached.grant.bits.payload.g_type := UFix(0) // DNC + io.uncached.grant.bits.payload.master_xact_id := UInt(0) // DNC + io.uncached.grant.bits.payload.g_type := UInt(0) // DNC io.mem.req_cmd <> mem_cmd_q.io.deq io.mem.req_data <> mem_data_q.io.deq } class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit conf: UncoreConfiguration) extends LogicalNetwork[TileLinkIO](endpoints)(conf.tl.ln) { implicit val (tl, ln, co) = (conf.tl, conf.tl.ln, conf.tl.co) - val io = Vec(endpoints.map(_ match { case t:ClientCoherenceAgent => {(new TileLinkIO).flip}; case h:MasterCoherenceAgent => {new TileLinkIO}})){ new TileLinkIO } + val io = Vec(endpoints.map(_ match { case t:ClientCoherenceAgent => {(new TileLinkIO).flip}; case h:MasterCoherenceAgent => {new TileLinkIO}})) implicit val pconf = new PhysicalNetworkConfiguration(ln.nEndpoints, ln.idBits) // Same config for all networks // Aliases for the various network IO bundle types - type FBCIO[T <: Data] = FIFOIO[PhysicalNetworkIO[T]] - type FLNIO[T <: Data] = FIFOIO[LogicalNetworkIO[T]] + type FBCIO[T <: Data] = DecoupledIO[PhysicalNetworkIO[T]] + type FLNIO[T <: Data] = DecoupledIO[LogicalNetworkIO[T]] type PBCIO[M <: Data, D <: Data] = PairedDataIO[PhysicalNetworkIO[M], PhysicalNetworkIO[D]] type PLNIO[M <: Data, D <: Data] = PairedDataIO[LogicalNetworkIO[M], LogicalNetworkIO[D]] type FromCrossbar[T <: Data] = FBCIO[T] => FLNIO[T] @@ -109,7 +109,7 @@ class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit // Shims for converting between logical network IOs and physical network IOs //TODO: Could be less verbose if you could override subbundles after a <> def DefaultFromCrossbarShim[T <: Data](in: FBCIO[T]): FLNIO[T] = { - val out = new FIFOIO()(new LogicalNetworkIO()(in.bits.payload.clone)).asDirectionless + val out = Decoupled(new LogicalNetworkIO()(in.bits.payload.clone)).asDirectionless out.bits.header := in.bits.header out.bits.payload := in.bits.payload out.valid := in.valid @@ -118,16 +118,16 @@ class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit } def CrossbarToMasterShim[T <: Data](in: FBCIO[T]): FLNIO[T] = { val out = DefaultFromCrossbarShim(in) - out.bits.header.src := in.bits.header.src - UFix(ln.nMasters) + out.bits.header.src := in.bits.header.src - UInt(ln.nMasters) out } def CrossbarToClientShim[T <: Data](in: FBCIO[T]): FLNIO[T] = { val out = DefaultFromCrossbarShim(in) - out.bits.header.dst := in.bits.header.dst - UFix(ln.nMasters) + out.bits.header.dst := in.bits.header.dst - UInt(ln.nMasters) out } def DefaultToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = { - val out = new FIFOIO()(new PhysicalNetworkIO()(in.bits.payload.clone)).asDirectionless + val out = Decoupled(new PhysicalNetworkIO()(in.bits.payload.clone)).asDirectionless out.bits.header := in.bits.header out.bits.payload := 
in.bits.payload out.valid := in.valid @@ -136,12 +136,12 @@ class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit } def MasterToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = { val out = DefaultToCrossbarShim(in) - out.bits.header.dst := in.bits.header.dst + UFix(ln.nMasters) + out.bits.header.dst := in.bits.header.dst + UInt(ln.nMasters) out } def ClientToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = { val out = DefaultToCrossbarShim(in) - out.bits.header.src := in.bits.header.src + UFix(ln.nMasters) + out.bits.header.src := in.bits.header.src + UInt(ln.nMasters) out } @@ -203,20 +203,20 @@ class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit // Actually instantiate the particular networks required for TileLink def acqHasData(acq: PhysicalNetworkIO[Acquire]) = co.messageHasData(acq.payload) - val acq_net = new PairedCrossbar(REFILL_CYCLES, acqHasData _)(new Acquire, new AcquireData) + val acq_net = Module(new PairedCrossbar(REFILL_CYCLES, acqHasData _)(new Acquire, new AcquireData)) endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doClientSourcedPairedHookup(end, acq_net.io.in(id), acq_net.io.out(id), io.acquire) } def relHasData(rel: PhysicalNetworkIO[Release]) = co.messageHasData(rel.payload) - val rel_net = new PairedCrossbar(REFILL_CYCLES, relHasData _)(new Release, new ReleaseData) + val rel_net = Module(new PairedCrossbar(REFILL_CYCLES, relHasData _)(new Release, new ReleaseData)) endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doClientSourcedPairedHookup(end, rel_net.io.in(id), rel_net.io.out(id), io.release) } - val probe_net = new BasicCrossbar()(new Probe) + val probe_net = Module(new BasicCrossbar()(new Probe)) endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doMasterSourcedFIFOHookup(end, probe_net.io.in(id), probe_net.io.out(id), io.probe) } - val grant_net = new BasicCrossbar()(new Grant) + val grant_net = Module(new BasicCrossbar()(new Grant)) endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doMasterSourcedFIFOHookup(end, grant_net.io.in(id), grant_net.io.out(id), io.grant) } - val ack_net = new BasicCrossbar()(new GrantAck) + val ack_net = Module(new BasicCrossbar()(new GrantAck)) endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doClientSourcedFIFOHookup(end, ack_net.io.in(id), ack_net.io.out(id), io.grant_ack) } val physicalNetworks = List(acq_net, rel_net, probe_net, grant_net, ack_net) diff --git a/uncore b/uncore index 0f675e35..295a4a5d 160000 --- a/uncore +++ b/uncore @@ -1 +1 @@ -Subproject commit 0f675e35e7503419482b12fb265ef2709a91403a +Subproject commit 295a4a5d69987d58de5edea6fad4e750100cffa3
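
Note on the API migration: every code change above applies the same set of Chisel renames used consistently throughout this patch (Component becomes Module, UFix becomes UInt, FIFOIO becomes Decoupled, Vec(n){gen} becomes Vec.fill(n){gen}, Reg(resetVal = ...) becomes RegReset, Reg(next) becomes RegUpdate, Mem(depth, seqRead){gen} becomes Mem(gen, depth, seqRead), and every submodule instantiation is wrapped in Module(...)). The sketch below is illustrative only and is not part of the patch; CounterExample and its signal names are hypothetical, chosen just to put the new idioms next to the old ones.

// Minimal sketch of the post-upgrade idioms, with the pre-upgrade forms in comments.
// Hypothetical module, assuming the same Chisel version this patch targets.
import Chisel._

class CounterExample(w: Int) extends Module {              // was: extends Component
  val io = new Bundle {
    val in    = Decoupled(UInt(width = w)).flip            // was: new FIFOIO()(UFix(width = w)).flip
    val out   = Decoupled(UInt(width = w))
    val flags = Vec.fill(4){Bool()}.asInput                // was: Vec(4){Bool()}.asInput
  }
  val q     = Module(new Queue(UInt(width = w), 2))        // was: (new Queue(2)){UFix(width = w)}
  val count = RegReset(UInt(0, width = w))                 // was: Reg(resetVal = UFix(0, width = w))
  val last  = RegUpdate(io.in.bits)                        // was: Reg(io.in.bits)
  when (io.in.fire()) { count := count + UInt(1) }         // UInt literals replace UFix literals
  q.io.enq <> io.in
  io.out <> q.io.deq
}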