initial attempt at upgrade

This commit is contained in:
parent 199e76fc77
commit 11e131af47

chisel
@@ -1 +1 @@
-Subproject commit 11cb15ba9a0f7dedf43a34e0d64708facd0ea619
+Subproject commit 97ac878580bdd9bf9d4cf05d33d64689dfc6627a

@@ -1 +1 @@
-Subproject commit fc09bea89982c4d7d33b6201896aa3b40caba186
+Subproject commit 76ac1cb932949dc33a11dd85f9bf9cbc3a639eb4

@@ -1 +1 @@
-Subproject commit ee815dd3983f3b1a67fd3d810a513e23bdef97e4
+Subproject commit 2d3caa3e269f238b1a8ccaa28f0c348dc12acf61
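The source diffs below are one sweep of the Chisel 1 to Chisel 2 API migration: Component becomes Module, child instances get wrapped in the Module(...) factory, UFix becomes UInt, FIFOIO becomes DecoupledIO (built with Decoupled(...)), Vec(n){gen} becomes Vec.fill(n){gen}, and the Reg constructors are renamed. A minimal sketch of the two most pervasive changes, assuming the Chisel snapshot pinned by the submodule bump above (Passthrough and Wrapper are illustrative names, not modules from this commit):

    import Chisel._

    // Chisel 2: extend Module instead of Component.
    class Passthrough extends Module {
      val io = new Bundle {
        val in  = UInt(INPUT, 16)   // was UFix(INPUT, 16) in Chisel 1
        val out = UInt(OUTPUT, 16)
      }
      io.out := io.in
    }

    class Wrapper extends Module {
      val io = new Bundle {
        val in  = UInt(INPUT, 16)
        val out = UInt(OUTPUT, 16)
      }
      // Chisel 2: child instantiation goes through the Module(...) factory,
      // which is how elaboration now tracks the module hierarchy.
      val p = Module(new Passthrough)   // Chisel 1: val p = new Passthrough
      p.io.in := io.in
      io.out := p.io.out
    }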
@@ -9,19 +9,20 @@ import scala.collection.mutable.ArrayBuffer
 import scala.collection.mutable.HashMap

 object DummyTopLevelConstants {
-  val NTILES = 2
+  val NTILES = 1
   val NBANKS = 1
   val HTIF_WIDTH = 16
   val ENABLE_SHARING = true
   val ENABLE_CLEAN_EXCLUSIVE = true
   val HAS_VEC = true
+  val HAS_FPU = true
   val NL2_REL_XACTS = 1
   val NL2_ACQ_XACTS = 8
   val NMSHRS = 2
 }

 object ReferenceChipBackend {
-  val initMap = new HashMap[Component, Bool]()
+  val initMap = new HashMap[Module, Bool]()
 }

 class ReferenceChipBackend extends VerilogBackend
@@ -35,15 +36,15 @@ class ReferenceChipBackend extends VerilogBackend
     (if (idx == 0) res.toString else "") + super.emitPortDef(m, idx)
   }

-  def addMemPin(c: Component) = {
-    for (node <- Component.nodes) {
+  def addMemPin(c: Module) = {
+    for (node <- Module.nodes) {
       if (node.isInstanceOf[Mem[ _ ]] && node.component != null && node.asInstanceOf[Mem[_]].seqRead) {
         connectMemPin(c, node.component, node)
       }
     }
   }

-  def connectMemPin(topC: Component, c: Component, p: Node): Unit = {
+  def connectMemPin(topC: Module, c: Module, p: Node): Unit = {
     var isNewPin = false
     val compInitPin =
       if (initMap.contains(c)) {
@@ -64,7 +65,7 @@ class ReferenceChipBackend extends VerilogBackend
     }
   }

-  def addTopLevelPin(c: Component) = {
+  def addTopLevelPin(c: Module) = {
     val init = Bool(INPUT)
     init.setName("init")
     init.component = c
@@ -72,37 +73,37 @@
     initMap += (c -> init)
   }

-  transforms += ((c: Component) => addTopLevelPin(c))
-  transforms += ((c: Component) => addMemPin(c))
+  transforms += ((c: Module) => addTopLevelPin(c))
+  transforms += ((c: Module) => addMemPin(c))
 }

-class OuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Component
+class OuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Module
 {
   implicit val (tl, ln, l2) = (conf.tl, conf.tl.ln, conf.l2)
   val io = new Bundle {
-    val tiles = Vec(conf.nTiles) { new TileLinkIO }.flip
+    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
     val htif = (new TileLinkIO).flip
-    val incoherent = Vec(ln.nClients) { Bool() }.asInput
+    val incoherent = Vec.fill(ln.nClients){Bool()}.asInput
     val mem = new ioMem
     val mem_backup = new ioMemSerialized(htif_width)
     val mem_backup_en = Bool(INPUT)
   }

-  val llc_tag_leaf = Mem(512, seqRead = true) { Bits(width = 152) }
-  val llc_data_leaf = Mem(4096, seqRead = true) { Bits(width = 64) }
-  //val llc = new DRAMSideLLC(512, 8, 4, llc_tag_leaf, llc_data_leaf)
-  val llc = new DRAMSideLLCNull(8, REFILL_CYCLES)
-  val mem_serdes = new MemSerdes(htif_width)
+  val llc_tag_leaf = Mem(Bits(width = 152), 512, seqRead = true)
+  val llc_data_leaf = Mem(Bits(width = 64), 4096, seqRead = true)
+  val llc = Module(new DRAMSideLLC(512, 8, 4, llc_tag_leaf, llc_data_leaf))
+  //val llc = Module(new DRAMSideLLCNull(8, REFILL_CYCLES))
+  val mem_serdes = Module(new MemSerdes(htif_width))

   require(clientEndpoints.length == ln.nClients)
-  val masterEndpoints = (0 until ln.nMasters).map(new L2CoherenceAgent(_))
-  val net = new ReferenceChipCrossbarNetwork(masterEndpoints++clientEndpoints)
+  val masterEndpoints = (0 until ln.nMasters).map(i => Module(new L2CoherenceAgent(i)))
+  val net = Module(new ReferenceChipCrossbarNetwork(masterEndpoints++clientEndpoints))
   net.io zip (masterEndpoints.map(_.io.client) ++ io.tiles :+ io.htif) map { case (net, end) => net <> end }
   masterEndpoints.map{ _.io.incoherent zip io.incoherent map { case (m, c) => m := c } }

-  val conv = new MemIOUncachedTileLinkIOConverter(2)
+  val conv = Module(new MemIOUncachedTileLinkIOConverter(2))
   if(ln.nMasters > 1) {
-    val arb = new UncachedTileLinkIOArbiterThatAppendsArbiterId(ln.nMasters)
+    val arb = Module(new UncachedTileLinkIOArbiterThatAppendsArbiterId(ln.nMasters))
     arb.io.in zip masterEndpoints.map(_.io.master) map { case (arb, cache) => arb <> cache }
     conv.io.uncached <> arb.io.out
   } else {
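The llc_tag_leaf and llc_data_leaf lines above show the Chisel 2 Mem signature change: the element generator moves from a trailing curried block to the first positional argument. A minimal sketch of the new form together with the seqRead read-address idiom (TagMem and its ports are illustrative, not from this commit):

    // Chisel 1: val tags = Mem(512, seqRead = true) { Bits(width = 152) }
    class TagMem extends Module {
      val io = new Bundle {
        val addr  = UInt(INPUT, 9)
        val wen   = Bool(INPUT)
        val wdata = Bits(INPUT, 152)
        val rdata = Bits(OUTPUT, 152)
      }
      val tags = Mem(Bits(width = 152), 512, seqRead = true) // element type first, then depth
      val raddr = Reg(UInt(width = 9))                       // registered read address
      when (io.wen) { tags(io.addr) := io.wdata } .otherwise { raddr := io.addr }
      io.rdata := tags(raddr)
    }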
@@ -113,7 +114,7 @@ class OuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAge
   conv.io.mem.resp <> llc.io.cpu.resp

   // mux between main and backup memory ports
-  val mem_cmdq = (new Queue(2)) { new MemReqCmd }
+  val mem_cmdq = Module(new Queue(new MemReqCmd, 2))
   mem_cmdq.io.enq <> llc.io.mem.req_cmd
   mem_cmdq.io.deq.ready := Mux(io.mem_backup_en, mem_serdes.io.wide.req_cmd.ready, io.mem.req_cmd.ready)
   io.mem.req_cmd.valid := mem_cmdq.io.deq.valid && !io.mem_backup_en
@@ -121,7 +122,7 @@
   mem_serdes.io.wide.req_cmd.valid := mem_cmdq.io.deq.valid && io.mem_backup_en
   mem_serdes.io.wide.req_cmd.bits := mem_cmdq.io.deq.bits

-  val mem_dataq = (new Queue(REFILL_CYCLES)) { new MemData }
+  val mem_dataq = Module(new Queue(new MemData, REFILL_CYCLES))
   mem_dataq.io.enq <> llc.io.mem.req_data
   mem_dataq.io.deq.ready := Mux(io.mem_backup_en, mem_serdes.io.wide.req_data.ready, io.mem.req_data.ready)
   io.mem.req_data.valid := mem_dataq.io.deq.valid && !io.mem_backup_en
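The mem_cmdq and mem_dataq lines above combine two of the migration rules: Queue now takes the entry generator as its first constructor argument instead of a trailing curried block, and the instance must be wrapped in Module(...). A sketch under the same assumptions (CmdBuffer is illustrative; MemReqCmd is the uncore request-command bundle used above):

    // Chisel 1: val q = (new Queue(2)) { new MemReqCmd }
    class CmdBuffer extends Module {
      val io = new Bundle {
        val in  = Decoupled(new MemReqCmd).flip
        val out = Decoupled(new MemReqCmd)
      }
      val q = Module(new Queue(new MemReqCmd, 2)) // entry generator first, then depth
      q.io.enq <> io.in
      io.out <> q.io.deq
    }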
@@ -138,21 +139,21 @@

 case class UncoreConfiguration(l2: L2CoherenceAgentConfiguration, tl: TileLinkConfiguration, nTiles: Int, nBanks: Int, bankIdLsb: Int)

-class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Component
+class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Module
 {
   implicit val tl = conf.tl
   val io = new Bundle {
     val debug = new DebugIO()
     val host = new HostIO(htif_width)
     val mem = new ioMem
-    val tiles = Vec(conf.nTiles) { new TileLinkIO }.flip
-    val htif = Vec(conf.nTiles) { new HTIFIO(conf.nTiles) }.flip
-    val incoherent = Vec(conf.nTiles) { Bool() }.asInput
+    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
+    val htif = Vec.fill(conf.nTiles){new HTIFIO(conf.nTiles)}.flip
+    val incoherent = Vec.fill(conf.nTiles){Bool()}.asInput
     val mem_backup = new ioMemSerialized(htif_width)
     val mem_backup_en = Bool(INPUT)
   }
-  val htif = new RocketHTIF(htif_width)
-  val outmemsys = new OuterMemorySystem(htif_width, tileList :+ htif)
+  val htif = Module(new RocketHTIF(htif_width))
+  val outmemsys = Module(new OuterMemorySystem(htif_width, tileList :+ htif))
   val incoherentWithHtif = (io.incoherent :+ Bool(true).asInput)
   outmemsys.io.incoherent := incoherentWithHtif
   htif.io.cpu <> io.htif
@@ -160,7 +161,7 @@ class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf
   outmemsys.io.mem_backup_en <> io.mem_backup_en

   // Add networking headers and endpoint queues
-  def convertAddrToBank(addr: Bits): UFix = {
+  def convertAddrToBank(addr: Bits): UInt = {
     require(conf.bankIdLsb + log2Up(conf.nBanks) < MEM_ADDR_BITS, {println("Invalid bits for bank multiplexing.")})
     addr(conf.bankIdLsb + log2Up(conf.nBanks) - 1, conf.bankIdLsb)
   }
@@ -173,7 +174,7 @@
     val grant_ack_q = Queue(client.grant_ack)
     outer.grant_ack.valid := grant_ack_q.valid
     outer.grant_ack.bits := grant_ack_q.bits
-    outer.grant_ack.bits.header.src := UFix(i)
+    outer.grant_ack.bits.header.src := UInt(i)
     grant_ack_q.ready := outer.grant_ack.ready

     client.grant <> Queue(outer.grant, 1, pipe = true)
@@ -181,7 +182,7 @@
   }

   // pad out the HTIF using a divided clock
-  val hio = (new SlowIO(512)) { Bits(width = htif_width+1) }
+  val hio = Module((new SlowIO(512)) { Bits(width = htif_width+1) })
   hio.io.set_divisor.valid := htif.io.scr.wen && htif.io.scr.waddr === 63
   hio.io.set_divisor.bits := htif.io.scr.wdata
   htif.io.scr.rdata(63) := hio.io.divisor
@@ -205,7 +206,7 @@ class Uncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf
   htif.io.host.in.bits := hio.io.in_fast.bits
   hio.io.in_fast.ready := Mux(hio.io.in_fast.bits(htif_width), Bool(true), htif.io.host.in.ready)
   io.host.clk := hio.io.clk_slow
-  io.host.clk_edge := Reg(io.host.clk && !Reg(io.host.clk))
+  io.host.clk_edge := RegUpdate(io.host.clk && !RegUpdate(io.host.clk))
 }

 class TopIO(htifWidth: Int) extends Bundle {
@@ -224,14 +225,14 @@ class VLSITopIO(htifWidth: Int) extends TopIO(htifWidth) {

 import DummyTopLevelConstants._

-class MemDessert extends Component {
+class MemDessert extends Module {
   val io = new MemDesserIO(HTIF_WIDTH)
-  val x = new MemDesser(HTIF_WIDTH)
+  val x = Module(new MemDesser(HTIF_WIDTH))
   io.narrow <> x.io.narrow
   io.wide <> x.io.wide
 }

-class Top extends Component {
+class Top extends Module {
   val co = if(ENABLE_SHARING) {
     if(ENABLE_CLEAN_EXCLUSIVE) new MESICoherence
     else new MSICoherence
@@ -249,13 +250,13 @@ class Top extends Component {
   val dc = DCacheConfig(128, 4, ntlb = 8,
                         nmshr = NMSHRS, nrpq = 16, nsdq = 17, states = co.nClientStates)
   val rc = RocketConfiguration(tl, ic, dc,
-                               fpu = true, vec = HAS_VEC)
+                               fpu = HAS_FPU, vec = HAS_VEC)

   val io = new VLSITopIO(HTIF_WIDTH)

-  val resetSigs = Vec(uc.nTiles){Bool()}
-  val tileList = (0 until uc.nTiles).map(r => new Tile(resetSignal = resetSigs(r))(rc))
-  val uncore = new Uncore(HTIF_WIDTH, tileList)
+  val resetSigs = Vec.fill(uc.nTiles){Bool()}
+  val tileList = (0 until uc.nTiles).map(r => Module(new Tile(resetSignal = resetSigs(r))(rc)))
+  val uncore = Module(new Uncore(HTIF_WIDTH, tileList))

   var error_mode = Bool(false)
   for (i <- 0 until uc.nTiles) {
@@ -267,12 +268,12 @@ class Top extends Component {
     val tile = tileList(i)
     tile.io.tilelink <> tl
     il := hl.reset
-    tile.io.host.reset := Reg(Reg(hl.reset))
+    tile.io.host.reset := RegUpdate(RegUpdate(hl.reset))
     tile.io.host.pcr_req <> Queue(hl.pcr_req)
     hl.pcr_rep <> Queue(tile.io.host.pcr_rep)
     hl.ipi_req <> Queue(tile.io.host.ipi_req)
     tile.io.host.ipi_rep <> Queue(hl.ipi_rep)
-    error_mode = error_mode || Reg(tile.io.host.debug.error_mode)
+    error_mode = error_mode || RegUpdate(tile.io.host.debug.error_mode)
   }

   io.host <> uncore.io.host
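Throughout the file above, Vec(n){gen} becomes Vec.fill(n){gen}, mirroring Scala's Seq.fill; direction modifiers such as .flip and .asInput still apply to the whole Vec. A minimal sketch (Fanout is illustrative, not from this commit):

    class Fanout(n: Int) extends Module {
      val io = new Bundle {
        val in  = UInt(INPUT, 8)
        val out = Vec.fill(n){ UInt(OUTPUT, 8) } // Chisel 1: Vec(n){ UInt(OUTPUT, 8) }
      }
      for (o <- io.out) o := io.in // drive every element of the Vec
    }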
@@ -5,26 +5,26 @@ import Node._
 import uncore._
 import rocket._

-class FPGAOuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Component
+class FPGAOuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Module
 {
   implicit val (tl, ln, l2) = (conf.tl, conf.tl.ln, conf.l2)
   val io = new Bundle {
-    val tiles = Vec(conf.nTiles) { new TileLinkIO }.flip
+    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
     val htif = (new TileLinkIO).flip
-    val incoherent = Vec(ln.nClients) { Bool() }.asInput
+    val incoherent = Vec.fill(ln.nClients){Bool()}.asInput
     val mem = new ioMem
   }

   require(clientEndpoints.length == ln.nClients)
-  val masterEndpoints = (0 until ln.nMasters).map(new L2CoherenceAgent(_))
+  val masterEndpoints = (0 until ln.nMasters).map(i => Module(new L2CoherenceAgent(i)))

-  val net = new ReferenceChipCrossbarNetwork(masterEndpoints++clientEndpoints)
+  val net = Module(new ReferenceChipCrossbarNetwork(masterEndpoints++clientEndpoints))
   net.io zip (masterEndpoints.map(_.io.client) ++ io.tiles :+ io.htif) map { case (net, end) => net <> end }
   masterEndpoints.map{ _.io.incoherent zip io.incoherent map { case (m, c) => m := c } }

-  val conv = new MemIOUncachedTileLinkIOConverter(2)
+  val conv = Module(new MemIOUncachedTileLinkIOConverter(2))
   if(ln.nMasters > 1) {
-    val arb = new UncachedTileLinkIOArbiterThatAppendsArbiterId(ln.nMasters)
+    val arb = Module(new UncachedTileLinkIOArbiterThatAppendsArbiterId(ln.nMasters))
     arb.io.in zip masterEndpoints.map(_.io.master) map { case (arb, cache) => arb <> cache }
     conv.io.uncached <> arb.io.out
   } else {
@@ -35,24 +35,24 @@ class FPGAOuterMemorySystem(htif_width: Int, clientEndpoints: Seq[ClientCoherenc
   conv.io.mem.resp <> Queue(io.mem.resp)
 }

-class FPGAUncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Component
+class FPGAUncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit conf: UncoreConfiguration) extends Module
 {
   implicit val (tl, ln) = (conf.tl, conf.tl.ln)
   val io = new Bundle {
     val debug = new DebugIO()
     val host = new HostIO(htif_width)
     val mem = new ioMem
-    val tiles = Vec(conf.nTiles) { new TileLinkIO }.flip
-    val htif = Vec(conf.nTiles) { new HTIFIO(conf.nTiles) }.flip
-    val incoherent = Vec(conf.nTiles) { Bool() }.asInput
+    val tiles = Vec.fill(conf.nTiles){new TileLinkIO}.flip
+    val htif = Vec.fill(conf.nTiles){new HTIFIO(conf.nTiles)}.flip
+    val incoherent = Vec.fill(conf.nTiles){Bool()}.asInput
   }
-  val htif = new RocketHTIF(htif_width)
-  val outmemsys = new FPGAOuterMemorySystem(htif_width, tileList :+ htif)
+  val htif = Module(new RocketHTIF(htif_width))
+  val outmemsys = Module(new FPGAOuterMemorySystem(htif_width, tileList :+ htif))
   htif.io.cpu <> io.htif
   outmemsys.io.mem <> io.mem

   // Add networking headers and endpoint queues
-  def convertAddrToBank(addr: Bits): UFix = {
+  def convertAddrToBank(addr: Bits): UInt = {
     require(conf.bankIdLsb + log2Up(conf.nBanks) < MEM_ADDR_BITS, {println("Invalid bits for bank multiplexing.")})
     addr(conf.bankIdLsb + log2Up(conf.nBanks) - 1, conf.bankIdLsb)
   }
@@ -66,7 +66,7 @@ class FPGAUncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit
     val grant_ack_q = Queue(client.grant_ack)
     outer.grant_ack.valid := grant_ack_q.valid
     outer.grant_ack.bits := grant_ack_q.bits
-    outer.grant_ack.bits.header.src := UFix(i)
+    outer.grant_ack.bits.header.src := UInt(i)
     grant_ack_q.ready := outer.grant_ack.ready

     client.grant <> Queue(outer.grant, 1, pipe = true)
@@ -79,7 +79,7 @@ class FPGAUncore(htif_width: Int, tileList: Seq[ClientCoherenceAgent])(implicit

 class FPGATopIO(htifWidth: Int) extends TopIO(htifWidth)

-class FPGATop extends Component {
+class FPGATop extends Module {
   val htif_width = 16
   val co = new MESICoherence
   val ntiles = 1
@@ -98,9 +98,9 @@ class FPGATop extends Component {

   val io = new FPGATopIO(htif_width)

-  val resetSigs = Vec(uc.nTiles){Bool()}
-  val tileList = (0 until uc.nTiles).map(r => new Tile(resetSignal = resetSigs(r))(rc))
-  val uncore = new FPGAUncore(htif_width, tileList)
+  val resetSigs = Vec.fill(uc.nTiles){Bool()}
+  val tileList = (0 until uc.nTiles).map(r => Module(new Tile(resetSignal = resetSigs(r))(rc)))
+  val uncore = Module(new FPGAUncore(htif_width, tileList))

   io.debug.error_mode := Bool(false)
   for (i <- 0 until uc.nTiles) {
@@ -113,7 +113,7 @@ class FPGATop extends Component {

     tile.io.tilelink <> tl
     il := hl.reset
-    tile.io.host.reset := Reg(Reg(hl.reset))
+    tile.io.host.reset := RegUpdate(RegUpdate(hl.reset))
     tile.io.host.pcr_req <> Queue(hl.pcr_req)
     hl.pcr_rep <> Queue(tile.io.host.pcr_rep)
     hl.ipi_req <> Queue(tile.io.host.ipi_req)
@@ -126,12 +126,12 @@ class FPGATop extends Component {
   io.mem <> uncore.io.mem
 }

-abstract class AXISlave extends Component {
+abstract class AXISlave extends Module {
   val aw = 5
   val dw = 32
   val io = new Bundle {
-    val in = new FIFOIO()(Bits(width = dw)).flip
-    val out = new FIFOIO()(Bits(width = dw))
+    val in = Decoupled(Bits(width = dw)).flip
+    val out = Decoupled(Bits(width = dw))
     val addr = Bits(INPUT, aw)
   }
 }
@@ -144,11 +144,11 @@ class Slave extends AXISlave
   val htifw = top.io.host.in.bits.getWidth

   val n = 4 // htif, mem req/read data, mem write data, error mode
-  def wen(i: Int) = io.in.valid && io.addr(log2Up(n)-1,0) === UFix(i)
-  def ren(i: Int) = io.out.ready && io.addr(log2Up(n)-1,0) === UFix(i)
-  val rdata = Vec(n){Bits(width = dw)}
-  val rvalid = Vec(n){Bool()}
-  val wready = Vec(n){Bool()}
+  def wen(i: Int) = io.in.valid && io.addr(log2Up(n)-1,0) === UInt(i)
+  def ren(i: Int) = io.out.ready && io.addr(log2Up(n)-1,0) === UInt(i)
+  val rdata = Vec.fill(n){Bits(width = dw)}
+  val rvalid = Vec.fill(n){Bool()}
+  val wready = Vec.fill(n){Bool()}

   io.in.ready := wready(io.addr)
   io.out.valid := rvalid(io.addr)
@@ -167,7 +167,7 @@ class Slave extends AXISlave

   // read cr1 -> mem.req_cmd (nonblocking)
   // the memory system is FIFO from hereon out, so just remember the tags here
-  val tagq = new Queue(4)(top.io.mem.req_cmd.bits.tag.clone)
+  val tagq = Module(new Queue(top.io.mem.req_cmd.bits.tag, 4))
   tagq.io.enq.bits := top.io.mem.req_cmd.bits.tag
   tagq.io.enq.valid := ren(1) && top.io.mem.req_cmd.valid && !top.io.mem.req_cmd.bits.rw
   top.io.mem.req_cmd.ready := ren(1)
@@ -176,29 +176,29 @@ class Slave extends AXISlave
   require(dw >= top.io.mem.req_cmd.bits.addr.getWidth + 1 + 1)

   // write cr1 -> mem.resp (nonblocking)
-  val in_count = Reg(resetVal = UFix(0, log2Up(memw/dw)))
-  val rf_count = Reg(resetVal = UFix(0, log2Up(REFILL_CYCLES)))
+  val in_count = RegReset(UInt(0, log2Up(memw/dw)))
+  val rf_count = RegReset(UInt(0, log2Up(REFILL_CYCLES)))
   require(memw % dw == 0 && isPow2(memw/dw))
-  val in_reg = Reg{top.io.mem.resp.bits.data.clone}
+  val in_reg = Reg(top.io.mem.resp.bits.data)
   top.io.mem.resp.bits.data := Cat(io.in.bits, in_reg(in_reg.getWidth-1,dw))
   top.io.mem.resp.bits.tag := tagq.io.deq.bits
   top.io.mem.resp.valid := wen(1) && in_count.andR
   tagq.io.deq.ready := top.io.mem.resp.fire() && rf_count.andR
   wready(1) := top.io.mem.resp.ready
   when (wen(1) && wready(1)) {
-    in_count := in_count + UFix(1)
+    in_count := in_count + UInt(1)
     in_reg := top.io.mem.resp.bits.data
   }
   when (top.io.mem.resp.fire()) {
-    rf_count := rf_count + UFix(1)
+    rf_count := rf_count + UInt(1)
   }

   // read cr2 -> mem.req_data (blocking)
-  val out_count = Reg(resetVal = UFix(0, log2Up(memw/dw)))
+  val out_count = RegReset(UInt(0, log2Up(memw/dw)))
   top.io.mem.req_data.ready := ren(2) && out_count.andR
-  rdata(2) := top.io.mem.req_data.bits.data >> (out_count * UFix(dw))
+  rdata(2) := top.io.mem.req_data.bits.data >> (out_count * UInt(dw))
   rvalid(2) := top.io.mem.req_data.valid
-  when (ren(2) && rvalid(2)) { out_count := out_count + UFix(1) }
+  when (ren(2) && rvalid(2)) { out_count := out_count + UInt(1) }

   // read cr3 -> error mode (nonblocking)
   rdata(3) := Cat(top.io.mem.req_cmd.valid, tagq.io.enq.ready, top.io.debug.error_mode)
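The register idioms in the Slave diff above follow the renames in this Chisel snapshot: Reg(resetVal = ...) becomes RegReset(...), a delayed copy Reg(next) becomes RegUpdate(next), and Reg(gen) now declares a typed register without the old .clone dance. (Later Chisel 2 releases rename these again to RegInit and RegNext.) A sketch with illustrative names:

    class Counter4 extends Module {
      val io = new Bundle {
        val tick = Bool(INPUT)
        val out  = UInt(OUTPUT, 4)
      }
      val count   = RegReset(UInt(0, 4)) // Chisel 1: Reg(resetVal = UFix(0, 4))
      val delayed = RegUpdate(io.tick)   // Chisel 1: Reg(io.tick), a one-cycle delay
      when (delayed) { count := count + UInt(1) }
      io.out := count
    }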
@@ -5,19 +5,19 @@ import uncore._
 import scala.reflect._

 object TileLinkHeaderAppender {
-  def apply[T <: SourcedMessage with HasPhysicalAddress, U <: SourcedMessage with HasTileLinkData](in: ClientSourcedDataIO[LogicalNetworkIO[T],LogicalNetworkIO[U]], clientId: Int, nBanks: Int, addrConvert: Bits => UFix)(implicit conf: TileLinkConfiguration) = {
-    val shim = new TileLinkHeaderAppender(clientId, nBanks, addrConvert)(in.meta.bits.payload.clone, in.data.bits.payload.clone)
+  def apply[T <: SourcedMessage with HasPhysicalAddress, U <: SourcedMessage with HasTileLinkData](in: ClientSourcedDataIO[LogicalNetworkIO[T],LogicalNetworkIO[U]], clientId: Int, nBanks: Int, addrConvert: Bits => UInt)(implicit conf: TileLinkConfiguration) = {
+    val shim = Module(new TileLinkHeaderAppender(clientId, nBanks, addrConvert)(in.meta.bits.payload.clone, in.data.bits.payload.clone))
     shim.io.in <> in
     shim.io.out
   }
-  def apply[T <: SourcedMessage with HasPhysicalAddress](in: ClientSourcedFIFOIO[LogicalNetworkIO[T]], clientId: Int, nBanks: Int, addrConvert: Bits => UFix)(implicit conf: TileLinkConfiguration) = {
-    val shim = new TileLinkHeaderAppender(clientId, nBanks, addrConvert)(in.bits.payload.clone, new AcquireData)
+  def apply[T <: SourcedMessage with HasPhysicalAddress](in: ClientSourcedFIFOIO[LogicalNetworkIO[T]], clientId: Int, nBanks: Int, addrConvert: Bits => UInt)(implicit conf: TileLinkConfiguration) = {
+    val shim = Module(new TileLinkHeaderAppender(clientId, nBanks, addrConvert)(in.bits.payload.clone, new AcquireData))
     shim.io.in.meta <> in
     shim.io.out.meta
   }
 }

-class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: SourcedMessage with HasTileLinkData](clientId: Int, nBanks: Int, addrConvert: Bits => UFix)(metadata: => T, data: => U)(implicit conf: TileLinkConfiguration) extends Component {
+class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: SourcedMessage with HasTileLinkData](clientId: Int, nBanks: Int, addrConvert: Bits => UInt)(metadata: => T, data: => U)(implicit conf: TileLinkConfiguration) extends Module {
   implicit val ln = conf.ln
   val io = new Bundle {
     val in = new ClientSourcedDataIO()((new LogicalNetworkIO){ metadata }, (new LogicalNetworkIO){ data }).flip
@@ -28,26 +28,26 @@ class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: S
   val data_q = Queue(io.in.data)
   if(nBanks == 1) {
     io.out.meta.bits.payload := meta_q.bits.payload
-    io.out.meta.bits.header.src := UFix(clientId)
-    io.out.meta.bits.header.dst := UFix(0)
+    io.out.meta.bits.header.src := UInt(clientId)
+    io.out.meta.bits.header.dst := UInt(0)
     io.out.meta.valid := meta_q.valid
     meta_q.ready := io.out.meta.ready
     io.out.data.bits.payload := data_q.bits.payload
-    io.out.data.bits.header.src := UFix(clientId)
-    io.out.data.bits.header.dst := UFix(0)
+    io.out.data.bits.header.src := UInt(clientId)
+    io.out.data.bits.header.dst := UInt(0)
     io.out.data.valid := data_q.valid
     data_q.ready := io.out.data.ready
   } else {
     val meta_has_data = conf.co.messageHasData(meta_q.bits.payload)
-    val addr_q = (new Queue(2, pipe = true, flow = true)){io.in.meta.bits.payload.addr.clone}
-    val data_cnt = Reg(resetVal = UFix(0, width = log2Up(REFILL_CYCLES)))
-    val data_cnt_up = data_cnt + UFix(1)
+    val addr_q = Module(new Queue(io.in.meta.bits.payload.addr.clone, 2, pipe = true, flow = true))
+    val data_cnt = RegReset(UInt(0, width = log2Up(REFILL_CYCLES)))
+    val data_cnt_up = data_cnt + UInt(1)

     io.out.meta.bits.payload := meta_q.bits.payload
-    io.out.meta.bits.header.src := UFix(clientId)
+    io.out.meta.bits.header.src := UInt(clientId)
     io.out.meta.bits.header.dst := addrConvert(meta_q.bits.payload.addr)
     io.out.data.bits.payload := meta_q.bits.payload
-    io.out.data.bits.header.src := UFix(clientId)
+    io.out.data.bits.header.src := UInt(clientId)
     io.out.data.bits.header.dst := addrConvert(addr_q.io.deq.bits)
     addr_q.io.enq.bits := meta_q.bits.payload.addr

@@ -60,7 +60,7 @@ class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: S

     when(data_q.valid && data_q.ready) {
       data_cnt := data_cnt_up
-      when(data_cnt_up === UFix(0)) {
+      when(data_cnt_up === UInt(0)) {
         addr_q.io.deq.ready := Bool(true)
       }
     }
@@ -68,16 +68,16 @@ class TileLinkHeaderAppender[T <: SourcedMessage with HasPhysicalAddress, U <: S
 }

 //Adapter betweewn an UncachedTileLinkIO and a mem controller MemIO
-class MemIOUncachedTileLinkIOConverter(qDepth: Int)(implicit conf: TileLinkConfiguration) extends Component {
+class MemIOUncachedTileLinkIOConverter(qDepth: Int)(implicit conf: TileLinkConfiguration) extends Module {
   val io = new Bundle {
     val uncached = new UncachedTileLinkIO().flip
     val mem = new ioMem
   }
-  val mem_cmd_q = (new Queue(qDepth)){new MemReqCmd}
-  val mem_data_q = (new Queue(qDepth)){new MemData}
+  val mem_cmd_q = Module(new Queue(new MemReqCmd, qDepth))
+  val mem_data_q = Module(new Queue(new MemData, qDepth))
   mem_cmd_q.io.enq.valid := io.uncached.acquire.meta.valid
   io.uncached.acquire.meta.ready := mem_cmd_q.io.enq.ready
-  mem_cmd_q.io.enq.bits.rw := conf.co.needsOuterWrite(io.uncached.acquire.meta.bits.payload.a_type, UFix(0))
+  mem_cmd_q.io.enq.bits.rw := conf.co.needsOuterWrite(io.uncached.acquire.meta.bits.payload.a_type, UInt(0))
   mem_cmd_q.io.enq.bits.tag := io.uncached.acquire.meta.bits.payload.client_xact_id
   mem_cmd_q.io.enq.bits.addr := io.uncached.acquire.meta.bits.payload.addr
   mem_data_q.io.enq.valid := io.uncached.acquire.data.valid
@@ -87,20 +87,20 @@ class MemIOUncachedTileLinkIOConverter(qDepth: Int)(implicit conf: TileLinkConfi
   io.mem.resp.ready := io.uncached.grant.ready
   io.uncached.grant.bits.payload.data := io.mem.resp.bits.data
   io.uncached.grant.bits.payload.client_xact_id := io.mem.resp.bits.tag
-  io.uncached.grant.bits.payload.master_xact_id := UFix(0) // DNC
-  io.uncached.grant.bits.payload.g_type := UFix(0) // DNC
+  io.uncached.grant.bits.payload.master_xact_id := UInt(0) // DNC
+  io.uncached.grant.bits.payload.g_type := UInt(0) // DNC
   io.mem.req_cmd <> mem_cmd_q.io.deq
   io.mem.req_data <> mem_data_q.io.deq
 }

 class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit conf: UncoreConfiguration) extends LogicalNetwork[TileLinkIO](endpoints)(conf.tl.ln) {
   implicit val (tl, ln, co) = (conf.tl, conf.tl.ln, conf.tl.co)
-  val io = Vec(endpoints.map(_ match { case t:ClientCoherenceAgent => {(new TileLinkIO).flip}; case h:MasterCoherenceAgent => {new TileLinkIO}})){ new TileLinkIO }
+  val io = Vec(endpoints.map(_ match { case t:ClientCoherenceAgent => {(new TileLinkIO).flip}; case h:MasterCoherenceAgent => {new TileLinkIO}}))
   implicit val pconf = new PhysicalNetworkConfiguration(ln.nEndpoints, ln.idBits) // Same config for all networks

   // Aliases for the various network IO bundle types
-  type FBCIO[T <: Data] = FIFOIO[PhysicalNetworkIO[T]]
-  type FLNIO[T <: Data] = FIFOIO[LogicalNetworkIO[T]]
+  type FBCIO[T <: Data] = DecoupledIO[PhysicalNetworkIO[T]]
+  type FLNIO[T <: Data] = DecoupledIO[LogicalNetworkIO[T]]
   type PBCIO[M <: Data, D <: Data] = PairedDataIO[PhysicalNetworkIO[M], PhysicalNetworkIO[D]]
   type PLNIO[M <: Data, D <: Data] = PairedDataIO[LogicalNetworkIO[M], LogicalNetworkIO[D]]
   type FromCrossbar[T <: Data] = FBCIO[T] => FLNIO[T]
@@ -109,7 +109,7 @@ class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit
   // Shims for converting between logical network IOs and physical network IOs
   //TODO: Could be less verbose if you could override subbundles after a <>
   def DefaultFromCrossbarShim[T <: Data](in: FBCIO[T]): FLNIO[T] = {
-    val out = new FIFOIO()(new LogicalNetworkIO()(in.bits.payload.clone)).asDirectionless
+    val out = Decoupled(new LogicalNetworkIO()(in.bits.payload.clone)).asDirectionless
     out.bits.header := in.bits.header
     out.bits.payload := in.bits.payload
     out.valid := in.valid
@@ -118,16 +118,16 @@ class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit
   }
   def CrossbarToMasterShim[T <: Data](in: FBCIO[T]): FLNIO[T] = {
     val out = DefaultFromCrossbarShim(in)
-    out.bits.header.src := in.bits.header.src - UFix(ln.nMasters)
+    out.bits.header.src := in.bits.header.src - UInt(ln.nMasters)
     out
   }
   def CrossbarToClientShim[T <: Data](in: FBCIO[T]): FLNIO[T] = {
     val out = DefaultFromCrossbarShim(in)
-    out.bits.header.dst := in.bits.header.dst - UFix(ln.nMasters)
+    out.bits.header.dst := in.bits.header.dst - UInt(ln.nMasters)
     out
   }
   def DefaultToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = {
-    val out = new FIFOIO()(new PhysicalNetworkIO()(in.bits.payload.clone)).asDirectionless
+    val out = Decoupled(new PhysicalNetworkIO()(in.bits.payload.clone)).asDirectionless
     out.bits.header := in.bits.header
     out.bits.payload := in.bits.payload
     out.valid := in.valid
@@ -136,12 +136,12 @@ class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit
   }
   def MasterToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = {
     val out = DefaultToCrossbarShim(in)
-    out.bits.header.dst := in.bits.header.dst + UFix(ln.nMasters)
+    out.bits.header.dst := in.bits.header.dst + UInt(ln.nMasters)
     out
   }
   def ClientToCrossbarShim[T <: Data](in: FLNIO[T]): FBCIO[T] = {
     val out = DefaultToCrossbarShim(in)
-    out.bits.header.src := in.bits.header.src + UFix(ln.nMasters)
+    out.bits.header.src := in.bits.header.src + UInt(ln.nMasters)
     out
   }

@@ -203,20 +203,20 @@ class ReferenceChipCrossbarNetwork(endpoints: Seq[CoherenceAgentRole])(implicit

   // Actually instantiate the particular networks required for TileLink
   def acqHasData(acq: PhysicalNetworkIO[Acquire]) = co.messageHasData(acq.payload)
-  val acq_net = new PairedCrossbar(REFILL_CYCLES, acqHasData _)(new Acquire, new AcquireData)
+  val acq_net = Module(new PairedCrossbar(REFILL_CYCLES, acqHasData _)(new Acquire, new AcquireData))
   endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doClientSourcedPairedHookup(end, acq_net.io.in(id), acq_net.io.out(id), io.acquire) }

   def relHasData(rel: PhysicalNetworkIO[Release]) = co.messageHasData(rel.payload)
-  val rel_net = new PairedCrossbar(REFILL_CYCLES, relHasData _)(new Release, new ReleaseData)
+  val rel_net = Module(new PairedCrossbar(REFILL_CYCLES, relHasData _)(new Release, new ReleaseData))
   endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doClientSourcedPairedHookup(end, rel_net.io.in(id), rel_net.io.out(id), io.release) }

-  val probe_net = new BasicCrossbar()(new Probe)
+  val probe_net = Module(new BasicCrossbar()(new Probe))
   endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doMasterSourcedFIFOHookup(end, probe_net.io.in(id), probe_net.io.out(id), io.probe) }

-  val grant_net = new BasicCrossbar()(new Grant)
+  val grant_net = Module(new BasicCrossbar()(new Grant))
   endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doMasterSourcedFIFOHookup(end, grant_net.io.in(id), grant_net.io.out(id), io.grant) }

-  val ack_net = new BasicCrossbar()(new GrantAck)
+  val ack_net = Module(new BasicCrossbar()(new GrantAck))
   endpoints.zip(io).zipWithIndex.map{ case ((end, io), id) => doClientSourcedFIFOHookup(end, ack_net.io.in(id), ack_net.io.out(id), io.grant_ack) }

   val physicalNetworks = List(acq_net, rel_net, probe_net, grant_net, ack_net)
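The crossbar shims above complete the FIFOIO rename: the interface type is now DecoupledIO[T], and the Decoupled(gen) factory replaces new FIFOIO()(gen) when building ports. A minimal pass-through sketch in the same style (PassPorts is illustrative, not from this commit):

    class PassPorts extends Module {
      val io = new Bundle {
        val in  = Decoupled(Bits(width = 32)).flip // Chisel 1: new FIFOIO()(Bits(width = 32)).flip
        val out = Decoupled(Bits(width = 32))      // producer side of a ready/valid pair
      }
      io.out.valid := io.in.valid
      io.out.bits  := io.in.bits
      io.in.ready  := io.out.ready
    }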
uncore
@@ -1 +1 @@
-Subproject commit 0f675e35e7503419482b12fb265ef2709a91403a
+Subproject commit 295a4a5d69987d58de5edea6fad4e750100cffa3