
remove chip-specific uncore gunk

Andrew Waterman 2012-07-26 03:26:52 -07:00
parent a5bea4364f
commit 9c50621a19


@@ -5,95 +5,14 @@ import Node._;
import Constants._;
import collection.mutable._
class ioTop(htif_width: Int) extends Bundle {
  val debug = new ioDebug();
  val host = new ioHost(htif_width);
  val host_clk = Bool(OUTPUT)
  val mem_backup = new ioMemSerialized
  val mem_backup_en = Bool(INPUT)
  val mem_backup_clk = Bool(OUTPUT)
  val mem = new ioMemPipe
}
class ioUncore(htif_width: Int, ntiles: Int) extends Bundle {
  val debug = new ioDebug()
  val host = new ioHost(htif_width)
  val host_clk = Bool(OUTPUT)
  val mem_backup = new ioMemSerialized
  val mem_backup_en = Bool(INPUT)
  val mem_backup_clk = Bool(OUTPUT)
  val mem = new ioMemPipe
  val tiles = Vec(ntiles) { new ioTileLink() }.flip
  val htif = Vec(ntiles) { new ioHTIF() }.flip
}
class Uncore(htif_width: Int, ntiles: Int, co: CoherencePolicyWithUncached) extends Component
{
  val clkdiv = 8
  val io = new ioUncore(htif_width, ntiles)
  val htif = new rocketHTIF(htif_width, NTILES, co)
  val hub = new CoherenceHubBroadcast(NTILES+1, co)
  val llc_tag_leaf = Mem(1024, seqRead = true) { Bits(width = 72) }
  val llc_data_leaf = Mem(4096, seqRead = true) { Bits(width = 64) }
  val llc = new DRAMSideLLC(1024, 8, 4, llc_tag_leaf, llc_data_leaf)
  for (i <- 0 until NTILES) {
    hub.io.tiles(i) <> io.tiles(i)
    htif.io.cpu(i) <> io.htif(i)
  }
  hub.io.tiles(NTILES) <> htif.io.mem
  llc.io.cpu.req_cmd <> Queue(hub.io.mem.req_cmd)
  llc.io.cpu.req_data <> Queue(hub.io.mem.req_data, REFILL_CYCLES)
  hub.io.mem.resp <> llc.io.cpu.resp
  // mux between main and backup memory ports
  val mem_serdes = new MemSerdes
  val mem_cmdq = (new queue(2)) { new MemReqCmd }
  mem_cmdq.io.enq <> llc.io.mem.req_cmd
  mem_cmdq.io.deq.ready := Mux(io.mem_backup_en, mem_serdes.io.wide.req_cmd.ready, io.mem.req_cmd.ready)
  io.mem.req_cmd.valid := mem_cmdq.io.deq.valid && !io.mem_backup_en
  io.mem.req_cmd.bits := mem_cmdq.io.deq.bits
  mem_serdes.io.wide.req_cmd.valid := mem_cmdq.io.deq.valid && io.mem_backup_en
  mem_serdes.io.wide.req_cmd.bits := mem_cmdq.io.deq.bits
  val mem_dataq = (new queue(REFILL_CYCLES)) { new MemData }
  mem_dataq.io.enq <> llc.io.mem.req_data
  mem_dataq.io.deq.ready := Mux(io.mem_backup_en, mem_serdes.io.wide.req_data.ready, io.mem.req_data.ready)
  io.mem.req_data.valid := mem_dataq.io.deq.valid && !io.mem_backup_en
  io.mem.req_data.bits := mem_dataq.io.deq.bits
  mem_serdes.io.wide.req_data.valid := mem_dataq.io.deq.valid && io.mem_backup_en
  mem_serdes.io.wide.req_data.bits := mem_dataq.io.deq.bits
  llc.io.mem.resp.valid := Mux(io.mem_backup_en, mem_serdes.io.wide.resp.valid, io.mem.resp.valid)
  llc.io.mem.resp.bits := Mux(io.mem_backup_en, mem_serdes.io.wide.resp.bits, io.mem.resp.bits)
  // pad out the HTIF using a divided clock
  val hio = (new slowIO(clkdiv)) { Bits(width = htif_width+1) }
  hio.io.out_fast.valid := htif.io.host.out.valid || mem_serdes.io.narrow.req.valid
  hio.io.out_fast.bits := Cat(htif.io.host.out.valid, Mux(htif.io.host.out.valid, htif.io.host.out.bits, mem_serdes.io.narrow.req.bits))
  htif.io.host.out.ready := hio.io.out_fast.ready
  mem_serdes.io.narrow.req.ready := hio.io.out_fast.ready && !htif.io.host.out.valid
  io.host.out.valid := hio.io.out_slow.valid && hio.io.out_slow.bits(htif_width)
  io.host.out.bits := hio.io.out_slow.bits
  io.mem_backup.req.valid := hio.io.out_slow.valid && !hio.io.out_slow.bits(htif_width)
  hio.io.out_slow.ready := Mux(hio.io.out_slow.bits(htif_width), io.host.out.ready, io.mem_backup.req.ready)
  val mem_backup_resp_valid = io.mem_backup_en && io.mem_backup.resp.valid
  hio.io.in_slow.valid := mem_backup_resp_valid || io.host.in.valid
  hio.io.in_slow.bits := Cat(mem_backup_resp_valid, io.host.in.bits)
  io.host.in.ready := hio.io.in_slow.ready
  mem_serdes.io.narrow.resp.valid := hio.io.in_fast.valid && hio.io.in_fast.bits(htif_width)
  mem_serdes.io.narrow.resp.bits := hio.io.in_fast.bits
  htif.io.host.in.valid := hio.io.in_fast.valid && !hio.io.in_fast.bits(htif_width)
  htif.io.host.in.bits := hio.io.in_fast.bits
  hio.io.in_fast.ready := Mux(hio.io.in_fast.bits(htif_width), Bool(true), htif.io.host.in.ready)
  io.host_clk := hio.io.clk_slow
}
class Top extends Component
{
  val io = new Bundle {
    val debug = new ioDebug
    val host = new ioHost(HTIF_WIDTH)
    val mem = new ioMemPipe
  }
  val co = if(ENABLE_SHARING) {
    if(ENABLE_CLEAN_EXCLUSIVE) new MESICoherence
    else new MSICoherence
@@ -101,15 +20,21 @@ class Top extends Component
    if(ENABLE_CLEAN_EXCLUSIVE) new MEICoherence
    else new MICoherence
  }
  val io = new ioTop(HTIF_WIDTH)
  val uncore = new Uncore(HTIF_WIDTH, NTILES, co)
  uncore.io <> io
  val htif = new rocketHTIF(HTIF_WIDTH, NTILES, co)
  val hub = new CoherenceHubBroadcast(NTILES+1, co)
  hub.io.tiles(NTILES) <> htif.io.mem
  io.host <> htif.io.host
  io.mem.req_cmd <> Queue(hub.io.mem.req_cmd)
  io.mem.req_data <> Queue(hub.io.mem.req_data, REFILL_CYCLES)
  hub.io.mem.resp <> Pipe(io.mem.resp)
  Assert(hub.io.mem.resp.ready, "hub.io.mem.resp.ready")
  var error_mode = Bool(false)
  for (i <- 0 until NTILES) {
    val hl = uncore.io.htif(i)
    val tl = uncore.io.tiles(i)
    val hl = htif.io.cpu(i)
    val tl = hub.io.tiles(i)
    val tile = new Tile(co, resetSignal = hl.reset)
    tile.io.host.reset := Reg(Reg(hl.reset))