Merge branch 'release-xacts'
Conflicts:
	src/htif.scala
	src/icache.scala
	src/nbdcache.scala
	src/tile.scala
commit 95f0a688e9
@@ -47,68 +47,3 @@ class HellaCacheArbiter(n: Int)(implicit conf: RocketConfiguration) extends Comp
     resp.bits.replay := io.mem.resp.bits.replay && tag_hit
   }
 }
-
-class UncachedRequestorIO(implicit conf: LogicalNetworkConfiguration) extends Bundle {
-  val acquire = (new ClientSourcedIO){(new LogicalNetworkIO){new Acquire }}
-  val abort = (new MasterSourcedIO) {(new LogicalNetworkIO){new Abort }}
-  val grant = (new MasterSourcedIO) {(new LogicalNetworkIO){new Grant }}
-  val grant_ack = (new ClientSourcedIO){(new LogicalNetworkIO){new GrantAck }}
-}
-
-class MemArbiter(n: Int)(implicit conf: LogicalNetworkConfiguration) extends Component {
-  val io = new Bundle {
-    val mem = new UncachedRequestorIO
-    val requestor = Vec(n) { new UncachedRequestorIO }.flip
-  }
-
-  var xi_bits = new Acquire
-  xi_bits := io.requestor(n-1).acquire.bits.payload
-  xi_bits.client_xact_id := Cat(io.requestor(n-1).acquire.bits.payload.client_xact_id, UFix(n-1, log2Up(n)))
-  for (i <- n-2 to 0 by -1)
-  {
-    var my_xi_bits = new Acquire
-    my_xi_bits := io.requestor(i).acquire.bits.payload
-    my_xi_bits.client_xact_id := Cat(io.requestor(i).acquire.bits.payload.client_xact_id, UFix(i, log2Up(n)))
-
-    xi_bits = Mux(io.requestor(i).acquire.valid, my_xi_bits, xi_bits)
-  }
-
-  io.mem.acquire.bits.payload := xi_bits
-  io.mem.acquire.valid := io.requestor.map(_.acquire.valid).reduce(_||_)
-  io.requestor(0).acquire.ready := io.mem.acquire.ready
-  for (i <- 1 until n)
-    io.requestor(i).acquire.ready := io.requestor(i-1).acquire.ready && !io.requestor(i-1).acquire.valid
-
-  var xf_bits = io.requestor(n-1).grant_ack.bits
-  for (i <- n-2 to 0 by -1)
-    xf_bits = Mux(io.requestor(i).grant_ack.valid, io.requestor(i).grant_ack.bits, xf_bits)
-
-  io.mem.grant_ack.bits := xf_bits
-  io.mem.grant_ack.valid := io.requestor.map(_.grant_ack.valid).reduce(_||_)
-  io.requestor(0).grant_ack.ready := io.mem.grant_ack.ready
-  for (i <- 1 until n)
-    io.requestor(i).grant_ack.ready := io.requestor(i-1).grant_ack.ready && !io.requestor(i-1).grant_ack.valid
-
-  io.mem.grant.ready := Bool(false)
-  for (i <- 0 until n)
-  {
-    val tag = io.mem.grant.bits.payload.client_xact_id
-    io.requestor(i).grant.valid := Bool(false)
-    when (tag(log2Up(n)-1,0) === UFix(i)) {
-      io.requestor(i).grant.valid := io.mem.grant.valid
-      io.mem.grant.ready := io.requestor(i).grant.ready
-    }
-    io.requestor(i).grant.bits := io.mem.grant.bits
-    io.requestor(i).grant.bits.payload.client_xact_id := tag >> UFix(log2Up(n))
-  }
-
-  for (i <- 0 until n)
-  {
-    val tag = io.mem.abort.bits.payload.client_xact_id
-    io.requestor(i).abort.valid := io.mem.abort.valid && tag(log2Up(n)-1,0) === UFix(i)
-    io.requestor(i).abort.bits := io.mem.abort.bits
-    io.requestor(i).abort.bits.payload.client_xact_id := tag >> UFix(log2Up(n))
-  }
-
-  io.mem.abort.ready := Bool(true)
-}
@@ -8,7 +8,7 @@ import Util._
 
 class RocketIO(implicit conf: RocketConfiguration) extends Bundle
 {
-  val host = new HTIFIO(conf.lnConf.nTiles)
+  val host = new HTIFIO(conf.lnConf.nClients)
   val imem = new CPUFrontendIO()(conf.icache)
   val vimem = new CPUFrontendIO()(conf.icache)
   val dmem = new HellaCacheIO()(conf.dcache)
@@ -10,7 +10,7 @@ import hwacha._
 class Datapath(implicit conf: RocketConfiguration) extends Component
 {
   val io = new Bundle {
-    val host = new HTIFIO(conf.lnConf.nTiles)
+    val host = new HTIFIO(conf.lnConf.nClients)
     val ctrl = (new CtrlDpathIO).flip
     val dmem = new HellaCacheIO()(conf.dcache)
     val ptw = (new DatapathPTWIO).flip
@@ -104,7 +104,7 @@ object PCR
 class PCR(implicit conf: RocketConfiguration) extends Component
 {
   val io = new Bundle {
-    val host = new HTIFIO(conf.lnConf.nTiles)
+    val host = new HTIFIO(conf.lnConf.nClients)
     val r = new ioReadPort(conf.nxpr, conf.xprlen)
     val w = new ioWritePort(conf.nxpr, conf.xprlen)
 
@@ -45,12 +45,12 @@ class SCRIO extends Bundle
   val wdata = Bits(OUTPUT, 64)
 }
 
-class rocketHTIF(w: Int)(implicit conf: CoherenceHubConfiguration) extends Component with ClientCoherenceAgent
+class rocketHTIF(w: Int)(implicit conf: UncoreConfiguration) extends Component with ClientCoherenceAgent
 {
   implicit val lnConf = conf.ln
   val io = new Bundle {
     val host = new HostIO(w)
-    val cpu = Vec(conf.ln.nTiles) { new HTIFIO(conf.ln.nTiles).flip }
+    val cpu = Vec(conf.ln.nClients) { new HTIFIO(conf.ln.nClients).flip }
     val mem = new TileLinkIO()(conf.ln)
     val scr = new SCRIO
   }
@@ -92,7 +92,7 @@ class rocketHTIF(w: Int)(implicit conf: CoherenceHubConfiguration) extends Compo
   val cmd_readmem :: cmd_writemem :: cmd_readcr :: cmd_writecr :: cmd_ack :: cmd_nack :: Nil = Enum(6) { UFix() }
 
   val pcr_addr = addr(io.cpu(0).pcr_req.bits.addr.width-1, 0)
-  val pcr_coreid = addr(log2Up(conf.ln.nTiles+1)-1+20,20)
+  val pcr_coreid = if (conf.ln.nClients == 1) UFix(0) else addr(log2Up(conf.ln.nClients)-1+20,20)
   val pcr_wdata = packet_ram(0)
 
   val bad_mem_packet = size(OFFSET_BITS-1-3,0).orR || addr(OFFSET_BITS-1-3,0).orR
@@ -114,16 +114,15 @@ class rocketHTIF(w: Int)(implicit conf: CoherenceHubConfiguration) extends Compo
 
   val mem_acked = Reg(resetVal = Bool(false))
   val mem_gxid = Reg() { Bits() }
+  val mem_gsrc = Reg() { UFix(width = conf.ln.idBits) }
   val mem_needs_ack = Reg() { Bool() }
-  val mem_nacked = Reg(resetVal = Bool(false))
   when (io.mem.grant.valid) {
     mem_acked := Bool(true)
     mem_gxid := io.mem.grant.bits.payload.master_xact_id
-    mem_needs_ack := io.mem.grant.bits.payload.require_ack
+    mem_gsrc := io.mem.grant.bits.header.src
+    mem_needs_ack := conf.co.requiresAck(io.mem.grant.bits.payload)
   }
   io.mem.grant.ready := Bool(true)
-  when (io.mem.abort.valid) { mem_nacked := Bool(true) }
-  io.mem.abort.ready := Bool(true)
 
   val state_rx :: state_pcr_req :: state_pcr_resp :: state_mem_req :: state_mem_wdata :: state_mem_wresp :: state_mem_rdata :: state_mem_finish :: state_tx :: Nil = Enum(9) { UFix() }
   val state = Reg(resetVal = state_rx)
@@ -147,20 +146,12 @@ class rocketHTIF(w: Int)(implicit conf: CoherenceHubConfiguration) extends Compo
     mem_cnt := mem_cnt + UFix(1)
   }
   when (state === state_mem_wresp) {
-    when (mem_nacked) {
-      state := state_mem_req
-      mem_nacked := Bool(false)
-    }
     when (mem_acked) {
       state := state_mem_finish
       mem_acked := Bool(false)
     }
   }
   when (state === state_mem_rdata) {
-    when (mem_nacked) {
-      state := state_mem_req
-      mem_nacked := Bool(false)
-    }
     when (io.mem.grant.valid) {
       when (mem_cnt.andR) {
         state := state_mem_finish
@@ -194,26 +185,18 @@ class rocketHTIF(w: Int)(implicit conf: CoherenceHubConfiguration) extends Compo
   val init_addr = addr.toUFix >> UFix(OFFSET_BITS-3)
   val co = conf.co.asInstanceOf[CoherencePolicyWithUncached]
   x_init.io.enq.bits := Mux(cmd === cmd_writemem, co.getUncachedWriteAcquire(init_addr, UFix(0)), co.getUncachedReadAcquire(init_addr, UFix(0)))
-  io.mem.acquire <> FIFOedLogicalNetworkIOWrapper(x_init.io.deq, UFix(conf.ln.nTiles), UFix(0))
+  io.mem.acquire <> FIFOedLogicalNetworkIOWrapper(x_init.io.deq, UFix(conf.ln.nClients), UFix(0))
   io.mem.acquire_data.valid := state === state_mem_wdata
   io.mem.acquire_data.bits.payload.data := mem_req_data
   io.mem.grant_ack.valid := (state === state_mem_finish) && mem_needs_ack
   io.mem.grant_ack.bits.payload.master_xact_id := mem_gxid
+  io.mem.grant_ack.bits.header.dst := mem_gsrc
   io.mem.probe.ready := Bool(false)
   io.mem.release.valid := Bool(false)
   io.mem.release_data.valid := Bool(false)
 
-  io.mem.acquire_data.bits.header.src := UFix(conf.ln.nTiles)
-  io.mem.acquire_data.bits.header.dst := UFix(0)
-  io.mem.release.bits.header.src := UFix(conf.ln.nTiles)
-  io.mem.release.bits.header.dst := UFix(0)
-  io.mem.release_data.bits.header.src := UFix(conf.ln.nTiles)
-  io.mem.release_data.bits.header.dst := UFix(0)
-  io.mem.grant_ack.bits.header.src := UFix(conf.ln.nTiles)
-  io.mem.grant_ack.bits.header.dst := UFix(0)
-
   val pcrReadData = Reg{Bits(width = io.cpu(0).pcr_rep.bits.getWidth)}
-  for (i <- 0 until conf.ln.nTiles) {
+  for (i <- 0 until conf.ln.nClients) {
     val my_reset = Reg(resetVal = Bool(true))
     val my_ipi = Reg(resetVal = Bool(false))
 
@@ -230,7 +213,7 @@ class rocketHTIF(w: Int)(implicit conf: CoherenceHubConfiguration) extends Compo
     }
     cpu.ipi_rep.valid := my_ipi
     cpu.ipi_req.ready := Bool(true)
-    for (j <- 0 until conf.ln.nTiles) {
+    for (j <- 0 until conf.ln.nClients) {
       when (io.cpu(j).ipi_req.valid && io.cpu(j).ipi_req.bits === UFix(i)) {
         my_ipi := Bool(true)
       }
@@ -257,7 +240,7 @@ class rocketHTIF(w: Int)(implicit conf: CoherenceHubConfiguration) extends Compo
   val scr_rdata = Vec(io.scr.rdata.size){Bits(width = 64)}
   for (i <- 0 until scr_rdata.size)
     scr_rdata(i) := io.scr.rdata(i)
-  scr_rdata(0) := conf.ln.nTiles
+  scr_rdata(0) := conf.ln.nClients
   scr_rdata(1) := (UFix(REFILL_CYCLES*MEM_DATA_BITS/8) << x_init.io.enq.bits.addr.getWidth) >> 20
 
   io.scr.wen := false
@@ -54,7 +54,7 @@ class Frontend(implicit c: ICacheConfig, lnconf: LogicalNetworkConfiguration) ex
 {
   val io = new Bundle {
     val cpu = new CPUFrontendIO()(c).flip
-    val mem = new UncachedRequestorIO
+    val mem = new UncachedTileLinkIO
   }
 
   val btb = new rocketDpathBTB(c.nbtb)
@@ -134,7 +134,7 @@ class ICache(implicit c: ICacheConfig, lnconf: LogicalNetworkConfiguration) exte
       val datablock = Bits(width = c.databits)
     })
     val invalidate = Bool(INPUT)
-    val mem = new UncachedRequestorIO
+    val mem = new UncachedTileLinkIO
   }
 
   val s_ready :: s_request :: s_refill_wait :: s_refill :: Nil = Enum(4) { UFix() }
@@ -172,7 +172,8 @@ class ICache(implicit c: ICacheConfig, lnconf: LogicalNetworkConfiguration) exte
   val s2_miss = s2_valid && !s2_any_tag_hit
   rdy := state === s_ready && !s2_miss
 
-  val (rf_cnt, refill_done) = Counter(io.mem.grant.valid, REFILL_CYCLES)
+  Assert(!c.co.isVoluntary(io.mem.grant.bits.payload) || !io.mem.grant.valid, "UncachedRequestors shouldn't get voluntary grants.")
+  val (rf_cnt, refill_done) = Counter(io.mem.grant.valid && !c.co.isVoluntary(io.mem.grant.bits.payload), REFILL_CYCLES)
   val repl_way = if (c.dm) UFix(0) else LFSR16(s2_miss)(log2Up(c.assoc)-1,0)
 
   val enc_tagbits = c.code.width(c.tagbits)
@@ -224,7 +225,7 @@ class ICache(implicit c: ICacheConfig, lnconf: LogicalNetworkConfiguration) exte
   for (i <- 0 until c.assoc) {
     val data_array = Mem(c.sets*REFILL_CYCLES, seqRead = true){ Bits(width = c.code.width(c.databits)) }
     val s1_raddr = Reg{UFix()}
-    when (io.mem.grant.valid && repl_way === UFix(i)) {
+    when (io.mem.grant.valid && c.co.messageHasData(io.mem.grant.bits.payload) && repl_way === UFix(i)) {
       val d = io.mem.grant.bits.payload.data
       data_array(Cat(s2_idx,rf_cnt)) := c.code.encode(d)
     }
@@ -240,13 +241,14 @@ class ICache(implicit c: ICacheConfig, lnconf: LogicalNetworkConfiguration) exte
   io.resp.bits.datablock := Mux1H(s2_tag_hit, s2_dout)
 
   val finish_q = (new Queue(1)) { new GrantAck }
-  finish_q.io.enq.valid := refill_done && io.mem.grant.bits.payload.require_ack
+  finish_q.io.enq.valid := refill_done && c.co.requiresAck(io.mem.grant.bits.payload)
   finish_q.io.enq.bits.master_xact_id := io.mem.grant.bits.payload.master_xact_id
 
   // output signals
   io.resp.valid := s2_hit
   io.mem.acquire.valid := (state === s_request) && finish_q.io.enq.ready
   io.mem.acquire.bits.payload := c.co.getUncachedReadAcquire(s2_addr >> UFix(c.offbits), UFix(0))
+  io.mem.acquire_data.valid := Bool(false)
   io.mem.grant_ack <> FIFOedLogicalNetworkIOWrapper(finish_q.io.deq)
   io.mem.grant.ready := Bool(true)
 
@@ -260,7 +262,6 @@ class ICache(implicit c: ICacheConfig, lnconf: LogicalNetworkConfiguration) exte
       when (io.mem.acquire.ready && finish_q.io.enq.ready) { state := s_refill_wait }
     }
     is (s_refill_wait) {
-      when (io.mem.abort.valid) { state := s_request }
       when (io.mem.grant.valid) { state := s_refill }
     }
     is (s_refill) {
@@ -123,11 +123,18 @@ class DataWriteReq(implicit conf: DCacheConfig) extends Bundle {
   override def clone = new DataWriteReq().asInstanceOf[this.type]
 }
 
+class InternalProbe(implicit conf: DCacheConfig) extends Probe {
+  val client_xact_id = Bits(width = CLIENT_XACT_ID_MAX_BITS)
+
+  override def clone = new InternalProbe().asInstanceOf[this.type]
+}
+
 class WritebackReq(implicit conf: DCacheConfig) extends Bundle {
   val tag = Bits(width = conf.tagbits)
   val idx = Bits(width = conf.idxbits)
   val way_en = Bits(width = conf.ways)
-  val client_xact_id = Bits(width = CLIENT_XACT_ID_BITS)
+  val client_xact_id = Bits(width = CLIENT_XACT_ID_MAX_BITS)
+  val r_type = UFix(width = RELEASE_TYPE_MAX_BITS)
 
   override def clone = new WritebackReq().asInstanceOf[this.type]
 }
@@ -153,7 +160,7 @@ class MetaWriteReq(implicit conf: DCacheConfig) extends Bundle {
   override def clone = new MetaWriteReq().asInstanceOf[this.type]
 }
 
-class MSHR(id: Int)(implicit conf: DCacheConfig) extends Component {
+class MSHR(id: Int)(implicit conf: DCacheConfig, lnconf: LogicalNetworkConfiguration) extends Component {
   val io = new Bundle {
     val req_pri_val = Bool(INPUT)
     val req_pri_rdy = Bool(OUTPUT)
@@ -162,17 +169,17 @@ class MSHR(id: Int)(implicit conf: DCacheConfig) extends Component {
     val req_bits = new MSHRReq().asInput
     val req_sdq_id = UFix(INPUT, log2Up(conf.nsdq))
 
-    val idx_match = Bool(OUTPUT)
-    val tag = Bits(OUTPUT, conf.tagbits)
+    val idx_match = Bool(OUTPUT)
+    val probe_idx_match = Bool(OUTPUT)
+    val tag = Bits(OUTPUT, conf.tagbits)
 
     val mem_req = (new FIFOIO) { new Acquire }
     val mem_resp = new DataWriteReq().asOutput
     val meta_read = (new FIFOIO) { new MetaReadReq }
     val meta_write = (new FIFOIO) { new MetaWriteReq }
     val replay = (new FIFOIO) { new Replay() }
-    val mem_abort = (new PipeIO) { new Abort }.flip
-    val mem_rep = (new PipeIO) { new Grant }.flip
-    val mem_finish = (new FIFOIO) { new GrantAck }
+    val mem_grant = (new PipeIO) { (new LogicalNetworkIO) {new Grant} }.flip
+    val mem_finish = (new FIFOIO) { (new LogicalNetworkIO) {new GrantAck} }
     val wb_req = (new FIFOIO) { new WritebackReq }
     val probe_writeback = (new FIFOIO) { Bool() }.flip
   }
@@ -180,33 +187,28 @@ class MSHR(id: Int)(implicit conf: DCacheConfig) extends Component {
   val s_invalid :: s_wb_req :: s_wb_resp :: s_meta_clear :: s_refill_req :: s_refill_resp :: s_meta_write_req :: s_meta_write_resp :: s_drain_rpq :: Nil = Enum(9) { UFix() }
   val state = Reg(resetVal = s_invalid)
 
-  val acq_type = Reg { UFix() }
+  val acquire_type = Reg { UFix() }
+  val release_type = Reg { UFix() }
   val line_state = Reg { UFix() }
   val refill_count = Reg { UFix(width = log2Up(REFILL_CYCLES)) }
   val req = Reg { new MSHRReq() }
-  val writeback_probed = Reg{Bool()}
 
   val req_cmd = io.req_bits.cmd
   val req_idx = req.addr(conf.untagbits-1,conf.offbits)
   val idx_match = req_idx === io.req_bits.addr(conf.untagbits-1,conf.offbits)
   val sec_rdy = idx_match && (state === s_wb_req || state === s_wb_resp || state === s_meta_clear || (state === s_refill_req || state === s_refill_resp) && !conf.co.needsTransactionOnSecondaryMiss(req_cmd, io.mem_req.bits))
 
+  val reply = io.mem_grant.valid && io.mem_grant.bits.payload.client_xact_id === UFix(id)
+  val refill_done = reply && refill_count.andR
+  val wb_done = reply && (state === s_wb_resp)
+
   val rpq = (new Queue(conf.nrpq)) { new Replay }
   rpq.io.enq.valid := (io.req_pri_val && io.req_pri_rdy || io.req_sec_val && sec_rdy) && !isPrefetch(req_cmd)
   rpq.io.enq.bits := io.req_bits
   rpq.io.enq.bits.sdq_id := io.req_sdq_id
   rpq.io.deq.ready := io.replay.ready && state === s_drain_rpq || state === s_invalid
 
-  val abort = io.mem_abort.valid && io.mem_abort.bits.client_xact_id === UFix(id)
-  val reply = io.mem_rep.valid && io.mem_rep.bits.client_xact_id === UFix(id)
-  val refill_done = reply && refill_count.andR
-  val wb_done = reply && (state === s_wb_resp)
-
-  io.wb_req.valid := Bool(false)
-  when (io.probe_writeback.valid && idx_match && io.probe_writeback.bits) {
-    writeback_probed := true
-  }
-  io.probe_writeback.ready := !idx_match || state != s_wb_req && state != s_wb_resp && state != s_meta_clear
+  io.probe_writeback.ready := (state != s_wb_req && state != s_wb_resp && state != s_meta_clear) || !idx_match //TODO != s_drain_rpq ?
 
   when (state === s_drain_rpq && !rpq.io.deq.valid) {
     state := s_invalid
@@ -222,38 +224,31 @@ class MSHR(id: Int)(implicit conf: DCacheConfig) extends Component {
     when (refill_done) { state := s_meta_write_req }
     when (reply) {
       refill_count := refill_count + UFix(1)
-      line_state := conf.co.newStateOnGrant(io.mem_rep.bits, io.mem_req.bits)
+      line_state := conf.co.newStateOnGrant(io.mem_grant.bits.payload, io.mem_req.bits)
     }
-    when (abort) { state := s_refill_req }
   }
   when (state === s_refill_req) {
-    when (abort) { state := s_refill_req }
-    .elsewhen (io.mem_req.ready) { state := s_refill_resp }
+    when (io.mem_req.ready) { state := s_refill_resp }
   }
   when (state === s_meta_clear && io.meta_write.ready) {
     state := s_refill_req
   }
   when (state === s_wb_resp) {
     when (reply) { state := s_meta_clear }
-    when (abort) { state := Mux(writeback_probed, s_refill_req, s_wb_req) }
   }
-  when (state === s_wb_req) {
-    io.wb_req.valid := true
-    when (writeback_probed) {
-      io.wb_req.valid := false
-      state := s_refill_req
-    }.elsewhen (io.wb_req.ready) { state := s_wb_resp }
+  when (state === s_wb_req && io.wb_req.ready) {
+    state := s_wb_resp
   }
 
   when (io.req_sec_val && io.req_sec_rdy) { // s_wb_req, s_wb_resp, s_refill_req
-    acq_type := conf.co.getAcquireTypeOnSecondaryMiss(req_cmd, conf.co.newStateOnFlush(), io.mem_req.bits)
+    acquire_type := conf.co.getAcquireTypeOnSecondaryMiss(req_cmd, conf.co.newStateOnFlush(), io.mem_req.bits)
   }
   when (io.req_pri_val && io.req_pri_rdy) {
     line_state := conf.co.newStateOnFlush()
     refill_count := UFix(0)
-    acq_type := conf.co.getAcquireTypeOnPrimaryMiss(req_cmd, conf.co.newStateOnFlush())
+    acquire_type := conf.co.getAcquireTypeOnPrimaryMiss(req_cmd, conf.co.newStateOnFlush())
+    release_type := conf.co.getReleaseTypeOnVoluntaryWriteback() //TODO downgrades etc
     req := io.req_bits
-    writeback_probed := false
 
     state := Mux(conf.co.needsWriteback(io.req_bits.old_meta.state), s_wb_req, s_refill_req)
     when (io.req_bits.tag_match) {
@@ -266,9 +261,10 @@ class MSHR(id: Int)(implicit conf: DCacheConfig) extends Component {
     }
   }
 
-  val finish_q = (new Queue(2 /* wb + refill */)) { new GrantAck }
-  finish_q.io.enq.valid := wb_done || refill_done
-  finish_q.io.enq.bits.master_xact_id := io.mem_rep.bits.master_xact_id
+  val finish_q = (new Queue(2 /* wb + refill */)) { (new LogicalNetworkIO){new GrantAck} }
+  finish_q.io.enq.valid := (wb_done || refill_done) && conf.co.requiresAck(io.mem_grant.bits.payload)
+  finish_q.io.enq.bits.payload.master_xact_id := io.mem_grant.bits.payload.master_xact_id
+  finish_q.io.enq.bits.header.dst := io.mem_grant.bits.header.src
   val can_finish = state === s_invalid || state === s_refill_req || state === s_refill_resp
   io.mem_finish.valid := finish_q.io.deq.valid && can_finish
   finish_q.io.deq.ready := io.mem_finish.ready && can_finish
@@ -287,13 +283,15 @@ class MSHR(id: Int)(implicit conf: DCacheConfig) extends Component {
   io.meta_write.bits.data.tag := io.tag
   io.meta_write.bits.way_en := req.way_en
 
+  io.wb_req.valid := state === s_wb_req
   io.wb_req.bits.tag := req.old_meta.tag
   io.wb_req.bits.idx := req_idx
   io.wb_req.bits.way_en := req.way_en
   io.wb_req.bits.client_xact_id := Bits(id)
+  io.wb_req.bits.r_type := conf.co.getReleaseTypeOnVoluntaryWriteback()
 
   io.mem_req.valid := state === s_refill_req
-  io.mem_req.bits.a_type := acq_type
+  io.mem_req.bits.a_type := acquire_type
   io.mem_req.bits.addr := Cat(io.tag, req_idx).toUFix
   io.mem_req.bits.client_xact_id := Bits(id)
   io.mem_finish <> finish_q.io.deq
@@ -313,7 +311,7 @@ class MSHR(id: Int)(implicit conf: DCacheConfig) extends Component {
   }
 }
 
-class MSHRFile(implicit conf: DCacheConfig) extends Component {
+class MSHRFile(implicit conf: DCacheConfig, lnconf: LogicalNetworkConfiguration) extends Component {
   val io = new Bundle {
     val req = (new FIFOIO) { new MSHRReq }.flip
     val secondary_miss = Bool(OUTPUT)
@@ -323,11 +321,10 @@ class MSHRFile(implicit conf: DCacheConfig) extends Component {
     val meta_read = (new FIFOIO) { new MetaReadReq }
     val meta_write = (new FIFOIO) { new MetaWriteReq }
     val replay = (new FIFOIO) { new Replay }
-    val mem_abort = (new PipeIO) { new Abort }.flip
-    val mem_rep = (new PipeIO) { new Grant }.flip
-    val mem_finish = (new FIFOIO) { new GrantAck }
+    val mem_grant = (new PipeIO) { (new LogicalNetworkIO){new Grant} }.flip
+    val mem_finish = (new FIFOIO) { (new LogicalNetworkIO){new GrantAck} }
     val wb_req = (new FIFOIO) { new WritebackReq }
-    val probe = (new FIFOIO) { Bool() }.flip
+    val probe = (new FIFOIO) { new Bool() }.flip
 
     val fence_rdy = Bool(OUTPUT)
   }
@@ -346,7 +343,7 @@ class MSHRFile(implicit conf: DCacheConfig) extends Component {
   val meta_read_arb = (new Arbiter(conf.nmshr)) { new MetaReadReq }
   val meta_write_arb = (new Arbiter(conf.nmshr)) { new MetaWriteReq }
   val mem_req_arb = (new Arbiter(conf.nmshr)) { new Acquire }
-  val mem_finish_arb = (new Arbiter(conf.nmshr)) { new GrantAck }
+  val mem_finish_arb = (new Arbiter(conf.nmshr)) { (new LogicalNetworkIO){new GrantAck} }
  val wb_req_arb = (new Arbiter(conf.nmshr)) { new WritebackReq }
  val replay_arb = (new Arbiter(conf.nmshr)) { new Replay() }
  val alloc_arb = (new Arbiter(conf.nmshr)) { Bool() }
@@ -383,8 +380,7 @@ class MSHRFile(implicit conf: DCacheConfig) extends Component {
     mshr.io.probe_writeback.valid := io.probe.valid
     mshr.io.probe_writeback.bits := wb_probe_match
 
-    mshr.io.mem_abort <> io.mem_abort
-    mshr.io.mem_rep <> io.mem_rep
+    mshr.io.mem_grant <> io.mem_grant
     memRespMux(i) := mshr.io.mem_resp
 
     pri_rdy = pri_rdy || mshr.io.req_pri_rdy
@@ -404,7 +400,7 @@ class MSHRFile(implicit conf: DCacheConfig) extends Component {
 
   io.req.ready := Mux(idx_match, tag_match && sec_rdy, pri_rdy) && sdq_rdy
   io.secondary_miss := idx_match
-  io.mem_resp := memRespMux(io.mem_rep.bits.client_xact_id)
+  io.mem_resp := memRespMux(io.mem_grant.bits.payload.client_xact_id)
   io.fence_rdy := !fence
   io.probe.ready := writeback_probe_rdy || !wb_probe_match
 
@@ -426,13 +422,11 @@ class WritebackUnit(implicit conf: DCacheConfig) extends Component {
     val meta_read = (new FIFOIO) { new MetaReadReq }
     val data_req = (new FIFOIO) { new DataReadReq() }
     val data_resp = Bits(INPUT, conf.bitsperrow)
-    val mem_req = (new FIFOIO) { new Acquire }
-    val mem_req_data = (new FIFOIO) { new AcquireData }
+    val release = (new FIFOIO) { new Release }
     val release_data = (new FIFOIO) { new ReleaseData }
   }
 
   val valid = Reg(resetVal = Bool(false))
-  val is_probe = Reg{Bool()}
   val r1_data_req_fired = Reg(resetVal = Bool(false))
   val r2_data_req_fired = Reg(resetVal = Bool(false))
   val cmd_sent = Reg{Bool()}
@@ -447,7 +441,7 @@ class WritebackUnit(implicit conf: DCacheConfig) extends Component {
     cnt := cnt + 1
   }
 
-  when (r2_data_req_fired && !Mux(is_probe, io.release_data.ready, io.mem_req_data.ready)) {
+  when (r2_data_req_fired && !io.release_data.ready) {
     r1_data_req_fired := false
     r2_data_req_fired := false
     cnt := cnt - Mux[UFix](r1_data_req_fired, 2, 1)
@@ -457,20 +451,18 @@ class WritebackUnit(implicit conf: DCacheConfig) extends Component {
       valid := false
     }
 
-    when (valid && io.mem_req.ready) {
+    when (valid && io.release.ready) {
      cmd_sent := true
    }
  }
  when (io.probe.fire()) {
    valid := true
-    is_probe := true
    cmd_sent := true
    cnt := 0
    req := io.probe.bits
  }
  when (io.req.fire()) {
    valid := true
-    is_probe := false
    cmd_sent := false
    cnt := 0
    req := io.req.bits
@@ -483,22 +475,21 @@ class WritebackUnit(implicit conf: DCacheConfig) extends Component {
   io.data_req.bits.way_en := req.way_en
   io.data_req.bits.addr := Cat(req.idx, cnt(log2Up(REFILL_CYCLES)-1,0)) << conf.ramoffbits
 
-  io.mem_req.valid := valid && !cmd_sent
-  io.mem_req.bits.a_type := conf.co.getAcquireTypeOnWriteback()
-  io.mem_req.bits.addr := Cat(req.tag, req.idx).toUFix
-  io.mem_req.bits.client_xact_id := req.client_xact_id
-  io.mem_req_data.valid := r2_data_req_fired && !is_probe
-  io.mem_req_data.bits.data := io.data_resp
-  io.release_data.valid := r2_data_req_fired && is_probe
+  io.release.valid := valid && !cmd_sent
+  io.release.bits.r_type := req.r_type
+  io.release.bits.addr := Cat(req.tag, req.idx).toUFix
+  io.release.bits.client_xact_id := req.client_xact_id
+  io.release.bits.master_xact_id := UFix(0)
+  io.release_data.valid := r2_data_req_fired
   io.release_data.bits.data := io.data_resp
 
   io.meta_read.valid := fire
-  io.meta_read.bits.addr := io.mem_req.bits.addr << conf.offbits
+  io.meta_read.bits.addr := io.release.bits.addr << conf.offbits
 }
 
 class ProbeUnit(implicit conf: DCacheConfig) extends Component {
   val io = new Bundle {
-    val req = (new FIFOIO) { new Probe }.flip
+    val req = (new FIFOIO) { new InternalProbe }.flip
     val rep = (new FIFOIO) { new Release }
     val meta_read = (new FIFOIO) { new MetaReadReq }
     val meta_write = (new FIFOIO) { new MetaWriteReq }
@@ -512,7 +503,7 @@ class ProbeUnit(implicit conf: DCacheConfig) extends Component {
   val state = Reg(resetVal = s_invalid)
   val line_state = Reg() { UFix() }
   val way_en = Reg() { Bits() }
-  val req = Reg() { new Probe() }
+  val req = Reg() { new InternalProbe }
   val hit = way_en.orR
 
   when (state === s_meta_write && io.meta_write.ready) {
@@ -552,7 +543,7 @@ class ProbeUnit(implicit conf: DCacheConfig) extends Component {
 
   io.req.ready := state === s_invalid
   io.rep.valid := state === s_release
-  io.rep.bits := conf.co.newRelease(req, Mux(hit, line_state, conf.co.newStateOnFlush))
+  io.rep.bits := conf.co.newRelease(req, Mux(hit, line_state, conf.co.newStateOnFlush), req.client_xact_id)
 
   io.meta_read.valid := state === s_meta_read
   io.meta_read.bits.addr := req.addr << UFix(conf.offbits)
@@ -568,6 +559,8 @@ class ProbeUnit(implicit conf: DCacheConfig) extends Component {
   io.wb_req.bits.way_en := way_en
   io.wb_req.bits.idx := req.addr
   io.wb_req.bits.tag := req.addr >> UFix(conf.idxbits)
+  io.wb_req.bits.r_type := UFix(0) // DNC
+  io.wb_req.bits.client_xact_id := UFix(0) // DNC
 }
 
 class MetaDataArray(implicit conf: DCacheConfig) extends Component {
@@ -913,13 +906,16 @@ class HellaCache(implicit conf: DCacheConfig, lnconf: LogicalNetworkConfiguratio
   mshr.io.req.bits.way_en := Mux(s2_tag_match, s2_tag_match_way, s2_replaced_way_en)
   mshr.io.req.bits.data := s2_req.data
 
-  mshr.io.mem_rep.valid := io.mem.grant.fire()
-  mshr.io.mem_rep.bits := io.mem.grant.bits.payload
-  mshr.io.mem_abort.valid := io.mem.abort.valid
-  mshr.io.mem_abort.bits := io.mem.abort.bits.payload
-  io.mem.abort.ready := Bool(true)
+  mshr.io.mem_grant.valid := io.mem.grant.fire()
+  mshr.io.mem_grant.bits := io.mem.grant.bits
   when (mshr.io.req.fire()) { replacer.miss }
 
+  io.mem.acquire <> FIFOedLogicalNetworkIOWrapper(mshr.io.mem_req)
+  //TODO io.mem.acquire_data should be connected to uncached store data generator
+  //io.mem.acquire_data <> FIFOedLogicalNetworkIOWrapper(TODO)
+  io.mem.acquire_data.valid := Bool(false)
+  io.mem.acquire_data.bits.payload.data := UFix(0)
+
   // replays
   readArb.io.in(1).valid := mshr.io.replay.valid
   readArb.io.in(1).bits := mshr.io.replay.bits
@@ -930,8 +926,11 @@ class HellaCache(implicit conf: DCacheConfig, lnconf: LogicalNetworkConfiguratio
   metaWriteArb.io.in(0) <> mshr.io.meta_write
 
   // probes
+  val releaseArb = (new Arbiter(2)) { new Release }
+  FIFOedLogicalNetworkIOWrapper(releaseArb.io.out) <> io.mem.release
+
   prober.io.req <> FIFOedLogicalNetworkIOUnwrapper(io.mem.probe)
-  FIFOedLogicalNetworkIOWrapper(prober.io.rep) <> io.mem.release
+  prober.io.rep <> releaseArb.io.in(1)
   prober.io.mshr_req <> mshr.io.probe
   prober.io.wb_req <> wb.io.probe
   prober.io.way_en := s2_tag_match_way
@@ -952,6 +951,7 @@ class HellaCache(implicit conf: DCacheConfig, lnconf: LogicalNetworkConfiguratio
   wb.io.meta_read <> metaReadArb.io.in(3)
   wb.io.data_req <> readArb.io.in(2)
   wb.io.data_resp := s2_data_corrected
+  releaseArb.io.in(0) <> wb.io.release
   FIFOedLogicalNetworkIOWrapper(wb.io.release_data) <> io.mem.release_data
 
   // store->load bypassing
@@ -1015,11 +1015,5 @@ class HellaCache(implicit conf: DCacheConfig, lnconf: LogicalNetworkConfiguratio
   io.cpu.resp.bits.data_subword := loadgen.byte
   io.cpu.resp.bits.store_data := s2_req.data
 
-  val acquire_arb = (new Arbiter(2)) { new Acquire }
-  acquire_arb.io.in(0) <> wb.io.mem_req
-  acquire_arb.io.in(1) <> mshr.io.mem_req
-  io.mem.acquire <> FIFOedLogicalNetworkIOWrapper(acquire_arb.io.out)
-
-  io.mem.acquire_data <> FIFOedLogicalNetworkIOWrapper(wb.io.mem_req_data)
-  io.mem.grant_ack <> FIFOedLogicalNetworkIOWrapper(mshr.io.mem_finish)
+  io.mem.grant_ack <> mshr.io.mem_finish
 }
@@ -24,41 +24,36 @@ case class RocketConfiguration(lnConf: LogicalNetworkConfiguration, co: Coherenc
 class Tile(resetSignal: Bool = null)(confIn: RocketConfiguration) extends Component(resetSignal) with ClientCoherenceAgent
 {
   val memPorts = 2 + confIn.vec
+  val dcachePortId = 0
+  val icachePortId = 1
+  val vicachePortId = 2
   implicit val dcConf = confIn.dcache.copy(reqtagbits = confIn.dcacheReqTagBits + log2Up(memPorts), databits = confIn.xprlen)
   implicit val lnConf = confIn.lnConf
   implicit val conf = confIn.copy(dcache = dcConf)
 
   val io = new Bundle {
     val tilelink = new TileLinkIO
-    val host = new HTIFIO(lnConf.nTiles)
+    val host = new HTIFIO(lnConf.nClients)
   }
 
   val core = new Core
   val icache = new Frontend()(confIn.icache, lnConf)
   val dcache = new HellaCache
 
-  val arbiter = new MemArbiter(memPorts)
-  arbiter.io.requestor(0) <> dcache.io.mem
-  arbiter.io.requestor(1) <> icache.io.mem
+  val arbiter = new UncachedTileLinkIOArbiter(memPorts)
+  arbiter.io.in(dcachePortId) <> dcache.io.mem
+  arbiter.io.in(icachePortId) <> icache.io.mem
 
-  io.tilelink.acquire.valid := arbiter.io.mem.acquire.valid
-  arbiter.io.mem.acquire.ready := io.tilelink.acquire.ready
-  io.tilelink.acquire.bits := arbiter.io.mem.acquire.bits
-  io.tilelink.acquire_data.valid := dcache.io.mem.acquire_data.valid
-  dcache.io.mem.acquire_data.ready := io.tilelink.acquire_data.ready
-  io.tilelink.acquire_data.bits := dcache.io.mem.acquire_data.bits
-  arbiter.io.mem.abort <> io.tilelink.abort
-  arbiter.io.mem.grant <> io.tilelink.grant
-  io.tilelink.grant_ack.valid := arbiter.io.mem.grant_ack.valid
-  arbiter.io.mem.grant_ack.ready := io.tilelink.grant_ack.ready
-  io.tilelink.grant_ack.bits := arbiter.io.mem.grant_ack.bits
+  io.tilelink.acquire <> arbiter.io.out.acquire
+  io.tilelink.acquire_data <> arbiter.io.out.acquire_data
+  arbiter.io.out.grant <> io.tilelink.grant
+  io.tilelink.grant_ack <> arbiter.io.out.grant_ack
   dcache.io.mem.probe <> io.tilelink.probe
-  io.tilelink.release.valid := dcache.io.mem.release.valid
+  io.tilelink.release_data <> dcache.io.mem.release_data
+  io.tilelink.release.valid := dcache.io.mem.release.valid
   dcache.io.mem.release.ready := io.tilelink.release.ready
   io.tilelink.release.bits := dcache.io.mem.release.bits
-  io.tilelink.release_data.valid := dcache.io.mem.release_data.valid
-  dcache.io.mem.release_data.ready := io.tilelink.release_data.ready
-  io.tilelink.release_data.bits := dcache.io.mem.release_data.bits
+  io.tilelink.release.bits.payload.client_xact_id := Cat(dcache.io.mem.release.bits.payload.client_xact_id, UFix(dcachePortId, log2Up(memPorts))) // Mimic client id extension done by UncachedTileLinkIOArbiter for Acquires from either client)
 
   val ioSubBundles = io.tilelink.getClass.getMethods.filter( x =>
     classOf[ClientSourcedIO[Data]].isAssignableFrom(x.getReturnType)).map{ m =>
@@ -70,7 +65,7 @@ class Tile(resetSignal: Bool = null)(confIn: RocketConfiguration) extends Compon
 
   if (conf.vec) {
     val vicache = new Frontend()(ICacheConfig(128, 1, conf.co), lnConf) // 128 sets x 1 ways (8KB)
-    arbiter.io.requestor(2) <> vicache.io.mem
+    arbiter.io.in(vicachePortId) <> vicache.io.mem
     core.io.vimem <> vicache.io.cpu
   }
 