Merge pull request #826 from freechipsproject/tlb2

Various memory system improvements:
- optional ECC on L1 D$ data (new dataECC/dataECCBytes parameters, per-ECC-granule write masks, read-modify-write for sub-granule stores, correction and writeback of correctable errors)
- bundle frontend exceptions into a FrontendExceptions type threaded through the Frontend and IBuf
- TLB: drop the store bit in favor of the full memory command, reserve an entry for PTW access exceptions so they report as access faults rather than page faults, and split supervisor read/write from execute permission checks
- carry SFENCE rs2 through the EX/MEM/WB pipeline registers
Andrew Waterman 2017-06-28 13:51:24 -07:00 committed by GitHub
commit 936096dd42
9 changed files with 169 additions and 124 deletions
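
For orientation, the new cache ECC knobs land in DCacheParams (see the DCacheParams hunk below); a hypothetical configuration, with illustrative values that are not part of this commit, might look like:

// Sketch: SEC-DED protection over 8-byte data granules.
// dataECCBytes must be a power of two and at most wordBytes.
val dcacheParams = DCacheParams(
  dataECC = new SECDEDCode,
  dataECCBytes = 8)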

@@ -11,32 +11,42 @@ import uncore.tilelink2._
import uncore.util._
import util._
import TLMessages._
import scala.math.min
class DCacheDataReq(implicit p: Parameters) extends L1HellaCacheBundle()(p) {
val eccBytes = cacheParams.dataECCBytes
val addr = Bits(width = untagBits)
val write = Bool()
val wdata = Bits(width = rowBits)
val wmask = Bits(width = rowBytes)
val wdata = UInt(width = cacheParams.dataECC.width(eccBytes*8) * rowBytes/eccBytes)
val wordMask = UInt(width = rowBytes / wordBytes)
val eccMask = UInt(width = wordBytes / eccBytes)
val way_en = Bits(width = nWays)
}
class DCacheDataArray(implicit p: Parameters) extends L1HellaCacheModule()(p) {
val io = new Bundle {
val req = Valid(new DCacheDataReq).flip
val resp = Vec(nWays, Bits(OUTPUT, rowBits))
val resp = Vec(nWays, UInt(width = req.bits.wdata.getWidth)).asOutput
}
val data_arrays = Seq.fill(nWays) { SeqMem(nSets*refillCycles, Vec(rowBytes, Bits(width=8))) }
require(rowBytes % wordBytes == 0)
val eccBits = cacheParams.dataECCBytes * 8
val encBits = cacheParams.dataECC.width(eccBits)
val encWordBits = encBits * (wordBits / eccBits)
val eccMask = if (eccBits == wordBits) Seq(true.B) else io.req.bits.eccMask.toBools
val wMask = if (nWays == 1) eccMask else (0 until nWays).flatMap(i => eccMask.map(_ && io.req.bits.way_en(i)))
val wWords = io.req.bits.wdata.grouped(encBits * (wordBits / eccBits))
val addr = io.req.bits.addr >> rowOffBits
for ((array, w) <- data_arrays zipWithIndex) {
val valid = io.req.valid && (Bool(nWays == 1) || io.req.bits.way_en(w))
val data_arrays = Seq.fill(rowBytes / wordBytes) { SeqMem(nSets * refillCycles, Vec(nWays * (wordBits / eccBits), UInt(width = encBits))) }
val rdata = for ((array, i) <- data_arrays zipWithIndex) yield {
val valid = io.req.valid && (Bool(data_arrays.size == 1) || io.req.bits.wordMask(i))
when (valid && io.req.bits.write) {
val data = Vec.tabulate(rowBytes)(i => io.req.bits.wdata(8*(i+1)-1, 8*i))
array.write(addr, data, io.req.bits.wmask.toBools)
val wData = wWords(i).grouped(encBits)
array.write(addr, Vec((0 until nWays).flatMap(i => wData)), wMask)
}
io.resp(w) := array.read(addr, valid && !io.req.bits.write).asUInt
val data = array.read(addr, valid && !io.req.bits.write)
data.grouped(wordBits / eccBits).map(_.asUInt).toSeq
}
(io.resp zip rdata.transpose).foreach { case (resp, data) => resp := data.asUInt }
}
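
Note: the wMask construction above interleaves the per-ECC-granule enables across ways, matching the way-major column layout of each data-array row. A plain-Scala sketch of the flattening (hypothetical helper, not in the diff):

def flattenWMask(wayEn: Seq[Boolean], eccEn: Seq[Boolean]): Seq[Boolean] =
  wayEn.flatMap(w => eccEn.map(_ && w))  // way-major, granule-minor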
class DCache(hartid: Int, val scratch: () => Option[AddressSet] = () => None)(implicit p: Parameters) extends HellaCache(hartid)(p) {
@@ -44,9 +54,15 @@ class DCache(hartid: Int, val scratch: () => Option[AddressSet] = () => None)(im
}
class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
// no ECC support
require(cacheParams.tagECC.isInstanceOf[IdentityCode])
require(cacheParams.dataECC.isInstanceOf[IdentityCode])
// no tag ECC support
val tECC = cacheParams.tagECC
val dECC = cacheParams.dataECC
val eccBytes = cacheParams.dataECCBytes
val eccBits = eccBytes * 8
require(tECC.isInstanceOf[IdentityCode])
require(isPow2(eccBytes) && eccBytes <= wordBytes)
require(eccBytes == 1 || !dECC.isInstanceOf[IdentityCode])
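// a read-modify-write store path is needed for AMOs and for stores narrower than the ECC granule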
val usingRMW = eccBytes > 1 || usingAtomics
// tags
val replacer = cacheParams.replacement
@@ -58,6 +74,7 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
val data = Module(new DCacheDataArray)
val dataArb = Module(new Arbiter(new DCacheDataReq, 4))
data.io.req <> dataArb.io.out
data.io.req.bits.wdata := encodeData(dataArb.io.out.bits.wdata(rowBits-1, 0))
dataArb.io.out.ready := true
val rational = p(coreplex.RocketCrossing) match {
@@ -65,13 +82,8 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
case _ => false
}
val tl_out_a = Wire(tl_out.a)
val q_depth = if (rational) min(2, maxUncachedInFlight-1) else 0
if (q_depth <= 0) {
tl_out.a <> tl_out_a
} else {
tl_out.a <> Queue(tl_out_a, q_depth, flow = true, pipe = true)
}
val q_depth = if (rational) (2 min maxUncachedInFlight-1) else 0
val tl_out_a = if (q_depth == 0) tl_out.a else Queue(tl_out.a, q_depth, flow = true, pipe = true)
val s1_valid = Reg(next=io.cpu.req.fire(), init=Bool(false))
val s1_probe = Reg(next=tl_out.b.fire(), init=Bool(false))
@@ -84,7 +96,7 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
s1_req := io.cpu.req.bits
s1_req.addr := Cat(io.cpu.req.bits.addr >> untagBits, metaReadArb.io.out.bits.idx, io.cpu.req.bits.addr(blockOffBits-1,0))
}
val s1_read = isRead(s1_req.cmd)
val s1_read = needsRead(s1_req)
val s1_write = isWrite(s1_req.cmd)
val s1_readwrite = s1_read || s1_write
val s1_sfence = s1_req.cmd === M_SFENCE
@@ -94,8 +106,7 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
val cached_grant_wait = Reg(init=Bool(false))
val release_ack_wait = Reg(init=Bool(false))
val release_state = Reg(init=s_ready)
val pstore1_valid = Wire(Bool())
val pstore2_valid = Reg(Bool())
val any_pstore_valid = Wire(Bool())
val inWriteback = release_state.isOneOf(s_voluntary_writeback, s_probe_rep_dirty)
val releaseWay = Wire(UInt())
io.cpu.req.ready := (release_state === s_ready) && !cached_grant_wait && !s1_nack
@@ -106,11 +117,13 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
val uncachedReqs = Seq.fill(maxUncachedInFlight) { Reg(new HellaCacheReq) }
// hit initiation path
dataArb.io.in(3).valid := io.cpu.req.valid && isRead(io.cpu.req.bits.cmd)
val s0_read = needsRead(io.cpu.req.bits)
dataArb.io.in(3).valid := io.cpu.req.valid && s0_read
dataArb.io.in(3).bits.write := false
dataArb.io.in(3).bits.addr := io.cpu.req.bits.addr
dataArb.io.in(3).bits.wordMask := UIntToOH(io.cpu.req.bits.addr.extract(rowOffBits-1,offsetlsb))
dataArb.io.in(3).bits.way_en := ~UInt(0, nWays)
when (!dataArb.io.in(3).ready && isRead(io.cpu.req.bits.cmd)) { io.cpu.req.ready := false }
when (!dataArb.io.in(3).ready && s0_read) { io.cpu.req.ready := false }
metaReadArb.io.in(2).valid := io.cpu.req.valid
metaReadArb.io.in(2).bits.idx := io.cpu.req.bits.addr(idxMSB, idxLSB)
metaReadArb.io.in(2).bits.way_en := ~UInt(0, nWays)
@@ -127,7 +140,6 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
tlb.io.req.bits.passthrough := s1_req.phys
tlb.io.req.bits.vaddr := s1_req.addr
tlb.io.req.bits.instruction := false
tlb.io.req.bits.store := s1_write
tlb.io.req.bits.size := s1_req.typ
tlb.io.req.bits.cmd := s1_req.cmd
when (!tlb.io.req.ready && !io.cpu.req.bits.phys) { io.cpu.req.ready := false }
@@ -183,13 +195,20 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
val s2_hit_state = RegEnable(s1_hit_state, s1_valid_not_nacked)
val s2_hit_valid = s2_hit_state.isValid()
val (s2_hit, s2_grow_param, s2_new_hit_state) = s2_hit_state.onAccess(s2_req.cmd)
val s2_valid_hit = s2_valid_masked && s2_readwrite && s2_hit
val s2_valid_miss = s2_valid_masked && s2_readwrite && !s2_hit && !(pstore1_valid || pstore2_valid) && !release_ack_wait
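// ECC-decode the whole row read in stage 2; the access sees a data error if any granule of its word fails to decode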
val s2_data_decoded = decodeData(s2_data)
val s2_word_idx = s2_req.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes))
val s2_data_error = needsRead(s2_req) && (s2_data_decoded.map(_.error).grouped(wordBits/eccBits).map(_.reduce(_||_)).toSeq)(s2_word_idx)
val s2_data_corrected = (s2_data_decoded.map(_.corrected): Seq[UInt]).asUInt
val s2_data_uncorrected = (s2_data_decoded.map(_.uncorrected): Seq[UInt]).asUInt
val s2_valid_hit_pre_data_ecc = s2_valid_masked && s2_readwrite && s2_hit
val s2_valid_data_error = s2_valid_hit_pre_data_ecc && s2_data_error
val s2_valid_hit = s2_valid_hit_pre_data_ecc && !s2_data_error
val s2_valid_miss = s2_valid_masked && s2_readwrite && !s2_hit && !any_pstore_valid && !release_ack_wait
val s2_valid_cached_miss = s2_valid_miss && !s2_uncached && !uncachedInFlight.asUInt.orR
val s2_victimize = s2_valid_cached_miss || s2_flush_valid
val s2_victimize = Bool(!usingDataScratchpad) && (s2_valid_cached_miss || s2_valid_data_error || s2_flush_valid)
val s2_valid_uncached = s2_valid_miss && s2_uncached
val s2_victim_way = Mux(s2_hit_valid && !s2_flush_valid, s2_hit_way, UIntToOH(RegEnable(s1_victim_way, s1_valid_not_nacked || s1_flush_valid)))
val s2_victim_tag = RegEnable(s1_victim_meta.tag, s1_valid_not_nacked || s1_flush_valid)
val s2_victim_tag = Mux(s2_valid_data_error, s2_req.addr >> untagBits, RegEnable(s1_victim_meta.tag, s1_valid_not_nacked || s1_flush_valid))
val s2_victim_state = Mux(s2_hit_valid && !s2_flush_valid, s2_hit_state, RegEnable(s1_victim_meta.coh, s1_valid_not_nacked || s1_flush_valid))
val s2_victim_valid = s2_victim_state.isValid()
val (s2_prb_ack_data, s2_report_param, probeNewCoh)= s2_probe_state.onProbe(probe_bits.param)
@@ -198,12 +217,6 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
io.cpu.s2_nack := s2_valid && !s2_valid_hit && !(s2_valid_uncached && tl_out_a.ready && !uncachedInFlight.asUInt.andR)
when (io.cpu.s2_nack || (s2_valid_hit && s2_update_meta)) { s1_nack := true }
val s3_valid = Reg(next = s2_valid, init=Bool(false))
val s3_uncached = Reg(next = s2_uncached, init=Bool(false))
when (s2_valid_cached_miss) {
assert( !(s3_valid && s3_uncached) )
}
// load reservations
val s2_lr = Bool(usingAtomics && !usingDataScratchpad) && s2_req.cmd === M_XLR
val s2_sc = Bool(usingAtomics && !usingDataScratchpad) && s2_req.cmd === M_XSC
@@ -229,20 +242,24 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
}
// pending store buffer
val s2_correct = s2_data_error && !any_pstore_valid && Bool(usingDataScratchpad)
val s2_valid_correct = s2_valid_hit_pre_data_ecc && s2_correct
val pstore1_cmd = RegEnable(s1_req.cmd, s1_valid_not_nacked && s1_write)
val pstore1_typ = RegEnable(s1_req.typ, s1_valid_not_nacked && s1_write)
val pstore1_addr = RegEnable(s1_paddr, s1_valid_not_nacked && s1_write)
val pstore1_data = RegEnable(io.cpu.s1_data.data, s1_valid_not_nacked && s1_write)
val pstore1_way = RegEnable(s1_hit_way, s1_valid_not_nacked && s1_write)
val pstore1_mask = RegEnable(s1_mask, s1_valid_not_nacked && s1_write)
val pstore1_storegen_data = Wire(init = pstore1_data)
val pstore1_amo = Bool(usingAtomics) && isRead(pstore1_cmd)
val pstore_drain_structural = pstore1_valid && pstore2_valid && ((s1_valid && s1_write) || pstore1_amo)
val pstore_drain_opportunistic = !(io.cpu.req.valid && isRead(io.cpu.req.bits.cmd))
val pstore1_rmw = Bool(usingRMW) && RegEnable(s1_read, s1_valid_not_nacked && s1_write)
val pstore1_valid = Wire(Bool())
val pstore2_valid = Reg(Bool())
any_pstore_valid := pstore1_valid || pstore2_valid
val pstore_drain_structural = pstore1_valid && pstore2_valid && ((s1_valid && s1_write) || pstore1_rmw)
val pstore_drain_opportunistic = !(io.cpu.req.valid && s0_read)
val pstore_drain_on_miss = releaseInFlight || io.cpu.s2_nack
val pstore_drain =
Bool(usingAtomics) && pstore_drain_structural ||
(((pstore1_valid && !pstore1_amo) || pstore2_valid) && (pstore_drain_opportunistic || pstore_drain_on_miss))
Bool(usingRMW) && pstore_drain_structural ||
(((pstore1_valid && !pstore1_rmw) || pstore2_valid) && (pstore_drain_opportunistic || pstore_drain_on_miss))
pstore1_valid := {
val s2_store_valid = s2_valid_hit && s2_write && !s2_sc_fail
val pstore1_held = Reg(Bool())
@@ -250,25 +267,27 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
pstore1_held := (s2_store_valid || pstore1_held) && pstore2_valid && !pstore_drain
s2_store_valid || pstore1_held
}
val advance_pstore1 = pstore1_valid && (pstore2_valid === pstore_drain)
val advance_pstore1 = (pstore1_valid || s2_valid_correct) && (pstore2_valid === pstore_drain)
pstore2_valid := pstore2_valid && !pstore_drain || advance_pstore1
val pstore2_addr = RegEnable(pstore1_addr, advance_pstore1)
val pstore2_way = RegEnable(pstore1_way, advance_pstore1)
val pstore2_addr = RegEnable(Mux(s2_correct, s2_req.addr, pstore1_addr), advance_pstore1)
val pstore2_way = RegEnable(Mux(s2_correct, s2_hit_way, pstore1_way), advance_pstore1)
val pstore2_storegen_data = RegEnable(pstore1_storegen_data, advance_pstore1)
val pstore2_storegen_mask = RegEnable(pstore1_mask, advance_pstore1)
val pstore2_storegen_mask = RegEnable(~Mux(s2_correct, 0.U, ~pstore1_mask), advance_pstore1)
dataArb.io.in(0).valid := pstore_drain
dataArb.io.in(0).bits.write := true
dataArb.io.in(0).bits.addr := Mux(pstore2_valid, pstore2_addr, pstore1_addr)
dataArb.io.in(0).bits.way_en := Mux(pstore2_valid, pstore2_way, pstore1_way)
dataArb.io.in(0).bits.wdata := Fill(rowWords, Mux(pstore2_valid, pstore2_storegen_data, pstore1_storegen_data))
val pstore_mask_shift = Mux(pstore2_valid, pstore2_addr, pstore1_addr).extract(rowOffBits-1,offsetlsb) << wordOffBits
dataArb.io.in(0).bits.wmask := Mux(pstore2_valid, pstore2_storegen_mask, pstore1_mask) << pstore_mask_shift
dataArb.io.in(0).bits.wdata := Fill(rowWords, Mux(pstore2_valid, pstore2_storegen_data, pstore1_data))
dataArb.io.in(0).bits.wordMask := UIntToOH(Mux(pstore2_valid, pstore2_addr, pstore1_addr).extract(rowOffBits-1,offsetlsb))
dataArb.io.in(0).bits.eccMask := eccMask(Mux(pstore2_valid, pstore2_storegen_mask, pstore1_mask))
// store->load RAW hazard detection
val s1_idx = s1_req.addr(idxMSB, wordOffBits)
def s1Depends(addr: UInt, mask: UInt) =
addr(idxMSB, wordOffBits) === s1_req.addr(idxMSB, wordOffBits) &&
Mux(s1_write, (eccByteMask(mask) & eccByteMask(s1_mask)).orR, (mask & s1_mask).orR)
val s1_raw_hazard = s1_read &&
((pstore1_valid && pstore1_addr(idxMSB, wordOffBits) === s1_idx && (pstore1_mask & s1_mask).orR) ||
(pstore2_valid && pstore2_addr(idxMSB, wordOffBits) === s1_idx && (pstore2_storegen_mask & s1_mask).orR))
((pstore1_valid && s1Depends(pstore1_addr, pstore1_mask)) ||
(pstore2_valid && s1Depends(pstore2_addr, pstore2_storegen_mask)))
when (s1_valid && s1_raw_hazard) { s1_nack := true }
metaWriteArb.io.in(0).valid := (s2_valid_hit && s2_update_meta) || (s2_victimize && !s2_victim_dirty)
@@ -303,13 +322,13 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
M_XA_MAXU -> edge.Arithmetic(a_source, access_address, a_size, a_data, TLAtomics.MAXU)._2))
} else {
// If no managers support atomics, fail an assertion if the processor requests one
assert (!(tl_out_a.valid && pstore1_amo && s2_write && s2_uncached))
assert (!(tl_out_a.valid && s2_read && s2_write && s2_uncached))
Wire(new TLBundleA(edge.bundle))
}
tl_out_a.valid := (s2_valid_cached_miss && !s2_victim_dirty) ||
(s2_valid_uncached && !uncachedInFlight.asUInt.andR)
tl_out_a.bits := Mux(!s2_uncached, acquire, Mux(!s2_write, get, Mux(!pstore1_amo, put, atomics)))
tl_out_a.bits := Mux(!s2_uncached, acquire, Mux(!s2_write, get, Mux(!s2_read, put, atomics)))
// Set pending bits for outstanding TileLink transaction
val a_sel = UIntToOH(a_source, maxUncachedInFlight+mmioOffset) >> mmioOffset
@@ -357,7 +376,7 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
}
}
when (grantIsUncachedData) {
s2_data := tl_out.d.bits.data
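// grant data bypasses the data array, so swizzle it: place the data bits where decodeData expects them without computing check bits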
s2_data := dummyEncodeData(tl_out.d.bits.data)
s2_req.cmd := M_XRD
s2_req.typ := req.typ
s2_req.tag := req.tag
@@ -378,7 +397,8 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
dataArb.io.in(1).bits.addr := s2_req_block_addr | d_address_inc
dataArb.io.in(1).bits.way_en := s2_victim_way
dataArb.io.in(1).bits.wdata := tl_out.d.bits.data
dataArb.io.in(1).bits.wmask := ~UInt(0, rowBytes)
dataArb.io.in(1).bits.wordMask := ~UInt(0, rowBytes / wordBytes)
dataArb.io.in(1).bits.eccMask := ~UInt(0, wordBytes / eccBytes)
// tag updates on refill
metaWriteArb.io.in(1).valid := grantIsCached && d_done
assert(!metaWriteArb.io.in(1).valid || metaWriteArb.io.in(1).ready)
@@ -429,7 +449,7 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
toAddress = probe_bits.address,
lgSize = lgCacheBlockBytes,
shrinkPermissions = s2_shrink_param,
data = s2_data)._2
data = 0.U)._2
} else {
Wire(new TLBundleC(edge.bundle))
}
@@ -441,7 +461,7 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
edge.ProbeAck(
b = probe_bits,
reportPermissions = s2_report_param,
data = s2_data))
data = 0.U))
tl_out.c.valid := s2_release_data_valid
tl_out.c.bits := nackResponseMessage
@@ -449,7 +469,7 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
releaseWay := s2_probe_way
when (s2_victimize && s2_victim_dirty) {
assert(!(s2_valid && s2_hit_valid))
assert(!(s2_valid && s2_hit_valid && !s2_data_error))
release_state := s_voluntary_writeback
probe_bits.address := Cat(s2_victim_tag, s2_req.addr(idxMSB, idxLSB)) << idxLSB
}
@@ -478,11 +498,12 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
}
when (s2_probe && !tl_out.c.fire()) { s1_nack := true }
tl_out.c.bits.address := probe_bits.address
tl_out.c.bits.data := s2_data
tl_out.c.bits.data := s2_data_corrected
dataArb.io.in(2).valid := inWriteback && releaseDataBeat < refillCycles
dataArb.io.in(2).bits.write := false
dataArb.io.in(2).bits.addr := tl_out.c.bits.address | (releaseDataBeat(log2Up(refillCycles)-1,0) << rowOffBits)
dataArb.io.in(2).bits.wordMask := ~UInt(0, rowBytes / wordBytes)
dataArb.io.in(2).bits.way_en := ~UInt(0, nWays)
metaWriteArb.io.in(2).valid := release_state.isOneOf(s_voluntary_write_meta, s_probe_write_meta)
@@ -514,8 +535,8 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
}
// load data subword mux/sign extension
val s2_word_idx = s2_req.addr.extract(log2Up(rowBits/8)-1, log2Up(wordBytes))
val s2_data_word = s2_data >> Cat(s2_word_idx, UInt(0, log2Up(coreDataBits)))
val s2_data_word = ((0 until rowBits by wordBits).map(i => s2_data_uncorrected(wordBits+i-1,i)): Seq[UInt])(s2_word_idx)
val s2_data_word_corrected = ((0 until rowBits by wordBits).map(i => s2_data_corrected(wordBits+i-1,i)): Seq[UInt])(s2_word_idx)
val loadgen = new LoadGen(s2_req.typ, mtSigned(s2_req.typ), s2_req.addr, s2_data_word, s2_sc, wordBytes)
io.cpu.resp.bits.data := loadgen.data | s2_sc_fail
io.cpu.resp.bits.data_word_bypass := loadgen.wordData
@ -523,16 +544,17 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
io.cpu.resp.bits.store_data := pstore1_data
// AMOs
if (usingAtomics) {
if (usingRMW) {
val amoalu = Module(new AMOALU(xLen))
amoalu.io.mask := pstore1_mask
amoalu.io.cmd := pstore1_cmd
amoalu.io.cmd := (if (usingAtomics) pstore1_cmd else M_XWR)
amoalu.io.lhs := s2_data_word
amoalu.io.rhs := pstore1_data
pstore1_storegen_data := amoalu.io.out
} else {
assert(!(s1_valid_masked && s1_read && s1_write), "unsupported D$ operation")
}
when (s2_correct) { pstore1_storegen_data := s2_data_word_corrected }
// flushes
val flushed = Reg(init=Bool(true))
@@ -565,4 +587,14 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {
io.cpu.perf.acquire := edge.done(tl_out_a)
io.cpu.perf.release := edge.done(tl_out.c)
io.cpu.perf.tlbMiss := io.ptw.req.fire()
def encodeData(x: UInt) = x.grouped(eccBits).map(dECC.encode(_)).asUInt
def dummyEncodeData(x: UInt) = x.grouped(eccBits).map(dECC.swizzle(_)).asUInt
def decodeData(x: UInt) = x.grouped(dECC.width(eccBits)).map(dECC.decode(_))
def eccMask(byteMask: UInt) = byteMask.grouped(eccBytes).map(_.orR).asUInt
def eccByteMask(byteMask: UInt) = FillInterleaved(eccBytes, eccMask(byteMask))
def needsRead(req: HellaCacheReq) =
isRead(req.cmd) ||
(isWrite(req.cmd) && (req.cmd === M_PWR || mtSize(req.typ) < log2Ceil(eccBytes)))
}
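
Note: the new needsRead predicate above is what enables sub-granule ECC stores: a write narrower than the ECC granule (or a partial-word M_PWR write) cannot update an encoded word in place, so it first reads the old data. A plain-Scala sketch of the width test (hypothetical helper, not in the commit):

// a store narrower than the ECC granule is treated as read-modify-write
// so the whole granule can be re-encoded
def needsReadModifyWrite(storeBytes: Int, eccBytes: Int): Boolean =
  storeBytes < eccBytes
// with eccBytes = 8: a 1-byte store reads first; an 8-byte store does not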

@@ -18,13 +18,21 @@ class FrontendReq(implicit p: Parameters) extends CoreBundle()(p) {
val speculative = Bool()
}
class FrontendExceptions extends Bundle {
val pf = new Bundle {
val inst = Bool()
}
val ae = new Bundle {
val inst = Bool()
}
}
class FrontendResp(implicit p: Parameters) extends CoreBundle()(p) {
val btb = Valid(new BTBResp)
val pc = UInt(width = vaddrBitsExtended) // ID stage PC
val data = UInt(width = fetchWidth * coreInstBits)
val mask = Bits(width = fetchWidth)
val pf = Bool()
val ae = Bool()
val xcpt = new FrontendExceptions
val replay = Bool()
}
@@ -82,12 +90,8 @@ class FrontendModule(outer: Frontend) extends LazyModuleImp(outer)
val s2_pc = Reg(init=io.resetVector)
val s2_btb_resp_valid = Reg(init=Bool(false))
val s2_btb_resp_bits = Reg(new BTBResp)
val s2_maybe_pf = Reg(Bool())
val s2_maybe_ae = Reg(Bool())
val s2_tlb_miss = Reg(Bool())
val s2_pf = s2_maybe_pf && !s2_tlb_miss
val s2_ae = s2_maybe_ae && !s2_tlb_miss
val s2_xcpt = s2_pf || s2_ae
val s2_tlb_resp = Reg(tlb.io.resp)
val s2_xcpt = !s2_tlb_resp.miss && fq.io.enq.bits.xcpt.asUInt.orR
val s2_speculative = Reg(init=Bool(false))
val fetchBytes = coreInstBytes * fetchWidth
@@ -113,9 +117,7 @@ class FrontendModule(outer: Frontend) extends LazyModuleImp(outer)
s2_valid := true
s2_pc := s1_pc
s2_speculative := s1_speculative
s2_maybe_pf := tlb.io.resp.pf.inst
s2_maybe_ae := tlb.io.resp.ae.inst
s2_tlb_miss := tlb.io.resp.miss
s2_tlb_resp := tlb.io.resp
}
if (usingBTB) {
@@ -149,7 +151,6 @@ class FrontendModule(outer: Frontend) extends LazyModuleImp(outer)
tlb.io.req.bits.vaddr := s1_pc
tlb.io.req.bits.passthrough := Bool(false)
tlb.io.req.bits.instruction := Bool(true)
tlb.io.req.bits.store := Bool(false)
tlb.io.req.bits.sfence := io.cpu.sfence
tlb.io.req.bits.size := log2Ceil(coreInstBytes*fetchWidth)
@@ -160,7 +161,7 @@ class FrontendModule(outer: Frontend) extends LazyModuleImp(outer)
icache.io.s1_paddr := tlb.io.resp.paddr
icache.io.s2_vaddr := s2_pc
icache.io.s1_kill := io.cpu.req.valid || tlb.io.resp.miss || s2_replay
icache.io.s2_kill := s2_valid && (s2_speculative || s2_xcpt)
icache.io.s2_kill := s2_valid && (s2_speculative && !s2_tlb_resp.cacheable || s2_xcpt)
fq.io.enq.valid := s2_valid && (icache.io.resp.valid || icache.io.s2_kill)
fq.io.enq.bits.pc := s2_pc
@@ -168,8 +169,7 @@ class FrontendModule(outer: Frontend) extends LazyModuleImp(outer)
fq.io.enq.bits.data := icache.io.resp.bits
fq.io.enq.bits.mask := UInt((1 << fetchWidth)-1) << s2_pc.extract(log2Ceil(fetchWidth)+log2Ceil(coreInstBytes)-1, log2Ceil(coreInstBytes))
fq.io.enq.bits.pf := s2_pf
fq.io.enq.bits.ae := s2_ae
fq.io.enq.bits.xcpt := s2_tlb_resp
fq.io.enq.bits.replay := icache.io.s2_kill && !icache.io.resp.valid && !s2_xcpt
fq.io.enq.bits.btb.valid := s2_btb_resp_valid
fq.io.enq.bits.btb.bits := s2_btb_resp_bits

@@ -22,6 +22,7 @@ case class DCacheParams(
nTLBEntries: Int = 32,
tagECC: Code = new IdentityCode,
dataECC: Code = new IdentityCode,
dataECCBytes: Int = 1,
nMSHRs: Int = 1,
nSDQ: Int = 17,
nRPQ: Int = 16,

@@ -9,10 +9,8 @@ import tile._
import util._
class Instruction(implicit val p: Parameters) extends ParameterizedBundle with HasCoreParameters {
val pf0 = Bool() // page fault on first half of instruction
val pf1 = Bool() // page fault on second half of instruction
val ae0 = Bool() // access exception on first half of instruction
val ae1 = Bool() // access exception on second half of instruction
val xcpt0 = new FrontendExceptions // exceptions on first half of instruction
val xcpt1 = new FrontendExceptions // exceptions on second half of instruction
val replay = Bool()
val btb_hit = Bool()
val rvc = Bool()
@@ -80,8 +78,7 @@ class IBuf(implicit p: Parameters) extends CoreModule {
val valid = (UIntToOH(nValid) - 1)(fetchWidth-1, 0)
val bufMask = UIntToOH(nBufValid) - 1
val pf = valid & (Mux(buf.pf, bufMask, UInt(0)) | Mux(io.imem.bits.pf, ~bufMask, UInt(0)))
val ae = valid & (Mux(buf.ae, bufMask, UInt(0)) | Mux(io.imem.bits.ae, ~bufMask, UInt(0)))
val xcpt = (0 until bufMask.getWidth).map(i => Mux(bufMask(i), buf.xcpt, io.imem.bits.xcpt))
val ic_replay = valid & (Mux(buf.replay, bufMask, UInt(0)) | Mux(io.imem.bits.replay, ~bufMask, UInt(0)))
val ibufBTBHitMask = Mux(ibufBTBHit, UIntToOH(ibufBTBResp.bridx), UInt(0))
assert(!io.imem.valid || !io.imem.bits.btb.valid || io.imem.bits.btb.bits.bridx >= pcWordBits)
@@ -100,11 +97,9 @@ class IBuf(implicit p: Parameters) extends CoreModule {
if (usingCompressed) {
val replay = ic_replay(j) || (!exp.io.rvc && (btbHitMask(j) || ic_replay(j+1)))
io.inst(i).valid := valid(j) && (exp.io.rvc || valid(j+1) || pf(j+1) || ae(j+1) || replay)
io.inst(i).bits.pf0 := pf(j)
io.inst(i).bits.pf1 := !exp.io.rvc && pf(j+1)
io.inst(i).bits.ae0 := ae(j)
io.inst(i).bits.ae1 := !exp.io.rvc && ae(j+1)
io.inst(i).valid := valid(j) && (exp.io.rvc || valid(j+1) || xcpt(j+1).asUInt.orR || replay)
io.inst(i).bits.xcpt0 := xcpt(j)
io.inst(i).bits.xcpt1 := Mux(exp.io.rvc, 0.U, xcpt(j+1).asUInt).asTypeOf(new FrontendExceptions)
io.inst(i).bits.replay := replay
io.inst(i).bits.btb_hit := btbHitMask(j) || (!exp.io.rvc && btbHitMask(j+1))
io.inst(i).bits.rvc := exp.io.rvc
@@ -115,10 +110,8 @@ class IBuf(implicit p: Parameters) extends CoreModule {
} else {
when (io.inst(i).ready) { nReady := i+1 }
io.inst(i).valid := valid(i)
io.inst(i).bits.pf0 := pf(i)
io.inst(i).bits.pf1 := false
io.inst(i).bits.ae0 := ae(i)
io.inst(i).bits.ae1 := false
io.inst(i).bits.xcpt0 := xcpt(i)
io.inst(i).bits.xcpt1 := 0.U.asTypeOf(new FrontendExceptions)
io.inst(i).bits.replay := ic_replay(i)
io.inst(i).bits.rvc := false
io.inst(i).bits.btb_hit := btbHitMask(i)

@@ -712,7 +712,6 @@ class NonBlockingDCacheModule(outer: NonBlockingDCache) extends HellaCacheModule
dtlb.io.req.bits.passthrough := s1_req.phys
dtlb.io.req.bits.vaddr := s1_req.addr
dtlb.io.req.bits.instruction := Bool(false)
dtlb.io.req.bits.store := s1_write
dtlb.io.req.bits.size := s1_req.typ
dtlb.io.req.bits.cmd := s1_req.cmd
when (!dtlb.io.req.ready && !io.cpu.req.bits.phys) { io.cpu.req.ready := Bool(false) }

@@ -231,14 +231,16 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p)
bpu.io.pc := ibuf.io.pc
bpu.io.ea := mem_reg_wdata
val id_xcpt_pf = ibuf.io.inst(0).bits.pf0 || ibuf.io.inst(0).bits.pf1
val id_xcpt_ae = ibuf.io.inst(0).bits.ae0 || ibuf.io.inst(0).bits.ae1
val id_xcpt0 = ibuf.io.inst(0).bits.xcpt0
val id_xcpt1 = ibuf.io.inst(0).bits.xcpt1
val (id_xcpt, id_cause) = checkExceptions(List(
(csr.io.interrupt, csr.io.interrupt_cause),
(bpu.io.debug_if, UInt(CSR.debugTriggerCause)),
(bpu.io.xcpt_if, UInt(Causes.breakpoint)),
(id_xcpt_pf, UInt(Causes.fetch_page_fault)),
(id_xcpt_ae, UInt(Causes.fetch_access)),
(id_xcpt0.pf.inst, UInt(Causes.fetch_page_fault)),
(id_xcpt0.ae.inst, UInt(Causes.fetch_access)),
(id_xcpt1.pf.inst, UInt(Causes.fetch_page_fault)),
(id_xcpt1.ae.inst, UInt(Causes.fetch_access)),
(id_illegal_insn, UInt(Causes.illegal_instruction))))
val dcache_bypass_data =
@@ -305,15 +307,15 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p)
ex_ctrl.alu_dw := DW_XPR
ex_ctrl.sel_alu1 := A1_RS1 // badaddr := instruction
ex_ctrl.sel_alu2 := A2_ZERO
when (bpu.io.xcpt_if || id_xcpt_pf || id_xcpt_ae) { // badaddr := PC
when (id_xcpt1.asUInt.orR) { // badaddr := PC+2
ex_ctrl.sel_alu1 := A1_PC
}
val pf_second = !ibuf.io.inst(0).bits.pf0 && ibuf.io.inst(0).bits.pf1
val ae_second = !ibuf.io.inst(0).bits.ae0 && ibuf.io.inst(0).bits.ae1
when (!bpu.io.xcpt_if && (pf_second || (!id_xcpt_pf && ae_second))) { // badaddr := PC+2
ex_ctrl.sel_alu2 := A2_SIZE
ex_reg_rvc := true
}
when (bpu.io.xcpt_if || id_xcpt0.asUInt.orR) { // badaddr := PC
ex_ctrl.sel_alu1 := A1_PC
ex_ctrl.sel_alu2 := A2_ZERO
}
}
ex_reg_flush_pipe := id_ctrl.fence_i || id_csr_flush
ex_reg_load_use := id_load_use
@@ -354,6 +356,7 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p)
val ctrl_killx = take_pc_mem_wb || replay_ex || !ex_reg_valid
// detect 2-cycle load-use delay for LB/LH/SC
val ex_slow_bypass = ex_ctrl.mem_cmd === M_XSC || Vec(MT_B, MT_BU, MT_H, MT_HU).contains(ex_ctrl.mem_type)
val ex_sfence = Bool(usingVM) && ex_ctrl.mem && ex_ctrl.mem_cmd === M_SFENCE
val (ex_xcpt, ex_cause) = checkExceptions(List(
(ex_reg_xcpt_interrupt || ex_reg_xcpt, ex_reg_cause)))
@@ -385,7 +388,7 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p)
mem_reg_rvc := ex_reg_rvc
mem_reg_load := ex_ctrl.mem && isRead(ex_ctrl.mem_cmd)
mem_reg_store := ex_ctrl.mem && isWrite(ex_ctrl.mem_cmd)
mem_reg_sfence := Bool(usingVM) && ex_ctrl.mem && ex_ctrl.mem_cmd === M_SFENCE
mem_reg_sfence := ex_sfence
mem_reg_btb_hit := ex_reg_btb_hit
when (ex_reg_btb_hit) { mem_reg_btb_resp := ex_reg_btb_resp }
mem_reg_flush_pipe := ex_reg_flush_pipe
@@ -395,7 +398,7 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p)
mem_reg_inst := ex_reg_inst
mem_reg_pc := ex_reg_pc
mem_reg_wdata := alu.io.out
when (ex_ctrl.rxs2 && (ex_ctrl.mem || ex_ctrl.rocc)) {
when (ex_ctrl.rxs2 && (ex_ctrl.mem || ex_ctrl.rocc || ex_sfence)) {
val typ = Mux(ex_ctrl.rocc, log2Ceil(xLen/8).U, ex_ctrl.mem_type)
mem_reg_rs2 := new uncore.util.StoreGen(typ, 0.U, ex_rs(1), coreDataBytes).data
}
@@ -429,7 +432,7 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p)
wb_reg_rvc := mem_reg_rvc
wb_reg_sfence := mem_reg_sfence
wb_reg_wdata := Mux(!mem_reg_xcpt && mem_ctrl.fp && mem_ctrl.wxd, io.fpu.toint_data, mem_int_wdata)
when (mem_ctrl.rocc) {
when (mem_ctrl.rocc || mem_reg_sfence) {
wb_reg_rs2 := mem_reg_rs2
}
wb_reg_cause := mem_cause
@@ -673,7 +676,7 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p)
else {
printf("C%d: %d [%d] pc=[%x] W[r%d=%x][%d] R[r%d=%x] R[r%d=%x] inst=[%x] DASM(%x)\n",
io.hartid, csr.io.time(31,0), wb_valid, wb_reg_pc,
Mux(rf_wen, rf_waddr, UInt(0)), rf_wdata, rf_wen,
Mux(rf_wen && !(wb_set_sboard && wb_wen), rf_waddr, UInt(0)), rf_wdata, rf_wen,
wb_reg_inst(19,15), Reg(next=Reg(next=ex_rs(0))),
wb_reg_inst(24,20), Reg(next=Reg(next=ex_rs(1))),
wb_reg_inst, wb_reg_inst)

@@ -27,7 +27,6 @@ class TLBReq(lgMaxSize: Int)(implicit p: Parameters) extends CoreBundle()(p) {
val vaddr = UInt(width = vaddrBitsExtended)
val passthrough = Bool()
val instruction = Bool()
val store = Bool()
val sfence = Valid(new SFenceReq)
val size = UInt(width = log2Ceil(lgMaxSize + 1))
val cmd = Bits(width = M_SZ)
@@ -80,6 +79,7 @@ class TLB(lgMaxSize: Int, nEntries: Int)(implicit edge: TLEdgeOut, p: Parameters
val totalEntries = nEntries + 1
val normalEntries = nEntries
val specialEntry = nEntries
val aeEntry = specialEntry - (1 << log2Floor(nEntries))
val valid = Reg(init = UInt(0, totalEntries))
val reg_entries = Reg(Vec(totalEntries, UInt(width = new Entry().getWidth)))
val entries = reg_entries.map(_.asTypeOf(new Entry))
@@ -118,7 +118,6 @@ class TLB(lgMaxSize: Int, nEntries: Int)(implicit edge: TLEdgeOut, p: Parameters
val prot_aa = fastCheck(_.supportsArithmetic) || cacheable
val prot_x = fastCheck(_.executable) && pmp.io.x
val prot_eff = fastCheck(Seq(RegionType.PUT_EFFECTS, RegionType.GET_EFFECTS) contains _.regionType)
val isSpecial = !(io.ptw.resp.bits.homogeneous || io.ptw.resp.bits.ae)
val lookup_tag = Cat(io.ptw.ptbr.asid, vpn(vpnBits-1,0))
val hitsVec = (0 until totalEntries).map { i => vm_enabled && {
@@ -141,7 +140,7 @@ class TLB(lgMaxSize: Int, nEntries: Int)(implicit edge: TLEdgeOut, p: Parameters
// permission bit arrays
when (do_refill && !invalidate_refill) {
val waddr = Mux(isSpecial, specialEntry.U, r_refill_waddr)
val waddr = Mux(io.ptw.resp.bits.ae, aeEntry.U, Mux(!io.ptw.resp.bits.homogeneous, specialEntry.U, r_refill_waddr))
val pte = io.ptw.resp.bits.pte
val newEntry = Wire(new Entry)
newEntry.ppn := pte.ppn
@@ -150,13 +149,10 @@ class TLB(lgMaxSize: Int, nEntries: Int)(implicit edge: TLEdgeOut, p: Parameters
newEntry.c := cacheable
newEntry.u := pte.u
newEntry.g := pte.g
// if an access exception occurs during PTW, pretend the page has full
// permissions so that a page fault will not occur, but clear the
// physical memory permissions, so that an access exception will occur.
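// now the access exception is instead recorded in the entry: the
// physical permissions (pr/pw/px below) are cleared so an access
// fault is raised, and ptw_ae_array masks the page-fault arrays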
newEntry.ae := io.ptw.resp.bits.ae
newEntry.sr := pte.sr() || io.ptw.resp.bits.ae
newEntry.sw := pte.sw() || io.ptw.resp.bits.ae
newEntry.sx := pte.sx() || io.ptw.resp.bits.ae
newEntry.sr := pte.sr()
newEntry.sw := pte.sw()
newEntry.sx := pte.sx()
newEntry.pr := prot_r && !io.ptw.resp.bits.ae
newEntry.pw := prot_w && !io.ptw.resp.bits.ae
newEntry.px := prot_x && !io.ptw.resp.bits.ae
@@ -171,10 +167,12 @@ class TLB(lgMaxSize: Int, nEntries: Int)(implicit edge: TLEdgeOut, p: Parameters
val plru = new PseudoLRU(normalEntries)
val repl_waddr = Mux(!valid(normalEntries-1, 0).andR, PriorityEncoder(~valid(normalEntries-1, 0)), plru.replace)
val priv_ok = entries.map(_.ae).asUInt | Mux(priv_s, ~Mux(io.ptw.status.sum, UInt(0), entries.map(_.u).asUInt), entries.map(_.u).asUInt)
val r_array = Cat(true.B, priv_ok & (entries.map(_.sr).asUInt | Mux(io.ptw.status.mxr, entries.map(_.sx).asUInt, UInt(0))))
val w_array = Cat(true.B, priv_ok & entries.map(_.sw).asUInt)
val x_array = Cat(true.B, priv_ok & entries.map(_.sx).asUInt)
val ptw_ae_array = entries(aeEntry).ae << aeEntry
val priv_rw_ok = Mux(!priv_s || io.ptw.status.sum, entries.map(_.u).asUInt, 0.U) | Mux(priv_s, ~entries.map(_.u).asUInt, 0.U)
val priv_x_ok = Mux(priv_s, ~entries.map(_.u).asUInt, entries.map(_.u).asUInt)
val r_array = Cat(true.B, priv_rw_ok & (entries.map(_.sr).asUInt | Mux(io.ptw.status.mxr, entries.map(_.sx).asUInt, UInt(0))))
val w_array = Cat(true.B, priv_rw_ok & entries.map(_.sw).asUInt)
val x_array = Cat(true.B, priv_x_ok & entries.map(_.sx).asUInt)
val pr_array = Cat(Fill(2, prot_r), entries.init.map(_.pr).asUInt)
val pw_array = Cat(Fill(2, prot_w), entries.init.map(_.pw).asUInt)
val px_array = Cat(Fill(2, prot_x), entries.init.map(_.px).asUInt)
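
Note: the priv_rw_ok/priv_x_ok split encodes that status.SUM lets S-mode read and write user pages but never execute them. A plain-Scala sketch of the per-entry predicates (hypothetical helpers; u means the entry maps a user page):

def rwOk(privS: Boolean, sum: Boolean, u: Boolean): Boolean =
  if (privS) !u || (sum && u) else u
def xOk(privS: Boolean, u: Boolean): Boolean =
  if (privS) !u else u  // SUM does not grant execute permission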
@@ -199,8 +197,9 @@ class TLB(lgMaxSize: Int, nEntries: Int)(implicit edge: TLEdgeOut, p: Parameters
Mux(Bool(usingAtomics) && isAMOArithmetic(io.req.bits.cmd), ~paa_array, 0.U)
val ma_ld_array = Mux(misaligned && isRead(io.req.bits.cmd), ~eff_array, 0.U)
val ma_st_array = Mux(misaligned && isWrite(io.req.bits.cmd), ~eff_array, 0.U)
val pf_ld_array = Mux(isRead(io.req.bits.cmd), ~r_array, 0.U)
val pf_st_array = Mux(isWrite(io.req.bits.cmd), ~w_array, 0.U)
val pf_ld_array = Mux(isRead(io.req.bits.cmd), ~(r_array | ptw_ae_array), 0.U)
val pf_st_array = Mux(isWrite(io.req.bits.cmd), ~(w_array | ptw_ae_array), 0.U)
val pf_inst_array = ~(x_array | ptw_ae_array)
val tlb_hit = hits(totalEntries-1, 0).orR
val tlb_miss = vm_enabled && !bad_va && !tlb_hit && !io.req.bits.sfence.valid
@@ -218,7 +217,7 @@ class TLB(lgMaxSize: Int, nEntries: Int)(implicit edge: TLEdgeOut, p: Parameters
io.req.ready := state === s_ready
io.resp.pf.ld := (bad_va && isRead(io.req.bits.cmd)) || (pf_ld_array & hits).orR
io.resp.pf.st := (bad_va && isWrite(io.req.bits.cmd)) || (pf_st_array & hits).orR
io.resp.pf.inst := bad_va || (~x_array & hits).orR
io.resp.pf.inst := bad_va || (pf_inst_array & hits).orR
io.resp.ae.ld := (ae_ld_array & hits).orR
io.resp.ae.st := (ae_st_array & hits).orR
io.resp.ae.inst := (~px_array & hits).orR

@@ -19,12 +19,20 @@ abstract class Code
def width(w0: Int): Int
def encode(x: UInt): UInt
def decode(x: UInt): Decoding
/** Copy the bits in x to the right bit positions in an encoded word,
* so that x === decode(swizzle(x)).uncorrected; but don't generate
* the other code bits, so decode(swizzle(x)).error might be true.
* For codes for which this operation is not trivial, throw an
* UnsupportedOperationException. */
def swizzle(x: UInt): UInt
}
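
Note: the swizzle contract can be exercised with ParityCode (defined below); a hypothetical check, assuming an 8-bit wire x in a test harness:

val code = new ParityCode
val x = Wire(UInt(width = 8))
assert(code.decode(code.swizzle(x)).uncorrected === x)  // data bits line up
// swizzle leaves the parity bit unset, so the decoding's error
// indication, code.decode(code.swizzle(x)).error, may be asserted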
class IdentityCode extends Code
{
def width(w0: Int) = w0
def encode(x: UInt) = x
def swizzle(x: UInt) = x
def decode(y: UInt) = new Decoding {
def uncorrected = y
def corrected = y
@@ -37,6 +45,7 @@ class ParityCode
{
def width(w0: Int) = w0+1
def encode(x: UInt) = Cat(x.xorR, x)
def swizzle(x: UInt) = Cat(false.B, x)
def decode(y: UInt) = new Decoding {
val uncorrected = y(y.getWidth-2,0)
val corrected = uncorrected
@@ -66,6 +75,11 @@ class SECCode
}
y.asUInt
}
def swizzle(x: UInt) = {
val y = for (i <- 1 to width(x.getWidth))
yield (if (isPow2(i)) false.B else x(mapping(i)))
y.asUInt
}
def decode(y: UInt) = new Decoding {
val n = y.getWidth
require(n > 0 && !isPow2(n))
@@ -93,6 +107,7 @@ class SECDEDCode
def width(k: Int) = sec.width(k)+1
def encode(x: UInt) = par.encode(sec.encode(x))
def swizzle(x: UInt) = par.swizzle(sec.swizzle(x))
def decode(x: UInt) = new Decoding {
val secdec = sec.decode(x(x.getWidth-2,0))
val pardec = par.decode(x)

@@ -57,6 +57,9 @@ package object util {
else x(hi, lo)
}
def grouped(width: Int): Seq[UInt] =
(0 until x.getWidth by width).map(base => x(base + width - 1, base))
def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds
}
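
Note: grouped slices a UInt into equal-width fields starting at the LSB, the order the DCache ECC helpers (encodeData/decodeData/eccMask) rely on. Usage sketch with a hypothetical 32-bit wire:

val x = Wire(UInt(width = 32))
val bytes = x.grouped(8)  // Seq(x(7,0), x(15,8), x(23,16), x(31,24))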