// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.

package rocket

import Chisel._
import Chisel.ImplicitConversions._
import config._
import diplomacy._
import coreplex.CacheBlockBytes
import tile.{XLen, CoreModule, CoreBundle}
import uncore.tilelink2._
import uncore.constants._
import util._

case object PAddrBits extends Field[Int]
case object PgLevels extends Field[Int]
case object ASIdBits extends Field[Int]

class SFenceReq(implicit p: Parameters) extends CoreBundle()(p) {
  val rs1 = Bool()
  val rs2 = Bool()
  val asid = UInt(width = asIdBits max 1) // TODO zero-width
}
class TLBReq(lgMaxSize: Int)(implicit p: Parameters) extends CoreBundle()(p) {
  val vaddr = UInt(width = vaddrBitsExtended)
  val passthrough = Bool()
  val instruction = Bool()
  val store = Bool()
  val sfence = Valid(new SFenceReq)
  val size = UInt(width = log2Ceil(lgMaxSize + 1))
  val cmd = Bits(width = M_SZ)

  override def cloneType = new TLBReq(lgMaxSize).asInstanceOf[this.type]
}

class TLBExceptions extends Bundle {
  val ld = Bool()
  val st = Bool()
  val inst = Bool()
}
class TLBResp(implicit p: Parameters) extends CoreBundle()(p) {
  // lookup responses
  val miss = Bool()
  val paddr = UInt(width = paddrBits)
  val pf = new TLBExceptions
  val ae = new TLBExceptions
  val ma = new TLBExceptions
  val cacheable = Bool()
}
class TLB(lgMaxSize: Int, nEntries: Int)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) {
  val io = new Bundle {
    val req = Decoupled(new TLBReq(lgMaxSize)).flip
    val resp = new TLBResp().asOutput
    val ptw = new TLBPTWIO
  }
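  // Each entry caches one translation: the physical page number, the ASID/VPN
  // tag, the page-table level at which the leaf PTE was found (to support
  // superpages), the PTE permission bits, and the PMA/PMP results observed at
  // refill time.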
  class Entry extends Bundle {
    val ppn = UInt(width = ppnBits)
    val tag = UInt(width = asIdBits + vpnBits)
    val level = UInt(width = log2Ceil(pgLevels))
    val u = Bool()
    val g = Bool()
    val ae = Bool()
    val sw = Bool()
    val sx = Bool()
    val sr = Bool()
    val pw = Bool()
    val px = Bool()
    val pr = Bool()
    val pal = Bool() // AMO logical
    val paa = Bool() // AMO arithmetic
    val eff = Bool() // get/put effects
    val c = Bool()
  }
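  // Entry nEntries (the "special" entry) holds translations for pages whose
  // physical attributes are not homogeneous across the page, so its PMA/PMP
  // checks are redone on every access instead of being cached.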
  val totalEntries = nEntries + 1
  val normalEntries = nEntries
  val specialEntry = nEntries
  val valid = Reg(init = UInt(0, totalEntries))
  val reg_entries = Reg(Vec(totalEntries, UInt(width = new Entry().getWidth)))
  val entries = reg_entries.map(_.asTypeOf(new Entry))
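  // Refill state machine: s_ready accepts requests, s_request issues a PTW
  // request, s_wait awaits the response, and s_wait_invalidate awaits a
  // response that must be discarded because an SFENCE arrived in the meantime.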
  val s_ready :: s_request :: s_wait :: s_wait_invalidate :: Nil = Enum(UInt(), 4)
  val state = Reg(init=s_ready)
  val r_refill_tag = Reg(UInt(width = asIdBits + vpnBits))
  val r_refill_waddr = Reg(UInt(width = log2Ceil(normalEntries)))
  val r_req = Reg(new TLBReq(lgMaxSize))
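  // Effective privilege: instruction fetches are checked against the current
  // privilege level (status.prv), data accesses against the effective data
  // privilege (status.dprv).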
  val priv = Mux(io.req.bits.instruction, io.ptw.status.prv, io.ptw.status.dprv)
  val priv_s = priv(0)
  val priv_uses_vm = priv <= PRV.S
  val vm_enabled = Bool(usingVM) && io.ptw.ptbr.mode(io.ptw.ptbr.mode.getWidth-1) && priv_uses_vm && !io.req.bits.passthrough
  // share a single physical memory attribute checker (unshare if critical path)
  val (vpn, pgOffset) = Split(io.req.bits.vaddr, pgIdxBits)
  val refill_ppn = io.ptw.resp.bits.pte.ppn(ppnBits-1, 0)
  val do_refill = Bool(usingVM) && io.ptw.resp.valid
  val invalidate_refill = state.isOneOf(s_request /* don't care */, s_wait_invalidate)
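  // The shared checker sees the refill PPN while a PTW response is being
  // written, the special entry's PPN during normal translated operation, and
  // the raw VPN when translation is off or bypassed.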
  val mpu_ppn = Mux(do_refill, refill_ppn,
                Mux(vm_enabled, entries.last.ppn, vpn))
  val mpu_physaddr = Cat(mpu_ppn, io.req.bits.vaddr(pgIdxBits-1, 0))
  val pmp = Module(new PMPChecker(lgMaxSize))
  pmp.io.addr := mpu_physaddr
  pmp.io.size := io.req.bits.size
  pmp.io.pmp := (io.ptw.pmp: Seq[PMP])
  pmp.io.prv := Mux(Bool(usingVM) && (do_refill || io.req.bits.passthrough /* PTW */), PRV.S, priv)
  val legal_address = edge.manager.findSafe(mpu_physaddr).reduce(_||_)
  def fastCheck(member: TLManagerParameters => Boolean) =
    legal_address && Mux1H(edge.manager.findFast(mpu_physaddr), edge.manager.managers.map(m => Bool(member(m))))
  val cacheable = fastCheck(_.supportsAcquireB)
  val prot_r = fastCheck(_.supportsGet) && pmp.io.r
  val prot_w = fastCheck(_.supportsPutFull) && pmp.io.w
  val prot_al = fastCheck(_.supportsLogical) || cacheable
  val prot_aa = fastCheck(_.supportsArithmetic) || cacheable
  val prot_x = fastCheck(_.executable) && pmp.io.x
  val prot_eff = fastCheck(Seq(RegionType.PUT_EFFECTS, RegionType.GET_EFFECTS) contains _.regionType)
  val isSpecial = !(io.ptw.resp.bits.homogeneous || io.ptw.resp.bits.ae)
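  // Hit detection: an entry matches if it is valid and its tag agrees with the
  // looked-up VPN on every page-table level down to the level at which the
  // entry was filled; deeper levels are don't-cares, which is how superpage
  // entries cover a whole range of VPNs. The extra MSB of `hits` covers the
  // translation-off (passthrough) case.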
  val lookup_tag = Cat(io.ptw.ptbr.asid, vpn(vpnBits-1,0))
  val hitsVec = (0 until totalEntries).map { i => vm_enabled && {
    var tagMatch = valid(i)
    for (j <- 0 until pgLevels) {
      val base = vpnBits - (j + 1) * pgLevelBits
      tagMatch = tagMatch && (entries(i).level < j || entries(i).tag(base + pgLevelBits - 1, base) === vpn(base + pgLevelBits - 1, base))
    }
    tagMatch
  }} :+ !vm_enabled
  val hits = hitsVec.asUInt
  val level = Mux1H(hitsVec.init, entries.map(_.level))
  val partialPPN = Mux1H(hitsVec.init, entries.map(_.ppn))
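  // Reconstruct the physical page number: for a superpage hit, the PPN bits
  // below the entry's level are supplied by the virtual page number, so one
  // entry maps a naturally aligned power-of-two region.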
  val ppn = {
    var ppn = Mux(vm_enabled, partialPPN, vpn)(pgLevelBits*pgLevels - 1, pgLevelBits*(pgLevels - 1))
    for (i <- 1 until pgLevels)
      ppn = Cat(ppn, (Mux(level < i, vpn, 0.U) | partialPPN)(vpnBits - i*pgLevelBits - 1, vpnBits - (i + 1)*pgLevelBits))
    ppn
  }
  // permission bit arrays
  when (do_refill && !invalidate_refill) {
    val waddr = Mux(isSpecial, specialEntry.U, r_refill_waddr)
    val pte = io.ptw.resp.bits.pte
    val newEntry = Wire(new Entry)
    newEntry.ppn := pte.ppn
    newEntry.tag := r_refill_tag
    newEntry.level := io.ptw.resp.bits.level
    newEntry.c := cacheable
    newEntry.u := pte.u
    newEntry.g := pte.g
    // if an access exception occurs during PTW, pretend the page has full
    // permissions so that a page fault will not occur, but clear the
    // physical memory permissions, so that an access exception will occur.
    newEntry.ae := io.ptw.resp.bits.ae
    newEntry.sr := pte.sr() || io.ptw.resp.bits.ae
    newEntry.sw := pte.sw() || io.ptw.resp.bits.ae
    newEntry.sx := pte.sx() || io.ptw.resp.bits.ae
    newEntry.pr := prot_r && !io.ptw.resp.bits.ae
    newEntry.pw := prot_w && !io.ptw.resp.bits.ae
    newEntry.px := prot_x && !io.ptw.resp.bits.ae
    newEntry.pal := prot_al
    newEntry.paa := prot_aa
    newEntry.eff := prot_eff

    valid := valid | UIntToOH(waddr)
    reg_entries(waddr) := newEntry.asUInt
  }
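  // Replacement policy: use an invalid entry if one exists, otherwise evict
  // the pseudo-LRU victim among the normal entries.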
  val plru = new PseudoLRU(normalEntries)
  val repl_waddr = Mux(!valid(normalEntries-1, 0).andR, PriorityEncoder(~valid(normalEntries-1, 0)), plru.replace)
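  // Permission vectors, one bit per entry plus an MSB for the translation-off
  // passthrough case. The pr/pw/px/paa/pal/eff/c vectors carry two high-order
  // bits (special entry and passthrough) driven by the live PMA/PMP check
  // rather than cached state.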
  val priv_ok = entries.map(_.ae).asUInt | Mux(priv_s, ~Mux(io.ptw.status.sum, UInt(0), entries.map(_.u).asUInt), entries.map(_.u).asUInt)
  val r_array = Cat(true.B, priv_ok & (entries.map(_.sr).asUInt | Mux(io.ptw.status.mxr, entries.map(_.sx).asUInt, UInt(0))))
  val w_array = Cat(true.B, priv_ok & entries.map(_.sw).asUInt)
  val x_array = Cat(true.B, priv_ok & entries.map(_.sx).asUInt)
  val pr_array = Cat(Fill(2, prot_r), entries.init.map(_.pr).asUInt)
  val pw_array = Cat(Fill(2, prot_w), entries.init.map(_.pw).asUInt)
  val px_array = Cat(Fill(2, prot_x), entries.init.map(_.px).asUInt)
  val paa_array = Cat(Fill(2, prot_aa), entries.init.map(_.paa).asUInt)
  val pal_array = Cat(Fill(2, prot_al), entries.init.map(_.pal).asUInt)
  val eff_array = Cat(Fill(2, prot_eff), entries.init.map(_.eff).asUInt)
  val c_array = Cat(Fill(2, cacheable), entries.init.map(_.c).asUInt)
  val ae_st_array = ~pw_array | Mux(isAMOLogical(io.req.bits.cmd), ~pal_array, 0.U) | Mux(isAMOArithmetic(io.req.bits.cmd), ~paa_array, 0.U)
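  // Misalignment is judged against the access size; LR/SC to non-cacheable
  // memory is also reported as an access exception.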
  val misaligned = (io.req.bits.vaddr & (UIntToOH(io.req.bits.size) - 1)).orR
  val ae = misaligned || Bool(usingAtomics) && !io.resp.cacheable && io.req.bits.cmd.isOneOf(M_XLR, M_XSC)
  val bad_va = vm_enabled &&
    (if (vpnBits == vpnBitsExtended) Bool(false)
     else vpn(vpnBits) =/= vpn(vpnBits-1))
  val tlb_hit = hits(totalEntries-1, 0).orR
  val tlb_miss = vm_enabled && !bad_va && !tlb_hit && !io.req.bits.sfence.valid
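  // Mark the hit entry as most recently used; the special entry is not
  // tracked by the PLRU.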
  when (io.req.valid && !tlb_miss && !hits(specialEntry)) {
    plru.access(OHToUInt(hits(normalEntries-1, 0)))
  }
  // Superpages create the possibility that two entries in the TLB may match.
  // This corresponds to a software bug, but we can't return complete garbage;
  // we must return either the old translation or the new translation. This
  // isn't compatible with the Mux1H approach. So, flush the TLB and report
  // a miss on duplicate entries.
  val multipleHits = PopCountAtLeast(hits(totalEntries-1, 0), 2)
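  // Response wiring: page faults come from the cached PTE permissions, access
  // exceptions from the PMA/PMP results, and misaligned-address exceptions
  // from the effects check, each qualified by the hit vector.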
  io.req.ready := state === s_ready
  io.resp.pf.ld := (bad_va || (~r_array & hits).orR) && isRead(io.req.bits.cmd)
  io.resp.pf.st := (bad_va || (~w_array & hits).orR) && isWrite(io.req.bits.cmd)
  io.resp.pf.inst := bad_va || (~x_array & hits).orR
  io.resp.ae.ld := ((~pr_array & hits).orR || ae) && isRead(io.req.bits.cmd)
  io.resp.ae.st := ((ae_st_array & hits).orR || ae) && isWrite(io.req.bits.cmd)
  io.resp.ae.inst := (~px_array & hits).orR
  io.resp.ma.ld := (~eff_array & hits).orR && misaligned && isRead(io.req.bits.cmd)
  io.resp.ma.st := (~eff_array & hits).orR && misaligned && isWrite(io.req.bits.cmd)
  io.resp.ma.inst := false // this is up to the pipeline to figure out
  io.resp.cacheable := (c_array & hits).orR
  io.resp.miss := do_refill || tlb_miss || multipleHits
  io.resp.paddr := Cat(ppn, pgOffset)
  io.ptw.req.valid := state === s_request
  io.ptw.req.bits <> io.ptw.status
  io.ptw.req.bits.addr := r_refill_tag
  if (usingVM) {
    val sfence = io.req.valid && io.req.bits.sfence.valid
    when (io.req.fire() && tlb_miss) {
      state := s_request
      r_refill_tag := lookup_tag
      r_refill_waddr := repl_waddr
      r_req := io.req.bits
    }
    when (state === s_request) {
      when (sfence) { state := s_ready }
      when (io.ptw.req.ready) { state := Mux(sfence, s_wait_invalidate, s_wait) }
    }
    when (state === s_wait && sfence) {
      state := s_wait_invalidate
    }
    when (io.ptw.resp.valid) {
      state := s_ready
    }
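    // SFENCE.VMA: with rs1 set, flush only the entries matching the given
    // virtual address; otherwise, with rs2 set, flush non-global entries;
    // otherwise flush everything.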
    when (sfence) {
      valid := Mux(io.req.bits.sfence.bits.rs1, valid & ~hits(totalEntries-1, 0),
               Mux(io.req.bits.sfence.bits.rs2, valid & entries.map(_.g).asUInt, 0))
    }
    when (multipleHits) {
      valid := 0
    }
  }
}