From 7c9a1b026554916dc42744524300312a8fd2dd3c Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Tue, 2 Jan 2018 18:41:25 -0800
Subject: [PATCH 1/2] Correctly check for virtual-address canonicalization

The previous check was necessary but not sufficient.
---
 src/main/scala/rocket/RocketCore.scala | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/src/main/scala/rocket/RocketCore.scala b/src/main/scala/rocket/RocketCore.scala
index 2fa64461..efc86b9c 100644
--- a/src/main/scala/rocket/RocketCore.scala
+++ b/src/main/scala/rocket/RocketCore.scala
@@ -744,11 +744,8 @@ class Rocket(implicit p: Parameters) extends CoreModule()(p)
   def encodeVirtualAddress(a0: UInt, ea: UInt) = if (vaddrBitsExtended == vaddrBits) ea else {
     // efficient means to compress 64-bit VA into vaddrBits+1 bits
     // (VA is bad if VA(vaddrBits) != VA(vaddrBits-1))
-    val a = a0 >> vaddrBits-1
-    val e = ea(vaddrBits,vaddrBits-1).asSInt
-    val msb =
-      Mux(a === UInt(0) || a === UInt(1), e =/= SInt(0),
-      Mux(a.asSInt === SInt(-1) || a.asSInt === SInt(-2), e === SInt(-1), e(0)))
+    val a = a0.asSInt >> vaddrBits
+    val msb = Mux(a === 0.S || a === -1.S, ea(vaddrBits), !ea(vaddrBits-1))
     Cat(msb, ea(vaddrBits-1,0))
   }
 

From ee1a9485dfe1ec04cb0f25953d7f94a97ec38038 Mon Sep 17 00:00:00 2001
From: Andrew Waterman
Date: Tue, 2 Jan 2018 18:28:59 -0800
Subject: [PATCH 2/2] Enforce physical-address canonicalization

When xLen > paddrBits, enforce that physical addresses are zero-extended.
This works by checking that the _virtual_ address is _sign_-extended, then
checking that its sign is positive.
---
 src/main/scala/rocket/TLB.scala | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/main/scala/rocket/TLB.scala b/src/main/scala/rocket/TLB.scala
index ed2cc23f..9ecae1e0 100644
--- a/src/main/scala/rocket/TLB.scala
+++ b/src/main/scala/rocket/TLB.scala
@@ -98,12 +98,11 @@ class TLB(instruction: Boolean, lgMaxSize: Int, nEntries: Int)(implicit edge: TL
   val vm_enabled = Bool(usingVM) && io.ptw.ptbr.mode(io.ptw.ptbr.mode.getWidth-1) && priv_uses_vm && !io.req.bits.passthrough
 
   // share a single physical memory attribute checker (unshare if critical path)
-  val vpn = io.req.bits.vaddr(vaddrBits-1, pgIdxBits)
   val refill_ppn = io.ptw.resp.bits.pte.ppn(ppnBits-1, 0)
   val do_refill = Bool(usingVM) && io.ptw.resp.valid
   val invalidate_refill = state.isOneOf(s_request /* don't care */, s_wait_invalidate)
   val mpu_ppn = Mux(do_refill, refill_ppn,
-    Mux(vm_enabled, entries.last.ppn, vpn))
+    Mux(vm_enabled, entries.last.ppn, io.req.bits.vaddr >> pgIdxBits))
   val mpu_physaddr = Cat(mpu_ppn, io.req.bits.vaddr(pgIdxBits-1, 0))
   val pmp = Module(new PMPChecker(lgMaxSize))
   pmp.io.addr := mpu_physaddr
@@ -122,6 +121,7 @@ class TLB(instruction: Boolean, lgMaxSize: Int, nEntries: Int)(implicit edge: TL
   val prot_x = fastCheck(_.executable) && pmp.io.x
   val prot_eff = fastCheck(Seq(RegionType.PUT_EFFECTS, RegionType.GET_EFFECTS) contains _.regionType)
 
+  val vpn = io.req.bits.vaddr(vaddrBits-1, pgIdxBits)
   val lookup_tag = Cat(io.ptw.ptbr.asid, vpn)
   val hitsVec = (0 until totalEntries).map { i => if (!usingVM) false.B else vm_enabled && {
     var tagMatch = valid(i)
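
Illustrative note, not part of the patch series: the sketch below is a minimal plain-Scala
software model of the canonicalization check that PATCH 1/2 implements, assuming an
Sv39-style configuration with vaddrBits = 39. The object, value, and test names are
hypothetical; this only models the behavior of encodeVirtualAddress, it is not the
Chisel source.

  // Software model of the VA compression in encodeVirtualAddress (illustrative only).
  // A non-canonical address is encoded so that bit vaddrBits differs from bit
  // vaddrBits-1, which downstream logic treats as a bad virtual address.
  object CanonicalVA {
    val vaddrBits = 39  // assumption: Sv39-style virtual address width

    def encodeVirtualAddress(a0: Long, ea: Long): Long = {
      val a = a0 >> vaddrBits                    // arithmetic shift: 0 or -1 iff a0 is canonical
      val eaMsb   = (ea >> vaddrBits) & 1L       // bit vaddrBits of the effective address
      val eaBelow = (ea >> (vaddrBits - 1)) & 1L // bit vaddrBits-1
      val msb = if (a == 0L || a == -1L) eaMsb else 1L - eaBelow
      (msb << vaddrBits) | (ea & ((1L << vaddrBits) - 1))
    }

    def main(args: Array[String]): Unit = {
      val canonical    = 0x0000003fffffff00L // bits above 38 are a sign extension of bit 38
      val nonCanonical = 0x0100003fffffff00L // stray bit above 38: must be flagged bad
      println(f"canonical:     ${encodeVirtualAddress(canonical, canonical)}%x")
      println(f"non-canonical: ${encodeVirtualAddress(nonCanonical, nonCanonical)}%x")
    }
  }

For the non-canonical input, the encoded result has bit 39 set and bit 38 clear, which is
exactly the condition the core's comment describes ("VA is bad if VA(vaddrBits) !=
VA(vaddrBits-1)"); for the canonical input the two bits agree and the address passes.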