1
0

generalize page table walker

also, don't instantiate vitlb when !HAVE_VEC
This commit is contained in:
Andrew Waterman
2012-05-03 02:29:09 -07:00
parent 2d4e5d3813
commit e1f9dc2c1f
3 changed files with 34 additions and 53 deletions

View File

@@ -71,18 +71,16 @@ class rocketHellaCacheArbiter(n: Int) extends Component
}
}
class ioPTW extends Bundle
class ioPTW(n: Int) extends Bundle
{
val itlb = (new ioTLB_PTW).flip
val dtlb = (new ioTLB_PTW).flip
val vitlb = (new ioTLB_PTW).flip
val requestor = Vec(n) { new ioTLB_PTW }.flip
val mem = new ioHellaCache
val ptbr = UFix(PADDR_BITS, INPUT)
}
class rocketPTW extends Component
class rocketPTW(n: Int) extends Component
{
val io = new ioPTW
val io = new ioPTW(n)
val levels = 3
val bitsPerLevel = VPN_BITS/levels
@@ -101,29 +99,20 @@ class rocketPTW extends Component
val vpn_idxs = (1 until levels).map(i => r_req_vpn((levels-i)*bitsPerLevel-1, (levels-i-1)*bitsPerLevel))
val vpn_idx = (2 until levels).foldRight(vpn_idxs(0))((i,j) => Mux(count === UFix(i-1), vpn_idxs(i-1), j))
val req_val = io.itlb.req_val || io.dtlb.req_val || io.vitlb.req_val
// give ITLB requests priority over DTLB requests
val req_itlb_val = io.itlb.req_val;
val req_dtlb_val = io.dtlb.req_val && !io.itlb.req_val;
val req_vitlb_val = io.vitlb.req_val && !io.itlb.req_val && !io.dtlb.req_val
when ((state === s_ready) && req_itlb_val) {
r_req_vpn := io.itlb.req_vpn;
r_req_dest := Bits(0)
req_addr := Cat(io.ptbr(PADDR_BITS-1,PGIDX_BITS), io.itlb.req_vpn(VPN_BITS-1,VPN_BITS-bitsPerLevel), Bits(0,3))
val req_rdy = state === s_ready
var req_val = Bool(false)
for (r <- io.requestor) {
r.req_rdy := req_rdy && !req_val
req_val = req_val || r.req_val
}
val req_dest = PriorityEncoder(io.requestor.map(_.req_val))
val req_vpn = io.requestor.slice(0, n-1).foldRight(io.requestor(n-1).req_vpn)((r, v) => Mux(r.req_val, r.req_vpn, v))
when ((state === s_ready) && req_dtlb_val) {
r_req_vpn := io.dtlb.req_vpn;
r_req_dest := Bits(1)
req_addr := Cat(io.ptbr(PADDR_BITS-1,PGIDX_BITS), io.dtlb.req_vpn(VPN_BITS-1,VPN_BITS-bitsPerLevel), Bits(0,3))
}
when ((state === s_ready) && req_vitlb_val) {
r_req_vpn := io.vitlb.req_vpn;
r_req_dest := Bits(2)
req_addr := Cat(io.ptbr(PADDR_BITS-1,PGIDX_BITS), io.vitlb.req_vpn(VPN_BITS-1,VPN_BITS-bitsPerLevel), Bits(0,3))
when (state === s_ready && req_val) {
r_req_vpn := req_vpn
r_req_dest := req_dest
req_addr := Cat(io.ptbr(PADDR_BITS-1,PGIDX_BITS), req_vpn(VPN_BITS-1,VPN_BITS-bitsPerLevel), Bits(0,3))
}
val dmem_resp_val = Reg(io.mem.resp.valid, resetVal = Bool(false))
@@ -145,26 +134,17 @@ class rocketPTW extends Component
val resp_ptd = io.mem.resp.bits.data_subword(1,0) === Bits(1)
val resp_pte = io.mem.resp.bits.data_subword(1,0) === Bits(2)
io.itlb.req_rdy := (state === s_ready)
io.dtlb.req_rdy := (state === s_ready) && !io.itlb.req_val
io.vitlb.req_rdy := (state === s_ready) && !io.itlb.req_val && !io.dtlb.req_val
io.itlb.resp_val := r_req_dest === Bits(0) && resp_val
io.dtlb.resp_val := r_req_dest === Bits(1) && resp_val
io.vitlb.resp_val := r_req_dest === Bits(2) && resp_val
io.itlb.resp_err := r_req_dest === Bits(0) && resp_err
io.dtlb.resp_err := r_req_dest === Bits(1) && resp_err
io.vitlb.resp_err := r_req_dest === Bits(2) && resp_err
io.itlb.resp_perm := r_resp_perm
io.dtlb.resp_perm := r_resp_perm
io.vitlb.resp_perm:= r_resp_perm
val resp_ppns = (0 until levels-1).map(i => Cat(r_resp_ppn(PPN_BITS-1, VPN_BITS-bitsPerLevel*(i+1)), r_req_vpn(VPN_BITS-1-bitsPerLevel*(i+1), 0)))
val resp_ppn = (0 until levels-1).foldRight(r_resp_ppn)((i,j) => Mux(count === UFix(i), resp_ppns(i), j))
io.itlb.resp_ppn := resp_ppn;
io.dtlb.resp_ppn := resp_ppn;
io.vitlb.resp_ppn := resp_ppn;
for (i <- 0 until io.requestor.size) {
val me = r_req_dest === UFix(i)
io.requestor(i).resp_val := resp_val && me
io.requestor(i).resp_err := resp_err && me
io.requestor(i).resp_perm := r_resp_perm
io.requestor(i).resp_ppn := resp_ppn
}
// control state machine
switch (state) {