xref: /XiangShan/src/main/scala/xiangshan/cache/mmu/PageTableWalker.scala (revision e836c7705c53f8360816d56db7f6d37725aad2a6)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** Page table walking is divided into two parts:
  * One,   PTW: walks the non-leaf PDEs, one level at a time
  * Two, LLPTW: walks only the leaf PTEs (4KB pages), in parallel
  */


/** PTW : page table walker
  * a finite state machine
  * that only handles 1GB and 2MB page walks,
  * i.e. every level except the last (leaf) one
  **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l3Hit = if (EnableSv48) Some(new Bool()) else None
    val l2Hit = Bool()
    val ppn = UInt(ptePPNLen.W)
    val stage1Hit = Bool()
    val stage1 = new PtwMergeResp
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val s2xlate = UInt(2.W)
    val resp = new PtwMergeResp
    val h_resp = new HptwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: llptw changed from "connect to llptw" to "connect to page cache"
  // to avoid a corner case that caused duplicate entries
  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(ptePPNLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val h_resp = Output(new HptwResp)
    }))
  }
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level + 1).W)
  })
}

class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)
  val sfence = io.sfence
  val mem = io.mem
  val req_s2xlate = Reg(UInt(2.W))
  val enableS2xlate = req_s2xlate =/= noS2xlate
  val onlyS1xlate = req_s2xlate === onlyStage1
  val onlyS2xlate = req_s2xlate === onlyStage2
  val satp = Wire(new TlbSatpBundle())
  when (io.req.fire) {
    satp := Mux(io.req.bits.req_info.s2xlate =/= noS2xlate, io.csr.vsatp, io.csr.satp)
  } .otherwise {
    satp := Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  }
  val s1Pbmte = Mux(req_s2xlate =/= noS2xlate, io.csr.hPBMTE, io.csr.mPBMTE)

  val mode = satp.mode
  val hgatp = io.csr.hgatp
  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val s2xlate = enableS2xlate && !onlyS1xlate
  val level = RegInit(3.U(log2Up(Level + 1).W))
  val af_level = RegInit(3.U(log2Up(Level + 1).W)) // access fault return this level
  val gpf_level = RegInit(3.U(log2Up(Level + 1).W))
  val ppn = Reg(UInt(ptePPNLen.W))
  val vpn = Reg(UInt(vpnLen.W)) // vpn or gvpn(onlyS2xlate)
  val levelNext = level - 1.U
  val l3Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val pte = mem.resp.bits.asTypeOf(new PteBundle())

  // s/w register
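  // Naming convention (derivable from the request conditions below): s_* means
  // "still need to send this request" and w_* means "still waiting for this
  // response"; both are active-low, so false.B marks the action as pending.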
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val s_hptw_req = RegInit(true.B)
  val w_hptw_resp = RegInit(true.B)
  val s_last_hptw_req = RegInit(true.B)
  val w_last_hptw_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)

  val idle = RegInit(true.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = idle === false.B && (s_pmp_check === false.B || mem_addr_update) && !finish

  val pageFault = pte.isPf(level, s1Pbmte)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, false.B, sent_to_pmp)

  val hptw_pageFault = RegInit(false.B)
  val hptw_accessFault = RegInit(false.B)
  val need_last_s2xlate = RegInit(false.B)
  val stage1Hit = RegEnable(io.req.bits.stage1Hit, io.req.fire)
  val stage1 = RegEnable(io.req.bits.stage1, io.req.fire)
  val hptw_resp_stage2 = Reg(Bool())

  val ppn_af = Mux(enableS2xlate, Mux(onlyS1xlate, pte.isAf(), false.B), pte.isAf()) // in two-stage translation, the stage-1 ppn is a vpn for the host, so there is no need to check ppn_high
  val find_pte = pte.isLeaf() || ppn_af || pageFault
  val to_find_pte = level === 1.U && find_pte === false.B
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire)

  val l3addr = Wire(UInt(PAddrBits.W))
  val l2addr = Wire(UInt(PAddrBits.W))
  val l1addr = Wire(UInt(PAddrBits.W))
  val mem_addr = Wire(UInt(PAddrBits.W))

  l3addr := MakeAddr(satp.ppn, getVpnn(vpn, 3))
  if (EnableSv48) {
    when (mode === Sv48) {
      l2addr := MakeAddr(Mux(l3Hit, ppn, pte.getPPN()), getVpnn(vpn, 2))
    } .otherwise {
      l2addr := MakeAddr(satp.ppn, getVpnn(vpn, 2))
    }
  } else {
    l2addr := MakeAddr(satp.ppn, getVpnn(vpn, 2))
  }
  l1addr := MakeAddr(Mux(l2Hit, ppn, pte.getPPN()), getVpnn(vpn, 1))
  mem_addr := Mux(af_level === 3.U, l3addr, Mux(af_level === 2.U, l2addr, l1addr))
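  // mem_addr is the PTE address for the level currently being accessed. Assuming
  // the usual MakeAddr layout (base ppn ## 9-bit vpn slice ## 3-bit byte offset
  // for 8-byte PTEs), e.g. the Sv39 root access reads satp.ppn ## vpn[26:18] ## 0b000.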

  val hptw_resp = Reg(new HptwResp)

  val update_full_gvpn_mem_resp = RegInit(false.B)
  val full_gvpn_reg = Reg(UInt(ptePPNLen.W))
  val full_gvpn_wire = pte.getPPN()
  val full_gvpn = Mux(update_full_gvpn_mem_resp, full_gvpn_wire, full_gvpn_reg)

  val gpaddr = MuxCase(mem_addr, Seq(
    (stage1Hit || onlyS2xlate) -> Cat(full_gvpn, 0.U(offLen.W)),
    !s_last_hptw_req -> Cat(MuxLookup(level, pte.getPPN())(Seq(
      3.U -> Cat(pte.getPPN()(ptePPNLen - 1, vpnnLen * 3), vpn(vpnnLen * 3 - 1, 0)),
      2.U -> Cat(pte.getPPN()(ptePPNLen - 1, vpnnLen * 2), vpn(vpnnLen * 2 - 1, 0)),
      1.U -> Cat(pte.getPPN()(ptePPNLen - 1, vpnnLen), vpn(vpnnLen - 1, 0))
    )),
    0.U(offLen.W))
  ))
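  // For the final stage-2 lookup of a superpage leaf, the guest-physical page
  // number splices the low vpn bits into the leaf ppn: e.g. at level 1 (2MB
  // pages) the low 9 vpn bits replace the low 9 ppn bits.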
  val gvpn_gpf =
    !(hptw_pageFault || hptw_accessFault) &&
    Mux(
      s2xlate && io.csr.hgatp.mode === Sv39x4,
      full_gvpn(ptePPNLen - 1, GPAddrBitsSv39x4 - offLen) =/= 0.U,
      Mux(
        s2xlate && io.csr.hgatp.mode === Sv48x4,
        full_gvpn(ptePPNLen - 1, GPAddrBitsSv48x4 - offLen) =/= 0.U,
        false.B
      )
    )

  val guestFault = hptw_pageFault || hptw_accessFault || gvpn_gpf
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val fake_h_resp = WireInit(0.U.asTypeOf(new HptwResp))
  fake_h_resp.entry.tag := get_pn(gpaddr)
  fake_h_resp.entry.vmid.map(_ := io.csr.hgatp.vmid)
  fake_h_resp.gpf := true.B

  val pte_valid = RegInit(false.B)  // avoid an L1TLB page fault from stage 1 when a gpf happens in the first stage-2 translation in PTW
  val fake_pte = WireInit(0.U.asTypeOf(new PteBundle()))
  fake_pte.perm.v := false.B // tell L1TLB this is a fake pte
  fake_pte.ppn := ppn(ppnLen - 1, 0)
  fake_pte.ppn_high := ppn(ptePPNLen - 1, ppnLen)

  io.req.ready := idle
  val ptw_resp = Wire(new PtwMergeResp)
  ptw_resp.apply(
    Mux(pte_valid, pageFault && !accessFault, false.B),
    accessFault || (ppn_af && !(pte_valid && (pageFault || guestFault))),
    Mux(accessFault, af_level, Mux(guestFault, gpf_level, level)),
    Mux(pte_valid, pte, fake_pte),
    vpn,
    satp.asid,
    hgatp.vmid,
    vpn(sectortlbwidth - 1, 0),
    not_super = false,
    not_merge = false
  )
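  // Fault priority in the merged response: access fault beats guest fault, which
  // beats page fault; the reported level follows the same order
  // (af_level / gpf_level / level).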

  val normal_resp = idle === false.B && mem_addr_update && !need_last_s2xlate && (guestFault || (w_mem_resp && find_pte) || (s_pmp_check && accessFault) || onlyS2xlate)
  val stageHit_resp = idle === false.B && hptw_resp_stage2
  io.resp.valid := Mux(stage1Hit, stageHit_resp, normal_resp)
  io.resp.bits.source := source
  io.resp.bits.resp := Mux(stage1Hit || (l3Hit || l2Hit) && guestFault && !pte_valid, stage1, ptw_resp)
  io.resp.bits.h_resp := Mux(gvpn_gpf, fake_h_resp, hptw_resp)
  io.resp.bits.s2xlate := req_s2xlate

  io.llptw.valid := s_llptw_req === false.B && to_find_pte && !accessFault && !guestFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.req_info.s2xlate := req_s2xlate
  io.llptw.bits.ppn := DontCare

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := s_mem_req === false.B && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  mem.req.bits.id := FsmReqID.U(bMemID.W)
  mem.req.bits.hptw_bypassed := false.B

  io.refill.req_info.s2xlate := req_s2xlate
  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  io.hptw.req.valid := !s_hptw_req || !s_last_hptw_req
  io.hptw.req.bits.id := FsmReqID.U(bMemID.W)
  io.hptw.req.bits.gvpn := get_pn(gpaddr)
  io.hptw.req.bits.source := source

  when (io.req.fire && io.req.bits.stage1Hit){
    idle := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    s_last_hptw_req := false.B
    hptw_resp_stage2 := false.B
    need_last_s2xlate := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
    full_gvpn_reg := io.req.bits.stage1.genPPN()
  }

  when (io.resp.fire && stage1Hit){
    idle := true.B
  }

  when (io.req.fire && !io.req.bits.stage1Hit){
    val req = io.req.bits
    val gvpn_wire = Wire(UInt(ptePPNLen.W))
    if (EnableSv48) {
      when (mode === Sv48) {
        level := Mux(req.l2Hit, 1.U, Mux(req.l3Hit.get, 2.U, 3.U))
        af_level := Mux(req.l2Hit, 1.U, Mux(req.l3Hit.get, 2.U, 3.U))
        gpf_level := Mux(req.l2Hit, 2.U, Mux(req.l3Hit.get, 3.U, 0.U))
        ppn := Mux(req.l2Hit || req.l3Hit.get, io.req.bits.ppn, satp.ppn)
        l3Hit := req.l3Hit.get
        gvpn_wire := Mux(req.l2Hit || req.l3Hit.get, io.req.bits.ppn, satp.ppn)
      } .otherwise {
        level := Mux(req.l2Hit, 1.U, 2.U)
        af_level := Mux(req.l2Hit, 1.U, 2.U)
        gpf_level := 0.U
        ppn := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
        l3Hit := false.B
        gvpn_wire := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
      }
    } else {
      level := Mux(req.l2Hit, 1.U, 2.U)
      af_level := Mux(req.l2Hit, 1.U, 2.U)
      gpf_level := 0.U
      ppn := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
      l3Hit := false.B
      gvpn_wire := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
    }
    vpn := io.req.bits.req_info.vpn
    l2Hit := req.l2Hit
    accessFault := false.B
    idle := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
    pte_valid := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    when(io.req.bits.req_info.s2xlate === onlyStage2){
      full_gvpn_reg := io.req.bits.req_info.vpn
      val onlys2_gpaddr = Cat(io.req.bits.req_info.vpn, 0.U(offLen.W)) // 50 bits in total, so the high bits need no extra check when Sv48x4 is enabled
      val check_gpa_high_fail = Mux(io.req.bits.req_info.s2xlate === onlyStage2 && io.csr.hgatp.mode === Sv39x4, onlys2_gpaddr(onlys2_gpaddr.getWidth - 1, GPAddrBitsSv39x4) =/= 0.U, false.B)
      need_last_s2xlate := false.B
      when(check_gpa_high_fail){
        mem_addr_update := true.B
      }.otherwise{
        s_last_hptw_req := false.B
      }
    }.elsewhen(io.req.bits.req_info.s2xlate === allStage){
      full_gvpn_reg := 0.U
      val allstage_gpaddr = Cat(gvpn_wire, 0.U(offLen.W))
      val check_gpa_high_fail = Mux(io.csr.hgatp.mode === Sv39x4, allstage_gpaddr(allstage_gpaddr.getWidth - 1, GPAddrBitsSv39x4) =/= 0.U, Mux(io.csr.hgatp.mode === Sv48x4, allstage_gpaddr(allstage_gpaddr.getWidth - 1, GPAddrBitsSv48x4) =/= 0.U, false.B))
      when(check_gpa_high_fail){
        mem_addr_update := true.B
      }.otherwise{
        need_last_s2xlate := true.B
        s_hptw_req := false.B
      }
    }.otherwise {
      full_gvpn_reg := 0.U
      need_last_s2xlate := false.B
      s_pmp_check := false.B
    }
  }
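  // Request-entry summary: onlyStage2 jumps straight to the final stage-2 lookup
  // (s_last_hptw_req), allStage first translates the root gpaddr through HPTW
  // (s_hptw_req), and noS2xlate/onlyStage1 starts the PMP check directly.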

  when(io.hptw.req.fire && s_hptw_req === false.B){
    s_hptw_req := true.B
    w_hptw_resp := false.B
  }

  when(io.hptw.resp.fire && w_hptw_resp === false.B) {
    w_hptw_resp := true.B
    val g_perm_fail = !io.hptw.resp.bits.h_resp.gaf && (!io.hptw.resp.bits.h_resp.entry.perm.get.r && !(io.csr.priv.mxr && io.hptw.resp.bits.h_resp.entry.perm.get.x))
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf || g_perm_fail
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    hptw_resp := io.hptw.resp.bits.h_resp
    hptw_resp.gpf := io.hptw.resp.bits.h_resp.gpf || g_perm_fail
    when(!(g_perm_fail || io.hptw.resp.bits.h_resp.gpf || io.hptw.resp.bits.h_resp.gaf)) {
      s_pmp_check := false.B
    }.otherwise {
      mem_addr_update := true.B
      need_last_s2xlate := false.B
    }
  }

  when(io.hptw.req.fire && s_last_hptw_req === false.B) {
    w_last_hptw_resp := false.B
    s_last_hptw_req := true.B
  }

  when (io.hptw.resp.fire && w_last_hptw_resp === false.B && stage1Hit){
    w_last_hptw_resp := true.B
    hptw_resp_stage2 := true.B
    hptw_resp := io.hptw.resp.bits.h_resp
  }

  when(io.hptw.resp.fire && w_last_hptw_resp === false.B && !stage1Hit){
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    hptw_resp := io.hptw.resp.bits.h_resp
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
  }

  when(sent_to_pmp && mem_addr_update === false.B){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && idle === false.B){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    need_last_s2xlate := false.B
  }

  when(guestFault && idle === false.B){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    need_last_s2xlate := false.B
  }

  when (mem.req.fire){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(mem.resp.fire && w_mem_resp === false.B){
    w_mem_resp := true.B
    af_level := af_level - 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
    gpf_level := Mux(mode === Sv39 && !pte_valid && !(l3Hit || l2Hit), gpf_level - 2.U, gpf_level - 1.U)
    pte_valid := true.B
    update_full_gvpn_mem_resp := true.B
  }

  when(update_full_gvpn_mem_resp) {
    update_full_gvpn_mem_resp := false.B
    full_gvpn_reg := pte.getPPN()
  }

  when(mem_addr_update){
    when(level >= 2.U && !onlyS2xlate && !(guestFault || find_pte || accessFault)) {
      level := levelNext
      when(s2xlate){
        s_hptw_req := false.B
      }.otherwise{
        s_mem_req := false.B
      }
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen(io.llptw.valid){
      when(io.llptw.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        need_last_s2xlate := false.B
      }
      finish := true.B
    }.elsewhen(s2xlate && need_last_s2xlate === true.B) {
      need_last_s2xlate := false.B
      when(!(guestFault || accessFault || pageFault || ppn_af)){
        s_last_hptw_req := false.B
        mem_addr_update := false.B
      }
    }.elsewhen(io.resp.valid){
      when(io.resp.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }
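  // mem_addr_update resolves each walk step: either descend one level (via HPTW
  // first when stage-2 translation is active, else via a new memory request),
  // hand the 4KB leaf level over to LLPTW, issue the final stage-2 translation
  // of the leaf, or return the response.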


  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
  }


  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("ptw_ppn_af", io.resp.fire && ppn_af)
  XSPerfAccumulate("mem_count", mem.req.fire)
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire, mem.resp.fire, true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire                                     ),
    ("fsm_busy          ", !idle                                           ),
    ("fsm_idle          ", idle                                            ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                 ),
    ("mem_count         ", mem.req.fire                                    ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire, mem.resp.fire, true)),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                 ),
  )
  generatePerfEvent()
}

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
  * the page walker that only performs the last-level (4KB page) walk.
  **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(ptePPNLen.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val h_resp = Output(new HptwResp)
    val first_s2xlate_fault = Output(Bool()) // whether a pf/af occurred during the first stage-2 translation
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val value = Output(UInt(blockBits.W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
    val flush_latch = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle{
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(ptePPNLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val h_resp = Output(new HptwResp)
    }))
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(ptePPNLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
  val hptw_resp = new HptwResp()
  val first_s2xlate_fault = Output(Bool())
}


class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())
  val enableS2xlate = io.in.bits.req_info.s2xlate =/= noS2xlate
  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  val s1Pbmte = Mux(enableS2xlate, io.csr.hPBMTE, io.csr.mPBMTE)

  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val entries = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(0.U.asTypeOf(new LLPTWEntry()))))
  val state_idle :: state_hptw_req :: state_hptw_resp :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_last_hptw_req :: state_last_hptw_resp :: state_cache :: Nil = Enum(10)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))
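  // Per-entry state lifecycle (each of the llptwsize entries advances independently):
  //   idle -> addr_check -> mem_req -> mem_waiting -> mem_out -> idle
  // For two-stage (allStage) requests, hptw_req/hptw_resp precede the memory
  // access and last_hptw_req/last_hptw_resp follow it; requests answerable by
  // the page cache pass through state_cache instead.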

  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)
  val is_hptw_req = state.map(_ === state_hptw_req)
  val is_last_hptw_req = state.map(_ === state_last_hptw_req)
  val is_hptw_resp = state.map(_ === state_hptw_resp)
  val is_last_hptw_resp = state.map(_ === state_last_hptw_resp)

  val full = !ParallelOR(is_emptys).asBool
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
  val mem_arb = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  // process hptw requests serially
  val hyper_arb1 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb1.io.in(i).bits := entries(i)
    hyper_arb1.io.in(i).valid := is_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }
  val hyper_arb2 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for(i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb2.io.in(i).bits := entries(i)
    hyper_arb2.io.in(i).valid := is_last_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }

  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate req
  // to_wait: an earlier duplicate entry is still accessing mem, so enqueue in state_mem_waiting
  // to_cache: the duplicate's data has just come back, so enqueue in state_cache
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn) && io.in.bits.req_info.s2xlate === entries(i).req_info.s2xlate
  )
  val dup_req_fire = mem_arb.io.out.fire && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) && io.in.bits.req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate // dup with the req fire entry
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries, which have already sent the mem req
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with the "mem_out" entry that received its data just now
  val dup_vec_last_hptw = dup_vec.zipWithIndex.map{case (d, i) => d && (is_last_hptw_req(i) || is_last_hptw_resp(i))}
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire && VecInit(dup_vec_wait)(io.mem.resp.bits.id) && !io.mem.flush_latch(io.mem.resp.bits.id) // dup with the entry whose data arrives next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp && ((entries(io.mem.resp.bits.id).req_info.s2xlate === noS2xlate) || (entries(io.mem.resp.bits.id).req_info.s2xlate === onlyStage1))
  val to_cache = Cat(dup_vec_having).orR || Cat(dup_vec_last_hptw).orR
  val to_hptw_req = io.in.bits.req_info.s2xlate === allStage
  val to_last_hptw_req = dup_wait_resp && entries(io.mem.resp.bits.id).req_info.s2xlate === allStage
  val last_hptw_req_id = io.mem.resp.bits.id
  val req_paddr = MakeAddr(io.in.bits.ppn(ppnLen-1, 0), getVpnn(io.in.bits.req_info.vpn, 0))
  val req_hpaddr = MakeAddr(entries(last_hptw_req_id).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(io.in.bits.req_info.vpn, 0))
  val index = Mux(entries(last_hptw_req_id).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
  val last_hptw_req_ppn = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))(index).getPPN()
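  // Example: if entry 3 has already issued the mem request for vpn V and a new
  // request for the same vpn/s2xlate arrives, the new entry enqueues in
  // state_mem_waiting with wait_id = 3 and is woken by the same mem response.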
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req but some entries already waiting, should not happen")

  XSError(io.in.fire && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflict with to mem")
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
  val enq_state_normal = MuxCase(state_addr_check, Seq(
    to_mem_out -> state_mem_out, // same as the cases below, but the mem resp arrives right now
    to_last_hptw_req -> state_last_hptw_req,
    to_wait -> state_mem_waiting,
    to_cache -> state_cache,
    to_hptw_req -> state_hptw_req
  ))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
  when (io.in.fire) {
    // if a prefetch req does not need a mem access, just drop it,
    // so at most 1 + FilterSize entries need to re-access the page cache,
    // and 2 + FilterSize entries are enough to avoid deadlock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := Mux(to_last_hptw_req, last_hptw_req_ppn, io.in.bits.ppn)
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    entries(enq_ptr).hptw_resp := Mux(to_last_hptw_req, entries(last_hptw_req_id).hptw_resp, Mux(to_wait, entries(wait_id).hptw_resp, entries(enq_ptr).hptw_resp))
    entries(enq_ptr).first_s2xlate_fault := false.B
    mem_resp_hit(enq_ptr) := to_mem_out || to_last_hptw_req
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = GatedValidRegNext(enq_state === state_addr_check && io.in.fire && !flush)

  val hasHptwResp = ParallelOR(state.map(_ === state_hptw_resp)).asBool
  val hptw_resp_ptr_reg = RegNext(io.hptw.resp.bits.id)
  val hptw_need_addr_check = RegNext(hasHptwResp && io.hptw.resp.fire && !flush) && state(hptw_resp_ptr_reg) === state_addr_check

  val ptes = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))
  val gpaddr = MakeGPAddr(entries(hptw_resp_ptr_reg).ppn, getVpnn(entries(hptw_resp_ptr_reg).req_info.vpn, 0))
  val hptw_resp = entries(hptw_resp_ptr_reg).hptw_resp
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val addr = RegEnable(MakeAddr(io.in.bits.ppn(ppnLen - 1, 0), getVpnn(io.in.bits.req_info.vpn, 0)), io.in.fire)
  io.pmp.req.valid := need_addr_check || hptw_need_addr_check
  io.pmp.req.bits.addr := Mux(hptw_need_addr_check, hpaddr, addr)
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, this entry duplicates
    //       another one and its state has already been changed; when it duplicates the entry whose
    //       request is in flight it was set to mem_waiting above, and ld must be false, so don't care
    val ptr = Mux(hptw_need_addr_check, hptw_resp_ptr_reg, enq_ptr_reg)
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(ptr).af := accessFault
    state(ptr) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && state(i) =/= state_mem_out && state(i) =/= state_last_hptw_req && state(i) =/= state_last_hptw_resp
      && entries(i).req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate
      && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: "dup enq set state to mem_wait" -> "sending req set other dup entries to mem_wait"
        state(i) := state_mem_waiting
        entries(i).hptw_resp := entries(mem_arb.io.chosen).hptw_resp
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire) {
    state.indices.map{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        val req_paddr = MakeAddr(entries(i).ppn, getVpnn(entries(i).req_info.vpn, 0))
        val req_hpaddr = MakeAddr(entries(i).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(entries(i).req_info.vpn, 0))
        val index = Mux(entries(i).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
        state(i) := Mux(entries(i).req_info.s2xlate === allStage && !(ptes(index).isPf(0.U, s1Pbmte) || !ptes(index).isLeaf() || ptes(index).isAf() || ptes(index).isStage1Gpf(io.csr.vsatp.mode))
                , state_last_hptw_req, state_mem_out)
        mem_resp_hit(i) := true.B
        entries(i).ppn := ptes(index).getPPN() // for the last stage-2 translation
        entries(i).hptw_resp.gpf := Mux(entries(i).req_info.s2xlate === allStage, ptes(index).isStage1Gpf(io.csr.vsatp.mode), false.B)
      }
    }
  }

  when (hyper_arb1.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_req && entries(i).ppn === hyper_arb1.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb1.io.chosen === i.U) {
        state(i) := state_hptw_resp
        entries(i).wait_id := hyper_arb1.io.chosen
      }
    }
  }

  when (hyper_arb2.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_last_hptw_req && entries(i).ppn === hyper_arb2.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb2.io.chosen === i.U) {
        state(i) := state_last_hptw_resp
        entries(i).wait_id := hyper_arb2.io.chosen
      }
    }
  }

  when (io.hptw.resp.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        val check_g_perm_fail = !io.hptw.resp.bits.h_resp.gaf && (!io.hptw.resp.bits.h_resp.entry.perm.get.r && !(io.csr.priv.mxr && io.hptw.resp.bits.h_resp.entry.perm.get.x))
        when (check_g_perm_fail || io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf) {
          state(i) := state_mem_out
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).hptw_resp.gpf := io.hptw.resp.bits.h_resp.gpf || check_g_perm_fail
          entries(i).first_s2xlate_fault := io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf
        }.otherwise{ // no fault: redirect this entry to wait on a duplicate mem access, if one exists
          val need_to_waiting_vec = state.indices.map(i => state(i) === state_mem_waiting && dup(entries(i).req_info.vpn, entries(io.hptw.resp.bits.id).req_info.vpn))
          val waiting_index = ParallelMux(need_to_waiting_vec zip entries.map(_.wait_id))
          state(i) := Mux(Cat(need_to_waiting_vec).orR, state_mem_waiting, state_addr_check)
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).wait_id := Mux(Cat(need_to_waiting_vec).orR, waiting_index, entries(i).wait_id)
          // TODO: also update entries issuing the same hptw request
        }
      }
      when (state(i) === state_last_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        state(i) := state_mem_out
        entries(i).hptw_resp := io.hptw.resp.bits.h_resp
        // TODO: also update entries issuing the same hptw request
      }
    }
  }
  when (io.out.fire) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.map(a => when (a) { a := false.B } )
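  // mem_resp_hit acts as a one-cycle pulse: set when an entry's PTE data arrives
  // (or is about to arrive), sampled by io.mem.buffer_it below, then cleared here.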

  when (io.cache.fire) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire && io.cache.fire && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time at same entry")

  when (flush) {
    state.map(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af
  io.out.bits.h_resp := entries(mem_ptr).hptw_resp
  io.out.bits.first_s2xlate_fault := entries(mem_ptr).first_s2xlate_fault

  val hptw_req_arb = Module(new Arbiter(new Bundle{
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val ppn = UInt(ptePPNLen.W)
    }, 2))
  // first stage 2 translation
  hptw_req_arb.io.in(0).valid := hyper_arb1.io.out.valid
  hptw_req_arb.io.in(0).bits.source := hyper_arb1.io.out.bits.req_info.source
  hptw_req_arb.io.in(0).bits.ppn := hyper_arb1.io.out.bits.ppn
  hptw_req_arb.io.in(0).bits.id := hyper_arb1.io.chosen
  hyper_arb1.io.out.ready := hptw_req_arb.io.in(0).ready
  // last stage 2 translation
  hptw_req_arb.io.in(1).valid := hyper_arb2.io.out.valid
  hptw_req_arb.io.in(1).bits.source := hyper_arb2.io.out.bits.req_info.source
  hptw_req_arb.io.in(1).bits.ppn := hyper_arb2.io.out.bits.ppn
  hptw_req_arb.io.in(1).bits.id := hyper_arb2.io.chosen
  hyper_arb2.io.out.ready := hptw_req_arb.io.in(1).ready
  hptw_req_arb.io.out.ready := io.hptw.req.ready
  io.hptw.req.valid := hptw_req_arb.io.out.fire && !flush
  io.hptw.req.bits.gvpn := hptw_req_arb.io.out.bits.ppn
  io.hptw.req.bits.id := hptw_req_arb.io.out.bits.id
  io.hptw.req.bits.source := hptw_req_arb.io.out.bits.source

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  val mem_paddr = MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  val mem_hpaddr = MakeAddr(mem_arb.io.out.bits.hptw_resp.genPPNS2(get_pn(mem_paddr)), getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.addr := Mux(mem_arb.io.out.bits.req_info.s2xlate === allStage, mem_hpaddr, mem_paddr)
  io.mem.req.bits.id := mem_arb.io.chosen
  io.mem.req.bits.hptw_bypassed := false.B
  mem_arb.io.out.ready := io.mem.req.ready
  val mem_refill_id = RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))
  io.mem.refill := entries(mem_refill_id).req_info
  io.mem.refill.s2xlate := entries(mem_refill_id).req_info.s2xlate
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire)
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire)
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  val perfEvents = Seq(
    ("tlbllptw_incount           ", io.in.fire               ),
    ("tlbllptw_inblock           ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount          ", io.mem.req.fire          ),
    ("tlbllptw_memcycle          ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}

/*========================= HPTW ==============================*/

/** HPTW : Hypervisor Page Table Walker
  * performs the virtual machine's page walks:
  * guest physical address translation, guest physical address -> host physical address
  **/
class HPTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val gvpn = UInt(gvpnLen.W)
    val ppn = UInt(ppnLen.W)
    val l3Hit = if (EnableSv48) Some(new Bool()) else None
    val l2Hit = Bool()
    val l1Hit = Bool()
    val bypassed = Bool() // if bypassed, don't refill
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = Output(new HptwResp())
    val id = Output(UInt(bMemID.W))
  })

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level + 1).W)
  })
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class HPTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new HPTWIO)
  val hgatp = io.csr.hgatp
  val mpbmte = io.csr.mPBMTE
  val sfence = io.sfence
  val flush = sfence.valid || hgatp.changed || io.csr.satp.changed || io.csr.vsatp.changed
  val mode = hgatp.mode

  val level = RegInit(3.U(log2Up(Level + 1).W))
  val af_level = RegInit(3.U(log2Up(Level + 1).W)) // access fault return this level
  val gpaddr = Reg(UInt(GPAddrBits.W))
  val req_ppn = Reg(UInt(ppnLen.W))
  val vpn = gpaddr(GPAddrBits-1, offLen)
  val levelNext = level - 1.U
  val l3Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val l1Hit = Reg(Bool())
  val bypassed = Reg(Bool())
//  val pte = io.mem.resp.bits.MergeRespToPte()
  val pte = io.mem.resp.bits.asTypeOf(new PteBundle().cloneType)
  val ppn_l3 = Mux(l3Hit, req_ppn, pte.ppn)
  val ppn_l2 = Mux(l2Hit, req_ppn, pte.ppn)
  val ppn_l1 = Mux(l1Hit, req_ppn, pte.ppn)
  val ppn = Wire(UInt(PAddrBits.W))
  val p_pte = MakeAddr(ppn, getVpnn(vpn, level))
  val pg_base = Wire(UInt(PAddrBits.W))
  val mem_addr = Wire(UInt(PAddrBits.W))
  if (EnableSv48) {
    when (mode === Sv48) {
      ppn := Mux(af_level === 2.U, ppn_l3, Mux(af_level === 1.U, ppn_l2, ppn_l1)) // for l3, l2 and l1
      pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 3.U, mode = Sv48)) // for l3
      mem_addr := Mux(af_level === 3.U, pg_base, p_pte)
    } .otherwise {
      ppn := Mux(af_level === 1.U, ppn_l2, ppn_l1) // for l2 and l1
      pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U, mode = Sv39))
      mem_addr := Mux(af_level === 2.U, pg_base, p_pte)
    }
  } else {
    ppn := Mux(af_level === 1.U, ppn_l2, ppn_l1) // for l2 and l1
    pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U, mode = Sv39))
    mem_addr := Mux(af_level === 2.U, pg_base, p_pte)
  }
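  // mem_addr selects between the guest page table root (pg_base, built from
  // hgatp) for the first access and the PTE address derived from the previous
  // level's ppn (p_pte) afterwards; af_level tracks the level being accessed.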

  // s/w register
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val idle = RegInit(true.B)
  val mem_addr_update = RegInit(false.B)
  val finish = WireInit(false.B)

  val sent_to_pmp = !idle && (!s_pmp_check || mem_addr_update) && !finish
  val pageFault = pte.isGpf(level, mpbmte) || (!pte.isLeaf() && level === 0.U)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault

  val resp_valid = !idle && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  val id = Reg(UInt(log2Up(l2tlbParams.llptwsize).W))
  val source = RegEnable(io.req.bits.source, io.req.fire)

  io.req.ready := idle
  val resp = Wire(new HptwResp())
  // accessFault > pageFault > ppn_af
  resp.apply(
    gpf = pageFault && !accessFault,
    gaf = accessFault || (ppn_af && !pageFault),
    level = Mux(accessFault, af_level, level),
    pte = pte,
    vpn = vpn,
    vmid = hgatp.vmid
  )
  io.resp.valid := resp_valid
  io.resp.bits.id := id
  io.resp.bits.resp := resp
  io.resp.bits.source := source

  io.pmp.req.valid := DontCare
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd := TlbCmd.read

  io.mem.req.valid := !s_mem_req && !io.mem.mask && !accessFault && s_pmp_check
  io.mem.req.bits.addr := mem_addr
  io.mem.req.bits.id := HptwReqId.U(bMemID.W)
  io.mem.req.bits.hptw_bypassed := bypassed

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source
  io.refill.req_info.s2xlate := onlyStage2
  when (idle){
    when(io.req.fire){
      bypassed := io.req.bits.bypassed
      idle := false.B
      gpaddr := Cat(io.req.bits.gvpn, 0.U(offLen.W))
      accessFault := false.B
      s_pmp_check := false.B
      id := io.req.bits.id
      req_ppn := io.req.bits.ppn
      if (EnableSv48) {
        when (mode === Sv48) {
          level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, Mux(io.req.bits.l3Hit.get, 2.U, 3.U)))
          af_level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, Mux(io.req.bits.l3Hit.get, 2.U, 3.U)))
          l3Hit := io.req.bits.l3Hit.get
        } .otherwise {
          level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
          af_level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
          l3Hit := false.B
        }
      } else {
        level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
        af_level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
        l3Hit := false.B
      }
      l2Hit := io.req.bits.l2Hit
      l1Hit := io.req.bits.l1Hit
    }
  }

  when(sent_to_pmp && !mem_addr_update){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && !idle){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when(io.mem.req.fire){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(io.mem.resp.fire && !w_mem_resp){
    w_mem_resp := true.B
    af_level := af_level - 1.U
    mem_addr_update := true.B
  }

  when(mem_addr_update){
    when(!(find_pte || accessFault)){
      level := levelNext
      s_mem_req := false.B
      mem_addr_update := false.B
    }.elsewhen(resp_valid){
      when(io.resp.fire){
        idle := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }

  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
  }
}