xref: /XiangShan/src/main/scala/xiangshan/cache/mmu/PageTableWalker.scala (revision 3ea4388c307775f866cbebd6405f8201d60f1e53)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** The page table walk is divided into two parts:
  * One,   PTW: walks the non-leaf PDEs level by level, one request at a time
  * Two, LLPTW: walks only the leaf (4KB) PTEs, handling multiple requests in parallel
  */
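// Background (illustrative, not from the original source): in Sv39 a virtual
// address splits into vpn2 | vpn1 | vpn0 | offset (9 + 9 + 9 + 12 bits), one VPN
// slice per walk level. For example, va = (1 << 30) | (2 << 21) | (3 << 12) =
// 0x4040_3000 gives vpn2 = 1, vpn1 = 2, vpn0 = 3, offset = 0; PTW resolves the
// vpn2/vpn1 levels and LLPTW resolves vpn0.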


/** PTW : page table walker
  * a finite state machine
  * that handles only the 1GB and 2MB (non-leaf) levels of a page walk;
  * in other words, every level except the last one (the leaf)
  **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l3Hit = if (EnableSv48) Some(new Bool()) else None
    val l2Hit = Bool()
    val ppn = UInt(gvpnLen.W)
    val stage1Hit = Bool()
    val stage1 = new PtwMergeResp
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val s2xlate = UInt(2.W)
    val resp = new PtwMergeResp
    val h_resp = new HptwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: the llptw port changed from "connect to llptw" to "connect to page cache"
  // to avoid a corner case that caused duplicate entries

  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(vpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val h_resp = Output(new HptwResp)
    }))
  }
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level + 1).W)
  })
}

class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)
  val sfence = io.sfence
  val mem = io.mem
  val req_s2xlate = Reg(UInt(2.W))
  val enableS2xlate = req_s2xlate =/= noS2xlate
  val onlyS1xlate = req_s2xlate === onlyStage1
  val onlyS2xlate = req_s2xlate === onlyStage2

  val satp = Wire(new TlbSatpBundle())
  when (io.req.fire) {
    satp := Mux(io.req.bits.req_info.s2xlate =/= noS2xlate, io.csr.vsatp, io.csr.satp)
  } .otherwise {
    satp := Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  }

  val mode = satp.mode
  val hgatp = io.csr.hgatp
  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val s2xlate = enableS2xlate && !onlyS1xlate
  val level = RegInit(3.U(log2Up(Level + 1).W))
  val af_level = RegInit(3.U(log2Up(Level + 1).W)) // an access fault returns this level
  val ppn = Reg(UInt(gvpnLen.W))
  val vpn = Reg(UInt(vpnLen.W)) // vpn or gvpn (onlyS2xlate)
  val levelNext = level - 1.U
  val l3Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val pte_valid = RegInit(false.B) // avoid x states
  val fake_pte = 0.U.asTypeOf(new PteBundle())
  fake_pte.perm.v := true.B
  fake_pte.perm.r := true.B
  fake_pte.perm.w := true.B
  fake_pte.perm.x := true.B
  val pte = Mux(pte_valid, mem.resp.bits.asTypeOf(new PteBundle()), fake_pte)
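  // Before the first memory response arrives (pte_valid is false), "pte" reads as
  // fake_pte, a synthetic leaf with v/r/w/x set. This matters mainly for the
  // onlyS2xlate path, which never fetches a stage-1 PTE but must still look like
  // it found a valid leaf so the walk can complete from the stage-2 (hptw) result.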

  // s/w handshake registers
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val s_hptw_req = RegInit(true.B)
  val w_hptw_resp = RegInit(true.B)
  val s_last_hptw_req = RegInit(true.B)
  val w_last_hptw_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)
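  // Convention for the s_*/w_* flags above: an s_* ("sent") flag is cleared to
  // schedule a request and set back to true once that request fires; a w_* ("wait")
  // flag is cleared when the request fires and set back to true when the response
  // returns. All flags idle at true, so "false" always means work is pending.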

  val idle = RegInit(true.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = idle === false.B && (s_pmp_check === false.B || mem_addr_update) && !finish

  val pageFault = pte.isPf(level)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val hptw_pageFault = RegInit(false.B)
  val hptw_accessFault = RegInit(false.B)
  val last_s2xlate = RegInit(false.B)
  val stage1Hit = RegEnable(io.req.bits.stage1Hit, io.req.fire)
  val stage1 = RegEnable(io.req.bits.stage1, io.req.fire)
  val hptw_resp_stage2 = Reg(Bool())

  val ppn_af = Mux(s2xlate, pte.isStage1Af(), pte.isAf()) // in two-stage translation, the stage-1 ppn is a guest vpn from the host's view, so there is no need to check ppn_high
  val guest_fault = hptw_pageFault || hptw_accessFault
  val find_pte = pte.isLeaf() || ppn_af || pageFault
  val to_find_pte = level === 1.U && find_pte === false.B
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire)

  val l3addr = Wire(UInt(PAddrBits.W))
  val l2addr = Wire(UInt(PAddrBits.W))
  val l1addr = Wire(UInt(PAddrBits.W))
  val mem_addr = Wire(UInt(PAddrBits.W))

  l3addr := MakeAddr(satp.ppn, getVpnn(vpn, 3))
  if (EnableSv48) {
    when (mode === Sv48) {
      l2addr := MakeAddr(Mux(l3Hit, ppn, pte.getPPN()), getVpnn(vpn, 2))
    } .otherwise {
      l2addr := MakeAddr(satp.ppn, getVpnn(vpn, 2))
    }
  } else {
    l2addr := MakeAddr(satp.ppn, getVpnn(vpn, 2))
  }
  l1addr := MakeAddr(Mux(l2Hit, ppn, pte.getPPN()), getVpnn(vpn, 1))
  mem_addr := Mux(af_level === 3.U, l3addr, Mux(af_level === 2.U, l2addr, l1addr))
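  // lNaddr is the PTE address for level N: the level-N table base (satp for the
  // root, otherwise the previous PTE's ppn, or the page-cache hit ppn on
  // l2Hit/l3Hit) indexed by that level's vpn slice. mem_addr selects among them by
  // af_level, which tracks the level of the access currently in flight.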

  val hptw_resp = RegEnable(io.hptw.resp.bits.h_resp, io.hptw.resp.fire)
  val gpaddr = MuxCase(mem_addr, Seq(
    stage1Hit -> Cat(stage1.genPPN(), 0.U(offLen.W)),
    onlyS2xlate -> Cat(vpn, 0.U(offLen.W)),
    !s_last_hptw_req -> Cat(MuxLookup(level, pte.ppn)(Seq(
      3.U -> Cat(pte.getPPN()(gvpnLen - 1, vpnnLen * 3), vpn(vpnnLen * 3 - 1, 0)),
      2.U -> Cat(pte.getPPN()(gvpnLen - 1, vpnnLen * 2), vpn(vpnnLen * 2 - 1, 0)),
      1.U -> Cat(pte.getPPN()(gvpnLen - 1, vpnnLen), vpn(vpnnLen - 1, 0))
    )),
    0.U(offLen.W))
  ))
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
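  // gpaddr is the guest-physical address that needs stage-2 translation: the merged
  // stage-1 result (stage1Hit), the guest VA itself (onlyS2xlate), the final page
  // address assembled from the leaf PTE (pending last hptw request), or by default
  // the address of the next stage-1 PTE. hpaddr is its host-physical counterpart,
  // formed from the latest hptw (stage-2) response.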

  io.req.ready := idle
  val ptw_resp = Wire(new PtwMergeResp)
  ptw_resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, Mux(accessFault, af_level, level), pte, vpn, satp.asid, hgatp.asid, vpn(sectortlbwidth - 1, 0), not_super = false)

  val normal_resp = idle === false.B && mem_addr_update && !last_s2xlate && (guest_fault || (w_mem_resp && find_pte) || (s_pmp_check && accessFault) || onlyS2xlate)
  val stageHit_resp = idle === false.B && hptw_resp_stage2
  io.resp.valid := Mux(stage1Hit, stageHit_resp, normal_resp)
  io.resp.bits.source := source
  io.resp.bits.resp := Mux(stage1Hit, stage1, ptw_resp)
  io.resp.bits.h_resp := hptw_resp
  io.resp.bits.s2xlate := req_s2xlate

  io.llptw.valid := s_llptw_req === false.B && to_find_pte && !accessFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.req_info.s2xlate := req_s2xlate
  io.llptw.bits.ppn := DontCare

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := s_mem_req === false.B && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  mem.req.bits.id := FsmReqID.U(bMemID.W)
  mem.req.bits.hptw_bypassed := false.B

  io.refill.req_info.s2xlate := req_s2xlate
  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  io.hptw.req.valid := !s_hptw_req || !s_last_hptw_req
  io.hptw.req.bits.id := FsmReqID.U(bMemID.W)
  io.hptw.req.bits.gvpn := get_pn(gpaddr)
  io.hptw.req.bits.source := source

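  // Fast path: when a request hits a merged stage-1 result (stage1Hit), only the
  // final stage-2 (hptw) translation is still needed, so the FSM skips the stage-1
  // walk entirely and responds as soon as the hptw response comes back.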
  when (io.req.fire && io.req.bits.stage1Hit){
    idle := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    s_hptw_req := false.B
    hptw_resp_stage2 := false.B
    last_s2xlate := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
  }

  when (io.hptw.resp.fire && w_hptw_resp === false.B && stage1Hit){
    w_hptw_resp := true.B
    hptw_resp_stage2 := true.B
  }

  when (io.resp.fire && stage1Hit){
    idle := true.B
  }

  when (io.req.fire && !io.req.bits.stage1Hit){
    val req = io.req.bits
    if (EnableSv48) {
      when (mode === Sv48) {
        level := Mux(req.l2Hit, 1.U, Mux(req.l3Hit.get, 2.U, 3.U))
        af_level := Mux(req.l2Hit, 1.U, Mux(req.l3Hit.get, 2.U, 3.U))
        ppn := Mux(req.l2Hit || req.l3Hit.get, io.req.bits.ppn, satp.ppn)
        l3Hit := req.l3Hit.get
      } .otherwise {
        level := Mux(req.l2Hit, 1.U, 2.U)
        af_level := Mux(req.l2Hit, 1.U, 2.U)
        ppn := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
        l3Hit := false.B
      }
    } else {
      level := Mux(req.l2Hit, 1.U, 2.U)
      af_level := Mux(req.l2Hit, 1.U, 2.U)
      ppn := Mux(req.l2Hit, io.req.bits.ppn, satp.ppn)
      l3Hit := false.B
    }
    vpn := io.req.bits.req_info.vpn
    l2Hit := req.l2Hit
    accessFault := false.B
    idle := false.B
    hptw_pageFault := false.B
    hptw_accessFault := false.B
    pte_valid := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    when(io.req.bits.req_info.s2xlate =/= noS2xlate && io.req.bits.req_info.s2xlate =/= onlyStage1){
      last_s2xlate := true.B
      s_hptw_req := false.B
    }.otherwise {
      last_s2xlate := false.B
      s_pmp_check := false.B
    }
  }

  when(io.hptw.req.fire && s_hptw_req === false.B){
    s_hptw_req := true.B
    w_hptw_resp := false.B
  }

  when(io.hptw.resp.fire && w_hptw_resp === false.B && !stage1Hit) {
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_hptw_resp := true.B
    when(onlyS2xlate){
      mem_addr_update := true.B
      last_s2xlate := false.B
    }.elsewhen(!(io.hptw.resp.bits.h_resp.gpf || io.hptw.resp.bits.h_resp.gaf)) {
      s_pmp_check := false.B
    }
  }

  when(io.hptw.req.fire && s_last_hptw_req === false.B) {
    w_last_hptw_resp := false.B
    s_last_hptw_req := true.B
  }

  when(io.hptw.resp.fire && w_last_hptw_resp === false.B){
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when(sent_to_pmp && mem_addr_update === false.B){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && idle === false.B){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when(guest_fault && idle === false.B){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when (mem.req.fire){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(mem.resp.fire && w_mem_resp === false.B){
    w_mem_resp := true.B
    af_level := af_level - 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
    pte_valid := true.B
  }

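  // After each memory (or final hptw) response, mem_addr_update arbitrates the next
  // step: descend one level and issue the next PTE fetch (or hptw request under
  // two-stage translation), hand the leaf level off to LLPTW, launch the last
  // stage-2 translation for the found leaf, or deliver the response.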
  when(mem_addr_update){
    when(level >= 2.U && !onlyS2xlate && !(guest_fault || find_pte || accessFault)) {
      level := levelNext
      when(s2xlate){
        s_hptw_req := false.B
      }.otherwise{
        s_mem_req := false.B
      }
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen(io.llptw.valid){
      when(io.llptw.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        last_s2xlate := false.B
      }
      finish := true.B
    }.elsewhen(s2xlate && last_s2xlate === true.B) {
      when(accessFault || pageFault || ppn_af){
        last_s2xlate := false.B
      }.otherwise{
        s_last_hptw_req := false.B
        mem_addr_update := false.B
      }
    }.elsewhen(io.resp.valid){
      when(io.resp.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }


  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
  }


  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("ptw_ppn_af", io.resp.fire && ppn_af)
  XSPerfAccumulate("mem_count", mem.req.fire)
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire, mem.resp.fire, true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(!idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire                                      ),
    ("fsm_busy          ", !idle                                            ),
    ("fsm_idle          ", idle                                             ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                  ),
    ("mem_count         ", mem.req.fire                                     ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire, mem.resp.fire, true) ),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                  ),
  )
  generatePerfEvent()
}

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
  * the page walker that handles only the last-level (4KB) page walks.
  **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(gvpnLen.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val h_resp = Output(new HptwResp)
    val first_s2xlate_fault = Output(Bool()) // whether the first stage-2 translation raises a pf/af
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val value = Output(UInt(blockBits.W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle{
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(vpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val h_resp = Output(new HptwResp)
    }))
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(gvpnLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
  val hptw_resp = new HptwResp()
  val first_s2xlate_fault = Output(Bool())
}


class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())
  val enableS2xlate = io.in.bits.req_info.s2xlate =/= noS2xlate
  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)

  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val entries = Reg(Vec(l2tlbParams.llptwsize, new LLPTWEntry()))
  val state_idle :: state_hptw_req :: state_hptw_resp :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_last_hptw_req :: state_last_hptw_resp :: state_cache :: Nil = Enum(10)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))
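  // Per-entry state machine: idle -> (hptw_req/hptw_resp for the first stage-2
  // translation of the PTE address) -> addr_check (PMP) -> mem_req -> mem_waiting ->
  // mem_out (result ready on io.out), with last_hptw_req/last_hptw_resp covering the
  // final stage-2 translation of the leaf, and cache for entries that must
  // re-access the page cache instead of memory.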

  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)
  val is_hptw_req = state.map(_ === state_hptw_req)
  val is_last_hptw_req = state.map(_ === state_last_hptw_req)
  val is_hptw_resp = state.map(_ === state_hptw_resp)
  val is_last_hptw_resp = state.map(_ === state_last_hptw_resp)

  val full = !ParallelOR(is_emptys).asBool
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
  val mem_arb = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  // process hptw requests serially
  val hyper_arb1 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb1.io.in(i).bits := entries(i)
    hyper_arb1.io.in(i).valid := is_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }
  val hyper_arb2 = Module(new RRArbiterInit(new LLPTWEntry(), l2tlbParams.llptwsize))
  for(i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb2.io.in(i).bits := entries(i)
    hyper_arb2.io.in(i).valid := is_last_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }

  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate req
  // to_wait: an entry with the same vpn is already accessing mem, so enqueue as mem_waiting
  // to_cache: the duplicate's data has just come back, so enqueue as state_cache
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn) && io.in.bits.req_info.s2xlate === entries(i).req_info.s2xlate
  )
  val dup_req_fire = mem_arb.io.out.fire && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) && io.in.bits.req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate // dup with the entry whose req fires this cycle
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries that have already sent the mem req
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with the "mem_out" entry that just received its data
  val dup_vec_last_hptw = dup_vec.zipWithIndex.map{case (d, i) => d && (is_last_hptw_req(i) || is_last_hptw_resp(i))}
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with the entry whose data arrives next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp && ((entries(io.mem.resp.bits.id).req_info.s2xlate === noS2xlate) || (entries(io.mem.resp.bits.id).req_info.s2xlate === onlyStage1))
  val to_cache = Cat(dup_vec_having).orR || Cat(dup_vec_last_hptw).orR
  val to_hptw_req = io.in.bits.req_info.s2xlate === allStage
  val to_last_hptw_req = dup_wait_resp && entries(io.mem.resp.bits.id).req_info.s2xlate === allStage
  val last_hptw_req_id = io.mem.resp.bits.id
  val req_paddr = MakeAddr(io.in.bits.ppn(ppnLen-1, 0), getVpnn(io.in.bits.req_info.vpn, 0))
  val req_hpaddr = MakeAddr(entries(last_hptw_req_id).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(io.in.bits.req_info.vpn, 0))
  val index = Mux(entries(last_hptw_req_id).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
  val last_hptw_req_ppn = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))(index).getPPN()
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req fired but some entries were already waiting, should not happen")

  XSError(io.in.fire && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflicts with to mem")
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
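  // Enqueue-time resolution of a duplicate: reuse the duplicate's outcome instead
  // of issuing a second mem request -- jump straight to mem_out or last_hptw_req if
  // the data arrives this very cycle, piggyback on a waiting entry (mem_waiting),
  // or retry through the page cache (state_cache) if the duplicate already finished.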
  val enq_state_normal = MuxCase(state_addr_check, Seq(
    to_mem_out -> state_mem_out, // same as below, but the mem resp arrives right now
    to_last_hptw_req -> state_last_hptw_req,
    to_wait -> state_mem_waiting,
    to_cache -> state_cache,
    to_hptw_req -> state_hptw_req
  ))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
  when (io.in.fire) {
    // if a prefetch req does not need mem access, just give it up;
    // then at most 1 + FilterSize entries need to re-access the page cache,
    // so 2 + FilterSize is enough to avoid deadlock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := Mux(to_last_hptw_req, last_hptw_req_ppn, io.in.bits.ppn)
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    entries(enq_ptr).hptw_resp := Mux(to_last_hptw_req, entries(last_hptw_req_id).hptw_resp, Mux(to_wait, entries(wait_id).hptw_resp, entries(enq_ptr).hptw_resp))
    entries(enq_ptr).first_s2xlate_fault := false.B
    mem_resp_hit(enq_ptr) := to_mem_out || to_last_hptw_req
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = GatedValidRegNext(enq_state === state_addr_check && io.in.fire && !flush)

  val hasHptwResp = ParallelOR(state.map(_ === state_hptw_resp)).asBool
  val hptw_resp_ptr_reg = RegNext(io.hptw.resp.bits.id)
  val hptw_need_addr_check = RegNext(hasHptwResp && io.hptw.resp.fire && !flush) && state(hptw_resp_ptr_reg) === state_addr_check

  val ptes = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))
  val gpaddr = MakeGPAddr(entries(hptw_resp_ptr_reg).ppn, getVpnn(entries(hptw_resp_ptr_reg).req_info.vpn, 0))
  val hptw_resp = entries(hptw_resp_ptr_reg).hptw_resp
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val addr = RegEnable(MakeAddr(io.in.bits.ppn(ppnLen - 1, 0), getVpnn(io.in.bits.req_info.vpn, 0)), io.in.fire)
  io.pmp.req.valid := need_addr_check || hptw_need_addr_check
  io.pmp.req.bits.addr := Mux(hptw_need_addr_check, hpaddr, addr)
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry is a dup of
    //       another entry and its state was already changed. When it dups with the req-ing entry,
    //       it was set to mem_waiting above, and ld must be false there, so this is don't-care.
    val ptr = Mux(hptw_need_addr_check, hptw_resp_ptr_reg, enq_ptr_reg)
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(ptr).af := accessFault
    state(ptr) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && state(i) =/= state_mem_out && state(i) =/= state_last_hptw_req && state(i) =/= state_last_hptw_resp
      && entries(i).req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate
      && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: just as a dup enq sets its state to mem_waiting, a firing req moves the other dup entries to mem_waiting
        state(i) := state_mem_waiting
        entries(i).hptw_resp := entries(mem_arb.io.chosen).hptw_resp
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire) {
    state.indices.map{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        val req_paddr = MakeAddr(entries(i).ppn, getVpnn(entries(i).req_info.vpn, 0))
        val req_hpaddr = MakeAddr(entries(i).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(entries(i).req_info.vpn, 0))
        val index = Mux(entries(i).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
        state(i) := Mux(entries(i).req_info.s2xlate === allStage && !(ptes(index).isPf(0.U) || !ptes(index).isLeaf() || ptes(index).isAf()), state_last_hptw_req, state_mem_out)
        mem_resp_hit(i) := true.B
        entries(i).ppn := ptes(index).getPPN() // for the last stage-2 translation
      }
    }
  }

  when (hyper_arb1.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_req && entries(i).ppn === hyper_arb1.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb1.io.chosen === i.U) {
        state(i) := state_hptw_resp
        entries(i).wait_id := hyper_arb1.io.chosen
      }
    }
  }

  when (hyper_arb2.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_last_hptw_req && entries(i).ppn === hyper_arb2.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb2.io.chosen === i.U) {
        state(i) := state_last_hptw_resp
        entries(i).wait_id := hyper_arb2.io.chosen
      }
    }
  }

  when (io.hptw.resp.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        when (io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf) {
          state(i) := state_mem_out
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).first_s2xlate_fault := io.hptw.resp.bits.h_resp.gaf || io.hptw.resp.bits.h_resp.gpf
        }.otherwise{ // update the entry that is waiting for the hptw resp
          val need_to_waiting_vec = state.indices.map(i => state(i) === state_mem_waiting && dup(entries(i).req_info.vpn, entries(io.hptw.resp.bits.id).req_info.vpn))
          val waiting_index = ParallelMux(need_to_waiting_vec zip entries.map(_.wait_id))
          state(i) := Mux(Cat(need_to_waiting_vec).orR, state_mem_waiting, state_addr_check)
          entries(i).hptw_resp := io.hptw.resp.bits.h_resp
          entries(i).wait_id := Mux(Cat(need_to_waiting_vec).orR, waiting_index, entries(i).wait_id)
          // TODO: also update any entry that has the same hptw req in flight
        }
      }
      when (state(i) === state_last_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        state(i) := state_mem_out
        entries(i).hptw_resp := io.hptw.resp.bits.h_resp
        // TODO: also update any entry that has the same hptw req in flight
      }
    }
  }
  when (io.out.fire) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.map(a => when (a) { a := false.B } )

  when (io.cache.fire) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire && io.cache.fire && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time for the same entry")

  when (flush) {
    state.map(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af
  io.out.bits.h_resp := entries(mem_ptr).hptw_resp
  io.out.bits.first_s2xlate_fault := entries(mem_ptr).first_s2xlate_fault

  val hptw_req_arb = Module(new Arbiter(new Bundle{
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val ppn = UInt(gvpnLen.W)
    }, 2))
  // first stage-2 translation
  hptw_req_arb.io.in(0).valid := hyper_arb1.io.out.valid
  hptw_req_arb.io.in(0).bits.source := hyper_arb1.io.out.bits.req_info.source
  hptw_req_arb.io.in(0).bits.ppn := hyper_arb1.io.out.bits.ppn
  hptw_req_arb.io.in(0).bits.id := hyper_arb1.io.chosen
  hyper_arb1.io.out.ready := hptw_req_arb.io.in(0).ready
  // last stage-2 translation
  hptw_req_arb.io.in(1).valid := hyper_arb2.io.out.valid
  hptw_req_arb.io.in(1).bits.source := hyper_arb2.io.out.bits.req_info.source
  hptw_req_arb.io.in(1).bits.ppn := hyper_arb2.io.out.bits.ppn
  hptw_req_arb.io.in(1).bits.id := hyper_arb2.io.chosen
  hyper_arb2.io.out.ready := hptw_req_arb.io.in(1).ready
  hptw_req_arb.io.out.ready := io.hptw.req.ready
  io.hptw.req.valid := hptw_req_arb.io.out.fire && !flush
  io.hptw.req.bits.gvpn := hptw_req_arb.io.out.bits.ppn
  io.hptw.req.bits.id := hptw_req_arb.io.out.bits.id
  io.hptw.req.bits.source := hptw_req_arb.io.out.bits.source

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  val mem_paddr = MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  val mem_hpaddr = MakeAddr(mem_arb.io.out.bits.hptw_resp.genPPNS2(get_pn(mem_paddr)), getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.addr := Mux(mem_arb.io.out.bits.req_info.s2xlate === allStage, mem_hpaddr, mem_paddr)
  io.mem.req.bits.id := mem_arb.io.chosen
  io.mem.req.bits.hptw_bypassed := false.B
  mem_arb.io.out.ready := io.mem.req.ready
  val mem_refill_id = RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))
  io.mem.refill := entries(mem_refill_id).req_info
  io.mem.refill.s2xlate := entries(mem_refill_id).req_info.s2xlate
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire)
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire)
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue time out no out ${i}")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount           ", io.in.fire                 ),
    ("tlbllptw_inblock           ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount          ", io.mem.req.fire            ),
    ("tlbllptw_memcycle          ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}

/*========================= HPTW ==============================*/

/** HPTW : Hypervisor Page Table Walker
  * the page walker that performs the virtual machine's page walk:
  * guest-physical address translation, guest physical address -> host physical address
  **/
class HPTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val gvpn = UInt(vpnLen.W)
    val ppn = UInt(ppnLen.W)
    val l3Hit = if (EnableSv48) Some(new Bool()) else None
    val l2Hit = Bool()
    val l1Hit = Bool()
    val bypassed = Bool() // if bypassed, don't refill
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = Output(new HptwResp())
    val id = Output(UInt(bMemID.W))
  })

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level + 1).W)
  })
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class HPTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new HPTWIO)
  val hgatp = io.csr.hgatp
  val sfence = io.sfence
  val flush = sfence.valid || hgatp.changed || io.csr.satp.changed || io.csr.vsatp.changed
  val mode = hgatp.mode

  val level = RegInit(3.U(log2Up(Level + 1).W))
  val gpaddr = Reg(UInt(GPAddrBits.W))
  val req_ppn = Reg(UInt(ppnLen.W))
  val vpn = gpaddr(GPAddrBits-1, offLen)
  val levelNext = level - 1.U
  val l3Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val l1Hit = Reg(Bool())
  val bypassed = Reg(Bool())
//  val pte = io.mem.resp.bits.MergeRespToPte()
  val pte = io.mem.resp.bits.asTypeOf(new PteBundle().cloneType)
  val ppn_l3 = Mux(l3Hit, req_ppn, pte.ppn)
  val ppn_l2 = Mux(l2Hit, req_ppn, pte.ppn)
  val ppn_l1 = Mux(l1Hit, req_ppn, pte.ppn)
  val ppn = Wire(UInt(PAddrBits.W))
  val p_pte = MakeAddr(ppn, getVpnn(vpn, level))
  val pg_base = Wire(UInt(PAddrBits.W))
  val mem_addr = Wire(UInt(PAddrBits.W))
  if (EnableSv48) {
    when (mode === Sv48) {
      ppn := Mux(level === 2.U, ppn_l3, Mux(level === 1.U, ppn_l2, ppn_l1)) // for l3, l2 and l1
      pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 3.U, mode = Sv48)) // for l3
      mem_addr := Mux(level === 3.U, pg_base, p_pte)
    } .otherwise {
      ppn := Mux(level === 1.U, ppn_l2, ppn_l1) // for l2 and l1
      pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U, mode = Sv39))
      mem_addr := Mux(level === 2.U, pg_base, p_pte)
    }
  } else {
    ppn := Mux(level === 1.U, ppn_l2, ppn_l1) // for l2 and l1
    pg_base := MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U, mode = Sv39))
    mem_addr := Mux(level === 2.U, pg_base, p_pte)
  }
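  // The walk root (pg_base) is built from hgatp.ppn and the top slice of the guest
  // VPN via MakeGPAddr/getGVpnn; at the root level mem_addr uses pg_base, and below
  // it the address comes from the previous PTE (p_pte), with req_ppn substituted on
  // a partial page-cache hit (l3Hit/l2Hit/l1Hit).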

  // s/w handshake registers
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val idle = RegInit(true.B)
  val mem_addr_update = RegInit(false.B)
  val finish = WireInit(false.B)

  val sent_to_pmp = !idle && (!s_pmp_check || mem_addr_update) && !finish
  val pageFault = pte.isPf(level) || (!pte.isLeaf() && level === 0.U)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault

  val resp_valid = !idle && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  val id = Reg(UInt(log2Up(l2tlbParams.llptwsize).W))
  val source = RegEnable(io.req.bits.source, io.req.fire)

  io.req.ready := idle
  val resp = Wire(new HptwResp())
  resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, level, pte, vpn, hgatp.asid)
  io.resp.valid := resp_valid
  io.resp.bits.id := id
  io.resp.bits.resp := resp
  io.resp.bits.source := source

  io.pmp.req.valid := DontCare
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd := TlbCmd.read

  io.mem.req.valid := !s_mem_req && !io.mem.mask && !accessFault && s_pmp_check
  io.mem.req.bits.addr := mem_addr
  io.mem.req.bits.id := HptwReqId.U(bMemID.W)
  io.mem.req.bits.hptw_bypassed := bypassed

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source
  io.refill.req_info.s2xlate := onlyStage2
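  // HPTW control flow, using the same s_*/w_* convention as PTW: accept a request,
  // run the PMP check, fetch the PTE from memory, then either descend one level or
  // respond; flush (sfence / xatp change) resets everything to idle.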
  when (idle){
    when(io.req.fire){
      bypassed := io.req.bits.bypassed
      idle := false.B
      gpaddr := Cat(io.req.bits.gvpn, 0.U(offLen.W))
      accessFault := false.B
      s_pmp_check := false.B
      id := io.req.bits.id
      req_ppn := io.req.bits.ppn
      if (EnableSv48) {
        when (mode === Sv48) {
          level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, Mux(io.req.bits.l3Hit.get, 2.U, 3.U)))
          l3Hit := io.req.bits.l3Hit.get
        } .otherwise {
          level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
          l3Hit := false.B
        }
      } else {
        level := Mux(io.req.bits.l1Hit, 0.U, Mux(io.req.bits.l2Hit, 1.U, 2.U))
        l3Hit := false.B
      }
      l2Hit := io.req.bits.l2Hit
      l1Hit := io.req.bits.l1Hit
    }
  }

  when(sent_to_pmp && !mem_addr_update){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && !idle){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when(io.mem.req.fire){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(io.mem.resp.fire && !w_mem_resp){
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when(mem_addr_update){
    when(!(find_pte || accessFault)){
      level := levelNext
      s_mem_req := false.B
      mem_addr_update := false.B
    }.elsewhen(resp_valid){
      when(io.resp.fire){
        idle := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }

  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
  }
}
958