/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import chisel3.internal.naming.chiselName
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** The page table walk is split into two parts:
  * One,   PTW: walks the non-leaf PDEs, one level at a time
  * Two, LLPTW: walks only the leaf PTEs (4KB pages), in parallel
  */


/** PTW: page table walker
  * a finite state machine
  * handles only the 1GB and 2MB levels of a walk,
  * i.e. every level except the last (leaf) one
  **/
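// Level numbering used below (Sv39): level 0 indexes the page table with vpn[2] (1GB pages),
// level 1 with vpn[1] (2MB pages); the level-2 (4KB leaf) lookup is handed off to the LLPTW.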
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l1Hit = Bool()
    val ppn = UInt(ppnLen.W)
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = new PtwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
}

@chiselName
class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)

  val sfence = io.sfence
  val mem = io.mem
  val satp = io.csr.satp
  val flush = io.sfence.valid || io.csr.satp.changed

  val s_idle :: s_addr_check :: s_mem_req :: s_mem_resp :: s_check_pte :: Nil = Enum(5)
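  // State transitions, as implemented by the switch below:
  //   s_idle       -> s_addr_check  on req.fire
  //   s_addr_check -> s_mem_req     (PMP is queried in the same cycle)
  //   s_mem_req    -> s_mem_resp    on mem.req.fire, or -> s_check_pte on a registered access fault
  //   s_mem_resp   -> s_check_pte   on mem.resp.fire
  //   s_check_pte  -> s_idle        when resp/llptw fires, or -> s_mem_req to walk the next level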
  val state = RegInit(s_idle)
  val level = RegInit(0.U(log2Up(Level).W))
  val af_level = RegInit(0.U(log2Up(Level).W)) // an access fault returns this level
  val ppn = Reg(UInt(ppnLen.W))
  val vpn = Reg(UInt(vpnLen.W))
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val memPte = mem.resp.bits.asTypeOf(new PteBundle)
  io.req.ready := state === s_idle

  val finish = WireInit(false.B)
  val sent_to_pmp = state === s_addr_check || (state === s_check_pte && !finish)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)
  val pageFault = memPte.isPf(level)
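  // The PMP is checked combinationally: the request goes out in s_addr_check (and again in
  // s_check_pte before walking the next level); the ld/mmio result is captured into
  // accessFault by the RegEnable above and acted on one cycle later.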
  switch (state) {
    is (s_idle) {
      when (io.req.fire()) {
        val req = io.req.bits
        state := s_addr_check
        level := Mux(req.l1Hit, 1.U, 0.U)
        af_level := Mux(req.l1Hit, 1.U, 0.U)
        ppn := Mux(req.l1Hit, io.req.bits.ppn, satp.ppn)
        vpn := io.req.bits.req_info.vpn
        l1Hit := req.l1Hit
        accessFault := false.B
      }
    }

    is (s_addr_check) {
      state := s_mem_req
    }

    is (s_mem_req) {
      when (mem.req.fire()) {
        state := s_mem_resp
      }
      when (accessFault) {
        state := s_check_pte
      }
    }

    is (s_mem_resp) {
      when(mem.resp.fire()) {
        state := s_check_pte
        af_level := af_level + 1.U
      }
    }

    is (s_check_pte) {
      when (io.resp.valid) { // a pte has been found, or an accessFault was recorded (see below)
        when (io.resp.fire()) {
          state := s_idle
        }
        finish := true.B
      }.elsewhen(io.llptw.valid) { // the next level is the leaf pte, hand it to the miss queue
        when (io.llptw.fire()) {
          state := s_idle
        }
        finish := true.B
      }.otherwise { // walk the next level; the memory access needs a pmp check first
        when (io.pmp.resp.ld) { // pmp check failed, raise an access fault
          // do nothing here; the pmp result is registered (accessFault) and handled next cycle (see above)
        }.otherwise { // go to the next level
          assert(level === 0.U)
          level := levelNext
          state := s_mem_req
        }
      }
    }
  }

  when (sfence.valid) {
    state := s_idle
    accessFault := false.B
  }

  // memPte is only meaningful in s_check_pte; at mem.resp.fire it is not ready yet.
  val is_pte = memPte.isLeaf() || memPte.isPf(level)
  val find_pte = is_pte
  val to_find_pte = level === 1.U && !is_pte
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire())
  io.resp.valid := state === s_check_pte && (find_pte || accessFault)
  io.resp.bits.source := source
  io.resp.bits.resp.apply(pageFault && !accessFault, accessFault, Mux(accessFault, af_level, level), memPte, vpn, satp.asid)

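  // When level 1 holds a non-leaf entry, the remaining 4KB-leaf lookup is handed off to the
  // LLPTW so this FSM can accept a new request; only leaf/fault cases answer via io.resp.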
  io.llptw.valid := state === s_check_pte && to_find_pte && !accessFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.ppn := memPte.ppn

  assert(level =/= 2.U && level =/= 3.U, "this fsm only walks the first two levels")

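  // l1addr indexes the root page table (satp.ppn) with vpn[2]; l2addr indexes the level-1
  // table (the cached ppn on an l1Hit, otherwise the just-fetched pte's ppn) with vpn[1].
  // af_level tracks which of the two addresses is currently being fetched.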
  val l1addr = MakeAddr(satp.ppn, getVpnn(vpn, 2))
  val l2addr = MakeAddr(Mux(l1Hit, ppn, memPte.ppn), getVpnn(vpn, 1))
  val mem_addr = Mux(af_level === 0.U, l1addr, l2addr)
  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := state === s_mem_req && !io.mem.mask && !accessFault
  mem.req.bits.addr := mem_addr
  mem.req.bits.id := FsmReqID.U(bMemID.W)

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  XSDebug(p"[ptw] state:${state} level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire())
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire() && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", state =/= s_idle)
  XSPerfAccumulate("fsm_idle", state === s_idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("mem_count", mem.req.fire())
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire(), mem.resp.fire(), true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(state =/= s_idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire()                                        ),
    ("fsm_busy          ", state =/= s_idle                                     ),
    ("fsm_idle          ", state === s_idle                                     ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                      ),
    ("mem_count         ", mem.req.fire()                                       ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire(), mem.resp.fire(), true) ),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                      ),
  )
  generatePerfEvent()
}
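
// Walk timeline for a 2MB mapping, as implied by the FSM above:
//   s_idle -> s_addr_check -> s_mem_req -> s_mem_resp -> s_check_pte   (level 0: non-leaf)
//          -> s_mem_req -> s_mem_resp -> s_check_pte -> io.resp fires  (level 1: leaf)
// For a 4KB mapping, the second s_check_pte hands off through io.llptw instead.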

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
  * the page walker that handles only the last-level (4KB page) walk.
  **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(ppnLen.W))
}
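
// A note on LLPTWIO.mem below: enq_ptr reports which entry is enqueuing, buffer_it flags the
// entries that should capture the data of the current mem resp, refill carries the req_info
// of the line being refilled, and req_mask (an input, driven from outside) masks entries
// whose mem request is already in flight.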

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(ppnLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
}


@chiselName
class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())

  val entries = Reg(Vec(l2tlbParams.llptwsize, new LLPTWEntry()))
  val state_idle :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: Nil = Enum(5)
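  // Per-entry state transitions, as implemented below:
  //   state_idle        -> state_addr_check / state_mem_waiting / state_mem_out on enq,
  //                        depending on whether a duplicate walk is in flight or just finished
  //                        (a prefetch that needs no new mem access is dropped back to state_idle)
  //   state_addr_check  -> state_mem_req on a clean pmp check, or state_mem_out on access fault
  //   state_mem_req     -> state_mem_waiting once the arbiter sends its mem request
  //   state_mem_waiting -> state_mem_out when the matching mem resp (by wait_id) returns
  //   state_mem_out     -> state_idle when the result is handed out through io.out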
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))
  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)

  val full = !ParallelOR(is_emptys).asBool()
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having)
  val mem_arb = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  // duplicate req detection
  // to_wait: a duplicate walk is already accessing mem; enqueue as state_mem_waiting
  // to_cache_low: a duplicate walk got its data back just now; the result is in the page cache
  def dup(vpn1: UInt, vpn2: UInt): Bool = {
    dropL3SectorBits(vpn1) === dropL3SectorBits(vpn2)
  }
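  // Two vpns count as duplicates when they fall into the same fetched line: dropL3SectorBits
  // drops the low sector bits of the vpn, so a single mem request covers all ptes in that line.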
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn)
  )
  val dup_req_fire = mem_arb.io.out.fire() && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) // dup with the entry that fires its mem req this cycle
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with a "mem_waiting" entry, whose mem req was already sent
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with a "mem_out" entry that just received its data
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire() && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with an entry whose data arrives next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp
  val to_cache_low = Cat(dup_vec_having).orR
  assert(RegNext(!(dup_req_fire && Cat(dup_vec_wait).orR), init = true.B), "mem req fired while some entries were already waiting, should not happen")

  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
  val enq_state = Mux(to_mem_out, state_mem_out, // like to_wait below, except the mem resp is arriving right now
    Mux(to_wait, state_mem_waiting, state_addr_check))
  when (io.in.fire()) {
    // if a prefetch req does not need a mem access, just drop it:
    // at most 1 + FilterSize entries may need to re-access the page cache,
    // so 2 + FilterSize entries are enough to avoid deadlock
    state(enq_ptr) := Mux(from_pre(io.in.bits.req_info.source) && enq_state =/= state_addr_check, state_idle, enq_state)
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := io.in.bits.ppn
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    mem_resp_hit(enq_ptr) := to_mem_out
  }
  when (mem_arb.io.out.fire()) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: enqueuing a dup sets that entry to mem_waiting; likewise, firing a mem req
        //       moves every duplicate entry to mem_waiting behind the chosen one
        state(i) := state_mem_waiting
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire()) {
    state.indices.foreach{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        state(i) := state_mem_out
        mem_resp_hit(i) := true.B
      }
    }
  }
  when (io.out.fire()) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
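  // mem_resp_hit is a one-cycle pulse: it marks, via io.mem.buffer_it, the entries that
  // must capture the data of the current mem resp, then clears itself next cycle.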
  mem_resp_hit.foreach(a => when (a) { a := false.B })

  val enq_ptr_reg = RegNext(enq_ptr)

  io.pmp.req.valid := RegNext(enq_state === state_addr_check)
  io.pmp.req.bits.addr := MakeAddr(entries(enq_ptr_reg).ppn, getVpnn(entries(enq_ptr_reg).req_info.vpn, 0))
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid && (state(enq_ptr_reg) === state_addr_check) &&
    !(mem_arb.io.out.fire() && dup(entries(enq_ptr_reg).req_info.vpn, mem_arb.io.out.bits.req_info.vpn))) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry was a
    //       duplicate and its state has already been changed. If it duplicates the entry whose
    //       req is firing, it is set to mem_waiting above, and ld must be false, so don't care.
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(enq_ptr_reg).af := accessFault
    state(enq_ptr_reg) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  val flush = io.sfence.valid || io.csr.satp.changed
  when (flush) {
    state.foreach(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool()
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  io.mem.req.bits.addr := MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.id := mem_arb.io.chosen
  mem_arb.io.out.ready := io.mem.req.ready
  io.mem.refill := entries(RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))).req_info
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  XSPerfAccumulate("llptw_in_count", io.in.fire())
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 5) { // one counter per possible enq state
    XSPerfAccumulate(s"enq_state${i}", io.in.fire() && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire())
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"llptw time out, no out for entry ${i}")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount           ", io.in.fire()               ),
    ("tlbllptw_inblock           ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount          ", io.mem.req.fire()          ),
    ("tlbllptw_memcycle          ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}