/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.experimental.ExtModule
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{IdRange, LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMP, PMPChecker, PMPReqBundle, PMPRespBundle}
import xiangshan.backend.fu.util.HasCSRConst
import difftest._

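// L2TLB is the page-table-walker subsystem shared by the L1 TLBs. It owns a
// single TileLink master port ("ptw") with MemReqWidth outstanding source ids
// for page-table memory accesses.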
class L2TLB()(implicit p: Parameters) extends LazyModule with HasPtwConst {
  override def shouldBeInlined: Boolean = false

  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    clients = Seq(TLMasterParameters.v1(
      "ptw",
      sourceId = IdRange(0, MemReqWidth)
    )),
    requestFields = Seq(ReqSourceField())
  )))

  lazy val module = new L2TLBImp(this)
}

class L2TLBImp(outer: L2TLB)(implicit p: Parameters) extends PtwModule(outer) with HasCSRConst with HasPerfEvents {

  val (mem, edge) = outer.node.out.head

  val io = IO(new L2TLBIO)
  val difftestIO = IO(new Bundle() {
    val ptwResp = Output(Bool())
    val ptwAddr = Output(UInt(64.W))
    val ptwData = Output(Vec(4, UInt(64.W)))
  })

  /* The PTW processes multiple requests.
   * The procedure is divided into two stages: cache access, then mem access on a cache miss.
   *           miss queue itlb       dtlb
   *               |       |         |
   *               ------arbiter------
   *                            |
   *                    l1 - l2 - l3 - sp
   *                            |
   *          -------------------------------------------
   *    miss  |  queue                                  | hit
   *    [][][][][][]                                    |
   *          |                                         |
   *    state machine accessing mem                     |
   *          |                                         |
   *          ---------------arbiter---------------------
   *                 |                    |
   *                itlb                 dtlb
   */

  difftestIO <> DontCare

  val sfence_tmp = DelayN(io.sfence, 1)
  val csr_tmp    = DelayN(io.csr.tlb, 1)
  val sfence_dup = Seq.fill(9)(RegNext(sfence_tmp))
  val csr_dup = Seq.fill(8)(RegNext(csr_tmp)) // TODO: add csr_modified?
  val satp   = csr_dup(0).satp
  val vsatp  = csr_dup(0).vsatp
  val hgatp  = csr_dup(0).hgatp
  val priv   = csr_dup(0).priv
  val flush  = sfence_dup(0).valid || satp.changed || vsatp.changed || hgatp.changed
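  // io.sfence and io.csr.tlb are delayed one cycle and then duplicated into
  // several registered copies so that each consumer reads its own register,
  // presumably to cut fanout on the flush network. Any sfence, or any
  // satp/vsatp/hgatp change, flushes all in-flight walks.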

  val pmp = Module(new PMP())
  val pmp_check = VecInit(Seq.fill(3)(Module(new PMPChecker(lgMaxSize = 3, sameCycle = true)).io))
  pmp.io.distribute_csr := io.csr.distribute_csr
  pmp_check.foreach(_.check_env.apply(ModeS, pmp.io.pmp, pmp.io.pma))

  val missQueue = Module(new L2TlbMissQueue)
  val cache = Module(new PtwCache)
  val ptw = Module(new PTW)
  val hptw = Module(new HPTW)
  val llptw = Module(new LLPTW)
  val blockmq = Module(new BlockHelper(3))
  val arb1 = Module(new Arbiter(new PtwReq, PtwWidth))
  val arb2 = Module(new Arbiter(new L2TlbWithHptwIdBundle, ((if (l2tlbParams.enablePrefetch) 4 else 3) + (if (HasHExtension) 1 else 0))))
  val hptw_req_arb = Module(new Arbiter(new Bundle {
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val source = UInt(bSourceWidth.W)
    val gvpn = UInt(gvpnLen.W)
  }, 2))
  val hptw_resp_arb = Module(new Arbiter(new Bundle {
    val resp = new HptwResp()
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
  }, 2))
  val outArb = (0 until PtwWidth).map(i => Module(new Arbiter(new Bundle {
    val s2xlate = UInt(2.W)
    val s1 = new PtwSectorResp()
    val s2 = new HptwResp()
  }, 1)).io)
  val mergeArb = (0 until PtwWidth).map(i => Module(new Arbiter(new Bundle {
    val s2xlate = UInt(2.W)
    val s1 = new PtwMergeResp()
    val s2 = new HptwResp()
  }, 3)).io)
  val outArbCachePort = 0
  val outArbFsmPort = 1
  val outArbMqPort = 2

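  // Request flow in a nutshell: arb1 merges the PtwWidth L1 TLB ports; arb2
  // merges HPTW, PTW, miss-queue and TLB traffic (plus prefetch, if enabled)
  // into the page cache; mergeArb/outArb collect responses from the cache,
  // the PTW state machine and the LLPTW back toward each L1 TLB port.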
  // hptw arb input port
  val InHptwArbPTWPort = 0
  val InHptwArbLLPTWPort = 1
  hptw_req_arb.io.in(InHptwArbPTWPort).valid := ptw.io.hptw.req.valid
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.gvpn := ptw.io.hptw.req.bits.gvpn
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.id := ptw.io.hptw.req.bits.id
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.source := ptw.io.hptw.req.bits.source
  ptw.io.hptw.req.ready := hptw_req_arb.io.in(InHptwArbPTWPort).ready

  hptw_req_arb.io.in(InHptwArbLLPTWPort).valid := llptw.io.hptw.req.valid
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.gvpn := llptw.io.hptw.req.bits.gvpn
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.id := llptw.io.hptw.req.bits.id
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.source := llptw.io.hptw.req.bits.source
  llptw.io.hptw.req.ready := hptw_req_arb.io.in(InHptwArbLLPTWPort).ready

  // arb2 input port
  val InArbHPTWPort = 0
  val InArbPTWPort = 1
  val InArbMissQueuePort = 2
  val InArbTlbPort = 3
  val InArbPrefetchPort = 4
  // NOTE: when the cache responds with a miss but the PTW (or LLPTW) cannot accept it, the request is recycled through the miss queue
  arb1.io.in <> VecInit(io.tlb.map(_.req(0)))

  arb2.io.in(InArbPTWPort).valid := ptw.io.llptw.valid
  arb2.io.in(InArbPTWPort).bits.req_info := ptw.io.llptw.bits.req_info
  arb2.io.in(InArbPTWPort).bits.isHptwReq := false.B
  arb2.io.in(InArbPTWPort).bits.isLLptw := false.B
  arb2.io.in(InArbPTWPort).bits.hptwId := DontCare
  ptw.io.llptw.ready := arb2.io.in(InArbPTWPort).ready
  block_decoupled(missQueue.io.out, arb2.io.in(InArbMissQueuePort), Mux(missQueue.io.out.bits.isLLptw, !llptw.io.in.ready, !ptw.io.req.ready))

  arb2.io.in(InArbTlbPort).valid := arb1.io.out.valid
  arb2.io.in(InArbTlbPort).bits.req_info.vpn := arb1.io.out.bits.vpn
  arb2.io.in(InArbTlbPort).bits.req_info.s2xlate := arb1.io.out.bits.s2xlate
  arb2.io.in(InArbTlbPort).bits.req_info.source := arb1.io.chosen
  arb2.io.in(InArbTlbPort).bits.isHptwReq := false.B
  arb2.io.in(InArbTlbPort).bits.isLLptw := false.B
  arb2.io.in(InArbTlbPort).bits.hptwId := DontCare
  arb1.io.out.ready := arb2.io.in(InArbTlbPort).ready

  arb2.io.in(InArbHPTWPort).valid := hptw_req_arb.io.out.valid
  arb2.io.in(InArbHPTWPort).bits.req_info.vpn := hptw_req_arb.io.out.bits.gvpn
  arb2.io.in(InArbHPTWPort).bits.req_info.s2xlate := onlyStage2
  arb2.io.in(InArbHPTWPort).bits.req_info.source := hptw_req_arb.io.out.bits.source
  arb2.io.in(InArbHPTWPort).bits.isHptwReq := true.B
  arb2.io.in(InArbHPTWPort).bits.isLLptw := false.B
  arb2.io.in(InArbHPTWPort).bits.hptwId := hptw_req_arb.io.out.bits.id
  hptw_req_arb.io.out.ready := arb2.io.in(InArbHPTWPort).ready
  val hartId = p(XSCoreParamsKey).HartId
  if (l2tlbParams.enablePrefetch) {
    val prefetch = Module(new L2TlbPrefetch())
    val recv = cache.io.resp
    // NOTE: neither prefetch requests nor miss-queue requests generate new prefetches;
    // a miss, or a hit on an entry that was itself prefetched, does generate one
    prefetch.io.in.valid := recv.fire && !from_pre(recv.bits.req_info.source) && (!recv.bits.hit ||
      recv.bits.prefetch) && recv.bits.isFirst
    prefetch.io.in.bits.vpn := recv.bits.req_info.vpn
    prefetch.io.sfence := sfence_dup(0)
    prefetch.io.csr := csr_dup(0)
    arb2.io.in(InArbPrefetchPort) <> prefetch.io.out

    val isWriteL2TlbPrefetchTable = Constantin.createRecord(s"isWriteL2TlbPrefetchTable$hartId")
    val L2TlbPrefetchTable = ChiselDB.createTable(s"L2TlbPrefetch_hart$hartId", new L2TlbPrefetchDB)
    val L2TlbPrefetchDB = Wire(new L2TlbPrefetchDB)
    L2TlbPrefetchDB.vpn := prefetch.io.out.bits.req_info.vpn
    L2TlbPrefetchTable.log(L2TlbPrefetchDB, isWriteL2TlbPrefetchTable.orR && prefetch.io.out.fire, "L2TlbPrefetch", clock, reset)
  }
  arb2.io.out.ready := cache.io.req.ready

  val mq_arb = Module(new Arbiter(new L2TlbWithHptwIdBundle, 2))
  mq_arb.io.in(0).valid := cache.io.resp.valid && !cache.io.resp.bits.hit &&
    !from_pre(cache.io.resp.bits.req_info.source) && !cache.io.resp.bits.isHptwReq && // hptw reqs are not sent to the miss queue
    (cache.io.resp.bits.bypassed || (
      ((!cache.io.resp.bits.toFsm.l1Hit || cache.io.resp.bits.toFsm.stage1Hit) && !cache.io.resp.bits.isHptwReq && (cache.io.resp.bits.isFirst || !ptw.io.req.ready)) // would go to the PTW, but it is the first access or the PTW is busy
      || (cache.io.resp.bits.toFsm.l1Hit && !llptw.io.in.ready) // would go to the LLPTW, but the LLPTW is full
    ))

  mq_arb.io.in(0).bits.req_info := cache.io.resp.bits.req_info
  mq_arb.io.in(0).bits.isHptwReq := false.B
  mq_arb.io.in(0).bits.hptwId := DontCare
  mq_arb.io.in(0).bits.isLLptw := cache.io.resp.bits.toFsm.l1Hit
  mq_arb.io.in(1).bits.req_info := llptw.io.cache.bits
  mq_arb.io.in(1).bits.isHptwReq := false.B
  mq_arb.io.in(1).bits.hptwId := DontCare
  mq_arb.io.in(1).bits.isLLptw := false.B
  mq_arb.io.in(1).valid := llptw.io.cache.valid
  llptw.io.cache.ready := mq_arb.io.in(1).ready
  missQueue.io.in <> mq_arb.io.out
  missQueue.io.sfence := sfence_dup(6)
  missQueue.io.csr := csr_dup(5)

  blockmq.io.start := missQueue.io.out.fire
  blockmq.io.enable := ptw.io.req.fire
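  // blockmq throttles miss-queue replays: once a miss-queue request has been
  // issued (start), further ones are held back until the PTW actually accepts
  // a request (enable) or the cache-access latency window elapses; see
  // BlockHelper at the bottom of this file.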

  llptw.io.in.valid := cache.io.resp.valid &&
    !cache.io.resp.bits.hit &&
    cache.io.resp.bits.toFsm.l1Hit &&
    !cache.io.resp.bits.bypassed &&
    !cache.io.resp.bits.isHptwReq
  llptw.io.in.bits.req_info := cache.io.resp.bits.req_info
  llptw.io.in.bits.ppn := cache.io.resp.bits.toFsm.ppn
  llptw.io.sfence := sfence_dup(1)
  llptw.io.csr := csr_dup(1)
  val llptw_stage1 = Reg(Vec(l2tlbParams.llptwsize, new PtwMergeResp()))
  when(llptw.io.in.fire){
    llptw_stage1(llptw.io.mem.enq_ptr) := cache.io.resp.bits.stage1
  }
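  // llptw_stage1 snapshots the page cache's stage-1 (VS-stage) merged result
  // for each LLPTW slot at enqueue time; it is replayed below when the first
  // stage of a two-stage translation faults (first_s2xlate_fault).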

  cache.io.req.valid := arb2.io.out.valid
  cache.io.req.bits.req_info := arb2.io.out.bits.req_info
  cache.io.req.bits.isFirst := (arb2.io.chosen =/= InArbMissQueuePort.U && !arb2.io.out.bits.isHptwReq)
  cache.io.req.bits.isHptwReq := arb2.io.out.bits.isHptwReq
  cache.io.req.bits.hptwId := arb2.io.out.bits.hptwId
  cache.io.req.bits.bypassed.map(_ := false.B)
  cache.io.sfence := sfence_dup(2)
  cache.io.csr := csr_dup(2)
  cache.io.sfence_dup.zip(sfence_dup.drop(2).take(4)).map(s => s._1 := s._2)
  cache.io.csr_dup.zip(csr_dup.drop(2).take(3)).map(c => c._1 := c._2)
  cache.io.resp.ready := MuxCase(mq_arb.io.in(0).ready || ptw.io.req.ready, Seq(
    (!cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq) -> hptw.io.req.ready,
    (cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq) -> hptw_resp_arb.io.in(HptwRespArbCachePort).ready,
    cache.io.resp.bits.hit -> outReady(cache.io.resp.bits.req_info.source, outArbCachePort),
    (cache.io.resp.bits.toFsm.l1Hit && !cache.io.resp.bits.bypassed && llptw.io.in.ready) -> llptw.io.in.ready,
    (cache.io.resp.bits.bypassed || cache.io.resp.bits.isFirst) -> mq_arb.io.in(0).ready
  ))
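  // The page cache's single response port fans out to the HPTW (guest-walk
  // misses), hptw_resp_arb (guest-walk hits), the output arbiters (regular
  // hits), the LLPTW (last-level walks) and the miss queue / PTW (everything
  // else); MuxCase picks the first matching consumer's ready, priority first.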

  // NOTE: missQueue req has higher priority
  ptw.io.req.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && !cache.io.resp.bits.toFsm.l1Hit &&
    !cache.io.resp.bits.bypassed &&
    !cache.io.resp.bits.isFirst &&
    !cache.io.resp.bits.isHptwReq
  ptw.io.req.bits.req_info := cache.io.resp.bits.req_info
  if (EnableSv48) {
    ptw.io.req.bits.l3Hit.get := cache.io.resp.bits.toFsm.l3Hit.get
  }
  ptw.io.req.bits.l2Hit := cache.io.resp.bits.toFsm.l2Hit
  ptw.io.req.bits.ppn := cache.io.resp.bits.toFsm.ppn
  ptw.io.req.bits.stage1Hit := cache.io.resp.bits.toFsm.stage1Hit
  ptw.io.req.bits.stage1 := cache.io.resp.bits.stage1
  ptw.io.sfence := sfence_dup(7)
  ptw.io.csr := csr_dup(6)
  ptw.io.resp.ready := outReady(ptw.io.resp.bits.source, outArbFsmPort)

  hptw.io.req.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq
  hptw.io.req.bits.gvpn := cache.io.resp.bits.req_info.vpn
  hptw.io.req.bits.id := cache.io.resp.bits.toHptw.id
  hptw.io.req.bits.source := cache.io.resp.bits.req_info.source
  if (EnableSv48) {
    hptw.io.req.bits.l3Hit.get := cache.io.resp.bits.toHptw.l3Hit.get
  }
  hptw.io.req.bits.l2Hit := cache.io.resp.bits.toHptw.l2Hit
  hptw.io.req.bits.l1Hit := cache.io.resp.bits.toHptw.l1Hit
  hptw.io.req.bits.ppn := cache.io.resp.bits.toHptw.ppn
  hptw.io.req.bits.bypassed := cache.io.resp.bits.toHptw.bypassed
  hptw.io.sfence := sfence_dup(8)
  hptw.io.csr := csr_dup(7)
  // mem req
  def blockBytes_align(addr: UInt) = {
    Cat(addr(PAddrBits - 1, log2Up(l2tlbParams.blockBytes)), 0.U(log2Up(l2tlbParams.blockBytes).W))
  }
  def addr_low_from_vpn(vpn: UInt) = {
    vpn(log2Ceil(l2tlbParams.blockBytes)-log2Ceil(XLEN/8)-1, 0)
  }
  def addr_low_from_paddr(paddr: UInt) = {
    paddr(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
  }
  def from_llptw(id: UInt) = {
    id < l2tlbParams.llptwsize.U
  }
  def from_ptw(id: UInt) = {
    id === l2tlbParams.llptwsize.U
  }
  def from_hptw(id: UInt) = {
    id === l2tlbParams.llptwsize.U + 1.U
  }
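  // Memory-request source-id layout (MemReqWidth ids in total):
  //   [0, llptwsize)  -> LLPTW entries
  //   llptwsize       -> PTW state machine
  //   llptwsize + 1   -> HPTW state machine
  // which implies MemReqWidth is llptwsize + 2.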
  val waiting_resp = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  val flush_latch = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  val hptw_bypassed = RegInit(false.B)
  for (i <- waiting_resp.indices) {
    assert(!flush_latch(i) || waiting_resp(i)) // while flush_latch waits for a mem resp, waiting_resp must be true
  }

  val llptw_out = llptw.io.out
  val llptw_mem = llptw.io.mem
  llptw_mem.flush_latch := flush_latch.take(l2tlbParams.llptwsize)
  llptw_mem.req_mask := waiting_resp.take(l2tlbParams.llptwsize)
  ptw.io.mem.mask := waiting_resp.apply(l2tlbParams.llptwsize)
  hptw.io.mem.mask := waiting_resp.apply(l2tlbParams.llptwsize + 1)

  val mem_arb = Module(new Arbiter(new L2TlbMemReqBundle(), 3))
  mem_arb.io.in(0) <> ptw.io.mem.req
  mem_arb.io.in(1) <> llptw_mem.req
  mem_arb.io.in(2) <> hptw.io.mem.req
  mem_arb.io.out.ready := mem.a.ready && !flush

  // assertion: memory should not be accessed twice in a row at the same address
  val last_resp_vpn = RegEnable(cache.io.refill.bits.req_info_dup(0).vpn, cache.io.refill.valid)
  val last_resp_s2xlate = RegEnable(cache.io.refill.bits.req_info_dup(0).s2xlate, cache.io.refill.valid)
  val last_resp_level = RegEnable(cache.io.refill.bits.level_dup(0), cache.io.refill.valid)
  val last_resp_v = RegInit(false.B)
  val last_has_invalid = !Cat(cache.io.refill.bits.ptes.asTypeOf(Vec(blockBits/XLEN, UInt(XLEN.W))).map(a => a(0))).andR || cache.io.refill.bits.sel_pte_dup(0).asTypeOf(new PteBundle).isAf()
  when (cache.io.refill.valid) { last_resp_v := !last_has_invalid }
  when (flush) { last_resp_v := false.B }
  XSError(last_resp_v && cache.io.refill.valid &&
    (cache.io.refill.bits.req_info_dup(0).vpn === last_resp_vpn) &&
    (cache.io.refill.bits.level_dup(0) === last_resp_level) &&
    (cache.io.refill.bits.req_info_dup(0).s2xlate === last_resp_s2xlate),
    "l2tlb should not access mem twice in a row at the same addr")
  // ATTENTION: this may assert spuriously when the last part of an L2 PTE block
  // is valid but the current part is invalid, so one more mem access happens.
  // If that occurs, remove this assertion.

  val req_addr_low = Reg(Vec(MemReqWidth, UInt((log2Up(l2tlbParams.blockBytes)-log2Up(XLEN/8)).W)))

  when (llptw.io.in.fire) {
    // on LLPTW enqueue, record req_addr_low so the right part of the mem resp data can be selected
    req_addr_low(llptw_mem.enq_ptr) := addr_low_from_vpn(llptw.io.in.bits.req_info.vpn)
  }
  when (mem_arb.io.out.fire) {
    req_addr_low(mem_arb.io.out.bits.id) := addr_low_from_paddr(mem_arb.io.out.bits.addr)
    waiting_resp(mem_arb.io.out.bits.id) := true.B
    hptw_bypassed := from_hptw(mem_arb.io.out.bits.id) && mem_arb.io.out.bits.hptw_bypassed
  }
  // mem read
  val memRead = edge.Get(
    fromSource = mem_arb.io.out.bits.id,
    // toAddress  = memAddr(log2Up(CacheLineSize / 2 / 8) - 1, 0),
    toAddress  = blockBytes_align(mem_arb.io.out.bits.addr),
    lgSize     = log2Up(l2tlbParams.blockBytes).U
  )._2
  mem.a.bits := memRead
  mem.a.valid := mem_arb.io.out.valid && !flush
  mem.a.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.PTW.id.U)
  mem.d.ready := true.B
  // mem -> data buffer
  val refill_data = RegInit(VecInit.fill(blockBits / l1BusDataWidth)(0.U(l1BusDataWidth.W)))
  val refill_helper = edge.firstlastHelper(mem.d.bits, mem.d.fire)
  val mem_resp_done = refill_helper._3
  val mem_resp_from_llptw = from_llptw(mem.d.bits.source)
  val mem_resp_from_ptw = from_ptw(mem.d.bits.source)
  val mem_resp_from_hptw = from_hptw(mem.d.bits.source)
  when (mem.d.valid) {
    assert(mem.d.bits.source < MemReqWidth.U)
    refill_data(refill_helper._4) := mem.d.bits.data
  }
  // refill_data_tmp is a combinational fork of refill_data, available one cycle earlier
  val refill_data_tmp = WireInit(refill_data)
  refill_data_tmp(refill_helper._4) := mem.d.bits.data

  // save only one pte for each id
  // (the miss queue may be unable to respond to the tlb with low latency; it should have the
  // highest priority, but that is difficult to design in the cache)
  val resp_pte = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize + 1) { RegEnable(get_part(refill_data_tmp, req_addr_low(i)), 0.U.asTypeOf(get_part(refill_data_tmp, req_addr_low(i))), mem_resp_done && mem_resp_from_hptw) }
    else if (i == l2tlbParams.llptwsize) { RegEnable(get_part(refill_data_tmp, req_addr_low(i)), 0.U.asTypeOf(get_part(refill_data_tmp, req_addr_low(i))), mem_resp_done && mem_resp_from_ptw) }
    else { Mux(llptw_mem.buffer_it(i), get_part(refill_data, req_addr_low(i)), RegEnable(get_part(refill_data, req_addr_low(i)), 0.U.asTypeOf(get_part(refill_data, req_addr_low(i))), llptw_mem.buffer_it(i))) }
    // the llptw cannot use refill_data_tmp, because the enqueue bypass takes effect only in the next cycle
  ))

  // save eight ptes for each id when using the sector tlb
  // (same caveat as above about miss-queue response latency)
  val resp_pte_sector = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize + 1) { RegEnable(refill_data_tmp, 0.U.asTypeOf(refill_data_tmp), mem_resp_done && mem_resp_from_hptw) }
    else if (i == l2tlbParams.llptwsize) { RegEnable(refill_data_tmp, 0.U.asTypeOf(refill_data_tmp), mem_resp_done && mem_resp_from_ptw) }
    else { Mux(llptw_mem.buffer_it(i), refill_data, RegEnable(refill_data, 0.U.asTypeOf(refill_data), llptw_mem.buffer_it(i))) }
    // the llptw cannot use refill_data_tmp, because the enqueue bypass takes effect only in the next cycle
  ))

  // mem -> llptw
  llptw_mem.resp.valid := mem_resp_done && mem_resp_from_llptw
  llptw_mem.resp.bits.id := DataHoldBypass(mem.d.bits.source, mem.d.valid)
  llptw_mem.resp.bits.value := DataHoldBypass(refill_data_tmp.asUInt, mem.d.valid)
  // mem -> ptw
  ptw.io.mem.resp.valid := mem_resp_done && mem_resp_from_ptw
  ptw.io.mem.resp.bits := resp_pte.apply(l2tlbParams.llptwsize)
  // mem -> hptw
  hptw.io.mem.resp.valid := mem_resp_done && mem_resp_from_hptw
  hptw.io.mem.resp.bits := resp_pte.apply(l2tlbParams.llptwsize + 1)
  // mem -> cache
  val refill_from_llptw = mem_resp_from_llptw
  val refill_from_ptw = mem_resp_from_ptw
  val refill_from_hptw = mem_resp_from_hptw
  val refill_level = Mux(refill_from_llptw, 0.U, Mux(refill_from_ptw, RegEnable(ptw.io.refill.level, 0.U, ptw.io.mem.req.fire), RegEnable(hptw.io.refill.level, 0.U, hptw.io.mem.req.fire)))
  val refill_valid = mem_resp_done && !flush && !flush_latch(mem.d.bits.source) && !hptw_bypassed

  cache.io.refill.valid := GatedValidRegNext(refill_valid, false.B)
  cache.io.refill.bits.ptes := refill_data.asUInt
  cache.io.refill.bits.req_info_dup.map(_ := RegEnable(Mux(refill_from_llptw, llptw_mem.refill, Mux(refill_from_ptw, ptw.io.refill.req_info, hptw.io.refill.req_info)), refill_valid))
  cache.io.refill.bits.level_dup.map(_ := RegEnable(refill_level, refill_valid))
  cache.io.refill.bits.levelOH(refill_level, refill_valid)
  cache.io.refill.bits.sel_pte_dup.map(_ := RegEnable(sel_data(refill_data_tmp.asUInt, req_addr_low(mem.d.bits.source)), refill_valid))
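  // The page cache is refilled one registered cycle after the last memory
  // beat; the refill is suppressed if a flush arrived while the walk was in
  // flight (flush or flush_latch) or if the response belongs to a bypassed
  // hptw request.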

  if (env.EnableDifftest) {
    val difftest_ptw_addr = RegInit(VecInit(Seq.fill(MemReqWidth)(0.U(PAddrBits.W))))
    when (mem.a.valid) {
      difftest_ptw_addr(mem.a.bits.source) := mem.a.bits.address
    }

    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := io.hartId
    difftest.index := 2.U
    difftest.valid := cache.io.refill.valid
    difftest.addr := difftest_ptw_addr(RegEnable(mem.d.bits.source, mem.d.valid))
    difftest.data := refill_data.asTypeOf(difftest.data)
    difftest.idtfr := DontCare
  }

  if (env.EnableDifftest) {
    for (i <- 0 until PtwWidth) {
      val difftest = DifftestModule(new DiffL2TLBEvent)
      difftest.coreid := io.hartId
      difftest.valid := io.tlb(i).resp.fire && !io.tlb(i).resp.bits.s1.af && !io.tlb(i).resp.bits.s2.gaf
      difftest.index := i.U
      difftest.vpn := Cat(io.tlb(i).resp.bits.s1.entry.tag, 0.U(sectortlbwidth.W))
      difftest.pbmt := io.tlb(i).resp.bits.s1.entry.pbmt
      difftest.g_pbmt := io.tlb(i).resp.bits.s2.entry.pbmt
      for (j <- 0 until tlbcontiguous) {
        difftest.ppn(j) := Cat(io.tlb(i).resp.bits.s1.entry.ppn, io.tlb(i).resp.bits.s1.ppn_low(j))
        difftest.valididx(j) := io.tlb(i).resp.bits.s1.valididx(j)
        difftest.pteidx(j) := io.tlb(i).resp.bits.s1.pteidx(j)
      }
      difftest.perm := io.tlb(i).resp.bits.s1.entry.perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      difftest.level := io.tlb(i).resp.bits.s1.entry.level.getOrElse(0.U.asUInt)
      difftest.pf := io.tlb(i).resp.bits.s1.pf
      difftest.satp := Cat(io.csr.tlb.satp.mode, io.csr.tlb.satp.asid, io.csr.tlb.satp.ppn)
      difftest.vsatp := Cat(io.csr.tlb.vsatp.mode, io.csr.tlb.vsatp.asid, io.csr.tlb.vsatp.ppn)
      difftest.hgatp := Cat(io.csr.tlb.hgatp.mode, io.csr.tlb.hgatp.vmid, io.csr.tlb.hgatp.ppn)
      difftest.gvpn := io.tlb(i).resp.bits.s2.entry.tag
      difftest.g_perm := io.tlb(i).resp.bits.s2.entry.perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      difftest.g_level := io.tlb(i).resp.bits.s2.entry.level.getOrElse(0.U.asUInt)
      difftest.s2ppn := io.tlb(i).resp.bits.s2.entry.ppn
      difftest.gpf := io.tlb(i).resp.bits.s2.gpf
      difftest.s2xlate := io.tlb(i).resp.bits.s2xlate
    }
  }

  // pmp
  pmp_check(0).req <> ptw.io.pmp.req
  ptw.io.pmp.resp <> pmp_check(0).resp
  pmp_check(1).req <> llptw.io.pmp.req
  llptw.io.pmp.resp <> pmp_check(1).resp
  pmp_check(2).req <> hptw.io.pmp.req
  hptw.io.pmp.resp <> pmp_check(2).resp

  llptw_out.ready := outReady(llptw_out.bits.req_info.source, outArbMqPort)

  // hptw and page cache -> ptw and llptw
  val HptwRespArbCachePort = 0
  val HptwRespArbHptw = 1
  hptw_resp_arb.io.in(HptwRespArbCachePort).valid := cache.io.resp.valid && cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq
  hptw_resp_arb.io.in(HptwRespArbCachePort).bits.id := cache.io.resp.bits.toHptw.id
  hptw_resp_arb.io.in(HptwRespArbCachePort).bits.resp := cache.io.resp.bits.toHptw.resp
  hptw_resp_arb.io.in(HptwRespArbHptw).valid := hptw.io.resp.valid
  hptw_resp_arb.io.in(HptwRespArbHptw).bits.id := hptw.io.resp.bits.id
  hptw_resp_arb.io.in(HptwRespArbHptw).bits.resp := hptw.io.resp.bits.resp
  hptw.io.resp.ready := hptw_resp_arb.io.in(HptwRespArbHptw).ready

  ptw.io.hptw.resp.valid := hptw_resp_arb.io.out.valid && hptw_resp_arb.io.out.bits.id === FsmReqID.U
  ptw.io.hptw.resp.bits.h_resp := hptw_resp_arb.io.out.bits.resp
  llptw.io.hptw.resp.valid := hptw_resp_arb.io.out.valid && hptw_resp_arb.io.out.bits.id =/= FsmReqID.U
  llptw.io.hptw.resp.bits.id := hptw_resp_arb.io.out.bits.id
  llptw.io.hptw.resp.bits.h_resp := hptw_resp_arb.io.out.bits.resp
  hptw_resp_arb.io.out.ready := true.B
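  // hptw responses are demultiplexed by id: FsmReqID addresses the ptw state
  // machine, any other id addresses an llptw entry. The arbiter output is
  // unconditionally ready, presumably because both consumers can always sink
  // a response in the same cycle.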

  // Timing: may need some optimization here, or even an extra cycle
  for (i <- 0 until PtwWidth) {
    mergeArb(i).in(outArbCachePort).valid := cache.io.resp.valid && cache.io.resp.bits.hit && cache.io.resp.bits.req_info.source===i.U && !cache.io.resp.bits.isHptwReq
    mergeArb(i).in(outArbCachePort).bits.s2xlate := cache.io.resp.bits.req_info.s2xlate
    mergeArb(i).in(outArbCachePort).bits.s1 := cache.io.resp.bits.stage1
    mergeArb(i).in(outArbCachePort).bits.s2 := cache.io.resp.bits.toHptw.resp
    mergeArb(i).in(outArbFsmPort).valid := ptw.io.resp.valid && ptw.io.resp.bits.source===i.U
    mergeArb(i).in(outArbFsmPort).bits.s2xlate := ptw.io.resp.bits.s2xlate
    mergeArb(i).in(outArbFsmPort).bits.s1 := ptw.io.resp.bits.resp
    mergeArb(i).in(outArbFsmPort).bits.s2 := ptw.io.resp.bits.h_resp
    mergeArb(i).in(outArbMqPort).valid := llptw_out.valid && llptw_out.bits.req_info.source===i.U
    mergeArb(i).in(outArbMqPort).bits.s2xlate := llptw_out.bits.req_info.s2xlate
    mergeArb(i).in(outArbMqPort).bits.s1 := Mux(llptw_out.bits.first_s2xlate_fault, llptw_stage1(llptw_out.bits.id), contiguous_pte_to_merge_ptwResp(resp_pte_sector(llptw_out.bits.id).asUInt, llptw_out.bits.req_info.vpn, llptw_out.bits.af, true, s2xlate = llptw_out.bits.req_info.s2xlate))
    mergeArb(i).in(outArbMqPort).bits.s2 := llptw_out.bits.h_resp
    mergeArb(i).out.ready := outArb(i).in(0).ready
  }

  for (i <- 0 until PtwWidth) {
    outArb(i).in(0).valid := mergeArb(i).out.valid
    outArb(i).in(0).bits.s2xlate := mergeArb(i).out.bits.s2xlate
    outArb(i).in(0).bits.s1 := merge_ptwResp_to_sector_ptwResp(mergeArb(i).out.bits.s1)
    outArb(i).in(0).bits.s2 := mergeArb(i).out.bits.s2
  }

  // io.tlb.map(_.resp) <> outArb.map(_.out)
  io.tlb.map(_.resp).zip(outArb.map(_.out)).map{
    case (resp, out) => resp <> out
  }

  // sfence
  when (flush) {
    for (i <- 0 until MemReqWidth) {
      when (waiting_resp(i)) {
        flush_latch(i) := true.B
      }
    }
  }
  // mem -> control signal
  // waiting_resp and flush_latch are reset when mem_resp_done
  when (mem_resp_done) {
    waiting_resp(mem.d.bits.source) := false.B
    flush_latch(mem.d.bits.source) := false.B
  }

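  // Connect source to sink as usual, but gate both valid and ready while
  // block_signal is asserted.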
  def block_decoupled[T <: Data](source: DecoupledIO[T], sink: DecoupledIO[T], block_signal: Bool) = {
    sink.valid   := source.valid && !block_signal
    source.ready := sink.ready   && !block_signal
    sink.bits    := source.bits
  }

  def get_part(data: Vec[UInt], index: UInt): UInt = {
    val inner_data = data.asTypeOf(Vec(data.getWidth / XLEN, UInt(XLEN.W)))
    inner_data(index)
  }

  // not_super means this is a normal (non-super) page
  // for a super page, valididx(i) are all set true, to make l1 tlb matching convenient
  def contiguous_pte_to_merge_ptwResp(pte: UInt, vpn: UInt, af: Bool, af_first: Boolean, not_super: Boolean = true, s2xlate: UInt) : PtwMergeResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_merge_resp = Wire(new PtwMergeResp())
    val hasS2xlate = s2xlate =/= noS2xlate
    for (i <- 0 until tlbcontiguous) {
      val pte_in = pte(64 * i + 63, 64 * i).asTypeOf(new PteBundle())
      val ptw_resp = Wire(new PtwMergeEntry(tagLen = sectorvpnLen, hasPerm = true, hasLevel = true))
      ptw_resp.ppn := pte_in.getPPN()(ptePPNLen - 1, sectortlbwidth)
      ptw_resp.ppn_low := pte_in.getPPN()(sectortlbwidth - 1, 0)
      ptw_resp.level.map(_ := 0.U)
      ptw_resp.pbmt := pte_in.pbmt
      ptw_resp.perm.map(_ := pte_in.getPerm())
      ptw_resp.tag := vpn(vpnLen - 1, sectortlbwidth)
      ptw_resp.pf := (if (af_first) !af else true.B) && (pte_in.isPf(0.U) || !pte_in.isLeaf())
      ptw_resp.af := (if (!af_first) pte_in.isPf(0.U) else true.B) && (af || Mux(s2xlate === allStage, false.B, pte_in.isAf()))
      ptw_resp.v := !ptw_resp.pf
      ptw_resp.prefetch := DontCare
      ptw_resp.asid := Mux(hasS2xlate, vsatp.asid, satp.asid)
      ptw_resp.vmid.map(_ := hgatp.vmid)
      ptw_merge_resp.entry(i) := ptw_resp
    }
    ptw_merge_resp.pteidx := UIntToOH(vpn(sectortlbwidth - 1, 0)).asBools
    ptw_merge_resp.not_super := not_super.B
    ptw_merge_resp
  }

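  // Collapse a merged (per-pte) response into a sector response keyed by
  // pteidx: valididx(i) marks which of the eight contiguous ptes share
  // ppn/pbmt/perm/v/af/pf with the selected one (all true for super pages).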
  def merge_ptwResp_to_sector_ptwResp(pte: PtwMergeResp) : PtwSectorResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_sector_resp = Wire(new PtwSectorResp)
    ptw_sector_resp.entry.tag := pte.entry(OHToUInt(pte.pteidx)).tag
    ptw_sector_resp.entry.asid := pte.entry(OHToUInt(pte.pteidx)).asid
    ptw_sector_resp.entry.vmid.map(_ := pte.entry(OHToUInt(pte.pteidx)).vmid.getOrElse(0.U))
    ptw_sector_resp.entry.ppn := pte.entry(OHToUInt(pte.pteidx)).ppn
    ptw_sector_resp.entry.pbmt := pte.entry(OHToUInt(pte.pteidx)).pbmt
    ptw_sector_resp.entry.perm.map(_ := pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)))
    ptw_sector_resp.entry.level.map(_ := pte.entry(OHToUInt(pte.pteidx)).level.getOrElse(0.U(log2Up(Level + 1).W)))
    ptw_sector_resp.entry.prefetch := pte.entry(OHToUInt(pte.pteidx)).prefetch
    ptw_sector_resp.entry.v := pte.entry(OHToUInt(pte.pteidx)).v
    ptw_sector_resp.af := pte.entry(OHToUInt(pte.pteidx)).af
    ptw_sector_resp.pf := pte.entry(OHToUInt(pte.pteidx)).pf
    ptw_sector_resp.addr_low := OHToUInt(pte.pteidx)
    ptw_sector_resp.pteidx := pte.pteidx
    for (i <- 0 until tlbcontiguous) {
      val ppn_equal = pte.entry(i).ppn === pte.entry(OHToUInt(pte.pteidx)).ppn
      val pbmt_equal = pte.entry(i).pbmt === pte.entry(OHToUInt(pte.pteidx)).pbmt
      val perm_equal = pte.entry(i).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt === pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      val v_equal = pte.entry(i).v === pte.entry(OHToUInt(pte.pteidx)).v
      val af_equal = pte.entry(i).af === pte.entry(OHToUInt(pte.pteidx)).af
      val pf_equal = pte.entry(i).pf === pte.entry(OHToUInt(pte.pteidx)).pf
      ptw_sector_resp.valididx(i) := (ppn_equal && pbmt_equal && perm_equal && v_equal && af_equal && pf_equal) || !pte.not_super
      ptw_sector_resp.ppn_low(i) := pte.entry(i).ppn_low
    }
    ptw_sector_resp.valididx(OHToUInt(pte.pteidx)) := true.B
    ptw_sector_resp
  }

  def outReady(source: UInt, port: Int): Bool = {
    MuxLookup(source, true.B)((0 until PtwWidth).map(i => i.U -> mergeArb(i).in(port).ready))
  }

  // debug info
  for (i <- 0 until PtwWidth) {
    XSDebug(p"[io.tlb(${i.U})] ${io.tlb(i)}\n")
  }
  XSDebug(p"[sfence] ${io.sfence}\n")
  XSDebug(p"[io.csr.tlb] ${io.csr.tlb}\n")

  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"req_count${i}", io.tlb(i).req(0).fire)
    XSPerfAccumulate(s"req_blocked_count_${i}", io.tlb(i).req(0).valid && !io.tlb(i).req(0).ready)
  }
  XSPerfAccumulate(s"req_blocked_by_mq", arb1.io.out.valid && missQueue.io.out.valid)
  for (i <- 0 until (MemReqWidth + 1)) {
    XSPerfAccumulate(s"mem_req_util${i}", PopCount(waiting_resp) === i.U)
  }
  XSPerfAccumulate("mem_cycle", PopCount(waiting_resp) =/= 0.U)
  XSPerfAccumulate("mem_count", mem.a.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"llptw_ppn_af${i}", mergeArb(i).in(outArbMqPort).valid && mergeArb(i).in(outArbMqPort).bits.s1.entry(OHToUInt(mergeArb(i).in(outArbMqPort).bits.s1.pteidx)).af && !llptw_out.bits.af)
    XSPerfAccumulate(s"access_fault${i}", io.tlb(i).resp.fire && io.tlb(i).resp.bits.s1.af)
  }

  // print configs
  println(s"${l2tlbParams.name}: one ptw, one llptw with size ${l2tlbParams.llptwsize}, miss queue size ${MissQueueSize}, l2: ${l2tlbParams.l2Size} (fa), l1: nSets ${l2tlbParams.l1nSets} nWays ${l2tlbParams.l1nWays}, l0: nSets ${l2tlbParams.l0nSets} nWays ${l2tlbParams.l0nWays}, blockBytes: ${l2tlbParams.blockBytes}")

  // time out assert
  for (i <- 0 until MemReqWidth) {
    TimeOutAssert(waiting_resp(i), timeOutThreshold, s"ptw mem resp time out wait_resp${i}")
    TimeOutAssert(flush_latch(i), timeOutThreshold, s"ptw mem resp time out flush_latch${i}")
  }

  val perfEvents = Seq(llptw, cache, ptw).flatMap(_.getPerfEvents)
  generatePerfEvent()

  val isWriteL1TlbTable = Constantin.createRecord(s"isWriteL1TlbTable$hartId")
  val L1TlbTable = ChiselDB.createTable(s"L1Tlb_hart$hartId", new L1TlbDB)
  val ITlbReqDB, DTlbReqDB, ITlbRespDB, DTlbRespDB = Wire(new L1TlbDB)
  ITlbReqDB.vpn := io.tlb(0).req(0).bits.vpn
  DTlbReqDB.vpn := io.tlb(1).req(0).bits.vpn
  ITlbRespDB.vpn := io.tlb(0).resp.bits.s1.entry.tag
  DTlbRespDB.vpn := io.tlb(1).resp.bits.s1.entry.tag
  L1TlbTable.log(ITlbReqDB, isWriteL1TlbTable.orR && io.tlb(0).req(0).fire, "ITlbReq", clock, reset)
  L1TlbTable.log(DTlbReqDB, isWriteL1TlbTable.orR && io.tlb(1).req(0).fire, "DTlbReq", clock, reset)
  L1TlbTable.log(ITlbRespDB, isWriteL1TlbTable.orR && io.tlb(0).resp.fire, "ITlbResp", clock, reset)
  L1TlbTable.log(DTlbRespDB, isWriteL1TlbTable.orR && io.tlb(1).resp.fire, "DTlbResp", clock, reset)

  val isWritePageCacheTable = Constantin.createRecord(s"isWritePageCacheTable$hartId")
  val PageCacheTable = ChiselDB.createTable(s"PageCache_hart$hartId", new PageCacheDB)
  val PageCacheDB = Wire(new PageCacheDB)
  PageCacheDB.vpn := Cat(cache.io.resp.bits.stage1.entry(0).tag, OHToUInt(cache.io.resp.bits.stage1.pteidx))
  PageCacheDB.source := cache.io.resp.bits.req_info.source
  PageCacheDB.bypassed := cache.io.resp.bits.bypassed
  PageCacheDB.is_first := cache.io.resp.bits.isFirst
  PageCacheDB.prefetched := cache.io.resp.bits.stage1.entry(0).prefetch
  PageCacheDB.prefetch := cache.io.resp.bits.prefetch
  PageCacheDB.l2Hit := cache.io.resp.bits.toFsm.l2Hit
  PageCacheDB.l1Hit := cache.io.resp.bits.toFsm.l1Hit
  PageCacheDB.hit := cache.io.resp.bits.hit
  PageCacheTable.log(PageCacheDB, isWritePageCacheTable.orR && cache.io.resp.fire, "PageCache", clock, reset)

  val isWritePTWTable = Constantin.createRecord(s"isWritePTWTable$hartId")
  val PTWTable = ChiselDB.createTable(s"PTW_hart$hartId", new PTWDB)
  val PTWReqDB, PTWRespDB, LLPTWReqDB, LLPTWRespDB = Wire(new PTWDB)
  PTWReqDB.vpn := ptw.io.req.bits.req_info.vpn
  PTWReqDB.source := ptw.io.req.bits.req_info.source
  PTWRespDB.vpn := ptw.io.refill.req_info.vpn
  PTWRespDB.source := ptw.io.refill.req_info.source
  LLPTWReqDB.vpn := llptw.io.in.bits.req_info.vpn
  LLPTWReqDB.source := llptw.io.in.bits.req_info.source
  LLPTWRespDB.vpn := llptw.io.mem.refill.vpn
  LLPTWRespDB.source := llptw.io.mem.refill.source
  PTWTable.log(PTWReqDB, isWritePTWTable.orR && ptw.io.req.fire, "PTWReq", clock, reset)
  PTWTable.log(PTWRespDB, isWritePTWTable.orR && ptw.io.mem.resp.fire, "PTWResp", clock, reset)
  PTWTable.log(LLPTWReqDB, isWritePTWTable.orR && llptw.io.in.fire, "LLPTWReq", clock, reset)
  PTWTable.log(LLPTWRespDB, isWritePTWTable.orR && llptw.io.mem.resp.fire, "LLPTWResp", clock, reset)

  val isWriteL2TlbMissQueueTable = Constantin.createRecord(s"isWriteL2TlbMissQueueTable$hartId")
  val L2TlbMissQueueTable = ChiselDB.createTable(s"L2TlbMissQueue_hart$hartId", new L2TlbMissQueueDB)
  val L2TlbMissQueueInDB, L2TlbMissQueueOutDB = Wire(new L2TlbMissQueueDB)
  L2TlbMissQueueInDB.vpn := missQueue.io.in.bits.req_info.vpn
  L2TlbMissQueueOutDB.vpn := missQueue.io.out.bits.req_info.vpn
  L2TlbMissQueueTable.log(L2TlbMissQueueInDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.in.fire, "L2TlbMissQueueIn", clock, reset)
  L2TlbMissQueueTable.log(L2TlbMissQueueOutDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.out.fire, "L2TlbMissQueueOut", clock, reset)
}

/** BlockHelper: keep the miss queue from sending too many requests to the cache.
 *  Parameters:
 *    enable: release the block (the miss queue may send again)
 *    start: a miss-queue request has gone out; start blocking further ones
 *    block: block the miss queue's output
 *    latency: the cache-access latency of the last miss-queue request
 */
class BlockHelper(latency: Int)(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val enable = Input(Bool())
    val start = Input(Bool())
    val block = Output(Bool())
  })

  val count = RegInit(0.U(log2Ceil(latency).W))
  val valid = RegInit(false.B)
  val work = RegInit(true.B)

  io.block := valid

  when (io.start && work) { valid := true.B }
  when (valid) { count := count + 1.U }
  when (count === (latency.U) || io.enable) {
    valid := false.B
    work := io.enable
    count := 0.U
  }
}

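// PTEHelper is a simulation-only black box (ExtModule), presumably bound to a
// DPI-C software page-table walker: given satp and a vpn it returns the final
// pte, its level, and a page-fault flag. It is used by FakePTW below.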
class PTEHelper() extends ExtModule {
  val clock  = IO(Input(Clock()))
  val enable = IO(Input(Bool()))
  val satp   = IO(Input(UInt(64.W)))
  val vpn    = IO(Input(UInt(64.W)))
  val pte    = IO(Output(UInt(64.W)))
  val level  = IO(Output(UInt(8.W)))
  val pf     = IO(Output(UInt(8.W)))
}

class PTWDelayN[T <: Data](gen: T, n: Int, flush: Bool) extends Module {
  val io = IO(new Bundle() {
    val in = Input(gen)
    val out = Output(gen)
    val ptwflush = Input(flush.cloneType)
  })
  val out = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  val t = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  out(0) := io.in
  if (n == 1) {
    io.out := out(0)
  } else {
    when (io.ptwflush) {
      for (i <- 0 until n) {
        t(i) := 0.U.asTypeOf(gen)
        out(i) := 0.U.asTypeOf(gen)
      }
      io.out := 0.U.asTypeOf(gen)
    } .otherwise {
      for (i <- 1 until n) {
        t(i-1) := out(i-1)
        out(i) := t(i-1)
      }
      io.out := out(n-1)
    }
  }
}

object PTWDelayN {
  def apply[T <: Data](in: T, n: Int, flush: Bool): T = {
    val delay = Module(new PTWDelayN(in.cloneType, n, flush))
    delay.io.in := in
    delay.io.ptwflush := flush
    delay.io.out
  }
}

class FakePTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new L2TLBIO)
  val flush = VecInit(Seq.fill(PtwWidth)(false.B))
  flush(0) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, itlbParams.fenceDelay)
  flush(1) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, ldtlbParams.fenceDelay)
  for (i <- 0 until PtwWidth) {
    val helper = Module(new PTEHelper())
    helper.clock := clock
    helper.satp := io.csr.tlb.satp.ppn

    if (coreParams.softPTWDelay == 1) {
      helper.enable := io.tlb(i).req(0).fire
      helper.vpn := io.tlb(i).req(0).bits.vpn
    } else {
      helper.enable := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay - 1, flush(i))
      helper.vpn := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay - 1, flush(i))
    }

    val pte = helper.pte.asTypeOf(new PteBundle)
    val level = helper.level
    val pf = helper.pf
    val empty = RegInit(true.B)
    when (io.tlb(i).req(0).fire) {
      empty := false.B
    } .elsewhen (io.tlb(i).resp.fire || flush(i)) {
      empty := true.B
    }

    io.tlb(i).req(0).ready := empty || io.tlb(i).resp.fire
    io.tlb(i).resp.valid := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay, flush(i))
    assert(!io.tlb(i).resp.valid || io.tlb(i).resp.ready)
    io.tlb(i).resp.bits.s1.entry.tag := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay, flush(i))
    io.tlb(i).resp.bits.s1.entry.pbmt := pte.pbmt
    io.tlb(i).resp.bits.s1.entry.ppn := pte.ppn
    io.tlb(i).resp.bits.s1.entry.perm.map(_ := pte.getPerm())
    io.tlb(i).resp.bits.s1.entry.level.map(_ := level)
    io.tlb(i).resp.bits.s1.pf := pf
    io.tlb(i).resp.bits.s1.af := DontCare // TODO: implement it
    io.tlb(i).resp.bits.s1.entry.v := !pf
    io.tlb(i).resp.bits.s1.entry.prefetch := DontCare
    io.tlb(i).resp.bits.s1.entry.asid := io.csr.tlb.satp.asid
  }
}

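// L2TLBWrapper selects between the real L2TLB and the simulation-only FakePTW
// according to coreParams.softPTW; with the soft walker there is no TileLink
// node to expose.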
class L2TLBWrapper()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false
  val useSoftPTW = coreParams.softPTW
  val node = if (!useSoftPTW) TLIdentityNode() else null
  val ptw = if (!useSoftPTW) LazyModule(new L2TLB()) else null
  if (!useSoftPTW) {
    node := ptw.node
  }

  class L2TLBWrapperImp(wrapper: LazyModule) extends LazyModuleImp(wrapper) with HasPerfEvents {
    val io = IO(new L2TLBIO)
    val perfEvents = if (useSoftPTW) {
      val fake_ptw = Module(new FakePTW())
      io <> fake_ptw.io
      Seq()
    } else {
      io <> ptw.module.io
      ptw.module.getPerfEvents
    }
    generatePerfEvent()
  }

  lazy val module = new L2TLBWrapperImp(this)
}