// xref: /XiangShan/src/main/scala/xiangshan/cache/mmu/L2TLB.scala (revision a0c65233389cccd2fdffe58236fb0a7dedf6d54f)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.experimental.ExtModule
import chisel3.util._
import chisel3.internal.naming.chiselName
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{IdRange, LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMP, PMPChecker, PMPReqBundle, PMPRespBundle}
import xiangshan.backend.fu.util.HasCSRConst
import difftest._

class L2TLB()(implicit p: Parameters) extends LazyModule with HasPtwConst {
  override def shouldBeInlined: Boolean = false

  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    clients = Seq(TLMasterParameters.v1(
      "ptw",
      sourceId = IdRange(0, MemReqWidth)
    )),
    requestFields = Seq(ReqSourceField())
  )))

  lazy val module = new L2TLBImp(this)
}

@chiselName
class L2TLBImp(outer: L2TLB)(implicit p: Parameters) extends PtwModule(outer) with HasCSRConst with HasPerfEvents {

  val (mem, edge) = outer.node.out.head

  val io = IO(new L2TLBIO)
  val difftestIO = IO(new Bundle() {
    val ptwResp = Output(Bool())
    val ptwAddr = Output(UInt(64.W))
    val ptwData = Output(Vec(4, UInt(64.W)))
  })

  /* The PTW processes multiple requests.
   * The PTW procedure is divided into two stages: cache access, then mem access on a cache miss.
   *           miss queue itlb       dtlb
   *               |       |         |
   *               ------arbiter------
   *                            |
   *                    l1 - l2 - l3 - sp
   *                            |
   *          -------------------------------------------
   *    miss  |  queue                                  | hit
   *    [][][][][][]                                    |
   *          |                                         |
   *    state machine accessing mem                     |
   *          |                                         |
   *          ---------------arbiter---------------------
   *                 |                    |
   *                itlb                 dtlb
   */
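  // The diagram maps onto the modules below: arb1 and arb2 funnel itlb/dtlb
  // (plus miss-queue and, optionally, prefetch) requests into the page cache;
  // on a cache miss a request goes to the PTW state machine, the LLPTW, or the
  // miss queue, and mergeArb/outArb return responses to the itlb/dtlb ports.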

  difftestIO <> DontCare

  val sfence_tmp = DelayN(io.sfence, 1)
  val csr_tmp    = DelayN(io.csr.tlb, 1)
  val sfence_dup = Seq.fill(8)(RegNext(sfence_tmp))
  val csr_dup = Seq.fill(7)(RegNext(csr_tmp))
  val satp   = csr_dup(0).satp
  val priv   = csr_dup(0).priv
  val flush  = sfence_dup(0).valid || satp.changed

  val pmp = Module(new PMP())
  val pmp_check = VecInit(Seq.fill(2)(Module(new PMPChecker(lgMaxSize = 3, sameCycle = true)).io))
  pmp.io.distribute_csr := io.csr.distribute_csr
  pmp_check.foreach(_.check_env.apply(ModeS, pmp.io.pmp, pmp.io.pma))

  val missQueue = Module(new L2TlbMissQueue)
  val cache = Module(new PtwCache)
  val ptw = Module(new PTW)
  val llptw = Module(new LLPTW)
  val blockmq = Module(new BlockHelper(3))
  val arb1 = Module(new Arbiter(new PtwReq, PtwWidth))
  val arb2 = Module(new Arbiter(new Bundle {
    val vpn = UInt(vpnLen.W)
    val source = UInt(bSourceWidth.W)
  }, if (l2tlbParams.enablePrefetch) 4 else 3))
  val outArb = (0 until PtwWidth).map(i => Module(new Arbiter(new PtwSectorResp, 1)).io)
  val mergeArb = (0 until PtwWidth).map(i => Module(new Arbiter(new PtwMergeResp, 3)).io)
  val outArbCachePort = 0
  val outArbFsmPort = 1
  val outArbMqPort = 2

  // arb2 input port
  val InArbPTWPort = 0
  val InArbMissQueuePort = 1
  val InArbTlbPort = 2
  val InArbPrefetchPort = 3
  // NOTE: when the cache output misses but the PTW does not accept the request,
  // it falls back to the miss queue (see mq_arb below)
  arb1.io.in <> VecInit(io.tlb.map(_.req(0)))
  arb1.io.out.ready := arb2.io.in(InArbTlbPort).ready

  arb2.io.in(InArbPTWPort).valid := ptw.io.llptw.valid
  arb2.io.in(InArbPTWPort).bits.vpn := ptw.io.llptw.bits.req_info.vpn
  arb2.io.in(InArbPTWPort).bits.source := ptw.io.llptw.bits.req_info.source
  ptw.io.llptw.ready := arb2.io.in(InArbPTWPort).ready
  block_decoupled(missQueue.io.out, arb2.io.in(InArbMissQueuePort), !ptw.io.req.ready)

  arb2.io.in(InArbTlbPort).valid := arb1.io.out.valid
  arb2.io.in(InArbTlbPort).bits.vpn := arb1.io.out.bits.vpn
  arb2.io.in(InArbTlbPort).bits.source := arb1.io.chosen
  if (l2tlbParams.enablePrefetch) {
    val prefetch = Module(new L2TlbPrefetch())
    val recv = cache.io.resp
    // NOTE: a prefetch req does not generate another prefetch, and neither does a req from the miss queue
    // NOTE: a missed req generates a prefetch, and so does a hit on an entry that was itself prefetched
    prefetch.io.in.valid := recv.fire() && !from_pre(recv.bits.req_info.source) && (!recv.bits.hit ||
      recv.bits.prefetch) && recv.bits.isFirst
    prefetch.io.in.bits.vpn := recv.bits.req_info.vpn
    prefetch.io.sfence := sfence_dup(0)
    prefetch.io.csr := csr_dup(0)
    arb2.io.in(InArbPrefetchPort) <> prefetch.io.out

    val isWriteL2TlbPrefetchTable = WireInit(Constantin.createRecord("isWriteL2TlbPrefetchTable" + p(XSCoreParamsKey).HartId.toString))
    val L2TlbPrefetchTable = ChiselDB.createTable("L2TlbPrefetch_hart" + p(XSCoreParamsKey).HartId.toString, new L2TlbPrefetchDB)
    val L2TlbPrefetchDB = Wire(new L2TlbPrefetchDB)
    L2TlbPrefetchDB.vpn := prefetch.io.out.bits.vpn
    L2TlbPrefetchTable.log(L2TlbPrefetchDB, isWriteL2TlbPrefetchTable.orR && prefetch.io.out.fire, "L2TlbPrefetch", clock, reset)
  }
  arb2.io.out.ready := cache.io.req.ready


  val mq_arb = Module(new Arbiter(new L2TlbInnerBundle, 2))
  mq_arb.io.in(0).valid := cache.io.resp.valid && !cache.io.resp.bits.hit &&
    (!cache.io.resp.bits.toFsm.l2Hit || cache.io.resp.bits.bypassed) &&
    !from_pre(cache.io.resp.bits.req_info.source) &&
    (cache.io.resp.bits.bypassed || cache.io.resp.bits.isFirst || !ptw.io.req.ready)
  mq_arb.io.in(0).bits := cache.io.resp.bits.req_info
  mq_arb.io.in(1) <> llptw.io.cache
  missQueue.io.in <> mq_arb.io.out
  missQueue.io.sfence := sfence_dup(6)
  missQueue.io.csr := csr_dup(5)

  blockmq.io.start := missQueue.io.out.fire
  blockmq.io.enable := ptw.io.req.fire()

  llptw.io.in.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && cache.io.resp.bits.toFsm.l2Hit && !cache.io.resp.bits.bypassed
  llptw.io.in.bits.req_info := cache.io.resp.bits.req_info
  llptw.io.in.bits.ppn := cache.io.resp.bits.toFsm.ppn
  llptw.io.sfence := sfence_dup(1)
  llptw.io.csr := csr_dup(1)

  cache.io.req.valid := arb2.io.out.valid
  cache.io.req.bits.req_info.vpn := arb2.io.out.bits.vpn
  cache.io.req.bits.req_info.source := arb2.io.out.bits.source
  cache.io.req.bits.isFirst := arb2.io.chosen =/= InArbMissQueuePort.U
  cache.io.req.bits.bypassed.map(_ := false.B)
  cache.io.sfence := sfence_dup(2)
  cache.io.csr := csr_dup(2)
  cache.io.sfence_dup.zip(sfence_dup.drop(2).take(4)).map(s => s._1 := s._2)
  cache.io.csr_dup.zip(csr_dup.drop(2).take(3)).map(c => c._1 := c._2)
  cache.io.resp.ready := Mux(cache.io.resp.bits.hit,
    outReady(cache.io.resp.bits.req_info.source, outArbCachePort),
    Mux(cache.io.resp.bits.toFsm.l2Hit && !cache.io.resp.bits.bypassed, llptw.io.in.ready,
    Mux(cache.io.resp.bits.bypassed || cache.io.resp.bits.isFirst, mq_arb.io.in(0).ready, mq_arb.io.in(0).ready || ptw.io.req.ready)))
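  // Routing summary for cache.io.resp, mirroring the Mux chain above:
  //   hit                        -> response arbiter toward the tlb
  //   miss && l2Hit && !bypassed -> llptw (only the leaf level is left to walk)
  //   miss && (bypassed|isFirst) -> miss queue
  //   miss, otherwise            -> ptw state machine, or the miss queue when the ptw is busy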

  // NOTE: missQueue req has higher priority
  ptw.io.req.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && !cache.io.resp.bits.toFsm.l2Hit &&
    !cache.io.resp.bits.bypassed &&
    !cache.io.resp.bits.isFirst
  ptw.io.req.bits.req_info := cache.io.resp.bits.req_info
  ptw.io.req.bits.l1Hit := cache.io.resp.bits.toFsm.l1Hit
  ptw.io.req.bits.ppn := cache.io.resp.bits.toFsm.ppn
  ptw.io.sfence := sfence_dup(7)
  ptw.io.csr := csr_dup(6)
  ptw.io.resp.ready := outReady(ptw.io.resp.bits.source, outArbFsmPort)

  // mem req
  def blockBytes_align(addr: UInt) = {
    Cat(addr(PAddrBits - 1, log2Up(l2tlbParams.blockBytes)), 0.U(log2Up(l2tlbParams.blockBytes).W))
  }
  def addr_low_from_vpn(vpn: UInt) = {
    vpn(log2Ceil(l2tlbParams.blockBytes)-log2Ceil(XLEN/8)-1, 0)
  }
  def addr_low_from_paddr(paddr: UInt) = {
    paddr(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
  }
  def from_missqueue(id: UInt) = {
    (id =/= l2tlbParams.llptwsize.U)
  }
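  // A worked example of the helpers above, assuming the typical config of
  // blockBytes = 64 and XLEN = 64, i.e. 8 PTEs per memory block:
  //   blockBytes_align(addr)     zeroes addr(5, 0), aligning to the 64B block
  //   addr_low_from_vpn(vpn)     takes vpn(2, 0), the PTE's slot among the 8
  //   addr_low_from_paddr(paddr) takes paddr(5, 3), the same slot on the PA side
  //   from_missqueue(id)         is true for LLPTW ids (0 until llptwsize);
  //                              the single PTW FSM uses id == llptwsize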
  val waiting_resp = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  val flush_latch = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  for (i <- waiting_resp.indices) {
    assert(!flush_latch(i) || waiting_resp(i)) // while flush_latch waits for a mem resp, waiting_resp must be true
  }

  val llptw_out = llptw.io.out
  val llptw_mem = llptw.io.mem
  llptw_mem.req_mask := waiting_resp.take(l2tlbParams.llptwsize)
  ptw.io.mem.mask := waiting_resp.last

  val mem_arb = Module(new Arbiter(new L2TlbMemReqBundle(), 2))
  mem_arb.io.in(0) <> ptw.io.mem.req
  mem_arb.io.in(1) <> llptw_mem.req
  mem_arb.io.out.ready := mem.a.ready && !flush

  // assertion: mem accesses to the same addr should not be issued twice in a row
  val last_resp_vpn = RegEnable(cache.io.refill.bits.req_info_dup(0).vpn, cache.io.refill.valid)
  val last_resp_level = RegEnable(cache.io.refill.bits.level_dup(0), cache.io.refill.valid)
  val last_resp_v = RegInit(false.B)
  val last_has_invalid = !Cat(cache.io.refill.bits.ptes.asTypeOf(Vec(blockBits/XLEN, UInt(XLEN.W))).map(a => a(0))).andR || cache.io.refill.bits.sel_pte_dup(0).asTypeOf(new PteBundle).isAf()
  when (cache.io.refill.valid) { last_resp_v := !last_has_invalid }
  when (flush) { last_resp_v := false.B }
  XSError(last_resp_v && cache.io.refill.valid &&
    (cache.io.refill.bits.req_info_dup(0).vpn === last_resp_vpn) &&
    (cache.io.refill.bits.level_dup(0) === last_resp_level),
    "l2tlb should not access mem at same addr for twice")
  // ATTENTION: this may assert wrongly when a level-2 PTE block has a valid last part
  // but an invalid current part, so that one more mem access happens.
  // If that happens, remove the assert.

  val req_addr_low = Reg(Vec(MemReqWidth, UInt((log2Up(l2tlbParams.blockBytes)-log2Up(XLEN/8)).W)))

  when (llptw.io.in.fire()) {
    // on LLPTW enqueue, record req_addr_low so the right part of the mem resp data can be selected
    req_addr_low(llptw_mem.enq_ptr) := addr_low_from_vpn(llptw.io.in.bits.req_info.vpn)
  }
  when (mem_arb.io.out.fire()) {
    req_addr_low(mem_arb.io.out.bits.id) := addr_low_from_paddr(mem_arb.io.out.bits.addr)
    waiting_resp(mem_arb.io.out.bits.id) := true.B
  }
  // mem read
  val memRead = edge.Get(
    fromSource = mem_arb.io.out.bits.id,
    // toAddress  = memAddr(log2Up(CacheLineSize / 2 / 8) - 1, 0),
    toAddress  = blockBytes_align(mem_arb.io.out.bits.addr),
    lgSize     = log2Up(l2tlbParams.blockBytes).U
  )._2
  mem.a.bits := memRead
  mem.a.valid := mem_arb.io.out.valid && !flush
  mem.a.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.PTW.id.U)
  mem.d.ready := true.B
  // mem -> data buffer
  val refill_data = Reg(Vec(blockBits / l1BusDataWidth, UInt(l1BusDataWidth.W)))
  val refill_helper = edge.firstlastHelper(mem.d.bits, mem.d.fire())
  val mem_resp_done = refill_helper._3
  val mem_resp_from_mq = from_missqueue(mem.d.bits.source)
  when (mem.d.valid) {
    assert(mem.d.bits.source <= l2tlbParams.llptwsize.U)
    refill_data(refill_helper._4) := mem.d.bits.data
  }
  // refill_data_tmp is a combinational fork of refill_data, available one cycle earlier
  val refill_data_tmp = WireInit(refill_data)
  refill_data_tmp(refill_helper._4) := mem.d.bits.data

  // save only one pte for each id
  // (the miss queue may not respond to the tlb with low latency; it should have the
  // highest priority, but that is difficult to design in the cache)
  val resp_pte = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize) { RegEnable(get_part(refill_data_tmp, req_addr_low(i)), mem_resp_done && !mem_resp_from_mq) }
    else { DataHoldBypass(get_part(refill_data, req_addr_low(i)), llptw_mem.buffer_it(i)) }
    // the llptw cannot use refill_data_tmp, because the enq bypass's result takes effect in the next cycle
  ))

  // save eight ptes for each id for the sector tlb
  // (the miss queue may not respond to the tlb with low latency; it should have the
  // highest priority, but that is difficult to design in the cache)
  val resp_pte_sector = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize) { RegEnable(refill_data_tmp, mem_resp_done && !mem_resp_from_mq) }
    else { DataHoldBypass(refill_data, llptw_mem.buffer_it(i)) }
    // the llptw cannot use refill_data_tmp, because the enq bypass's result takes effect in the next cycle
  ))
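  // To summarize the two buffers above: resp_pte holds a single 64-bit PTE per id
  // (consumed by the ptw FSM and the llptw replies), while resp_pte_sector holds
  // the whole refilled block of eight PTEs per id, so a merged sector response can
  // be built from contiguous entries in contiguous_pte_to_merge_ptwResp below.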

  // mem -> miss queue
  llptw_mem.resp.valid := mem_resp_done && mem_resp_from_mq
  llptw_mem.resp.bits.id := DataHoldBypass(mem.d.bits.source, mem.d.valid)
  // mem -> ptw
  ptw.io.mem.req.ready := mem.a.ready
  ptw.io.mem.resp.valid := mem_resp_done && !mem_resp_from_mq
  ptw.io.mem.resp.bits := resp_pte.last
  // mem -> cache
  val refill_from_mq = mem_resp_from_mq
  val refill_level = Mux(refill_from_mq, 2.U, RegEnable(ptw.io.refill.level, init = 0.U, ptw.io.mem.req.fire()))
  val refill_valid = mem_resp_done && !flush && !flush_latch(mem.d.bits.source)

  cache.io.refill.valid := RegNext(refill_valid, false.B)
  cache.io.refill.bits.ptes := refill_data.asUInt
  cache.io.refill.bits.req_info_dup.map(_ := RegEnable(Mux(refill_from_mq, llptw_mem.refill, ptw.io.refill.req_info), refill_valid))
  cache.io.refill.bits.level_dup.map(_ := RegEnable(refill_level, refill_valid))
  cache.io.refill.bits.levelOH(refill_level, refill_valid)
  cache.io.refill.bits.sel_pte_dup.map(_ := RegNext(sel_data(refill_data_tmp.asUInt, req_addr_low(mem.d.bits.source))))

  if (env.EnableDifftest) {
    val difftest_ptw_addr = RegInit(VecInit(Seq.fill(MemReqWidth)(0.U(PAddrBits.W))))
    when (mem.a.valid) {
      difftest_ptw_addr(mem.a.bits.source) := mem.a.bits.address
    }

    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := p(XSCoreParamsKey).HartId.asUInt
    difftest.index := 2.U
    difftest.valid := cache.io.refill.valid
    difftest.addr := difftest_ptw_addr(RegNext(mem.d.bits.source))
    difftest.data := refill_data.asTypeOf(difftest.data)
  }

  if (env.EnableDifftest) {
    for (i <- 0 until PtwWidth) {
      val difftest = DifftestModule(new DiffL2TLBEvent)
      difftest.coreid := p(XSCoreParamsKey).HartId.asUInt
      difftest.valid := io.tlb(i).resp.fire && !io.tlb(i).resp.bits.af
      difftest.index := i.U
      difftest.satp := io.csr.tlb.satp.ppn
      difftest.vpn := Cat(io.tlb(i).resp.bits.entry.tag, 0.U(sectortlbwidth.W))
      for (j <- 0 until tlbcontiguous) {
        difftest.ppn(j) := Cat(io.tlb(i).resp.bits.entry.ppn, io.tlb(i).resp.bits.ppn_low(j))
        difftest.valididx(j) := io.tlb(i).resp.bits.valididx(j)
      }
      difftest.perm := io.tlb(i).resp.bits.entry.perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      difftest.level := io.tlb(i).resp.bits.entry.level.getOrElse(0.U.asUInt)
      difftest.pf := io.tlb(i).resp.bits.pf
    }
  }

  // pmp
  pmp_check(0).req <> ptw.io.pmp.req
  ptw.io.pmp.resp <> pmp_check(0).resp
  pmp_check(1).req <> llptw.io.pmp.req
  llptw.io.pmp.resp <> pmp_check(1).resp

  llptw_out.ready := outReady(llptw_out.bits.req_info.source, outArbMqPort)

  // Timing: this path may need some optimization, or even one more cycle
  for (i <- 0 until PtwWidth) {
    mergeArb(i).in(outArbCachePort).valid := cache.io.resp.valid && cache.io.resp.bits.hit && cache.io.resp.bits.req_info.source===i.U
    mergeArb(i).in(outArbCachePort).bits := cache.io.resp.bits.toTlb
    mergeArb(i).in(outArbFsmPort).valid := ptw.io.resp.valid && ptw.io.resp.bits.source===i.U
    mergeArb(i).in(outArbFsmPort).bits := ptw.io.resp.bits.resp
    mergeArb(i).in(outArbMqPort).valid := llptw_out.valid && llptw_out.bits.req_info.source===i.U
    mergeArb(i).in(outArbMqPort).bits := contiguous_pte_to_merge_ptwResp(resp_pte_sector(llptw_out.bits.id).asUInt, llptw_out.bits.req_info.vpn, llptw_out.bits.af, true)
    mergeArb(i).out.ready := outArb(i).in(0).ready
  }

  for (i <- 0 until PtwWidth) {
    outArb(i).in(0).valid := mergeArb(i).out.valid
    outArb(i).in(0).bits := merge_ptwResp_to_sector_ptwResp(mergeArb(i).out.bits)
  }

  // io.tlb.map(_.resp) <> outArb.map(_.out)
  io.tlb.map(_.resp).zip(outArb.map(_.out)).map{
    case (resp, out) => resp <> out
  }

  // sfence
  when (flush) {
    for (i <- 0 until MemReqWidth) {
      when (waiting_resp(i)) {
        flush_latch(i) := true.B
      }
    }
  }
  // mem -> control signal
  // waiting_resp and flush_latch are reset when mem_resp_done
  when (mem_resp_done) {
    waiting_resp(mem.d.bits.source) := false.B
    flush_latch(mem.d.bits.source) := false.B
  }

  def block_decoupled[T <: Data](source: DecoupledIO[T], sink: DecoupledIO[T], block_signal: Bool) = {
    sink.valid   := source.valid && !block_signal
    source.ready := sink.ready   && !block_signal
    sink.bits    := source.bits
  }

  def get_part(data: Vec[UInt], index: UInt): UInt = {
    val inner_data = data.asTypeOf(Vec(data.getWidth / XLEN, UInt(XLEN.W)))
    inner_data(index)
  }

  def pte_to_ptwResp(pte: UInt, vpn: UInt, af: Bool, af_first: Boolean) : PtwResp = {
    val pte_in = pte.asTypeOf(new PteBundle())
    val ptw_resp = Wire(new PtwResp())
    ptw_resp.entry.ppn := pte_in.ppn
    ptw_resp.entry.level.map(_ := 2.U)
    ptw_resp.entry.perm.map(_ := pte_in.getPerm())
    ptw_resp.entry.tag := vpn
    ptw_resp.pf := (if (af_first) !af else true.B) && pte_in.isPf(2.U)
    ptw_resp.af := (if (!af_first) pte_in.isPf(2.U) else true.B) && (af || pte_in.isAf())
    ptw_resp.entry.v := !ptw_resp.pf
    ptw_resp.entry.prefetch := DontCare
    ptw_resp.entry.asid := satp.asid
    ptw_resp
  }

  // not_super means this is a normal (non-super) page
  // for a super page, valididx(i) is set all-true so that l1 tlb matching stays simple
  def contiguous_pte_to_merge_ptwResp(pte: UInt, vpn: UInt, af: Bool, af_first: Boolean, not_super: Boolean = true) : PtwMergeResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_merge_resp = Wire(new PtwMergeResp())
    for (i <- 0 until tlbcontiguous) {
      val pte_in = pte(64 * i + 63, 64 * i).asTypeOf(new PteBundle())
      val ptw_resp = Wire(new PtwMergeEntry(tagLen = sectorvpnLen, hasPerm = true, hasLevel = true))
      ptw_resp.ppn := pte_in.ppn(ppnLen - 1, sectortlbwidth)
      ptw_resp.ppn_low := pte_in.ppn(sectortlbwidth - 1, 0)
      ptw_resp.level.map(_ := 2.U)
      ptw_resp.perm.map(_ := pte_in.getPerm())
      ptw_resp.tag := vpn(vpnLen - 1, sectortlbwidth)
      ptw_resp.pf := (if (af_first) !af else true.B) && pte_in.isPf(2.U)
      ptw_resp.af := (if (!af_first) pte_in.isPf(2.U) else true.B) && (af || pte_in.isAf())
      ptw_resp.v := !ptw_resp.pf
      ptw_resp.prefetch := DontCare
      ptw_resp.asid := satp.asid
      ptw_merge_resp.entry(i) := ptw_resp
    }
    ptw_merge_resp.pteidx := UIntToOH(vpn(sectortlbwidth - 1, 0)).asBools
    ptw_merge_resp.not_super := not_super.B
    ptw_merge_resp
  }
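  // For example, assuming sectortlbwidth = 3 (matching tlbcontiguous = 8):
  // for vpn = 0x12345, vpn(2, 0) = 5 selects the requested PTE, so pteidx is the
  // one-hot vector b00100000, and all eight entries carry the shared sector tag
  // vpn(vpnLen - 1, 3) = 0x2468.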

  def merge_ptwResp_to_sector_ptwResp(pte: PtwMergeResp) : PtwSectorResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_sector_resp = Wire(new PtwSectorResp)
    ptw_sector_resp.entry.tag := pte.entry(OHToUInt(pte.pteidx)).tag
    ptw_sector_resp.entry.asid := pte.entry(OHToUInt(pte.pteidx)).asid
    ptw_sector_resp.entry.ppn := pte.entry(OHToUInt(pte.pteidx)).ppn
    ptw_sector_resp.entry.perm.map(_ := pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)))
    ptw_sector_resp.entry.level.map(_ := pte.entry(OHToUInt(pte.pteidx)).level.getOrElse(0.U(2.W)))
    ptw_sector_resp.entry.prefetch := pte.entry(OHToUInt(pte.pteidx)).prefetch
    ptw_sector_resp.entry.v := pte.entry(OHToUInt(pte.pteidx)).v
    ptw_sector_resp.af := pte.entry(OHToUInt(pte.pteidx)).af
    ptw_sector_resp.pf := pte.entry(OHToUInt(pte.pteidx)).pf
    ptw_sector_resp.addr_low := OHToUInt(pte.pteidx)
    ptw_sector_resp.pteidx := pte.pteidx
    for (i <- 0 until tlbcontiguous) {
      val ppn_equal = pte.entry(i).ppn === pte.entry(OHToUInt(pte.pteidx)).ppn
      val perm_equal = pte.entry(i).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt === pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      val v_equal = pte.entry(i).v === pte.entry(OHToUInt(pte.pteidx)).v
      val af_equal = pte.entry(i).af === pte.entry(OHToUInt(pte.pteidx)).af
      val pf_equal = pte.entry(i).pf === pte.entry(OHToUInt(pte.pteidx)).pf
      ptw_sector_resp.valididx(i) := (ppn_equal && perm_equal && v_equal && af_equal && pf_equal) || !pte.not_super
      ptw_sector_resp.ppn_low(i) := pte.entry(i).ppn_low
    }
    ptw_sector_resp.valididx(OHToUInt(pte.pteidx)) := true.B
    ptw_sector_resp
  }
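  // In other words, valididx(i) tells the l1 tlb which of the eight sector entries
  // it may also treat as valid: those whose ppn/perm/v/af/pf fields match the
  // requested entry (or all eight for a super page); the requested entry itself
  // is always marked valid.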

  def outReady(source: UInt, port: Int): Bool = {
    MuxLookup(source, true.B,
      (0 until PtwWidth).map(i => i.U -> mergeArb(i).in(port).ready))
  }

  // debug info
  for (i <- 0 until PtwWidth) {
    XSDebug(p"[io.tlb(${i.U})] ${io.tlb(i)}\n")
  }
  XSDebug(p"[sfence] ${io.sfence}\n")
  XSDebug(p"[io.csr.tlb] ${io.csr.tlb}\n")

  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"req_count${i}", io.tlb(i).req(0).fire())
    XSPerfAccumulate(s"req_blocked_count_${i}", io.tlb(i).req(0).valid && !io.tlb(i).req(0).ready)
  }
  XSPerfAccumulate(s"req_blocked_by_mq", arb1.io.out.valid && missQueue.io.out.valid)
  for (i <- 0 until (MemReqWidth + 1)) {
    XSPerfAccumulate(s"mem_req_util${i}", PopCount(waiting_resp) === i.U)
  }
  XSPerfAccumulate("mem_cycle", PopCount(waiting_resp) =/= 0.U)
  XSPerfAccumulate("mem_count", mem.a.fire())
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"llptw_ppn_af${i}", mergeArb(i).in(outArbMqPort).valid && mergeArb(i).in(outArbMqPort).bits.entry(OHToUInt(mergeArb(i).in(outArbMqPort).bits.pteidx)).af && !llptw_out.bits.af)
    XSPerfAccumulate(s"access_fault${i}", io.tlb(i).resp.fire && io.tlb(i).resp.bits.af)
  }

  // print configs
  println(s"${l2tlbParams.name}: one ptw, one llptw with size ${l2tlbParams.llptwsize}, miss queue size ${MissQueueSize}, l1 (fully-associative): ${l2tlbParams.l1Size}, l2: nSets ${l2tlbParams.l2nSets} nWays ${l2tlbParams.l2nWays}, l3: nSets ${l2tlbParams.l3nSets} nWays ${l2tlbParams.l3nWays}, blockBytes: ${l2tlbParams.blockBytes}")

  // time out assert
  for (i <- 0 until MemReqWidth) {
    TimeOutAssert(waiting_resp(i), timeOutThreshold, s"ptw mem resp time out wait_resp${i}")
    TimeOutAssert(flush_latch(i), timeOutThreshold, s"ptw mem resp time out flush_latch${i}")
  }


  val perfEvents = Seq(llptw, cache, ptw).flatMap(_.getPerfEvents)
  generatePerfEvent()

  val isWriteL1TlbTable = WireInit(Constantin.createRecord("isWriteL1TlbTable" + p(XSCoreParamsKey).HartId.toString))
  val L1TlbTable = ChiselDB.createTable("L1Tlb_hart" + p(XSCoreParamsKey).HartId.toString, new L1TlbDB)
  val ITlbReqDB, DTlbReqDB, ITlbRespDB, DTlbRespDB = Wire(new L1TlbDB)
  ITlbReqDB.vpn := io.tlb(0).req(0).bits.vpn
  DTlbReqDB.vpn := io.tlb(1).req(0).bits.vpn
  ITlbRespDB.vpn := io.tlb(0).resp.bits.entry.tag
  DTlbRespDB.vpn := io.tlb(1).resp.bits.entry.tag
  L1TlbTable.log(ITlbReqDB, isWriteL1TlbTable.orR && io.tlb(0).req(0).fire, "ITlbReq", clock, reset)
  L1TlbTable.log(DTlbReqDB, isWriteL1TlbTable.orR && io.tlb(1).req(0).fire, "DTlbReq", clock, reset)
  L1TlbTable.log(ITlbRespDB, isWriteL1TlbTable.orR && io.tlb(0).resp.fire, "ITlbResp", clock, reset)
  L1TlbTable.log(DTlbRespDB, isWriteL1TlbTable.orR && io.tlb(1).resp.fire, "DTlbResp", clock, reset)

  val isWritePageCacheTable = WireInit(Constantin.createRecord("isWritePageCacheTable" + p(XSCoreParamsKey).HartId.toString))
  val PageCacheTable = ChiselDB.createTable("PageCache_hart" + p(XSCoreParamsKey).HartId.toString, new PageCacheDB)
  val PageCacheDB = Wire(new PageCacheDB)
  PageCacheDB.vpn := Cat(cache.io.resp.bits.toTlb.entry(0).tag, OHToUInt(cache.io.resp.bits.toTlb.pteidx))
  PageCacheDB.source := cache.io.resp.bits.req_info.source
  PageCacheDB.bypassed := cache.io.resp.bits.bypassed
  PageCacheDB.is_first := cache.io.resp.bits.isFirst
  PageCacheDB.prefetched := cache.io.resp.bits.toTlb.entry(0).prefetch
  PageCacheDB.prefetch := cache.io.resp.bits.prefetch
  PageCacheDB.l2Hit := cache.io.resp.bits.toFsm.l2Hit
  PageCacheDB.l1Hit := cache.io.resp.bits.toFsm.l1Hit
  PageCacheDB.hit := cache.io.resp.bits.hit
  PageCacheTable.log(PageCacheDB, isWritePageCacheTable.orR && cache.io.resp.fire, "PageCache", clock, reset)

  val isWritePTWTable = WireInit(Constantin.createRecord("isWritePTWTable" + p(XSCoreParamsKey).HartId.toString))
  val PTWTable = ChiselDB.createTable("PTW_hart" + p(XSCoreParamsKey).HartId.toString, new PTWDB)
  val PTWReqDB, PTWRespDB, LLPTWReqDB, LLPTWRespDB = Wire(new PTWDB)
  PTWReqDB.vpn := ptw.io.req.bits.req_info.vpn
  PTWReqDB.source := ptw.io.req.bits.req_info.source
  PTWRespDB.vpn := ptw.io.refill.req_info.vpn
  PTWRespDB.source := ptw.io.refill.req_info.source
  LLPTWReqDB.vpn := llptw.io.in.bits.req_info.vpn
  LLPTWReqDB.source := llptw.io.in.bits.req_info.source
  LLPTWRespDB.vpn := llptw.io.mem.refill.vpn
  LLPTWRespDB.source := llptw.io.mem.refill.source
  PTWTable.log(PTWReqDB, isWritePTWTable.orR && ptw.io.req.fire, "PTWReq", clock, reset)
  PTWTable.log(PTWRespDB, isWritePTWTable.orR && ptw.io.mem.resp.fire, "PTWResp", clock, reset)
  PTWTable.log(LLPTWReqDB, isWritePTWTable.orR && llptw.io.in.fire, "LLPTWReq", clock, reset)
  PTWTable.log(LLPTWRespDB, isWritePTWTable.orR && llptw.io.mem.resp.fire, "LLPTWResp", clock, reset)

  val isWriteL2TlbMissQueueTable = WireInit(Constantin.createRecord("isWriteL2TlbMissQueueTable" + p(XSCoreParamsKey).HartId.toString))
  val L2TlbMissQueueTable = ChiselDB.createTable("L2TlbMissQueue_hart" + p(XSCoreParamsKey).HartId.toString, new L2TlbMissQueueDB)
  val L2TlbMissQueueInDB, L2TlbMissQueueOutDB = Wire(new L2TlbMissQueueDB)
  L2TlbMissQueueInDB.vpn := missQueue.io.in.bits.vpn
  L2TlbMissQueueOutDB.vpn := missQueue.io.out.bits.vpn
  L2TlbMissQueueTable.log(L2TlbMissQueueInDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.in.fire, "L2TlbMissQueueIn", clock, reset)
  L2TlbMissQueueTable.log(L2TlbMissQueueOutDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.out.fire, "L2TlbMissQueueOut", clock, reset)
}

/** BlockHelper: throttles the miss queue so it does not send too many reqs to the cache.
 *  Ports:
 *    enable: enables BlockHelper (asserted when the ptw accepts a req)
 *    start: asserted when the miss queue's out fires and blocking is needed
 *    block: blocks the miss queue's out
 *  Parameter:
 *    latency: the cache access latency of the last miss queue out
 */
class BlockHelper(latency: Int)(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val enable = Input(Bool())
    val start = Input(Bool())
    val block = Output(Bool())
  })

  val count = RegInit(0.U(log2Ceil(latency).W))
  val valid = RegInit(false.B)
  val work = RegInit(true.B)

  io.block := valid

  when (io.start && work) { valid := true.B }
  when (valid) { count := count + 1.U }
  when (count === (latency.U) || io.enable) {
    valid := false.B
    work := io.enable
    count := 0.U
  }
}
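// A sketch of the intended timing, as instantiated above (latency = 3,
// start := missQueue.io.out.fire, enable := ptw.io.req.fire): once start fires
// while work is set, block goes high until either `latency` cycles elapse or
// enable fires; if the window expires without enable, work clears and later
// starts are ignored until the next enable re-arms it.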

class PTEHelper() extends ExtModule {
  val clock  = IO(Input(Clock()))
  val enable = IO(Input(Bool()))
  val satp   = IO(Input(UInt(64.W)))
  val vpn    = IO(Input(UInt(64.W)))
  val pte    = IO(Output(UInt(64.W)))
  val level  = IO(Output(UInt(8.W)))
  val pf     = IO(Output(UInt(8.W)))
}

class PTWDelayN[T <: Data](gen: T, n: Int, flush: Bool) extends Module {
  val io = IO(new Bundle() {
    val in = Input(gen)
    val out = Output(gen)
    val ptwflush = Input(flush.cloneType)
  })
  val out = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  val t = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  out(0) := io.in
  if (n == 1) {
    io.out := out(0)
  } else {
    when (io.ptwflush) {
      for (i <- 0 until n) {
        t(i) := 0.U.asTypeOf(gen)
        out(i) := 0.U.asTypeOf(gen)
      }
      io.out := 0.U.asTypeOf(gen)
    } .otherwise {
      for (i <- 1 until n) {
        t(i-1) := out(i-1)
        out(i) := t(i-1)
      }
      io.out := out(n-1)
    }
  }
}

object PTWDelayN {
  def apply[T <: Data](in: T, n: Int, flush: Bool): T = {
    val delay = Module(new PTWDelayN(in.cloneType, n, flush))
    delay.io.in := in
    delay.io.ptwflush := flush
    delay.io.out
  }
}
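// Usage note (see FakePTW below): PTWDelayN(sig, n, flush) returns `sig` delayed
// through a register chain that is cleared on flush. Reading the code above:
// n = 1 inserts a single register (and ignores ptwflush), while each further
// stage passes through both t(i-1) and out(i), so the observed delay for n > 1
// is 2n - 1 cycles rather than n.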

class FakePTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new L2TLBIO)
  val flush = VecInit(Seq.fill(PtwWidth)(false.B))
  flush(0) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, itlbParams.fenceDelay)
  flush(1) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, ldtlbParams.fenceDelay)
  for (i <- 0 until PtwWidth) {
    val helper = Module(new PTEHelper())
    helper.clock := clock
    helper.satp := io.csr.tlb.satp.ppn

    if (coreParams.softPTWDelay == 1) {
      helper.enable := io.tlb(i).req(0).fire
      helper.vpn := io.tlb(i).req(0).bits.vpn
    } else {
      helper.enable := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay - 1, flush(i))
      helper.vpn := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay - 1, flush(i))
    }

    val pte = helper.pte.asTypeOf(new PteBundle)
    val level = helper.level
    val pf = helper.pf
    val empty = RegInit(true.B)
    when (io.tlb(i).req(0).fire) {
      empty := false.B
    } .elsewhen (io.tlb(i).resp.fire || flush(i)) {
      empty := true.B
    }

    io.tlb(i).req(0).ready := empty || io.tlb(i).resp.fire
    io.tlb(i).resp.valid := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay, flush(i))
    assert(!io.tlb(i).resp.valid || io.tlb(i).resp.ready)
    io.tlb(i).resp.bits.entry.tag := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay, flush(i))
    io.tlb(i).resp.bits.entry.ppn := pte.ppn
    io.tlb(i).resp.bits.entry.perm.map(_ := pte.getPerm())
    io.tlb(i).resp.bits.entry.level.map(_ := level)
    io.tlb(i).resp.bits.pf := pf
    io.tlb(i).resp.bits.af := DontCare // TODO: implement it
    io.tlb(i).resp.bits.entry.v := !pf
    io.tlb(i).resp.bits.entry.prefetch := DontCare
    io.tlb(i).resp.bits.entry.asid := io.csr.tlb.satp.asid
  }
}

class L2TLBWrapper()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false
  val useSoftPTW = coreParams.softPTW
  val node = if (!useSoftPTW) TLIdentityNode() else null
  val ptw = if (!useSoftPTW) LazyModule(new L2TLB()) else null
  if (!useSoftPTW) {
    node := ptw.node
  }

  lazy val module = new LazyModuleImp(this) with HasPerfEvents {
    val io = IO(new L2TLBIO)
    val perfEvents = if (useSoftPTW) {
      val fake_ptw = Module(new FakePTW())
      io <> fake_ptw.io
      Seq()
    } else {
      io <> ptw.module.io
      ptw.module.getPerfEvents
    }
    generatePerfEvent()
  }
}