xref: /XiangShan/src/main/scala/xiangshan/cache/mmu/TLB.scala (revision 9473e04d5cab97eaf63add958b2392eec3d876a2)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.internal.naming.chiselName
import chisel3.util._
import difftest._
import freechips.rocketchip.util.SRAMAnnotation
import xiangshan._
import utils._
import utility._
import xiangshan.backend.fu.{PMPChecker, PMPReqBundle, PMPConfig => XSPMPConfig}
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.fu.util.HasCSRConst
import firrtl.FirrtlProtos.Firrtl.Module.ExternalModule.Parameter
import freechips.rocketchip.rocket.PMPConfig

/** TLB module
  * supports blocked and non-blocked request IO at the same time
  * returns paddr at the next cycle, which then goes for the pmp/pma check
  * @param Width: the number of requestors
  * @param Block: blocked or not, for each requestor port
  * @param q: TLB parameters, like entry number; each TLB has its own parameters
  * @param p: XiangShan parameters, like XLEN
  */

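// Editor's sketch (hypothetical, not in the original source): how one port of this TLB might be
// driven; `ldtlbParams` and the signal names are placeholders.
//
//   val dtlb = Module(new TLB(Width = 2, nRespDups = 2, Block = Seq(false, false), q = ldtlbParams))
//   dtlb.io.requestor(0).req.valid      := loadReqValid
//   dtlb.io.requestor(0).req.bits.vaddr := loadVaddr
//   // paddr and miss come back on the next cycle, then go to the PMP/PMA checker
//   val loadPaddr = dtlb.io.requestor(0).resp.bits.paddr(0)
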
@chiselName
class TLB(Width: Int, nRespDups: Int = 1, Block: Seq[Boolean], q: TLBParameters)(implicit p: Parameters) extends TlbModule
  with HasCSRConst
  with HasPerfEvents
{
  val io = IO(new TlbIO(Width, nRespDups, q))

  val req = io.requestor.map(_.req)
  val resp = io.requestor.map(_.resp)
  val ptw = io.ptw
  val pmp = io.pmp
  val refill_to_mem = io.refill_to_mem

  /** Sfence.vma & Svinval
    * Sfence.vma will 1. flush old entries 2. flush inflight requests 3. flush the pipe
    * Svinval will 1. flush old entries 2. flush inflight requests
    * So Svinval will not flush the pipe, which means
    * it must not drop requests from the pipe and must return correct responses
    */
  val sfence = DelayN(io.sfence, q.fenceDelay)
  val csr = io.csr
  val satp = DelayN(io.csr.satp, q.fenceDelay)
  val flush_mmu = DelayN(sfence.valid || csr.satp.changed, q.fenceDelay)
  val mmu_flush_pipe = DelayN(sfence.valid && sfence.bits.flushPipe, q.fenceDelay) // for svinval, won't flush pipe
  val flush_pipe = io.flushPipe
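
  // Editor's sketch of the resulting flush signals (illustrative, not in the original source):
  //   event                           entries  inflight  pipe     signal(s)
  //   sfence.vma (flushPipe = true)     yes      yes      yes     flush_mmu && mmu_flush_pipe
  //   svinval    (flushPipe = false)    yes      yes      no      flush_mmu only
  //   satp write (csr.satp.changed)     yes      yes      no      flush_mmu only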

  // ATTENTION: csr and flush from the backend are delayed. csr should not arrive later than flush,
  // because csr influences tlb behavior.
  val ifetch = if (q.fetchi) true.B else false.B
  val mode = if (q.useDmode) csr.priv.dmode else csr.priv.imode
  // val vmEnable = satp.mode === 8.U // && (mode < ModeM) // FIXME: fix me when boot xv6/linux...
  val vmEnable = if (EnbaleTlbDebug) (satp.mode === 8.U)
    else (satp.mode === 8.U && (mode < ModeM))
  val portTranslateEnable = (0 until Width).map(i => vmEnable && !req(i).bits.no_translate)
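
  // Editor's note (illustrative, assumes Sv39): translation applies only when satp.mode === 8.U
  // and the effective privilege mode is below M, e.g.
  //   M-mode access              -> portTranslateEnable(i) = false, vaddr passes through as paddr
  //   S/U-mode with satp.mode=8  -> portTranslateEnable(i) = true, unless req(i).bits.no_translate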

  val req_in = req
  val req_out = req.map(a => RegEnable(a.bits, a.fire()))
  val req_out_v = (0 until Width).map(i => ValidHold(req_in(i).fire && !req_in(i).bits.kill, resp(i).fire, flush_pipe(i)))

  val refill = ptw.resp.fire() && !flush_mmu && vmEnable
  refill_to_mem.valid := refill
  refill_to_mem.memidx := ptw.resp.bits.memidx

  val entries = Module(new TlbStorageWrapper(Width, q, nRespDups))
  entries.io.base_connect(sfence, csr, satp)
  if (q.outReplace) { io.replace <> entries.io.replace }
  for (i <- 0 until Width) {
    entries.io.r_req_apply(io.requestor(i).req.valid, get_pn(req_in(i).bits.vaddr), i)
    entries.io.w_apply(refill, ptw.resp.bits, io.ptw_replenish)
    resp(i).bits.debug.isFirstIssue := RegNext(req(i).bits.debug.isFirstIssue)
    resp(i).bits.debug.robIdx := RegNext(req(i).bits.debug.robIdx)
  }

  // read TLB, get hit/miss, paddr, perm bits
  val readResult = (0 until Width).map(TLBRead(_))
  val hitVec = readResult.map(_._1)
  val missVec = readResult.map(_._2)
  val pmp_addr = readResult.map(_._3)
  val static_pm = readResult.map(_._4)
  val static_pm_v = readResult.map(_._5)
  val perm = readResult.map(_._6)

  // check pmp using paddr (for timing optimization, use pmp_addr here)
  // check permission
  (0 until Width).foreach{i =>
    pmp_check(pmp_addr(i), req_out(i).size, req_out(i).cmd, i)
    for (d <- 0 until nRespDups) {
      perm_check(perm(i)(d), req_out(i).cmd, static_pm(i), static_pm_v(i), i, d)
    }
  }

  // handle block or non-block io
  // for non-block io, just return the above result and send the miss to the ptw
  // for block io, hold the request, send the miss to the ptw,
  //   and return the result when the ptw responds
  (0 until Width) foreach {i =>
    if (Block(i)) handle_block(i)
    else handle_nonblock(i)
  }
  io.ptw.resp.ready := true.B

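  // Editor's sketch (illustrative, not in the original source): the two handshake styles differ
  // in when resp fires on a miss.
  //   non-block port: resp always fires the cycle after req; on a miss, resp.bits.miss is set and
  //     the requestor must replay the access later, while a ptw req is sent in the background.
  //   block port: on a miss, resp.valid is held low; the request stays in req_out until the ptw
  //     response hits, and only then does resp fire with the translated paddr.
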
  /************************  main body above | method/log/perf below ****************************/
  def TLBRead(i: Int) = {
    val (e_hit, e_ppn, e_perm, e_super_hit, e_super_ppn, static_pm) = entries.io.r_resp_apply(i)
    val (p_hit, p_ppn, p_perm) = ptw_resp_bypass(get_pn(req_in(i).bits.vaddr))
    val enable = portTranslateEnable(i)

    val hit = e_hit || p_hit
    val miss = !hit && enable
    val fast_miss = !(e_super_hit || p_hit) && enable
    hit.suggestName(s"hit_read_${i}")
    miss.suggestName(s"miss_read_${i}")

    val vaddr = SignExt(req_out(i).vaddr, PAddrBits)
    resp(i).bits.miss := miss
    resp(i).bits.fast_miss := fast_miss
    resp(i).bits.ptwBack := ptw.resp.fire()
    resp(i).bits.memidx := RegNext(req_in(i).bits.memidx)

    val ppn = WireInit(VecInit(Seq.fill(nRespDups)(0.U(ppnLen.W))))
    val perm = WireInit(VecInit(Seq.fill(nRespDups)(0.U.asTypeOf(new TlbPermBundle))))

    for (d <- 0 until nRespDups) {
      ppn(d) := Mux(p_hit, p_ppn, e_ppn(d))
      perm(d) := Mux(p_hit, p_perm, e_perm(d))

      val paddr = Cat(ppn(d), get_off(req_out(i).vaddr))
      resp(i).bits.paddr(d) := Mux(enable, paddr, vaddr)
    }

    XSDebug(req_out_v(i), p"(${i.U}) hit:${hit} miss:${miss} ppn:${Hexadecimal(ppn(0))} perm:${perm(0)}\n")

    val pmp_paddr = Mux(enable, Cat(Mux(p_hit, p_ppn, e_super_ppn), get_off(req_out(i).vaddr)), vaddr)
    // pmp_paddr is functionally the same as paddr; it drops normal_ppn for timing optimization.
    // val pmp_paddr = Mux(enable, paddr, vaddr)
    val static_pm_valid = !(e_super_hit || p_hit) && enable && q.partialStaticPMP.B

    (hit, miss, pmp_paddr, static_pm, static_pm_valid, perm)
  }
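
  // Editor's worked example (illustrative, assumes Sv39 4K pages): on a hit with translation
  // enabled, the response paddr is just the selected ppn concatenated with the page offset:
  //   vaddr = 0x8000_1234  ->  get_off(vaddr) = 0x234,  paddr = Cat(ppn, 0x234)
  // With translation disabled, paddr falls back to SignExt(vaddr, PAddrBits) unchanged.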

  def pmp_check(addr: UInt, size: UInt, cmd: UInt, idx: Int): Unit = {
    pmp(idx).valid := resp(idx).valid
    pmp(idx).bits.addr := addr
    pmp(idx).bits.size := size
    pmp(idx).bits.cmd := cmd
  }

  def perm_check(perm: TlbPermBundle, cmd: UInt, spm: TlbPMBundle, spm_v: Bool, idx: Int, nDups: Int) = {
    // for timing optimization, the pmp check is divided into dynamic and static parts
    // dynamic: superpages (or fully-associative reg entries) -> check pmp when translation is done
    // static: 4K pages (or sram entries) -> check pmp with pre-checked results
    val af = perm.af
    val pf = perm.pf
    val ldUpdate = !perm.a && TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) // update A/D through exception
    val stUpdate = (!perm.a || !perm.d) && (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd)) // update A/D through exception
    val instrUpdate = !perm.a && TlbCmd.isExec(cmd) // update A/D through exception
    val modeCheck = !(mode === ModeU && !perm.u || mode === ModeS && perm.u && (!io.csr.priv.sum || ifetch))
    val ldPermFail = !(modeCheck && (perm.r || io.csr.priv.mxr && perm.x))
    val stPermFail = !(modeCheck && perm.w)
    val instrPermFail = !(modeCheck && perm.x)
    val ldPf = (ldPermFail || pf) && (TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd))
    val stPf = (stPermFail || pf) && (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd))
    val instrPf = (instrPermFail || pf) && TlbCmd.isExec(cmd)
    val fault_valid = portTranslateEnable(idx)
    resp(idx).bits.excp(nDups).pf.ld := (ldPf || ldUpdate) && fault_valid && !af
    resp(idx).bits.excp(nDups).pf.st := (stPf || stUpdate) && fault_valid && !af
    resp(idx).bits.excp(nDups).pf.instr := (instrPf || instrUpdate) && fault_valid && !af
    // NOTE: pf needs && with !af; a page fault has higher priority than an access fault.
    // But the ptw may also take an access fault; when af happens, the translation is wrong.
    // In that case, pf has lower priority than af.

    resp(idx).bits.excp(nDups).af.ld    := (af || (spm_v && !spm.r)) && TlbCmd.isRead(cmd) && fault_valid
    resp(idx).bits.excp(nDups).af.st    := (af || (spm_v && !spm.w)) && TlbCmd.isWrite(cmd) && fault_valid
    resp(idx).bits.excp(nDups).af.instr := (af || (spm_v && !spm.x)) && TlbCmd.isExec(cmd) && fault_valid
    resp(idx).bits.static_pm.valid := spm_v && fault_valid // the ld/st unit should use this mmio bit, not the result from pmp
    resp(idx).bits.static_pm.bits := !spm.c
  }
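
  // Editor's worked example (illustrative): a U-mode load (TlbCmd.isRead) to a page with
  // perm.u = false fails modeCheck, so ldPermFail and hence ldPf are raised (pf.ld), provided
  // translation is enabled and no access fault is pending. A store to a page with perm.d = false
  // raises stUpdate instead, so the A/D bits are updated by software via the page-fault handler.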

  def handle_nonblock(idx: Int): Unit = {
    io.requestor(idx).resp.valid := req_out_v(idx)
    io.requestor(idx).req.ready := io.requestor(idx).resp.ready // should always be true
    XSError(!io.requestor(idx).resp.ready, s"${q.name} port ${idx} is non-block, resp.ready must be true.B")

    val ptw_just_back = ptw.resp.fire && ptw.resp.bits.entry.hit(get_pn(req_out(idx).vaddr), asid = io.csr.satp.asid, allType = true)
    io.ptw.req(idx).valid := RegNext(req_out_v(idx) && missVec(idx) && !ptw_just_back, false.B) // TODO: remove the regnext, timing
    when (RegEnable(io.requestor(idx).req_kill, RegNext(io.requestor(idx).req.fire))) {
      io.ptw.req(idx).valid := false.B
    }
    io.ptw.req(idx).bits.vpn := RegNext(get_pn(req_out(idx).vaddr))
    io.ptw.req(idx).bits.memidx := RegNext(req_out(idx).memidx)
  }
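
  // Editor's note (illustrative): on a non-block port the ptw request is registered once more,
  // so a miss observed in the resp cycle (T+1) produces ptw.req.valid in cycle T+2, unless the
  // ptw response that just arrived already covers this vpn (ptw_just_back) or the request was killed.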

  def handle_block(idx: Int): Unit = {
    // three valids: 1. whether a miss entry exists; 2. whether it has been sent to the ptw; 3. resp.valid is unset while blocked
    io.requestor(idx).req.ready := !req_out_v(idx) || io.requestor(idx).resp.fire()
    // req_out_v indicates whether there is an in-flight request; may incur long latency. FIXME

    // miss request entries
    val miss_req_vpn = get_pn(req_out(idx).vaddr)
    val miss_req_memidx = req_out(idx).memidx
    val hit = io.ptw.resp.bits.entry.hit(miss_req_vpn, io.csr.satp.asid, allType = true) && io.ptw.resp.valid

    val new_coming = RegNext(req_in(idx).fire && !req_in(idx).bits.kill && !flush_pipe(idx), false.B)
    val miss_wire = new_coming && missVec(idx)
    val miss_v = ValidHoldBypass(miss_wire, resp(idx).fire(), flush_pipe(idx))
    val miss_req_v = ValidHoldBypass(miss_wire || (miss_v && flush_mmu && !mmu_flush_pipe),
      io.ptw.req(idx).fire() || resp(idx).fire(), flush_pipe(idx))

    // when the ptw responds, check whether it hits; reset miss_v, resp to lsu/ifu
    resp(idx).valid := req_out_v(idx) && !(miss_v && portTranslateEnable(idx))
    when (io.ptw.resp.fire() && hit && req_out_v(idx) && portTranslateEnable(idx)) {
      val pte = io.ptw.resp.bits
      resp(idx).valid := true.B
      resp(idx).bits.miss := false.B // for blocked tlb, this is useless
      for (d <- 0 until nRespDups) {
        resp(idx).bits.paddr(d) := Cat(pte.entry.genPPN(get_pn(req_out(idx).vaddr)), get_off(req_out(idx).vaddr))
        perm_check(pte, req_out(idx).cmd, 0.U.asTypeOf(new TlbPMBundle), false.B, idx, d)
      }
      pmp_check(resp(idx).bits.paddr(0), req_out(idx).size, req_out(idx).cmd, idx)

      // NOTE: the unfiltered req would be handled by the Repeater
    }
    assert(RegNext(!resp(idx).valid || resp(idx).ready, true.B), "when tlb resp is valid, ready must be true")
    assert(RegNext(req_out_v(idx) || !(miss_v || miss_req_v), true.B), "when not req_out_v, miss_v/miss_req_v must not be set")

    val ptw_req = io.ptw.req(idx)
    ptw_req.valid := miss_req_v
    ptw_req.bits.vpn := miss_req_vpn
    ptw_req.bits.memidx := miss_req_memidx

    // NOTE: when the pipe is flushed, the tlb should abandon its last req.
    // However, some outside modules like the icache do not care about flushPipe and keep waiting for the tlb resp.
    // Just raise resp.valid with a page fault so the request goes through; the pipe (ifu) will abandon it.
    if (!q.outsideRecvFlush) {
      when (req_out_v(idx) && flush_pipe(idx) && portTranslateEnable(idx)) {
        resp(idx).valid := true.B
        for (d <- 0 until nRespDups) {
          resp(idx).bits.excp(d).pf.ld := true.B // sfence happened, raise pf so this addr will not be used
          resp(idx).bits.excp(d).pf.st := true.B
          resp(idx).bits.excp(d).pf.instr := true.B
        }
      }
    }
  }
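
  // Editor's sketch of the blocked-port miss lifecycle (illustrative):
  //   miss_wire  : pulses in the cycle a newly arrived request misses
  //   miss_v     : stays high until resp fires (the request is being blocked)
  //   miss_req_v : stays high until the ptw request (or resp) fires; re-armed when flush_mmu
  //                invalidates the inflight translation without flushing the pipe (svinval)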

  // when the ptw responds, the tlb entry at refill_idx may be forcibly set to miss.
  // Bypass the ptw resp to check.
  def ptw_resp_bypass(vpn: UInt) = {
    val p_hit = RegNext(ptw.resp.bits.entry.hit(vpn, io.csr.satp.asid, allType = true) && io.ptw.resp.fire)
    val p_ppn = RegEnable(ptw.resp.bits.entry.genPPN(vpn), io.ptw.resp.fire)
    val p_perm = RegEnable(ptwresp_to_tlbperm(ptw.resp.bits), io.ptw.resp.fire)
    (p_hit, p_ppn, p_perm)
  }
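
  // Editor's note (illustrative): a refill may replace the very entry being read in the same
  // cycle, so the just-returned ptw response is registered and compared against the current vpn;
  // on a match (p_hit), ppn/perm come from the bypass instead of the (possibly stale) entries.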

  // assert
  for(i <- 0 until Width) {
    TimeOutAssert(req_out_v(i) && !resp(i).valid, timeOutThreshold, s"${q.name} port ${i}: no resp valid for a long time.")
  }

  // perf event
  val result_ok = req_in.map(a => RegNext(a.fire()))
  val perfEvents =
    Seq(
      ("access", PopCount((0 until Width).map{i => if (Block(i)) io.requestor(i).req.fire() else portTranslateEnable(i) && result_ok(i) })),
      ("miss  ", PopCount((0 until Width).map{i => if (Block(i)) portTranslateEnable(i) && result_ok(i) && missVec(i) else ptw.req(i).fire() })),
    )
  generatePerfEvent()

  // perf log
  for (i <- 0 until Width) {
    if (Block(i)) {
      XSPerfAccumulate(s"access${i}", result_ok(i) && portTranslateEnable(i))
      XSPerfAccumulate(s"miss${i}", result_ok(i) && missVec(i))
    } else {
      XSPerfAccumulate(s"first_access${i}", result_ok(i) && portTranslateEnable(i) && RegNext(req(i).bits.debug.isFirstIssue))
      XSPerfAccumulate(s"access${i}", result_ok(i) && portTranslateEnable(i))
      XSPerfAccumulate(s"first_miss${i}", result_ok(i) && portTranslateEnable(i) && missVec(i) && RegNext(req(i).bits.debug.isFirstIssue))
      XSPerfAccumulate(s"miss${i}", result_ok(i) && portTranslateEnable(i) && missVec(i))
    }
  }
  XSPerfAccumulate("ptw_resp_count", ptw.resp.fire())
  XSPerfAccumulate("ptw_resp_pf_count", ptw.resp.fire() && ptw.resp.bits.pf)

  // Log
  for(i <- 0 until Width) {
    XSDebug(req(i).valid, p"req(${i.U}): (${req(i).valid} ${req(i).ready}) ${req(i).bits}\n")
    XSDebug(resp(i).valid, p"resp(${i.U}): (${resp(i).valid} ${resp(i).ready}) ${resp(i).bits}\n")
  }

  XSDebug(io.sfence.valid, p"Sfence: ${io.sfence}\n")
  XSDebug(ParallelOR(req_out_v) || ptw.resp.valid, p"vmEnable:${vmEnable} hit:${Binary(VecInit(hitVec).asUInt)} miss:${Binary(VecInit(missVec).asUInt)}\n")
  for (i <- ptw.req.indices) {
    XSDebug(ptw.req(i).fire(), p"L2TLB req:${ptw.req(i).bits}\n")
  }
  XSDebug(ptw.resp.valid, p"L2TLB resp:${ptw.resp.bits} (v:${ptw.resp.valid} r:${ptw.resp.ready})\n")

  println(s"${q.name}: normal page: ${q.normalNWays} ${q.normalAssociative} ${q.normalReplacer.get} super page: ${q.superNWays} ${q.superAssociative} ${q.superReplacer.get}")

  if (env.EnableDifftest) {
    val l1tlbid = Wire(UInt(2.W))
    if (q.name == "itlb") {
      l1tlbid := 0.U
    } else if (q.name == "ldtlb") {
      l1tlbid := 1.U
    } else if (q.name == "sttlb") {
      l1tlbid := 2.U
    } else {
      l1tlbid := 3.U
    }

    for (i <- 0 until Width) {
      val pf = io.requestor(i).resp.bits.excp(0).pf.instr || io.requestor(i).resp.bits.excp(0).pf.st || io.requestor(i).resp.bits.excp(0).pf.ld
      val af = io.requestor(i).resp.bits.excp(0).af.instr || io.requestor(i).resp.bits.excp(0).af.st || io.requestor(i).resp.bits.excp(0).af.ld
      val difftest = Module(new DifftestL1TLBEvent)
      difftest.io.clock := clock
      difftest.io.coreid := p(XSCoreParamsKey).HartId.asUInt
      difftest.io.valid := l1tlbid =/= 3.U && RegNext(io.requestor(i).req.fire) && !RegNext(io.requestor(i).req_kill) && io.requestor(i).resp.fire && !io.requestor(i).resp.bits.miss && !pf && !af && portTranslateEnable(i)
      difftest.io.index := i.U
      difftest.io.l1tlbid := l1tlbid
      difftest.io.satp := io.csr.satp.ppn
      difftest.io.vpn := RegNext(get_pn(req_in(i).bits.vaddr))
      difftest.io.ppn := get_pn(io.requestor(i).resp.bits.paddr(0))
    }
  }

}

class TLBNonBlock(Width: Int, nRespDups: Int = 1, q: TLBParameters)(implicit p: Parameters) extends TLB(Width, nRespDups, Seq.fill(Width)(false), q)
class TLBBLock(Width: Int, nRespDups: Int = 1, q: TLBParameters)(implicit p: Parameters) extends TLB(Width, nRespDups, Seq.fill(Width)(true), q)

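// Editor's sketch (hypothetical parameters): the wrappers above fix Block for every port, e.g.
//   val itlb = Module(new TLBNonBlock(1, nRespDups = 1, itlbParams)) // all ports non-blocking
//   val dtlb = Module(new TLBBLock(2, nRespDups = 2, ldtlbParams))   // all ports blocking
// where itlbParams/ldtlbParams stand in for concrete TLBParameters instances.
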
class TlbReplace(Width: Int, q: TLBParameters)(implicit p: Parameters) extends TlbModule {
  val io = IO(new TlbReplaceIO(Width, q))

  if (q.normalAssociative == "fa") {
    val re = ReplacementPolicy.fromString(q.normalReplacer, q.normalNWays)
    re.access(io.normalPage.access.map(_.touch_ways))
    io.normalPage.refillIdx := re.way
  } else { // set-associative && plru
    val re = ReplacementPolicy.fromString(q.normalReplacer, q.normalNSets, q.normalNWays)
    re.access(io.normalPage.access.map(_.sets), io.normalPage.access.map(_.touch_ways))
    io.normalPage.refillIdx := { if (q.normalNWays == 1) 0.U else re.way(io.normalPage.chosen_set) }
  }

  if (q.superAssociative == "fa") {
    val re = ReplacementPolicy.fromString(q.superReplacer, q.superNWays)
    re.access(io.superPage.access.map(_.touch_ways))
    io.superPage.refillIdx := re.way
  } else { // set-associative && plru
    val re = ReplacementPolicy.fromString(q.superReplacer, q.superNSets, q.superNWays)
    re.access(io.superPage.access.map(_.sets), io.superPage.access.map(_.touch_ways))
    io.superPage.refillIdx := { if (q.superNWays == 1) 0.U else re.way(io.superPage.chosen_set) }
  }
}

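// Editor's note (illustrative): with q.normalAssociative == "fa" and a "plru" normalReplacer,
// the "fa" branch above builds a single PLRU tree over all normalNWays ways and touches it on
// every access; refillIdx is then re.way. The set-associative branch keeps replacement state
// per set and additionally selects the victim way with chosen_set.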