/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
*
*
* Acknowledgement
*
* This implementation is inspired by several key papers:
* [1] Binh Pham, Viswanathan Vaidyanathan, Aamer Jaleel, and Abhishek Bhattacharjee. "[CoLT: Coalesced Large-Reach
* TLBs.](https://doi.org/10.1109/MICRO.2012.32)" 45th Annual IEEE/ACM International Symposium on Microarchitecture
* (MICRO). 2012.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.util.SRAMAnnotation
import xiangshan._
import utils._
import utility._
import xiangshan.backend.fu.{PMPChecker, PMPReqBundle, PMPConfig => XSPMPConfig}
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.fu.util.HasCSRConst
import freechips.rocketchip.rocket.PMPConfig

/** TLB module
  * supports block and non-block request IO at the same time
  * returns paddr at the next cycle, which then goes to the pmp/pma check
  * @param Width: the number of requestors
  * @param nRespDups: the number of duplicated response copies per port
  * @param Block: blocked or not, for each requestor port
  * @param q: TLB parameters, like entry number; each TLB has its own parameters
  * @param p: XiangShan parameters, like XLEN
  */

class TLB(Width: Int, nRespDups: Int = 1, Block: Seq[Boolean], q: TLBParameters)(implicit p: Parameters) extends TlbModule
  with HasCSRConst
  with HasPerfEvents
{
  val io = IO(new TlbIO(Width, nRespDups, q))

  val req = io.requestor.map(_.req)
  val resp = io.requestor.map(_.resp)
  val ptw = io.ptw
  val pmp = io.pmp
  val refill_to_mem = io.refill_to_mem
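
  // Illustrative connection of one requestor port (a sketch, not code from this
  // repo; `tlb`, `loadUnit` and its signal names are hypothetical). A requestor
  // drives req in cycle T and samples the translated result from resp in T+1:
  //   tlb.io.requestor(0).req.valid      := loadUnit.s0_valid
  //   tlb.io.requestor(0).req.bits.vaddr := loadUnit.s0_vaddr
  //   val s1_paddr = tlb.io.requestor(0).resp.bits.paddr(0) // one cycle after req
  //   val s1_miss  = tlb.io.requestor(0).resp.bits.miss     // replay or block on miss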

  /** Sfence.vma & Svinval
    * Sfence.vma will 1. flush old entries 2. flush inflight 3. flush pipe
    * Svinval will 1. flush old entries 2. flush inflight
    * So, Svinval will not flush the pipe, which means
    * it should not drop reqs from the pipe and should return the right resp
    */
  val sfence = DelayN(io.sfence, q.fenceDelay)
  val csr = io.csr
  val satp = DelayN(io.csr.satp, q.fenceDelay)
  val vsatp = DelayN(io.csr.vsatp, q.fenceDelay)
  val hgatp = DelayN(io.csr.hgatp, q.fenceDelay)
  val mPBMTE = DelayN(io.csr.mPBMTE, q.fenceDelay)
  val hPBMTE = DelayN(io.csr.hPBMTE, q.fenceDelay)

  val flush_mmu = DelayN(sfence.valid || csr.satp.changed || csr.vsatp.changed || csr.hgatp.changed, q.fenceDelay)
  val mmu_flush_pipe = DelayN(sfence.valid && sfence.bits.flushPipe, q.fenceDelay) // for svinval, won't flush pipe
  val flush_pipe = io.flushPipe
  val redirect = io.redirect
  val EffectiveVa = Wire(Vec(Width, UInt(XLEN.W)))
  val req_in = req
  val req_out = Reg(Vec(Width, new TlbReq))
  for (i <- 0 until Width) {
    when (req(i).fire) {
      req_out(i) := req(i).bits
      req_out(i).fullva := EffectiveVa(i)
    }
  }
  val req_out_v = (0 until Width).map(i => ValidHold(req_in(i).fire && !req_in(i).bits.kill, resp(i).fire, flush_pipe(i)))

  val isHyperInst = (0 until Width).map(i => req_out_v(i) && req_out(i).hyperinst)

  // ATTENTION: csr and flush from the backend are delayed. csr should not be later than flush,
  // because csr will influence tlb behavior.
  val ifetch = if (q.fetchi) true.B else false.B
  val mode_tmp = if (q.useDmode) csr.priv.dmode else csr.priv.imode
  val mode = (0 until Width).map(i => Mux(isHyperInst(i), csr.priv.spvp, mode_tmp))
  val virt_in = csr.priv.virt
  val virt_out = req.map(a => RegEnable(csr.priv.virt, a.fire))
  val sum = (0 until Width).map(i => Mux(virt_out(i) || isHyperInst(i), io.csr.priv.vsum, io.csr.priv.sum))
  val mxr = (0 until Width).map(i => Mux(virt_out(i) || isHyperInst(i), io.csr.priv.vmxr || io.csr.priv.mxr, io.csr.priv.mxr))
  val req_in_s2xlate = (0 until Width).map(i => MuxCase(noS2xlate, Seq(
    (!(virt_in || req_in(i).bits.hyperinst)) -> noS2xlate,
    (csr.vsatp.mode =/= 0.U && csr.hgatp.mode =/= 0.U) -> allStage,
    (csr.vsatp.mode === 0.U) -> onlyStage2,
    (csr.hgatp.mode === 0.U) -> onlyStage1
  )))
  val req_out_s2xlate = (0 until Width).map(i => MuxCase(noS2xlate, Seq(
    (!(virt_out(i) || isHyperInst(i))) -> noS2xlate,
    (csr.vsatp.mode =/= 0.U && csr.hgatp.mode =/= 0.U) -> allStage,
    (csr.vsatp.mode === 0.U) -> onlyStage2,
    (csr.hgatp.mode === 0.U) -> onlyStage1
  )))
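
  // Summary of the two MuxCase priority chains above (V = in virtualization
  // mode, or a hypervisor load/store instruction):
  //   !V                                        -> noS2xlate  (no second stage at all)
  //   V && vsatp.mode =/= 0 && hgatp.mode =/= 0 -> allStage   (VS-stage then G-stage)
  //   V && vsatp.mode === 0                     -> onlyStage2 (G-stage only)
  //   V && hgatp.mode === 0                     -> onlyStage1 (VS-stage only)
  // req_in_s2xlate uses the in-flight request, req_out_s2xlate the latched one.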

  val need_gpa = RegInit(false.B)
  val need_gpa_wire = WireInit(false.B)
  val need_gpa_robidx = Reg(new RobPtr)
  val need_gpa_vpn = Reg(UInt(vpnLen.W))
  val resp_gpa_gvpn = Reg(UInt(ptePPNLen.W))
  val resp_gpa_refill = RegInit(false.B)
  val resp_s1_level = RegInit(0.U(log2Up(Level + 1).W))
  val resp_s1_isLeaf = RegInit(false.B)
  val resp_s1_isFakePte = RegInit(false.B)
  val hasGpf = Wire(Vec(Width, Bool()))

  val Sv39Enable = satp.mode === 8.U
  val Sv48Enable = satp.mode === 9.U
  val Sv39x4Enable = vsatp.mode === 8.U || hgatp.mode === 8.U
  val Sv48x4Enable = vsatp.mode === 9.U || hgatp.mode === 9.U
  val vmEnable = (0 until Width).map(i => !(isHyperInst(i) || virt_out(i)) && (
    if (EnbaleTlbDebug) (Sv39Enable || Sv48Enable)
    else (Sv39Enable || Sv48Enable) && (mode(i) < ModeM))
  )
  val s2xlateEnable = (0 until Width).map(i => (isHyperInst(i) || virt_out(i)) && (Sv39x4Enable || Sv48x4Enable) && (mode(i) < ModeM))
  val portTranslateEnable = (0 until Width).map(i => (vmEnable(i) || s2xlateEnable(i)) && RegEnable(!req(i).bits.no_translate, req(i).valid))

  // pre fault: check for faults before really doing the translation
  val prepf = WireInit(VecInit(Seq.fill(Width)(false.B)))
  val pregpf = WireInit(VecInit(Seq.fill(Width)(false.B)))
  val preaf = WireInit(VecInit(Seq.fill(Width)(false.B)))
  val premode = (0 until Width).map(i => Mux(req_in(i).bits.hyperinst, csr.priv.spvp, mode_tmp))
  for (i <- 0 until Width) {
    resp(i).bits.fullva := RegEnable(EffectiveVa(i), req(i).valid)
  }
  val prevmEnable = (0 until Width).map(i => !(virt_in || req_in(i).bits.hyperinst) && (
    if (EnbaleTlbDebug) (Sv39Enable || Sv48Enable)
    else (Sv39Enable || Sv48Enable) && (premode(i) < ModeM))
  )
  val pres2xlateEnable = (0 until Width).map(i => (virt_in || req_in(i).bits.hyperinst) && (Sv39x4Enable || Sv48x4Enable) && (premode(i) < ModeM))

  (0 until Width).foreach { i =>

    val pmm = WireInit(0.U(2.W))

    when (ifetch || req(i).bits.hlvx) {
      pmm := 0.U
    } .elsewhen (premode(i) === ModeM) {
      pmm := csr.pmm.mseccfg
    } .elsewhen (!(virt_in || req_in(i).bits.hyperinst) && premode(i) === ModeS) {
      pmm := csr.pmm.menvcfg
    } .elsewhen ((virt_in || req_in(i).bits.hyperinst) && premode(i) === ModeS) {
      pmm := csr.pmm.henvcfg
    } .elsewhen (req_in(i).bits.hyperinst && csr.priv.imode === ModeU) {
      pmm := csr.pmm.hstatus
    } .elsewhen (premode(i) === ModeU) {
      pmm := csr.pmm.senvcfg
    }

    when (prevmEnable(i) || (pres2xlateEnable(i) && vsatp.mode =/= 0.U)) {
      when (pmm === PMLEN7) {
        EffectiveVa(i) := SignExt(req_in(i).bits.fullva(56, 0), XLEN)
      } .elsewhen (pmm === PMLEN16) {
        EffectiveVa(i) := SignExt(req_in(i).bits.fullva(47, 0), XLEN)
      } .otherwise {
        EffectiveVa(i) := req_in(i).bits.fullva
      }
    } .otherwise {
      when (pmm === PMLEN7) {
        EffectiveVa(i) := ZeroExt(req_in(i).bits.fullva(56, 0), XLEN)
      } .elsewhen (pmm === PMLEN16) {
        EffectiveVa(i) := ZeroExt(req_in(i).bits.fullva(47, 0), XLEN)
      } .otherwise {
        EffectiveVa(i) := req_in(i).bits.fullva
      }
    }

    val pf48 = SignExt(EffectiveVa(i)(47, 0), XLEN) =/= EffectiveVa(i)
    val pf39 = SignExt(EffectiveVa(i)(38, 0), XLEN) =/= EffectiveVa(i)
    val gpf48 = EffectiveVa(i)(XLEN - 1, 48 + 2) =/= 0.U
    val gpf39 = EffectiveVa(i)(XLEN - 1, 39 + 2) =/= 0.U
    val af = EffectiveVa(i)(XLEN - 1, PAddrBits) =/= 0.U
    when (req(i).valid && req(i).bits.checkfullva) {
      when (prevmEnable(i) || pres2xlateEnable(i)) {
        when (req_in_s2xlate(i) === onlyStage2) {
          when (Sv48x4Enable) {
            pregpf(i) := gpf48
          } .elsewhen (Sv39x4Enable) {
            pregpf(i) := gpf39
          }
        } .otherwise {
          when (Sv48Enable) {
            prepf(i) := pf48
          } .elsewhen (Sv39Enable) {
            prepf(i) := pf39
          }
        }
      } .otherwise {
        preaf(i) := af
      }
    }
  }

  // prevent ptw refill when: 1) it's a getGpa request; 2) l1tlb is in need_gpa state; 3) mmu is being flushed.
  val refill = ptw.resp.fire && !(ptw.resp.bits.getGpa) && !need_gpa && !need_gpa_wire && !flush_mmu
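
  // Worked example of the pointer-masking selection above (values illustrative):
  // with pmm === PMLEN16 and fullva = 0x1234_8000_0000_1000, only bits [47:0]
  // are kept. If translation applies (prevmEnable, or pres2xlateEnable with a
  // VS-stage), bit 47 is sign-extended: EffectiveVa = 0xFFFF_8000_0000_1000;
  // otherwise it is zero-extended: EffectiveVa = 0x0000_8000_0000_1000.
  // PMLEN7 does the same starting from bit 56.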

  refill_to_mem := DontCare
  val entries = Module(new TlbStorageWrapper(Width, q, nRespDups))
  entries.io.base_connect(sfence, csr, satp)
  if (q.outReplace) { io.replace <> entries.io.replace }
  for (i <- 0 until Width) {
    entries.io.r_req_apply(io.requestor(i).req.valid, get_pn(req_in(i).bits.vaddr), i, req_in_s2xlate(i))
    entries.io.w_apply(refill, ptw.resp.bits)
    // TODO: RegNext enable: req.valid
    resp(i).bits.debug.isFirstIssue := RegEnable(req(i).bits.debug.isFirstIssue, req(i).valid)
    resp(i).bits.debug.robIdx := RegEnable(req(i).bits.debug.robIdx, req(i).valid)
  }

  // read TLB, get hit/miss, paddr, perm bits
  val readResult = (0 until Width).map(TLBRead(_))
  val hitVec = readResult.map(_._1)
  val missVec = readResult.map(_._2)
  val pmp_addr = readResult.map(_._3)
  val perm = readResult.map(_._4)
  val g_perm = readResult.map(_._5)
  val pbmt = readResult.map(_._6)
  val g_pbmt = readResult.map(_._7)
  // check pmp using paddr (for timing optimization, use pmp_addr here)
  // check permission
  (0 until Width).foreach { i =>
    val noTranslateReg = RegNext(req(i).bits.no_translate)
    val addr = Mux(noTranslateReg, req(i).bits.pmp_addr, pmp_addr(i))
    pmp_check(addr, req_out(i).size, req_out(i).cmd, noTranslateReg, i)
    for (d <- 0 until nRespDups) {
      pbmt_check(i, d, pbmt(i)(d), g_pbmt(i)(d), req_out_s2xlate(i))
      perm_check(perm(i)(d), req_out(i).cmd, i, d, g_perm(i)(d), req_out(i).hlvx, req_out_s2xlate(i), prepf(i), pregpf(i), preaf(i))
    }
    hasGpf(i) := hitVec(i) && (resp(i).bits.excp(0).gpf.ld || resp(i).bits.excp(0).gpf.st || resp(i).bits.excp(0).gpf.instr)
  }

  // handle block or non-block io
  // for non-block io, just return the above result and send the miss to ptw
  // for block io, hold the request, send the miss to ptw,
  // and when ptw comes back, return the result
  (0 until Width) foreach { i =>
    if (Block(i)) handle_block(i)
    else handle_nonblock(i)
  }
  io.ptw.resp.ready := true.B

  /************************ main body above | method/log/perf below ****************************/
  def TLBRead(i: Int) = {
    val (e_hit, e_ppn, e_perm, e_g_perm, e_s2xlate, e_pbmt, e_g_pbmt) = entries.io.r_resp_apply(i)
    val (p_hit, p_ppn, p_pbmt, p_perm, p_gvpn, p_g_pbmt, p_g_perm, p_s2xlate, p_s1_level, p_s1_isLeaf, p_s1_isFakePte) = ptw_resp_bypass(get_pn(req_in(i).bits.vaddr), req_in_s2xlate(i))
    val enable = portTranslateEnable(i)
    val isOnlys2xlate = req_out_s2xlate(i) === onlyStage2
    val need_gpa_vpn_hit = need_gpa_vpn === get_pn(req_out(i).vaddr)
    val isitlb = TlbCmd.isExec(req_out(i).cmd)
    val isPrefetch = req_out(i).isPrefetch
    val currentRedirect = req_out(i).debug.robIdx.needFlush(redirect)
    val lastCycleRedirect = req_out(i).debug.robIdx.needFlush(RegNext(redirect))

    when (!isitlb && need_gpa_robidx.needFlush(redirect) || isitlb && flush_pipe(i)) {
      need_gpa := false.B
      resp_gpa_refill := false.B
      need_gpa_vpn := 0.U
    }.elsewhen (req_out_v(i) && !p_hit && !(resp_gpa_refill && need_gpa_vpn_hit) && !isOnlys2xlate && hasGpf(i) && need_gpa === false.B && !io.requestor(i).req_kill && !isPrefetch && !currentRedirect && !lastCycleRedirect) {
      need_gpa_wire := true.B
      need_gpa := true.B
      need_gpa_vpn := get_pn(req_out(i).vaddr)
      resp_gpa_refill := false.B
      need_gpa_robidx := req_out(i).debug.robIdx
    }.elsewhen (ptw.resp.fire && need_gpa && need_gpa_vpn === ptw.resp.bits.getVpn(need_gpa_vpn)) {
      resp_gpa_gvpn := Mux(ptw.resp.bits.s2xlate === onlyStage2, ptw.resp.bits.s2.entry.tag, ptw.resp.bits.s1.genGVPN(need_gpa_vpn))
      resp_s1_level := ptw.resp.bits.s1.entry.level.get
      resp_s1_isLeaf := ptw.resp.bits.s1.isLeaf()
      resp_s1_isFakePte := ptw.resp.bits.s1.isFakePte()
      resp_gpa_refill := true.B
    }

    when (req_out_v(i) && hasGpf(i) && resp_gpa_refill && need_gpa_vpn_hit) {
      need_gpa := false.B
    }
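
    // The need_gpa bookkeeping above, in brief: on a guest page fault whose
    // gpaddr is not yet available (no bypass hit, no refilled gvpn), latch the
    // vpn and robIdx and set need_gpa so the walk is replayed with getGpa; when
    // the matching ptw resp returns, capture gvpn/level/isLeaf/isFakePte and set
    // resp_gpa_refill; once the faulting request consumes the captured gpaddr
    // (or a redirect/flush arrives), need_gpa is cleared.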

    val hit = e_hit || p_hit
    val miss = (!hit && enable) || hasGpf(i) && !p_hit && !(resp_gpa_refill && need_gpa_vpn_hit) && !isOnlys2xlate && !isPrefetch && !lastCycleRedirect
    hit.suggestName(s"hit_read_${i}")
    miss.suggestName(s"miss_read_${i}")

    val vaddr = SignExt(req_out(i).vaddr, PAddrBits)
    resp(i).bits.miss := miss
    resp(i).bits.ptwBack := ptw.resp.fire
    resp(i).bits.memidx := RegEnable(req_in(i).bits.memidx, req_in(i).valid)
    resp(i).bits.fastMiss := !hit && enable

    val ppn = WireInit(VecInit(Seq.fill(nRespDups)(0.U(ppnLen.W))))
    val pbmt = WireInit(VecInit(Seq.fill(nRespDups)(0.U(ptePbmtLen.W))))
    val perm = WireInit(VecInit(Seq.fill(nRespDups)(0.U.asTypeOf(new TlbPermBundle))))
    val gvpn = WireInit(VecInit(Seq.fill(nRespDups)(0.U(ptePPNLen.W))))
    val level = WireInit(VecInit(Seq.fill(nRespDups)(0.U(log2Up(Level + 1).W))))
    val isLeaf = WireInit(VecInit(Seq.fill(nRespDups)(false.B)))
    val isFakePte = WireInit(VecInit(Seq.fill(nRespDups)(false.B)))
    val g_pbmt = WireInit(VecInit(Seq.fill(nRespDups)(0.U(ptePbmtLen.W))))
    val g_perm = WireInit(VecInit(Seq.fill(nRespDups)(0.U.asTypeOf(new TlbPermBundle))))
    val r_s2xlate = WireInit(VecInit(Seq.fill(nRespDups)(0.U(2.W))))
    for (d <- 0 until nRespDups) {
      ppn(d) := Mux(p_hit, p_ppn, e_ppn(d))
      pbmt(d) := Mux(p_hit, p_pbmt, e_pbmt(d))
      perm(d) := Mux(p_hit, p_perm, e_perm(d))
      gvpn(d) := Mux(p_hit, p_gvpn, resp_gpa_gvpn)
      level(d) := Mux(p_hit, p_s1_level, resp_s1_level)
      isLeaf(d) := Mux(p_hit, p_s1_isLeaf, resp_s1_isLeaf)
      isFakePte(d) := Mux(p_hit, p_s1_isFakePte, resp_s1_isFakePte)
      g_pbmt(d) := Mux(p_hit, p_g_pbmt, e_g_pbmt(d))
      g_perm(d) := Mux(p_hit, p_g_perm, e_g_perm(d))
      r_s2xlate(d) := Mux(p_hit, p_s2xlate, e_s2xlate(d))
      val paddr = Cat(ppn(d), get_off(req_out(i).vaddr))
      val vpn_idx = Mux1H(Seq(
        (isFakePte(d) && vsatp.mode === Sv39) -> 2.U,
        (isFakePte(d) && vsatp.mode === Sv48) -> 3.U,
        (!isFakePte(d)) -> (level(d) - 1.U),
      ))
      // We use `fullva` here when `isLeaf`, in order to cope with an unaligned load/store crossing a page boundary
      // for example, a `ld` instruction on address 0x81000ffb will be split into two loads
      // 1. ld 0x81000ff8. vaddr = 0x81000ff8, fullva = 0x81000ffb
      // 2. ld 0x81001000. vaddr = 0x81001000, fullva = 0x81000ffb
      // When load 1 triggers a guest page fault, we should use the offset of fullva when generating gpaddr,
      // and when load 2 triggers a guest page fault, we should just use the offset of vaddr (all zero).
      // Also, when onlyS2: if crosspage, gpaddr = vaddr (start address of the new page), else gpaddr = fullva (original vaddr)
      // By the way, the frontend handles cross-page instruction fetch by itself, so the TLB doesn't need to do anything extra.
      // Also, the fullva of the iTLB is not used and always zero. crossPageVaddr should never use fullva in the iTLB.
      val crossPageVaddr = Mux(isitlb || req_out(i).fullva(12) =/= vaddr(12), vaddr, req_out(i).fullva)
      val gpaddr_offset = Mux(isLeaf(d), get_off(crossPageVaddr), Cat(getVpnn(get_pn(crossPageVaddr), vpn_idx), 0.U(log2Up(XLEN/8).W)))
      val gpaddr = Cat(gvpn(d), gpaddr_offset)
      resp(i).bits.paddr(d) := Mux(enable, paddr, vaddr)
      resp(i).bits.gpaddr(d) := Mux(r_s2xlate(d) === onlyStage2, crossPageVaddr, gpaddr)
    }

    XSDebug(req_out_v(i), p"(${i.U}) hit:${hit} miss:${miss} ppn:${Hexadecimal(ppn(0))} perm:${perm(0)}\n")

    val pmp_paddr = resp(i).bits.paddr(0)

    (hit, miss, pmp_paddr, perm, g_perm, pbmt, g_pbmt)
  }

  def getVpnn(vpn: UInt, idx: UInt): UInt = {
    MuxLookup(idx, 0.U)(Seq(
      0.U -> vpn(vpnnLen - 1, 0),
      1.U -> vpn(vpnnLen * 2 - 1, vpnnLen),
      2.U -> vpn(vpnnLen * 3 - 1, vpnnLen * 2),
      3.U -> vpn(vpnnLen * 4 - 1, vpnnLen * 3))
    )
  }
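
  // e.g. with vpnnLen = 9 (the VPN slice width of Sv39/Sv48):
  //   getVpnn(vpn, 0) = vpn(8, 0),   getVpnn(vpn, 1) = vpn(17, 9),
  //   getVpnn(vpn, 2) = vpn(26, 18), getVpnn(vpn, 3) = vpn(35, 27)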

  def pmp_check(addr: UInt, size: UInt, cmd: UInt, noTranslate: Bool, idx: Int): Unit = {
    pmp(idx).valid := resp(idx).valid || noTranslate
    pmp(idx).bits.addr := addr
    pmp(idx).bits.size := size
    pmp(idx).bits.cmd := cmd
  }

  def pbmt_check(idx: Int, d: Int, pbmt: UInt, g_pbmt: UInt, s2xlate: UInt): Unit = {
    val onlyS1 = s2xlate === onlyStage1 || s2xlate === noS2xlate
    val pbmtRes = pbmt
    val gpbmtRes = g_pbmt
    val res = MuxLookup(s2xlate, 0.U)(Seq(
      onlyStage1 -> pbmtRes,
      onlyStage2 -> gpbmtRes,
      allStage -> Mux(pbmtRes =/= 0.U, pbmtRes, gpbmtRes),
      noS2xlate -> pbmtRes
    ))
    resp(idx).bits.pbmt(d) := Mux(portTranslateEnable(idx), res, 0.U)
  }
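
  // With the Svpbmt encodings (0 = PMA, 1 = NC, 2 = IO), the allStage rule above
  // lets a non-default VS-stage pbmt win, e.g. s1 = IO(2), s2 = NC(1) -> IO(2);
  // only when s1 = PMA(0) does the G-stage pbmt take effect: s1 = 0, s2 = 1 -> NC(1).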

  // for timing optimization, pmp check is divided into dynamic and static
  def perm_check(perm: TlbPermBundle, cmd: UInt, idx: Int, nDups: Int, g_perm: TlbPermBundle, hlvx: Bool, s2xlate: UInt, prepf: Bool = false.B, pregpf: Bool = false.B, preaf: Bool = false.B) = {
    // dynamic: superpage (or fully-associative reg entries) -> check pmp when translation is done
    // static: 4K pages (or sram entries) -> check pmp with pre-checked results
    val hasS2xlate = s2xlate =/= noS2xlate
    val onlyS1 = s2xlate === onlyStage1
    val onlyS2 = s2xlate === onlyStage2
    val allS2xlate = s2xlate === allStage
    // noS2xlate || onlyS1 -> perm.af
    // onlyS2 -> g_perm.af
    // allS2xlate -> perm.af || g_perm.af
    val af = (!onlyS2 && perm.af) || ((onlyS2 || allS2xlate) && g_perm.af)

    // Stage 1 perm check
    val pf = perm.pf
    val isLd = TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd)
    val isSt = TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd)
    val isInst = TlbCmd.isExec(cmd)
    val ldUpdate = !perm.a && isLd // update A/D through exception
    val stUpdate = (!perm.a || !perm.d) && isSt // update A/D through exception
    val instrUpdate = !perm.a && isInst // update A/D through exception
    val modeCheck = !(mode(idx) === ModeU && !perm.u || mode(idx) === ModeS && perm.u && (!sum(idx) || ifetch))
    val ldPermFail = !(modeCheck && Mux(hlvx, perm.x, perm.r || mxr(idx) && perm.x))
    val stPermFail = !(modeCheck && perm.w)
    val instrPermFail = !(modeCheck && perm.x)
    val ldPf = (ldPermFail || pf) && isLd
    val stPf = (stPermFail || pf) && isSt
    val instrPf = (instrPermFail || pf) && isInst
    val isFakePte = !perm.v && !perm.pf && !perm.af && !onlyS2
    val isNonLeaf = !(perm.r || perm.w || perm.x) && perm.v && !perm.pf && !perm.af
    val s1_valid = portTranslateEnable(idx) && !onlyS2

    // Stage 2 perm check
    val gpf = g_perm.pf
    val g_ldUpdate = !g_perm.a && isLd
    val g_stUpdate = (!g_perm.a || !g_perm.d) && isSt
    val g_instrUpdate = !g_perm.a && isInst
    val g_ldPermFail = !Mux(hlvx, g_perm.x, (g_perm.r || io.csr.priv.mxr && g_perm.x))
    val g_stPermFail = !g_perm.w
    val g_instrPermFail = !g_perm.x
    val ldGpf = (g_ldPermFail || gpf) && isLd
    val stGpf = (g_stPermFail || gpf) && isSt
    val instrGpf = (g_instrPermFail || gpf) && isInst
    val s2_valid = portTranslateEnable(idx) && (onlyS2 || allS2xlate)

    val fault_valid = s1_valid || s2_valid

    // pf and gpf can't happen simultaneously
    val hasPf = (ldPf || ldUpdate || stPf || stUpdate || instrPf || instrUpdate) && s1_valid && !af && !isFakePte && !isNonLeaf
    // Only the lsu needs the checks related to high-address truncation
    when (RegNext(prepf || pregpf || preaf)) {
      resp(idx).bits.isForVSnonLeafPTE := false.B
      resp(idx).bits.excp(nDups).pf.ld := RegNext(prepf) && isLd
      resp(idx).bits.excp(nDups).pf.st := RegNext(prepf) && isSt
      resp(idx).bits.excp(nDups).pf.instr := false.B

      resp(idx).bits.excp(nDups).gpf.ld := RegNext(pregpf) && isLd
      resp(idx).bits.excp(nDups).gpf.st := RegNext(pregpf) && isSt
      resp(idx).bits.excp(nDups).gpf.instr := false.B

      resp(idx).bits.excp(nDups).af.ld := RegNext(preaf) && TlbCmd.isRead(cmd)
      resp(idx).bits.excp(nDups).af.st := RegNext(preaf) && TlbCmd.isWrite(cmd)
      resp(idx).bits.excp(nDups).af.instr := false.B

      resp(idx).bits.excp(nDups).vaNeedExt := false.B
      // overwrite miss & gpaddr when an exception related to high-address truncation happens
      resp(idx).bits.miss := false.B
      resp(idx).bits.gpaddr(nDups) := req_out(idx).fullva
    } .otherwise {
      // isForVSnonLeafPTE is used only when a gpf happens and it is caused by a G-stage translation which supports VS-stage translation
      // it will be sent to the CSR in order to modify the m/htinst.
      // Ref: The RISC-V Instruction Set Manual: Volume II: Privileged Architecture - 19.6.3. Transformed Instruction or Pseudoinstruction for mtinst or htinst
      val isForVSnonLeafPTE = isNonLeaf || isFakePte
      resp(idx).bits.isForVSnonLeafPTE := isForVSnonLeafPTE
      resp(idx).bits.excp(nDups).pf.ld := (ldPf || ldUpdate) && s1_valid && !af && !isFakePte && !isNonLeaf
      resp(idx).bits.excp(nDups).pf.st := (stPf || stUpdate) && s1_valid && !af && !isFakePte && !isNonLeaf
      resp(idx).bits.excp(nDups).pf.instr := (instrPf || instrUpdate) && s1_valid && !af && !isFakePte && !isNonLeaf
      // NOTE: pf needs && with !af; a page fault has higher priority than an access fault,
      // but the ptw may also have an access fault; then af happens and the translation is wrong.
      // In this case, pf has lower priority than af.

      resp(idx).bits.excp(nDups).gpf.ld := (ldGpf || g_ldUpdate) && s2_valid && !af && !hasPf
      resp(idx).bits.excp(nDups).gpf.st := (stGpf || g_stUpdate) && s2_valid && !af && !hasPf
      resp(idx).bits.excp(nDups).gpf.instr := (instrGpf || g_instrUpdate) && s2_valid && !af && !hasPf

      resp(idx).bits.excp(nDups).af.ld := af && TlbCmd.isRead(cmd) && fault_valid
      resp(idx).bits.excp(nDups).af.st := af && TlbCmd.isWrite(cmd) && fault_valid
      resp(idx).bits.excp(nDups).af.instr := af && TlbCmd.isExec(cmd) && fault_valid

      resp(idx).bits.excp(nDups).vaNeedExt := true.B
    }

    resp(idx).bits.excp(nDups).isHyper := isHyperInst(idx)
  }
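
  // Net priority encoded above for a single request: af beats pf (the pf.* bits
  // require !af), and pf beats gpf (the gpf.* bits require !hasPf), i.e.
  // af > pf > gpf; the pre-checked high-address faults (prepf/pregpf/preaf)
  // override all of them.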

  def handle_nonblock(idx: Int): Unit = {
    io.requestor(idx).resp.valid := req_out_v(idx)
    io.requestor(idx).req.ready := io.requestor(idx).resp.ready // should always be true
    XSError(!io.requestor(idx).resp.ready, s"${q.name} port ${idx} is non-block, resp.ready must be true.B")

    val req_need_gpa = hasGpf(idx)
    val req_s2xlate = Wire(UInt(2.W))
    req_s2xlate := MuxCase(noS2xlate, Seq(
      (!(virt_out(idx) || req_out(idx).hyperinst)) -> noS2xlate,
      (csr.vsatp.mode =/= 0.U && csr.hgatp.mode =/= 0.U) -> allStage,
      (csr.vsatp.mode === 0.U) -> onlyStage2,
      (csr.hgatp.mode === 0.U || req_need_gpa) -> onlyStage1
    ))

    val ptw_just_back = ptw.resp.fire && req_s2xlate === ptw.resp.bits.s2xlate && ptw.resp.bits.hit(get_pn(req_out(idx).vaddr), io.csr.satp.asid, io.csr.vsatp.asid, io.csr.hgatp.vmid, true, false)
    // TODO: RegNext enable: ptw.resp.valid ? req.valid
    val ptw_resp_bits_reg = RegEnable(ptw.resp.bits, ptw.resp.valid)
    val ptw_already_back = GatedValidRegNext(ptw.resp.fire) && req_s2xlate === ptw_resp_bits_reg.s2xlate && ptw_resp_bits_reg.hit(get_pn(req_out(idx).vaddr), io.csr.satp.asid, io.csr.vsatp.asid, io.csr.hgatp.vmid, allType = true)
    val ptw_getGpa = req_need_gpa && hitVec(idx)
    val need_gpa_vpn_hit = need_gpa_vpn === get_pn(req_out(idx).vaddr)

    io.ptw.req(idx).valid := false.B
    io.tlbreplay(idx) := false.B

    when (req_out_v(idx) && missVec(idx)) {
      // NOTE: for a missed tlb request, either send a ptw request or ask for a replay
      when (ptw_just_back || ptw_already_back) {
        io.tlbreplay(idx) := true.B
      } .elsewhen (need_gpa && !need_gpa_vpn_hit && !resp_gpa_refill) {
        // do not send any unrelated ptw request while the l1tlb is in the need_gpa state
        io.tlbreplay(idx) := true.B
      } .otherwise {
        io.ptw.req(idx).valid := true.B
      }
    }

    when (io.requestor(idx).req_kill && GatedValidRegNext(io.requestor(idx).req.fire)) {
      io.ptw.req(idx).valid := false.B
      io.tlbreplay(idx) := true.B
    }

    io.ptw.req(idx).bits.vpn := get_pn(req_out(idx).vaddr)
    io.ptw.req(idx).bits.s2xlate := req_s2xlate
    io.ptw.req(idx).bits.getGpa := ptw_getGpa
    io.ptw.req(idx).bits.memidx := req_out(idx).memidx
  }
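
  // Decision above for a missed non-block request, in brief:
  //   matching ptw resp just returned / returned last cycle -> tlbreplay (it will hit on replay)
  //   l1tlb busy refetching a gpa for another vpn (need_gpa) -> tlbreplay (keep that walk undisturbed)
  //   otherwise                                              -> issue the ptw request
  // and a request killed right after firing never issues a walk; it is replayed.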

  def handle_block(idx: Int): Unit = {
    // three valids: 1. whether an entry exists; 2. whether it was sent to ptw; 3. unset resp.valid
    io.requestor(idx).req.ready := !req_out_v(idx) || io.requestor(idx).resp.fire
    // req_out_v marks whether there is an outstanding request; it may be long-latency, fixme

    // miss request entries
    val req_need_gpa = hasGpf(idx)
    val miss_req_vpn = get_pn(req_out(idx).vaddr)
    val miss_req_memidx = req_out(idx).memidx
    val miss_req_s2xlate = Wire(UInt(2.W))
    miss_req_s2xlate := MuxCase(noS2xlate, Seq(
      (!(virt_out(idx) || req_out(idx).hyperinst)) -> noS2xlate,
      (csr.vsatp.mode =/= 0.U && csr.hgatp.mode =/= 0.U) -> allStage,
      (csr.vsatp.mode === 0.U) -> onlyStage2,
      (csr.hgatp.mode === 0.U || req_need_gpa) -> onlyStage1
    ))
    val miss_req_s2xlate_reg = RegEnable(miss_req_s2xlate, io.ptw.req(idx).fire)
    val hasS2xlate = miss_req_s2xlate_reg =/= noS2xlate
    val onlyS2 = miss_req_s2xlate_reg === onlyStage2
    val hit_s1 = io.ptw.resp.bits.s1.hit(miss_req_vpn, Mux(hasS2xlate, io.csr.vsatp.asid, io.csr.satp.asid), io.csr.hgatp.vmid, allType = true, false, hasS2xlate)
    val hit_s2 = io.ptw.resp.bits.s2.hit(miss_req_vpn, io.csr.hgatp.vmid)
    val hit = Mux(onlyS2, hit_s2, hit_s1) && io.ptw.resp.valid && miss_req_s2xlate_reg === io.ptw.resp.bits.s2xlate

    val new_coming_valid = WireInit(false.B)
    new_coming_valid := req_in(idx).fire && !req_in(idx).bits.kill && !flush_pipe(idx)
    val new_coming = GatedValidRegNext(new_coming_valid)
    val miss_wire = new_coming && missVec(idx)
    val miss_v = ValidHoldBypass(miss_wire, resp(idx).fire, flush_pipe(idx))
    val miss_req_v = ValidHoldBypass(miss_wire || (miss_v && flush_mmu && !mmu_flush_pipe),
      io.ptw.req(idx).fire || resp(idx).fire, flush_pipe(idx))
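
    // miss_v holds "this request is still missing" until the resp fires;
    // miss_req_v additionally re-arms when an in-flight miss is invalidated by
    // an mmu flush (sfence / satp change) without a pipe flush, so the ptw
    // request is re-sent under the fresh translation context.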

    // when the ptw resp arrives, check whether it hits; reset miss_v; resp to lsu/ifu
    resp(idx).valid := req_out_v(idx) && !(miss_v && portTranslateEnable(idx))
    when (io.ptw.resp.fire && hit && req_out_v(idx) && portTranslateEnable(idx)) {
      val stage1 = io.ptw.resp.bits.s1
      val stage2 = io.ptw.resp.bits.s2
      val s2xlate = io.ptw.resp.bits.s2xlate
      resp(idx).valid := true.B
      resp(idx).bits.miss := false.B
      val s1_paddr = Cat(stage1.genPPN(get_pn(req_out(idx).vaddr)), get_off(req_out(idx).vaddr))
      val s2_paddr = Cat(stage2.genPPNS2(get_pn(req_out(idx).vaddr)), get_off(req_out(idx).vaddr))
      for (d <- 0 until nRespDups) {
        resp(idx).bits.paddr(d) := Mux(s2xlate =/= noS2xlate, s2_paddr, s1_paddr)
        resp(idx).bits.gpaddr(d) := s1_paddr
        pbmt_check(idx, d, io.ptw.resp.bits.s1.entry.pbmt, io.ptw.resp.bits.s2.entry.pbmt, s2xlate)
        perm_check(stage1, req_out(idx).cmd, idx, d, stage2, req_out(idx).hlvx, s2xlate)
      }
      pmp_check(resp(idx).bits.paddr(0), req_out(idx).size, req_out(idx).cmd, false.B, idx)

      // NOTE: the unfiltered req would be handled by the Repeater
    }
    assert(RegNext(!resp(idx).valid || resp(idx).ready, true.B), "when tlb resp is valid, ready must be true")
    assert(RegNext(req_out_v(idx) || !(miss_v || miss_req_v), true.B), "when not req_out_v, should not set miss_v/miss_req_v")

    val ptw_req = io.ptw.req(idx)
    ptw_req.valid := miss_req_v
    ptw_req.bits.vpn := miss_req_vpn
    ptw_req.bits.s2xlate := miss_req_s2xlate
    ptw_req.bits.getGpa := req_need_gpa && hitVec(idx)
    ptw_req.bits.memidx := miss_req_memidx

    io.tlbreplay(idx) := false.B

    // NOTE: when the pipe is flushed, the tlb should abandon the last req;
    // however, some outside modules like the icache don't care about flushPipe and still wait for the tlb resp,
    // so just make resp valid and raise a page fault to let the req go through; the pipe (ifu) will abandon it.
    if (!q.outsideRecvFlush) {
      when (req_out_v(idx) && flush_pipe(idx) && portTranslateEnable(idx)) {
        resp(idx).valid := true.B
        for (d <- 0 until nRespDups) {
          resp(idx).bits.pbmt(d) := 0.U
          resp(idx).bits.excp(d).pf.ld := true.B // sfence happened; raise pf so this addr is not used
          resp(idx).bits.excp(d).pf.st := true.B
          resp(idx).bits.excp(d).pf.instr := true.B
        }
      }
    }
  }

  // when the ptw resp arrives, the tlb entry at refill_idx may be forced to miss.
  // Bypass the ptw resp to check.
  def ptw_resp_bypass(vpn: UInt, s2xlate: UInt) = {
    // TODO: RegNext enable: ptw.resp.valid
    val hasS2xlate = s2xlate =/= noS2xlate
    val onlyS2 = s2xlate === onlyStage2
    val onlyS1 = s2xlate === onlyStage1
    val s2xlate_hit = s2xlate === ptw.resp.bits.s2xlate
    val resp_hit = ptw.resp.bits.hit(vpn, io.csr.satp.asid, io.csr.vsatp.asid, io.csr.hgatp.vmid, true, false)
    val p_hit = GatedValidRegNext(resp_hit && io.ptw.resp.fire && s2xlate_hit)
    val ppn_s1 = ptw.resp.bits.s1.genPPN(vpn)
    val gvpn = Mux(onlyS2, vpn, ppn_s1)
    val ppn_s2 = ptw.resp.bits.s2.genPPNS2(gvpn)
    val p_ppn = RegEnable(Mux(s2xlate === onlyStage2 || s2xlate === allStage, ppn_s2, ppn_s1), io.ptw.resp.fire)
    val p_pbmt = RegEnable(ptw.resp.bits.s1.entry.pbmt, io.ptw.resp.fire)
    val p_perm = RegEnable(ptwresp_to_tlbperm(ptw.resp.bits.s1), io.ptw.resp.fire)
    val p_gvpn = RegEnable(Mux(onlyS2, ptw.resp.bits.s2.entry.tag, ptw.resp.bits.s1.genGVPN(vpn)), io.ptw.resp.fire)
    val p_g_pbmt = RegEnable(ptw.resp.bits.s2.entry.pbmt, io.ptw.resp.fire)
    val p_g_perm = RegEnable(hptwresp_to_tlbperm(ptw.resp.bits.s2), io.ptw.resp.fire)
    val p_s2xlate = RegEnable(ptw.resp.bits.s2xlate, io.ptw.resp.fire)
    val p_s1_level = RegEnable(ptw.resp.bits.s1.entry.level.get, io.ptw.resp.fire)
    val p_s1_isLeaf = RegEnable(ptw.resp.bits.s1.isLeaf(), io.ptw.resp.fire)
    val p_s1_isFakePte = RegEnable(ptw.resp.bits.s1.isFakePte(), io.ptw.resp.fire)
    (p_hit, p_ppn, p_pbmt, p_perm, p_gvpn, p_g_pbmt, p_g_perm, p_s2xlate, p_s1_level, p_s1_isLeaf, p_s1_isFakePte)
  }
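
  // Note on timing: everything returned by ptw_resp_bypass is registered on
  // io.ptw.resp.fire (p_hit via GatedValidRegNext), so the bypass describes the
  // response of the previous cycle, matching the one-cycle read latency of the
  // TLB storage it stands in for.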

  // perf event
  val result_ok = req_in.map(a => GatedValidRegNext(a.fire))
  val perfEvents =
    Seq(
      ("access", PopCount((0 until Width).map { i => if (Block(i)) io.requestor(i).req.fire else portTranslateEnable(i) && result_ok(i) })),
      ("miss ", PopCount((0 until Width).map { i => if (Block(i)) portTranslateEnable(i) && result_ok(i) && missVec(i) else ptw.req(i).fire })),
    )
  generatePerfEvent()

  // perf log
  for (i <- 0 until Width) {
    if (Block(i)) {
      XSPerfAccumulate(s"access${i}", result_ok(i) && portTranslateEnable(i))
      XSPerfAccumulate(s"miss${i}", result_ok(i) && missVec(i))
    } else {
      XSPerfAccumulate(s"first_access${i}", result_ok(i) && portTranslateEnable(i) && RegEnable(req(i).bits.debug.isFirstIssue, req(i).valid))
      XSPerfAccumulate(s"access${i}", result_ok(i) && portTranslateEnable(i))
      XSPerfAccumulate(s"first_miss${i}", result_ok(i) && portTranslateEnable(i) && missVec(i) && RegEnable(req(i).bits.debug.isFirstIssue, req(i).valid))
      XSPerfAccumulate(s"miss${i}", result_ok(i) && portTranslateEnable(i) && missVec(i))
    }
  }
  XSPerfAccumulate("ptw_resp_count", ptw.resp.fire)
  XSPerfAccumulate("ptw_resp_pf_count", ptw.resp.fire && ptw.resp.bits.s1.pf)

  // Log
  for (i <- 0 until Width) {
    XSDebug(req(i).valid, p"req(${i.U}): (${req(i).valid} ${req(i).ready}) ${req(i).bits}\n")
    XSDebug(resp(i).valid, p"resp(${i.U}): (${resp(i).valid} ${resp(i).ready}) ${resp(i).bits}\n")
  }

  XSDebug(io.sfence.valid, p"Sfence: ${io.sfence}\n")
  XSDebug(ParallelOR(req_out_v) || ptw.resp.valid, p"vmEnable:${vmEnable} hit:${Binary(VecInit(hitVec).asUInt)} miss:${Binary(VecInit(missVec).asUInt)}\n")
  for (i <- ptw.req.indices) {
    XSDebug(ptw.req(i).fire, p"L2TLB req:${ptw.req(i).bits}\n")
  }
  XSDebug(ptw.resp.valid, p"L2TLB resp:${ptw.resp.bits} (v:${ptw.resp.valid} r:${ptw.resp.ready})\n")

  println(s"${q.name}: page: ${q.NWays} ${q.Associative} ${q.Replacer.get}")

  if (env.EnableDifftest) {
    for (i <- 0 until Width) {
      val pf = io.requestor(i).resp.bits.excp(0).pf.instr || io.requestor(i).resp.bits.excp(0).pf.st || io.requestor(i).resp.bits.excp(0).pf.ld
      val gpf = io.requestor(i).resp.bits.excp(0).gpf.instr || io.requestor(i).resp.bits.excp(0).gpf.st || io.requestor(i).resp.bits.excp(0).gpf.ld
      val af = io.requestor(i).resp.bits.excp(0).af.instr || io.requestor(i).resp.bits.excp(0).af.st || io.requestor(i).resp.bits.excp(0).af.ld
      val difftest = DifftestModule(new DiffL1TLBEvent)
      difftest.coreid := io.hartId
      difftest.valid := RegNext(io.requestor(i).req.fire) && !io.requestor(i).req_kill && io.requestor(i).resp.fire && !io.requestor(i).resp.bits.miss && !pf && !af && !gpf && portTranslateEnable(i)
      if (!Seq("itlb", "ldtlb", "sttlb").contains(q.name)) {
        difftest.valid := false.B
      }
      difftest.index := TLBDiffId(p(XSCoreParamsKey).HartId).U
      difftest.vpn := RegEnable(get_pn(req_in(i).bits.vaddr), req_in(i).valid)
      difftest.ppn := get_pn(io.requestor(i).resp.bits.paddr(0))
      difftest.satp := Cat(io.csr.satp.mode, io.csr.satp.asid, io.csr.satp.ppn)
      difftest.vsatp := Cat(io.csr.vsatp.mode, io.csr.vsatp.asid, io.csr.vsatp.ppn)
      difftest.hgatp := Cat(io.csr.hgatp.mode, io.csr.hgatp.vmid, io.csr.hgatp.ppn)
      val req_need_gpa = gpf
      val req_s2xlate = Wire(UInt(2.W))
      req_s2xlate := MuxCase(noS2xlate, Seq(
        (!RegNext(virt_in || req_in(i).bits.hyperinst)) -> noS2xlate,
        (vsatp.mode =/= 0.U && hgatp.mode =/= 0.U) -> allStage,
        (vsatp.mode === 0.U) -> onlyStage2,
        (hgatp.mode === 0.U || req_need_gpa) -> onlyStage1
      ))
      difftest.s2xlate := req_s2xlate
    }
  }
}

object TLBDiffId {
  var i: Int = 0
  var lastHartId: Int = -1
  def apply(hartId: Int): Int = {
    if (lastHartId != hartId) {
      i = 0
      lastHartId = hartId
    }
    i += 1
    i - 1
  }
}

class TLBNonBlock(Width: Int, nRespDups: Int = 1, q: TLBParameters)(implicit p: Parameters) extends TLB(Width, nRespDups, Seq.fill(Width)(false), q)
class TLBBLock(Width: Int, nRespDups: Int = 1, q: TLBParameters)(implicit p: Parameters) extends TLB(Width, nRespDups, Seq.fill(Width)(true), q)

class TlbReplace(Width: Int, q: TLBParameters)(implicit p: Parameters) extends TlbModule {
  val io = IO(new TlbReplaceIO(Width, q))

  if (q.Associative == "fa") {
    val re = ReplacementPolicy.fromString(q.Replacer, q.NWays)
    re.access(io.page.access.map(_.touch_ways))
    io.page.refillIdx := re.way
  } else { // set-associative && plru
    val re = ReplacementPolicy.fromString(q.Replacer, q.NSets, q.NWays)
    re.access(io.page.access.map(_.sets), io.page.access.map(_.touch_ways))
    io.page.refillIdx := { if (q.NWays == 1) 0.U else re.way(io.page.chosen_set) }
  }
}
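
// A minimal elaboration sketch of the wrappers above (illustrative only; the
// parameter values, `someTlbParams`, and the connected units are assumptions,
// not a config from this repo):
//   val dtlb = Module(new TLBNonBlock(Width = 2, nRespDups = 2, q = someTlbParams))
//   dtlb.io.requestor(0) <> loadUnit.io.tlb  // non-block: miss is reported in resp
//                                            // and the requestor replays
//   val itlb = Module(new TLBBLock(Width = 1, q = someTlbParams))
//                                            // block: resp is held until ptw returns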