/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.experimental.ExtModule
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{IdRange, LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMP, PMPChecker, PMPReqBundle, PMPRespBundle}
import xiangshan.backend.fu.util.HasCSRConst
import difftest._

class L2TLB()(implicit p: Parameters) extends LazyModule with HasPtwConst {
  override def shouldBeInlined: Boolean = false

  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    clients = Seq(TLMasterParameters.v1(
      "ptw",
      sourceId = IdRange(0, MemReqWidth)
    )),
    requestFields = Seq(ReqSourceField())
  )))

  lazy val module = new L2TLBImp(this)
}

class L2TLBImp(outer: L2TLB)(implicit p: Parameters) extends PtwModule(outer) with HasCSRConst with HasPerfEvents {

  val (mem, edge) = outer.node.out.head

  val io = IO(new L2TLBIO)
  val difftestIO = IO(new Bundle() {
    val ptwResp = Output(Bool())
    val ptwAddr = Output(UInt(64.W))
    val ptwData = Output(Vec(4, UInt(64.W)))
  })

  /* Ptw processes multiple requests
   * Divide Ptw procedure into two stages: cache access; mem access if cache miss
   *             miss queue   itlb   dtlb
   *                 |          |      |
   *                 ------arbiter------
   *                           |
   *                  l1 - l2 - l3 - sp
   *                           |
   *       -------------------------------------------
   *  miss |                 queue                    | hit
   *  [][][][][][]                                    |
   *       |                                          |
   *  state machine accessing mem                     |
   *       |                                          |
   *       ---------------arbiter---------------------
   *              |                       |
   *            itlb                    dtlb
   */

  difftestIO <> DontCare

  val sfence_tmp = DelayN(io.sfence, 1)
  val csr_tmp = DelayN(io.csr.tlb, 1)
  val sfence_dup = Seq.fill(9)(RegNext(sfence_tmp))
  val csr_dup = Seq.fill(8)(RegNext(csr_tmp)) // TODO: add csr_modified?
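  // The duplicated registers above (nine sfence copies, eight csr copies) give
  // each consumer submodule its own registered copy of these wide control
  // signals, presumably to keep fan-out low; flush (below) aggregates sfence
  // with any satp/vsatp/hgatp change.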
  val satp = csr_dup(0).satp
  val vsatp = csr_dup(0).vsatp
  val hgatp = csr_dup(0).hgatp
  val priv = csr_dup(0).priv
  val flush = sfence_dup(0).valid || satp.changed || vsatp.changed || hgatp.changed

  val pmp = Module(new PMP())
  val pmp_check = VecInit(Seq.fill(3)(Module(new PMPChecker(lgMaxSize = 3, sameCycle = true)).io))
  pmp.io.distribute_csr := io.csr.distribute_csr
  pmp_check.foreach(_.check_env.apply(ModeS, pmp.io.pmp, pmp.io.pma))

  val missQueue = Module(new L2TlbMissQueue)
  val cache = Module(new PtwCache)
  val ptw = Module(new PTW)
  val hptw = Module(new HPTW)
  val llptw = Module(new LLPTW)
  val blockmq = Module(new BlockHelper(3))
  val arb1 = Module(new Arbiter(new PtwReq, PtwWidth))
  val arb2 = Module(new Arbiter(new L2TlbWithHptwIdBundle, ((if (l2tlbParams.enablePrefetch) 4 else 3) + (if (HasHExtension) 1 else 0))))
  val hptw_req_arb = Module(new Arbiter(new Bundle {
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val source = UInt(bSourceWidth.W)
    val gvpn = UInt(vpnLen.W)
  }, 2))
  val hptw_resp_arb = Module(new Arbiter(new Bundle {
    val resp = new HptwResp()
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
  }, 2))
  val outArb = (0 until PtwWidth).map(i => Module(new Arbiter(new Bundle {
    val s2xlate = UInt(2.W)
    val s1 = new PtwSectorResp()
    val s2 = new HptwResp()
  }, 1)).io)
  val mergeArb = (0 until PtwWidth).map(i => Module(new Arbiter(new Bundle {
    val s2xlate = UInt(2.W)
    val s1 = new PtwMergeResp()
    val s2 = new HptwResp()
  }, 3)).io)
  val outArbCachePort = 0
  val outArbFsmPort = 1
  val outArbMqPort = 2

  // hptw arb input port
  val InHptwArbPTWPort = 0
  val InHptwArbLLPTWPort = 1
  hptw_req_arb.io.in(InHptwArbPTWPort).valid := ptw.io.hptw.req.valid
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.gvpn := ptw.io.hptw.req.bits.gvpn
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.id := ptw.io.hptw.req.bits.id
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.source := ptw.io.hptw.req.bits.source
  ptw.io.hptw.req.ready := hptw_req_arb.io.in(InHptwArbPTWPort).ready

  hptw_req_arb.io.in(InHptwArbLLPTWPort).valid := llptw.io.hptw.req.valid
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.gvpn := llptw.io.hptw.req.bits.gvpn
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.id := llptw.io.hptw.req.bits.id
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.source := llptw.io.hptw.req.bits.source
  llptw.io.hptw.req.ready := hptw_req_arb.io.in(InHptwArbLLPTWPort).ready

  // arb2 input port
  val InArbHPTWPort = 0
  val InArbPTWPort = 1
  val InArbMissQueuePort = 2
  val InArbTlbPort = 3
  val InArbPrefetchPort = 4
  // NOTE: when the cache responds with a miss but the PTW does not accept the
  // request, the request is replayed through the miss queue
  arb1.io.in <> VecInit(io.tlb.map(_.req(0)))

  arb2.io.in(InArbPTWPort).valid := ptw.io.llptw.valid
  arb2.io.in(InArbPTWPort).bits.req_info := ptw.io.llptw.bits.req_info
  arb2.io.in(InArbPTWPort).bits.isHptwReq := false.B
  arb2.io.in(InArbPTWPort).bits.isLLptw := false.B
  arb2.io.in(InArbPTWPort).bits.hptwId := DontCare
  ptw.io.llptw.ready := arb2.io.in(InArbPTWPort).ready
  block_decoupled(missQueue.io.out, arb2.io.in(InArbMissQueuePort), Mux(missQueue.io.out.bits.isLLptw, !llptw.io.in.ready, !ptw.io.req.ready))
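  // arb1 (above) picks one request among the PtwWidth L1 TLB ports; its chosen
  // index is recorded below as the request's source tag, which outReady later
  // uses to route the response back to the issuing TLB.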
  arb2.io.in(InArbTlbPort).valid := arb1.io.out.valid
  arb2.io.in(InArbTlbPort).bits.req_info.vpn := arb1.io.out.bits.vpn
  arb2.io.in(InArbTlbPort).bits.req_info.s2xlate := arb1.io.out.bits.s2xlate
  arb2.io.in(InArbTlbPort).bits.req_info.source := arb1.io.chosen
  arb2.io.in(InArbTlbPort).bits.isHptwReq := false.B
  arb2.io.in(InArbTlbPort).bits.isLLptw := false.B
  arb2.io.in(InArbTlbPort).bits.hptwId := DontCare
  arb1.io.out.ready := arb2.io.in(InArbTlbPort).ready

  arb2.io.in(InArbHPTWPort).valid := hptw_req_arb.io.out.valid
  arb2.io.in(InArbHPTWPort).bits.req_info.vpn := hptw_req_arb.io.out.bits.gvpn
  arb2.io.in(InArbHPTWPort).bits.req_info.s2xlate := onlyStage2
  arb2.io.in(InArbHPTWPort).bits.req_info.source := hptw_req_arb.io.out.bits.source
  arb2.io.in(InArbHPTWPort).bits.isHptwReq := true.B
  arb2.io.in(InArbHPTWPort).bits.isLLptw := false.B
  arb2.io.in(InArbHPTWPort).bits.hptwId := hptw_req_arb.io.out.bits.id
  hptw_req_arb.io.out.ready := arb2.io.in(InArbHPTWPort).ready
  val hartId = p(XSCoreParamsKey).HartId
  if (l2tlbParams.enablePrefetch) {
    val prefetch = Module(new L2TlbPrefetch())
    val recv = cache.io.resp
    // NOTE: 1. a prefetch request does not generate another prefetch
    //       2. a request from the miss queue does not generate a prefetch
    // NOTE: 1. a miss generates a prefetch
    //       2. a hit on a prefetched entry generates a prefetch
    prefetch.io.in.valid := recv.fire && !from_pre(recv.bits.req_info.source) && (!recv.bits.hit ||
      recv.bits.prefetch) && recv.bits.isFirst
    prefetch.io.in.bits.vpn := recv.bits.req_info.vpn
    prefetch.io.sfence := sfence_dup(0)
    prefetch.io.csr := csr_dup(0)
    arb2.io.in(InArbPrefetchPort) <> prefetch.io.out

    val isWriteL2TlbPrefetchTable = Constantin.createRecord(s"isWriteL2TlbPrefetchTable$hartId")
    val L2TlbPrefetchTable = ChiselDB.createTable(s"L2TlbPrefetch_hart$hartId", new L2TlbPrefetchDB)
    val L2TlbPrefetchDB = Wire(new L2TlbPrefetchDB)
    L2TlbPrefetchDB.vpn := prefetch.io.out.bits.req_info.vpn
    L2TlbPrefetchTable.log(L2TlbPrefetchDB, isWriteL2TlbPrefetchTable.orR && prefetch.io.out.fire, "L2TlbPrefetch", clock, reset)
  }
  arb2.io.out.ready := cache.io.req.ready

  val mq_arb = Module(new Arbiter(new L2TlbWithHptwIdBundle, 2))
  mq_arb.io.in(0).valid := cache.io.resp.valid && !cache.io.resp.bits.hit &&
    !from_pre(cache.io.resp.bits.req_info.source) && !cache.io.resp.bits.isHptwReq && // hptw reqs are not sent to missqueue
    (cache.io.resp.bits.bypassed || (
      ((!cache.io.resp.bits.toFsm.l1Hit || cache.io.resp.bits.toFsm.stage1Hit) && !cache.io.resp.bits.isHptwReq && (cache.io.resp.bits.isFirst || !ptw.io.req.ready)) // send to ptw: it is the first request, or the ptw is busy
      || (cache.io.resp.bits.toFsm.l1Hit && !llptw.io.in.ready) // send to llptw: the llptw is full
    ))

  mq_arb.io.in(0).bits.req_info := cache.io.resp.bits.req_info
  mq_arb.io.in(0).bits.isHptwReq := false.B
  mq_arb.io.in(0).bits.hptwId := DontCare
  mq_arb.io.in(0).bits.isLLptw := cache.io.resp.bits.toFsm.l1Hit
  mq_arb.io.in(1).bits.req_info := llptw.io.cache.bits
  mq_arb.io.in(1).bits.isHptwReq := false.B
  mq_arb.io.in(1).bits.hptwId := DontCare
  mq_arb.io.in(1).bits.isLLptw := false.B
  mq_arb.io.in(1).valid := llptw.io.cache.valid
  llptw.io.cache.ready := mq_arb.io.in(1).ready
  missQueue.io.in <> mq_arb.io.out
  missQueue.io.sfence := sfence_dup(6)
  missQueue.io.csr := csr_dup(5)

  blockmq.io.start := missQueue.io.out.fire
  blockmq.io.enable := ptw.io.req.fire

  llptw.io.in.valid := cache.io.resp.valid &&
    !cache.io.resp.bits.hit &&
    cache.io.resp.bits.toFsm.l1Hit &&
    !cache.io.resp.bits.bypassed &&
    !cache.io.resp.bits.isHptwReq
  llptw.io.in.bits.req_info := cache.io.resp.bits.req_info
  llptw.io.in.bits.ppn := cache.io.resp.bits.toFsm.ppn
  llptw.io.sfence := sfence_dup(1)
  llptw.io.csr := csr_dup(1)
  val llptw_stage1 = Reg(Vec(l2tlbParams.llptwsize, new PtwMergeResp()))
  when (llptw.io.in.fire) {
    llptw_stage1(llptw.io.mem.enq_ptr) := cache.io.resp.bits.stage1
  }
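  // llptw_stage1 (above) snapshots the first-stage merged response at LLPTW
  // enqueue time, indexed by the LLPTW entry id; it is used in place of the
  // freshly fetched PTEs when llptw_out later reports first_s2xlate_fault.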
  cache.io.req.valid := arb2.io.out.valid
  cache.io.req.bits.req_info := arb2.io.out.bits.req_info
  cache.io.req.bits.isFirst := (arb2.io.chosen =/= InArbMissQueuePort.U && !arb2.io.out.bits.isHptwReq)
  cache.io.req.bits.isHptwReq := arb2.io.out.bits.isHptwReq
  cache.io.req.bits.hptwId := arb2.io.out.bits.hptwId
  cache.io.req.bits.bypassed.map(_ := false.B)
  cache.io.sfence := sfence_dup(2)
  cache.io.csr := csr_dup(2)
  cache.io.sfence_dup.zip(sfence_dup.drop(2).take(4)).map(s => s._1 := s._2)
  cache.io.csr_dup.zip(csr_dup.drop(2).take(3)).map(c => c._1 := c._2)
  cache.io.resp.ready := MuxCase(mq_arb.io.in(0).ready || ptw.io.req.ready, Seq(
    (!cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq) -> hptw.io.req.ready,
    (cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq) -> hptw_resp_arb.io.in(HptwRespArbCachePort).ready,
    cache.io.resp.bits.hit -> outReady(cache.io.resp.bits.req_info.source, outArbCachePort),
    (cache.io.resp.bits.toFsm.l1Hit && !cache.io.resp.bits.bypassed && llptw.io.in.ready) -> llptw.io.in.ready,
    (cache.io.resp.bits.bypassed || cache.io.resp.bits.isFirst) -> mq_arb.io.in(0).ready
  ))

  // NOTE: missQueue req has higher priority
  ptw.io.req.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && !cache.io.resp.bits.toFsm.l1Hit &&
    !cache.io.resp.bits.bypassed &&
    !cache.io.resp.bits.isFirst &&
    !cache.io.resp.bits.isHptwReq
  ptw.io.req.bits.req_info := cache.io.resp.bits.req_info
  if (EnableSv48) {
    ptw.io.req.bits.l3Hit.get := cache.io.resp.bits.toFsm.l3Hit.get
  }
  ptw.io.req.bits.l2Hit := cache.io.resp.bits.toFsm.l2Hit
  ptw.io.req.bits.ppn := cache.io.resp.bits.toFsm.ppn
  ptw.io.req.bits.stage1Hit := cache.io.resp.bits.toFsm.stage1Hit
  ptw.io.req.bits.stage1 := cache.io.resp.bits.stage1
  ptw.io.sfence := sfence_dup(7)
  ptw.io.csr := csr_dup(6)
  ptw.io.resp.ready := outReady(ptw.io.resp.bits.source, outArbFsmPort)

  hptw.io.req.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq
  hptw.io.req.bits.gvpn := cache.io.resp.bits.req_info.vpn
  hptw.io.req.bits.id := cache.io.resp.bits.toHptw.id
  hptw.io.req.bits.source := cache.io.resp.bits.req_info.source
  if (EnableSv48) {
    hptw.io.req.bits.l3Hit.get := cache.io.resp.bits.toHptw.l3Hit.get
  }
  hptw.io.req.bits.l2Hit := cache.io.resp.bits.toHptw.l2Hit
  hptw.io.req.bits.l1Hit := cache.io.resp.bits.toHptw.l1Hit
  hptw.io.req.bits.ppn := cache.io.resp.bits.toHptw.ppn
  hptw.io.req.bits.bypassed := cache.io.resp.bits.toHptw.bypassed
  hptw.io.sfence := sfence_dup(8)
  hptw.io.csr := csr_dup(7)
  // mem req
  def blockBytes_align(addr: UInt) = {
    Cat(addr(PAddrBits - 1, log2Up(l2tlbParams.blockBytes)), 0.U(log2Up(l2tlbParams.blockBytes).W))
  }
  def addr_low_from_vpn(vpn: UInt) = {
    vpn(log2Ceil(l2tlbParams.blockBytes) - log2Ceil(XLEN / 8) - 1, 0)
  }
  def addr_low_from_paddr(paddr: UInt) = {
    paddr(log2Up(l2tlbParams.blockBytes) - 1, log2Up(XLEN / 8))
  }
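  // Worked example for the helpers above, assuming the default blockBytes = 64
  // and XLEN = 64: a refill block holds 64/8 = 8 PTEs; blockBytes_align clears
  // addr(5,0), addr_low_from_paddr extracts paddr(5,3), and addr_low_from_vpn
  // extracts vpn(2,0) -- each selecting which of the eight PTEs in the block
  // the request refers to.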
  def from_llptw(id: UInt) = {
    id < l2tlbParams.llptwsize.U
  }
  def from_ptw(id: UInt) = {
    id === l2tlbParams.llptwsize.U
  }
  def from_hptw(id: UInt) = {
    id === l2tlbParams.llptwsize.U + 1.U
  }
  val waiting_resp = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  val flush_latch = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  val hptw_bypassed = RegInit(false.B)
  for (i <- waiting_resp.indices) {
    assert(!flush_latch(i) || waiting_resp(i)) // while flush_latch waits for a mem resp, waiting_resp must be true
  }

  val llptw_out = llptw.io.out
  val llptw_mem = llptw.io.mem
  llptw_mem.req_mask := waiting_resp.take(l2tlbParams.llptwsize)
  ptw.io.mem.mask := waiting_resp.apply(l2tlbParams.llptwsize)
  hptw.io.mem.mask := waiting_resp.apply(l2tlbParams.llptwsize + 1)

  val mem_arb = Module(new Arbiter(new L2TlbMemReqBundle(), 3))
  mem_arb.io.in(0) <> ptw.io.mem.req
  mem_arb.io.in(1) <> llptw_mem.req
  mem_arb.io.in(2) <> hptw.io.mem.req
  mem_arb.io.out.ready := mem.a.ready && !flush

  // assert: the same memory address should not be accessed twice in a row
  val last_resp_vpn = RegEnable(cache.io.refill.bits.req_info_dup(0).vpn, cache.io.refill.valid)
  val last_resp_s2xlate = RegEnable(cache.io.refill.bits.req_info_dup(0).s2xlate, cache.io.refill.valid)
  val last_resp_level = RegEnable(cache.io.refill.bits.level_dup(0), cache.io.refill.valid)
  val last_resp_v = RegInit(false.B)
  val last_has_invalid = !Cat(cache.io.refill.bits.ptes.asTypeOf(Vec(blockBits / XLEN, UInt(XLEN.W))).map(a => a(0))).andR || cache.io.refill.bits.sel_pte_dup(0).asTypeOf(new PteBundle).isAf()
  when (cache.io.refill.valid) { last_resp_v := !last_has_invalid }
  when (flush) { last_resp_v := false.B }
  XSError(last_resp_v && cache.io.refill.valid &&
    (cache.io.refill.bits.req_info_dup(0).vpn === last_resp_vpn) &&
    (cache.io.refill.bits.level_dup(0) === last_resp_level) &&
    (cache.io.refill.bits.req_info_dup(0).s2xlate === last_resp_s2xlate),
    "l2tlb should not access the same memory address twice")
  // ATTENTION: this may assert wrongly when a block of PTEs at l2 has its last
  // part valid but the current part invalid, so one more mem access happens.
  // If that happens, remove the assert.
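  // TileLink source ids (MemReqWidth of them) are partitioned as decoded by
  // from_llptw / from_ptw / from_hptw above: ids 0 .. llptwsize-1 belong to the
  // LLPTW entries, id llptwsize to the PTW state machine, and id llptwsize+1
  // to the HPTW. req_addr_low below remembers, per id, which PTE inside the
  // refill block the request points at.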
  val req_addr_low = Reg(Vec(MemReqWidth, UInt((log2Up(l2tlbParams.blockBytes) - log2Up(XLEN / 8)).W)))

  when (llptw.io.in.fire) {
    // on miss-queue enqueue, record req_addr_low so the right part of the mem resp data can be selected later
    req_addr_low(llptw_mem.enq_ptr) := addr_low_from_vpn(llptw.io.in.bits.req_info.vpn)
  }
  when (mem_arb.io.out.fire) {
    req_addr_low(mem_arb.io.out.bits.id) := addr_low_from_paddr(mem_arb.io.out.bits.addr)
    waiting_resp(mem_arb.io.out.bits.id) := true.B
    hptw_bypassed := from_hptw(mem_arb.io.out.bits.id) && mem_arb.io.out.bits.hptw_bypassed
  }
  // mem read
  val memRead = edge.Get(
    fromSource = mem_arb.io.out.bits.id,
    // toAddress = memAddr(log2Up(CacheLineSize / 2 / 8) - 1, 0),
    toAddress = blockBytes_align(mem_arb.io.out.bits.addr),
    lgSize = log2Up(l2tlbParams.blockBytes).U
  )._2
  mem.a.bits := memRead
  mem.a.valid := mem_arb.io.out.valid && !flush
  mem.a.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.PTW.id.U)
  mem.d.ready := true.B
  // mem -> data buffer
  val refill_data = Reg(Vec(blockBits / l1BusDataWidth, UInt(l1BusDataWidth.W)))
  val refill_helper = edge.firstlastHelper(mem.d.bits, mem.d.fire)
  val mem_resp_done = refill_helper._3
  val mem_resp_from_llptw = from_llptw(mem.d.bits.source)
  val mem_resp_from_ptw = from_ptw(mem.d.bits.source)
  val mem_resp_from_hptw = from_hptw(mem.d.bits.source)
  when (mem.d.valid) {
    assert(mem.d.bits.source < MemReqWidth.U)
    refill_data(refill_helper._4) := mem.d.bits.data
  }
  // refill_data_tmp is the wire fork of refill_data, but one cycle earlier
  val refill_data_tmp = WireInit(refill_data)
  refill_data_tmp(refill_helper._4) := mem.d.bits.data

  // save only one pte for each id
  // (the miss queue may not be able to respond to the tlb with low latency; it
  // should have the highest priority, but that is difficult to design in the cache)
  val resp_pte = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize + 1) { RegEnable(get_part(refill_data_tmp, req_addr_low(i)), mem_resp_done && mem_resp_from_hptw) }
    else if (i == l2tlbParams.llptwsize) { RegEnable(get_part(refill_data_tmp, req_addr_low(i)), mem_resp_done && mem_resp_from_ptw) }
    else { DataHoldBypass(get_part(refill_data, req_addr_low(i)), llptw_mem.buffer_it(i)) }
    // llptw cannot use refill_data_tmp, because the enqueue bypass's result only takes effect in the next cycle
  ))

  // save eight ptes for each id for the sector tlb
  // (the miss queue may not be able to respond to the tlb with low latency; it
  // should have the highest priority, but that is difficult to design in the cache)
  val resp_pte_sector = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize + 1) { RegEnable(refill_data_tmp, mem_resp_done && mem_resp_from_hptw) }
    else if (i == l2tlbParams.llptwsize) { RegEnable(refill_data_tmp, mem_resp_done && mem_resp_from_ptw) }
    else { DataHoldBypass(refill_data, llptw_mem.buffer_it(i)) }
    // llptw cannot use refill_data_tmp, because the enqueue bypass's result only takes effect in the next cycle
  ))

  // mem -> llptw
  llptw_mem.resp.valid := mem_resp_done && mem_resp_from_llptw
  llptw_mem.resp.bits.id := DataHoldBypass(mem.d.bits.source, mem.d.valid)
  llptw_mem.resp.bits.value := DataHoldBypass(refill_data_tmp.asUInt, mem.d.valid)
  // mem -> ptw
  ptw.io.mem.resp.valid := mem_resp_done && mem_resp_from_ptw
  ptw.io.mem.resp.bits := resp_pte.apply(l2tlbParams.llptwsize)
  // mem -> hptw
  hptw.io.mem.resp.valid := mem_resp_done && mem_resp_from_hptw
  hptw.io.mem.resp.bits := resp_pte.apply(l2tlbParams.llptwsize + 1)
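  // The cache refill below fires one cycle after the last beat arrives
  // (refill_valid is registered into cache.io.refill.valid), so req_info,
  // level, and the selected PTE are all captured with RegEnable on
  // refill_valid to stay aligned with the data.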
  // mem -> cache
  val refill_from_llptw = mem_resp_from_llptw
  val refill_from_ptw = mem_resp_from_ptw
  val refill_from_hptw = mem_resp_from_hptw
  val refill_level = Mux(refill_from_llptw, 0.U, Mux(refill_from_ptw, RegEnable(ptw.io.refill.level, 0.U, ptw.io.mem.req.fire), RegEnable(hptw.io.refill.level, 0.U, hptw.io.mem.req.fire)))
  val refill_valid = mem_resp_done && !flush && !flush_latch(mem.d.bits.source) && !hptw_bypassed

  cache.io.refill.valid := GatedValidRegNext(refill_valid, false.B)
  cache.io.refill.bits.ptes := refill_data.asUInt
  cache.io.refill.bits.req_info_dup.map(_ := RegEnable(Mux(refill_from_llptw, llptw_mem.refill, Mux(refill_from_ptw, ptw.io.refill.req_info, hptw.io.refill.req_info)), refill_valid))
  cache.io.refill.bits.level_dup.map(_ := RegEnable(refill_level, refill_valid))
  cache.io.refill.bits.levelOH(refill_level, refill_valid)
  cache.io.refill.bits.sel_pte_dup.map(_ := RegEnable(sel_data(refill_data_tmp.asUInt, req_addr_low(mem.d.bits.source)), refill_valid))

  if (env.EnableDifftest) {
    val difftest_ptw_addr = RegInit(VecInit(Seq.fill(MemReqWidth)(0.U(PAddrBits.W))))
    when (mem.a.valid) {
      difftest_ptw_addr(mem.a.bits.source) := mem.a.bits.address
    }

    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := io.hartId
    difftest.index := 2.U
    difftest.valid := cache.io.refill.valid
    difftest.addr := difftest_ptw_addr(RegEnable(mem.d.bits.source, mem.d.valid))
    difftest.data := refill_data.asTypeOf(difftest.data)
    difftest.idtfr := DontCare
  }

  if (env.EnableDifftest) {
    for (i <- 0 until PtwWidth) {
      val difftest = DifftestModule(new DiffL2TLBEvent)
      difftest.coreid := io.hartId
      difftest.valid := io.tlb(i).resp.fire && !io.tlb(i).resp.bits.s1.af && !io.tlb(i).resp.bits.s2.gaf
      difftest.index := i.U
      difftest.vpn := Cat(io.tlb(i).resp.bits.s1.entry.tag, 0.U(sectortlbwidth.W))
      for (j <- 0 until tlbcontiguous) {
        difftest.ppn(j) := Cat(io.tlb(i).resp.bits.s1.entry.ppn, io.tlb(i).resp.bits.s1.ppn_low(j))
        difftest.valididx(j) := io.tlb(i).resp.bits.s1.valididx(j)
        difftest.pteidx(j) := io.tlb(i).resp.bits.s1.pteidx(j)
      }
      difftest.perm := io.tlb(i).resp.bits.s1.entry.perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      difftest.level := io.tlb(i).resp.bits.s1.entry.level.getOrElse(0.U.asUInt)
      difftest.pf := io.tlb(i).resp.bits.s1.pf
      difftest.satp := Cat(io.csr.tlb.satp.mode, io.csr.tlb.satp.asid, io.csr.tlb.satp.ppn)
      difftest.vsatp := Cat(io.csr.tlb.vsatp.mode, io.csr.tlb.vsatp.asid, io.csr.tlb.vsatp.ppn)
      difftest.hgatp := Cat(io.csr.tlb.hgatp.mode, io.csr.tlb.hgatp.asid, io.csr.tlb.hgatp.ppn)
      difftest.gvpn := io.tlb(i).resp.bits.s2.entry.tag
      difftest.g_perm := io.tlb(i).resp.bits.s2.entry.perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      difftest.g_level := io.tlb(i).resp.bits.s2.entry.level.getOrElse(0.U.asUInt)
      difftest.s2ppn := io.tlb(i).resp.bits.s2.entry.ppn
      difftest.gpf := io.tlb(i).resp.bits.s2.gpf
      difftest.s2xlate := io.tlb(i).resp.bits.s2xlate
    }
  }

  // pmp
  pmp_check(0).req <> ptw.io.pmp.req
  ptw.io.pmp.resp <> pmp_check(0).resp
  pmp_check(1).req <> llptw.io.pmp.req
  llptw.io.pmp.resp <> pmp_check(1).resp
  pmp_check(2).req <> hptw.io.pmp.req
  hptw.io.pmp.resp <> pmp_check(2).resp

  llptw_out.ready := outReady(llptw_out.bits.req_info.source, outArbMqPort)
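  // The three PMP/PMA checkers above are combinational (sameCycle = true) and
  // operate in S-mode; each walker (ptw, llptw, hptw) owns a dedicated
  // checker, so the checks never contend with each other.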
  // hptw and page cache -> ptw and llptw
  val HptwRespArbCachePort = 0
  val HptwRespArbHptw = 1
  hptw_resp_arb.io.in(HptwRespArbCachePort).valid := cache.io.resp.valid && cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq
  hptw_resp_arb.io.in(HptwRespArbCachePort).bits.id := cache.io.resp.bits.toHptw.id
  hptw_resp_arb.io.in(HptwRespArbCachePort).bits.resp := cache.io.resp.bits.toHptw.resp
  hptw_resp_arb.io.in(HptwRespArbHptw).valid := hptw.io.resp.valid
  hptw_resp_arb.io.in(HptwRespArbHptw).bits.id := hptw.io.resp.bits.id
  hptw_resp_arb.io.in(HptwRespArbHptw).bits.resp := hptw.io.resp.bits.resp
  hptw.io.resp.ready := hptw_resp_arb.io.in(HptwRespArbHptw).ready

  ptw.io.hptw.resp.valid := hptw_resp_arb.io.out.valid && hptw_resp_arb.io.out.bits.id === FsmReqID.U
  ptw.io.hptw.resp.bits.h_resp := hptw_resp_arb.io.out.bits.resp
  llptw.io.hptw.resp.valid := hptw_resp_arb.io.out.valid && hptw_resp_arb.io.out.bits.id =/= FsmReqID.U
  llptw.io.hptw.resp.bits.id := hptw_resp_arb.io.out.bits.id
  llptw.io.hptw.resp.bits.h_resp := hptw_resp_arb.io.out.bits.resp
  hptw_resp_arb.io.out.ready := true.B

  // Timing: may need some optimization here, or even one more pipeline cycle
  for (i <- 0 until PtwWidth) {
    mergeArb(i).in(outArbCachePort).valid := cache.io.resp.valid && cache.io.resp.bits.hit && cache.io.resp.bits.req_info.source === i.U && !cache.io.resp.bits.isHptwReq
    mergeArb(i).in(outArbCachePort).bits.s2xlate := cache.io.resp.bits.req_info.s2xlate
    mergeArb(i).in(outArbCachePort).bits.s1 := cache.io.resp.bits.stage1
    mergeArb(i).in(outArbCachePort).bits.s2 := cache.io.resp.bits.toHptw.resp
    mergeArb(i).in(outArbFsmPort).valid := ptw.io.resp.valid && ptw.io.resp.bits.source === i.U
    mergeArb(i).in(outArbFsmPort).bits.s2xlate := ptw.io.resp.bits.s2xlate
    mergeArb(i).in(outArbFsmPort).bits.s1 := ptw.io.resp.bits.resp
    mergeArb(i).in(outArbFsmPort).bits.s2 := ptw.io.resp.bits.h_resp
    mergeArb(i).in(outArbMqPort).valid := llptw_out.valid && llptw_out.bits.req_info.source === i.U
    mergeArb(i).in(outArbMqPort).bits.s2xlate := llptw_out.bits.req_info.s2xlate
    mergeArb(i).in(outArbMqPort).bits.s1 := Mux(llptw_out.bits.first_s2xlate_fault, llptw_stage1(llptw_out.bits.id), contiguous_pte_to_merge_ptwResp(resp_pte_sector(llptw_out.bits.id).asUInt, llptw_out.bits.req_info.vpn, llptw_out.bits.af, true, s2xlate = llptw_out.bits.req_info.s2xlate))
    mergeArb(i).in(outArbMqPort).bits.s2 := llptw_out.bits.h_resp
    mergeArb(i).out.ready := outArb(i).in(0).ready
  }

  for (i <- 0 until PtwWidth) {
    outArb(i).in(0).valid := mergeArb(i).out.valid
    outArb(i).in(0).bits.s2xlate := mergeArb(i).out.bits.s2xlate
    outArb(i).in(0).bits.s1 := merge_ptwResp_to_sector_ptwResp(mergeArb(i).out.bits.s1)
    outArb(i).in(0).bits.s2 := mergeArb(i).out.bits.s2
  }

  // io.tlb.map(_.resp) <> outArb.map(_.out)
  io.tlb.map(_.resp).zip(outArb.map(_.out)).map {
    case (resp, out) => resp <> out
  }

  // sfence
  when (flush) {
    for (i <- 0 until MemReqWidth) {
      when (waiting_resp(i)) {
        flush_latch(i) := true.B
      }
    }
  }
  // mem -> control signal
  // waiting_resp and flush_latch are cleared when mem_resp_done
  when (mem_resp_done) {
    waiting_resp(mem.d.bits.source) := false.B
    flush_latch(mem.d.bits.source) := false.B
  }
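  // block_decoupled (below) inserts a kill condition into a Decoupled
  // handshake: while block_signal is high the sink sees valid low and the
  // source sees ready low, so no beat can transfer; bits pass through
  // unchanged.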
  def block_decoupled[T <: Data](source: DecoupledIO[T], sink: DecoupledIO[T], block_signal: Bool) = {
    sink.valid := source.valid && !block_signal
    source.ready := sink.ready && !block_signal
    sink.bits := source.bits
  }

  def get_part(data: Vec[UInt], index: UInt): UInt = {
    val inner_data = data.asTypeOf(Vec(data.getWidth / XLEN, UInt(XLEN.W)))
    inner_data(index)
  }

  // not_super means that this is a normal page
  // for a super page, valididx(i) is set for all entries, which makes l1 tlb matching convenient
  def contiguous_pte_to_merge_ptwResp(pte: UInt, vpn: UInt, af: Bool, af_first: Boolean, not_super: Boolean = true, s2xlate: UInt): PtwMergeResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_merge_resp = Wire(new PtwMergeResp())
    val hasS2xlate = s2xlate =/= noS2xlate
    for (i <- 0 until tlbcontiguous) {
      val pte_in = pte(64 * i + 63, 64 * i).asTypeOf(new PteBundle())
      val ptw_resp = Wire(new PtwMergeEntry(tagLen = sectorvpnLen, hasPerm = true, hasLevel = true))
      ptw_resp.ppn := pte_in.ppn(ppnLen - 1, sectortlbwidth)
      ptw_resp.ppn_low := pte_in.ppn(sectortlbwidth - 1, 0)
      ptw_resp.level.map(_ := 0.U)
      ptw_resp.perm.map(_ := pte_in.getPerm())
      ptw_resp.tag := vpn(vpnLen - 1, sectortlbwidth)
      ptw_resp.pf := (if (af_first) !af else true.B) && (pte_in.isPf(0.U) || !pte_in.isLeaf())
      ptw_resp.af := (if (!af_first) pte_in.isPf(0.U) else true.B) && (af || Mux(s2xlate === allStage, false.B, pte_in.isAf()))
      ptw_resp.v := !ptw_resp.pf
      ptw_resp.prefetch := DontCare
      ptw_resp.asid := Mux(hasS2xlate, vsatp.asid, satp.asid)
      ptw_resp.vmid.map(_ := hgatp.asid)
      ptw_merge_resp.entry(i) := ptw_resp
    }
    ptw_merge_resp.pteidx := UIntToOH(vpn(sectortlbwidth - 1, 0)).asBools
    ptw_merge_resp.not_super := not_super.B
    ptw_merge_resp
  }
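  // merge_ptwResp_to_sector_ptwResp (below) compresses the eight merged
  // entries into a single sector response: valididx(i) is set only for entries
  // whose upper ppn, permissions, v, af, and pf all match the requested entry,
  // so the L1 TLB may treat them as one sector; for a super page
  // (not_super = false) every valididx is set.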
  def merge_ptwResp_to_sector_ptwResp(pte: PtwMergeResp): PtwSectorResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_sector_resp = Wire(new PtwSectorResp)
    ptw_sector_resp.entry.tag := pte.entry(OHToUInt(pte.pteidx)).tag
    ptw_sector_resp.entry.asid := pte.entry(OHToUInt(pte.pteidx)).asid
    ptw_sector_resp.entry.vmid.map(_ := pte.entry(OHToUInt(pte.pteidx)).vmid.getOrElse(0.U))
    ptw_sector_resp.entry.ppn := pte.entry(OHToUInt(pte.pteidx)).ppn
    ptw_sector_resp.entry.perm.map(_ := pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)))
    ptw_sector_resp.entry.level.map(_ := pte.entry(OHToUInt(pte.pteidx)).level.getOrElse(0.U(log2Up(Level + 1).W)))
    ptw_sector_resp.entry.prefetch := pte.entry(OHToUInt(pte.pteidx)).prefetch
    ptw_sector_resp.entry.v := pte.entry(OHToUInt(pte.pteidx)).v
    ptw_sector_resp.af := pte.entry(OHToUInt(pte.pteidx)).af
    ptw_sector_resp.pf := pte.entry(OHToUInt(pte.pteidx)).pf
    ptw_sector_resp.addr_low := OHToUInt(pte.pteidx)
    ptw_sector_resp.pteidx := pte.pteidx
    for (i <- 0 until tlbcontiguous) {
      val ppn_equal = pte.entry(i).ppn === pte.entry(OHToUInt(pte.pteidx)).ppn
      val perm_equal = pte.entry(i).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt === pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      val v_equal = pte.entry(i).v === pte.entry(OHToUInt(pte.pteidx)).v
      val af_equal = pte.entry(i).af === pte.entry(OHToUInt(pte.pteidx)).af
      val pf_equal = pte.entry(i).pf === pte.entry(OHToUInt(pte.pteidx)).pf
      ptw_sector_resp.valididx(i) := (ppn_equal && perm_equal && v_equal && af_equal && pf_equal) || !pte.not_super
      ptw_sector_resp.ppn_low(i) := pte.entry(i).ppn_low
    }
    ptw_sector_resp.valididx(OHToUInt(pte.pteidx)) := true.B
    ptw_sector_resp
  }

  def outReady(source: UInt, port: Int): Bool = {
    MuxLookup(source, true.B)((0 until PtwWidth).map(i => i.U -> mergeArb(i).in(port).ready))
  }

  // debug info
  for (i <- 0 until PtwWidth) {
    XSDebug(p"[io.tlb(${i.U})] ${io.tlb(i)}\n")
  }
  XSDebug(p"[sfence] ${io.sfence}\n")
  XSDebug(p"[io.csr.tlb] ${io.csr.tlb}\n")

  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"req_count${i}", io.tlb(i).req(0).fire)
    XSPerfAccumulate(s"req_blocked_count_${i}", io.tlb(i).req(0).valid && !io.tlb(i).req(0).ready)
  }
  XSPerfAccumulate(s"req_blocked_by_mq", arb1.io.out.valid && missQueue.io.out.valid)
  for (i <- 0 until (MemReqWidth + 1)) {
    XSPerfAccumulate(s"mem_req_util${i}", PopCount(waiting_resp) === i.U)
  }
  XSPerfAccumulate("mem_cycle", PopCount(waiting_resp) =/= 0.U)
  XSPerfAccumulate("mem_count", mem.a.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"llptw_ppn_af${i}", mergeArb(i).in(outArbMqPort).valid && mergeArb(i).in(outArbMqPort).bits.s1.entry(OHToUInt(mergeArb(i).in(outArbMqPort).bits.s1.pteidx)).af && !llptw_out.bits.af)
    XSPerfAccumulate(s"access_fault${i}", io.tlb(i).resp.fire && io.tlb(i).resp.bits.s1.af)
  }

  // print configs
  println(s"${l2tlbParams.name}: a ptw, a llptw with size ${l2tlbParams.llptwsize}, miss queue size ${MissQueueSize}, l2: ${l2tlbParams.l2Size}, fa l1: nSets ${l2tlbParams.l1nSets} nWays ${l2tlbParams.l1nWays}, l0: nSets ${l2tlbParams.l0nSets} nWays ${l2tlbParams.l0nWays}, blockBytes: ${l2tlbParams.blockBytes}")

  // time-out asserts
  for (i <- 0 until MemReqWidth) {
    TimeOutAssert(waiting_resp(i), timeOutThreshold, s"ptw mem resp time out wait_resp${i}")
    TimeOutAssert(flush_latch(i), timeOutThreshold, s"ptw mem resp time out flush_latch${i}")
  }

  val perfEvents = Seq(llptw, cache, ptw).flatMap(_.getPerfEvents)
  generatePerfEvent()
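  // The ChiselDB tables below are simulation-only trace sinks; each
  // isWrite*Table Constantin record gates its logging, so (as far as these
  // hooks go) nothing is recorded unless explicitly enabled at runtime.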
  val isWriteL1TlbTable = Constantin.createRecord(s"isWriteL1TlbTable$hartId")
  val L1TlbTable = ChiselDB.createTable(s"L1Tlb_hart$hartId", new L1TlbDB)
  val ITlbReqDB, DTlbReqDB, ITlbRespDB, DTlbRespDB = Wire(new L1TlbDB)
  ITlbReqDB.vpn := io.tlb(0).req(0).bits.vpn
  DTlbReqDB.vpn := io.tlb(1).req(0).bits.vpn
  ITlbRespDB.vpn := io.tlb(0).resp.bits.s1.entry.tag
  DTlbRespDB.vpn := io.tlb(1).resp.bits.s1.entry.tag
  L1TlbTable.log(ITlbReqDB, isWriteL1TlbTable.orR && io.tlb(0).req(0).fire, "ITlbReq", clock, reset)
  L1TlbTable.log(DTlbReqDB, isWriteL1TlbTable.orR && io.tlb(1).req(0).fire, "DTlbReq", clock, reset)
  L1TlbTable.log(ITlbRespDB, isWriteL1TlbTable.orR && io.tlb(0).resp.fire, "ITlbResp", clock, reset)
  L1TlbTable.log(DTlbRespDB, isWriteL1TlbTable.orR && io.tlb(1).resp.fire, "DTlbResp", clock, reset)

  val isWritePageCacheTable = Constantin.createRecord(s"isWritePageCacheTable$hartId")
  val PageCacheTable = ChiselDB.createTable(s"PageCache_hart$hartId", new PageCacheDB)
  val PageCacheDB = Wire(new PageCacheDB)
  PageCacheDB.vpn := Cat(cache.io.resp.bits.stage1.entry(0).tag, OHToUInt(cache.io.resp.bits.stage1.pteidx))
  PageCacheDB.source := cache.io.resp.bits.req_info.source
  PageCacheDB.bypassed := cache.io.resp.bits.bypassed
  PageCacheDB.is_first := cache.io.resp.bits.isFirst
  PageCacheDB.prefetched := cache.io.resp.bits.stage1.entry(0).prefetch
  PageCacheDB.prefetch := cache.io.resp.bits.prefetch
  PageCacheDB.l2Hit := cache.io.resp.bits.toFsm.l2Hit
  PageCacheDB.l1Hit := cache.io.resp.bits.toFsm.l1Hit
  PageCacheDB.hit := cache.io.resp.bits.hit
  PageCacheTable.log(PageCacheDB, isWritePageCacheTable.orR && cache.io.resp.fire, "PageCache", clock, reset)

  val isWritePTWTable = Constantin.createRecord(s"isWritePTWTable$hartId")
  val PTWTable = ChiselDB.createTable(s"PTW_hart$hartId", new PTWDB)
  val PTWReqDB, PTWRespDB, LLPTWReqDB, LLPTWRespDB = Wire(new PTWDB)
  PTWReqDB.vpn := ptw.io.req.bits.req_info.vpn
  PTWReqDB.source := ptw.io.req.bits.req_info.source
  PTWRespDB.vpn := ptw.io.refill.req_info.vpn
  PTWRespDB.source := ptw.io.refill.req_info.source
  LLPTWReqDB.vpn := llptw.io.in.bits.req_info.vpn
  LLPTWReqDB.source := llptw.io.in.bits.req_info.source
  LLPTWRespDB.vpn := llptw.io.mem.refill.vpn
  LLPTWRespDB.source := llptw.io.mem.refill.source
  PTWTable.log(PTWReqDB, isWritePTWTable.orR && ptw.io.req.fire, "PTWReq", clock, reset)
  PTWTable.log(PTWRespDB, isWritePTWTable.orR && ptw.io.mem.resp.fire, "PTWResp", clock, reset)
  PTWTable.log(LLPTWReqDB, isWritePTWTable.orR && llptw.io.in.fire, "LLPTWReq", clock, reset)
  PTWTable.log(LLPTWRespDB, isWritePTWTable.orR && llptw.io.mem.resp.fire, "LLPTWResp", clock, reset)

  val isWriteL2TlbMissQueueTable = Constantin.createRecord(s"isWriteL2TlbMissQueueTable$hartId")
  val L2TlbMissQueueTable = ChiselDB.createTable(s"L2TlbMissQueue_hart$hartId", new L2TlbMissQueueDB)
  val L2TlbMissQueueInDB, L2TlbMissQueueOutDB = Wire(new L2TlbMissQueueDB)
  L2TlbMissQueueInDB.vpn := missQueue.io.in.bits.req_info.vpn
  L2TlbMissQueueOutDB.vpn := missQueue.io.out.bits.req_info.vpn
  L2TlbMissQueueTable.log(L2TlbMissQueueInDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.in.fire, "L2TlbMissQueueIn", clock, reset)
  L2TlbMissQueueTable.log(L2TlbMissQueueOutDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.out.fire, "L2TlbMissQueueOut", clock, reset)
}

/** BlockHelper: blocks the miss queue so that it does not send too many requests to the cache.
 * Parameters:
 *   enable: enables BlockHelper; the mq should not send too many reqs
 *   start: when the miss queue's output fires and blocking is needed, block the miss queue's output
 *   block: blocks the miss queue's output
 *   latency: the cache access latency of the last miss queue output
 */
class BlockHelper(latency: Int)(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val enable = Input(Bool())
    val start = Input(Bool())
    val block = Output(Bool())
  })

  val count = RegInit(0.U(log2Ceil(latency).W))
  val valid = RegInit(false.B)
  val work = RegInit(true.B)

  io.block := valid

  when (io.start && work) { valid := true.B }
  when (valid) { count := count + 1.U }
  when (count === (latency.U) || io.enable) {
    valid := false.B
    work := io.enable
    count := 0.U
  }
}

class PTEHelper() extends ExtModule {
  val clock = IO(Input(Clock()))
  val enable = IO(Input(Bool()))
  val satp = IO(Input(UInt(64.W)))
  val vpn = IO(Input(UInt(64.W)))
  val pte = IO(Output(UInt(64.W)))
  val level = IO(Output(UInt(8.W)))
  val pf = IO(Output(UInt(8.W)))
}
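// PTWDelayN is a flushable delay line: the input is registered once, and each
// further stage passes through two registers (t, then out), giving a total
// latency of 2n-1 cycles; ptwflush clears the whole pipeline to zero.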
class PTWDelayN[T <: Data](gen: T, n: Int, flush: Bool) extends Module {
  val io = IO(new Bundle() {
    val in = Input(gen)
    val out = Output(gen)
    val ptwflush = Input(flush.cloneType)
  })
  val out = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  val t = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  out(0) := io.in
  if (n == 1) {
    io.out := out(0)
  } else {
    when (io.ptwflush) {
      for (i <- 0 until n) {
        t(i) := 0.U.asTypeOf(gen)
        out(i) := 0.U.asTypeOf(gen)
      }
      io.out := 0.U.asTypeOf(gen)
    } .otherwise {
      for (i <- 1 until n) {
        t(i - 1) := out(i - 1)
        out(i) := t(i - 1)
      }
      io.out := out(n - 1)
    }
  }
}

object PTWDelayN {
  def apply[T <: Data](in: T, n: Int, flush: Bool): T = {
    val delay = Module(new PTWDelayN(in.cloneType, n, flush))
    delay.io.in := in
    delay.io.ptwflush := flush
    delay.io.out
  }
}

class FakePTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new L2TLBIO)
  val flush = VecInit(Seq.fill(PtwWidth)(false.B))
  flush(0) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, itlbParams.fenceDelay)
  flush(1) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, ldtlbParams.fenceDelay)
  for (i <- 0 until PtwWidth) {
    val helper = Module(new PTEHelper())
    helper.clock := clock
    helper.satp := io.csr.tlb.satp.ppn

    if (coreParams.softPTWDelay == 1) {
      helper.enable := io.tlb(i).req(0).fire
      helper.vpn := io.tlb(i).req(0).bits.vpn
    } else {
      helper.enable := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay - 1, flush(i))
      helper.vpn := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay - 1, flush(i))
    }

    val pte = helper.pte.asTypeOf(new PteBundle)
    val level = helper.level
    val pf = helper.pf
    val empty = RegInit(true.B)
    when (io.tlb(i).req(0).fire) {
      empty := false.B
    } .elsewhen (io.tlb(i).resp.fire || flush(i)) {
      empty := true.B
    }

    io.tlb(i).req(0).ready := empty || io.tlb(i).resp.fire
    io.tlb(i).resp.valid := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay, flush(i))
    assert(!io.tlb(i).resp.valid || io.tlb(i).resp.ready)
    io.tlb(i).resp.bits.s1.entry.tag := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay, flush(i))
    io.tlb(i).resp.bits.s1.entry.ppn := pte.ppn
    io.tlb(i).resp.bits.s1.entry.perm.map(_ := pte.getPerm())
    io.tlb(i).resp.bits.s1.entry.level.map(_ := level)
    io.tlb(i).resp.bits.s1.pf := pf
    io.tlb(i).resp.bits.s1.af := DontCare // TODO: implement it
    io.tlb(i).resp.bits.s1.entry.v := !pf
    io.tlb(i).resp.bits.s1.entry.prefetch := DontCare
    io.tlb(i).resp.bits.s1.entry.asid := io.csr.tlb.satp.asid
  }
}

class L2TLBWrapper()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false
  val useSoftPTW = coreParams.softPTW
  val node = if (!useSoftPTW) TLIdentityNode() else null
  val ptw = if (!useSoftPTW) LazyModule(new L2TLB()) else null
  if (!useSoftPTW) {
    node := ptw.node
  }

  class L2TLBWrapperImp(wrapper: LazyModule) extends LazyModuleImp(wrapper) with HasPerfEvents {
    val io = IO(new L2TLBIO)
    val perfEvents = if (useSoftPTW) {
      val fake_ptw = Module(new FakePTW())
      io <> fake_ptw.io
      Seq()
    } else {
      io <> ptw.module.io
      ptw.module.getPerfEvents
    }
    generatePerfEvent()
  }

  lazy val module = new L2TLBWrapperImp(this)
}