/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.experimental.ExtModule
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{IdRange, LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMP, PMPChecker, PMPReqBundle, PMPRespBundle}
import xiangshan.backend.fu.util.HasCSRConst
import difftest._

class L2TLB()(implicit p: Parameters) extends LazyModule with HasPtwConst {
  override def shouldBeInlined: Boolean = false

  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    clients = Seq(TLMasterParameters.v1(
      "ptw",
      sourceId = IdRange(0, MemReqWidth)
    )),
    requestFields = Seq(ReqSourceField())
  )))

  lazy val module = new L2TLBImp(this)
}

class L2TLBImp(outer: L2TLB)(implicit p: Parameters) extends PtwModule(outer) with HasCSRConst with HasPerfEvents {

  val (mem, edge) = outer.node.out.head

  val io = IO(new L2TLBIO)
  val difftestIO = IO(new Bundle() {
    val ptwResp = Output(Bool())
    val ptwAddr = Output(UInt(64.W))
    val ptwData = Output(Vec(4, UInt(64.W)))
  })

  /* The PTW processes multiple requests.
   * The walk is divided into two stages: a page cache access, then a mem access on a cache miss.
   *
   *     miss queue   itlb   dtlb
   *          |         |      |
   *          -----arbiter-----
   *                   |
   *           l1 - l2 - l3 - sp
   *                   |
   *     -------------------------------------------
   *     | miss                           | hit
   *   queue                              |
   *   [][][][][][]                       |
   *     |                                |
   *   state machine accessing mem        |
   *     |                                |
   *     --------------arbiter-------------
   *            |                  |
   *          itlb               dtlb
   */

  difftestIO <> DontCare

  val sfence_tmp = DelayN(io.sfence, 1)
  val csr_tmp = DelayN(io.csr.tlb, 1)
  val sfence_dup = Seq.fill(9)(RegNext(sfence_tmp))
  val csr_dup = Seq.fill(8)(RegNext(csr_tmp)) // TODO: add csr_modified?
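  // NOTE: io.sfence and io.csr.tlb are registered twice (DelayN(_, 1) followed by RegNext),
  // so every consumer sees them two cycles late. The per-consumer duplicates (9 sfence copies,
  // 8 csr copies) are presumably there to cut fan-out for timing; the index choices below must
  // stay consistent with their consumers.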
  val satp = csr_dup(0).satp
  val vsatp = csr_dup(0).vsatp
  val hgatp = csr_dup(0).hgatp
  val priv = csr_dup(0).priv
  val mPBMTE = csr_dup(0).mPBMTE
  val hPBMTE = csr_dup(0).hPBMTE
  val flush = sfence_dup(0).valid || satp.changed || vsatp.changed || hgatp.changed

  val pmp = Module(new PMP())
  val pmp_check = VecInit(Seq.fill(3)(Module(new PMPChecker(lgMaxSize = 3, sameCycle = true)).io))
  pmp.io.distribute_csr := io.csr.distribute_csr
  pmp_check.foreach(_.check_env.apply(ModeS, pmp.io.pmp, pmp.io.pma))

  val missQueue = Module(new L2TlbMissQueue)
  val cache = Module(new PtwCache)
  val ptw = Module(new PTW)
  val hptw = Module(new HPTW)
  val llptw = Module(new LLPTW)
  val blockmq = Module(new BlockHelper(3))
  val arb1 = Module(new Arbiter(new PtwReq, PtwWidth))
  val arb2 = Module(new Arbiter(new L2TlbWithHptwIdBundle, ((if (l2tlbParams.enablePrefetch) 4 else 3) + (if (HasHExtension) 1 else 0))))
  val hptw_req_arb = Module(new Arbiter(new Bundle {
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val source = UInt(bSourceWidth.W)
    val gvpn = UInt(gvpnLen.W)
  }, 2))
  val hptw_resp_arb = Module(new Arbiter(new Bundle {
    val resp = new HptwResp()
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
  }, 2))
  val outArb = (0 until PtwWidth).map(i => Module(new Arbiter(new Bundle {
    val s2xlate = UInt(2.W)
    val s1 = new PtwSectorResp()
    val s2 = new HptwResp()
  }, 1)).io)
  val mergeArb = (0 until PtwWidth).map(i => Module(new Arbiter(new Bundle {
    val s2xlate = UInt(2.W)
    val s1 = new PtwMergeResp()
    val s2 = new HptwResp()
  }, 3)).io)
  val outArbCachePort = 0
  val outArbFsmPort = 1
  val outArbMqPort = 2

  // hptw arb input ports
  val InHptwArbPTWPort = 0
  val InHptwArbLLPTWPort = 1
  hptw_req_arb.io.in(InHptwArbPTWPort).valid := ptw.io.hptw.req.valid
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.gvpn := ptw.io.hptw.req.bits.gvpn
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.id := ptw.io.hptw.req.bits.id
  hptw_req_arb.io.in(InHptwArbPTWPort).bits.source := ptw.io.hptw.req.bits.source
  ptw.io.hptw.req.ready := hptw_req_arb.io.in(InHptwArbPTWPort).ready

  hptw_req_arb.io.in(InHptwArbLLPTWPort).valid := llptw.io.hptw.req.valid
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.gvpn := llptw.io.hptw.req.bits.gvpn
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.id := llptw.io.hptw.req.bits.id
  hptw_req_arb.io.in(InHptwArbLLPTWPort).bits.source := llptw.io.hptw.req.bits.source
  llptw.io.hptw.req.ready := hptw_req_arb.io.in(InHptwArbLLPTWPort).ready

  // arb2 input ports
  val InArbHPTWPort = 0
  val InArbPTWPort = 1
  val InArbMissQueuePort = 2
  val InArbTlbPort = 3
  val InArbPrefetchPort = 4
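  // chisel3.util.Arbiter is a fixed-priority arbiter (lower index wins), so arb2 serves
  // HPTW (0) before PTW (1), the miss queue (2), the TLB ports (3), and prefetch (4).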
  // NOTE: when the cache responds with a miss but the PTW does not accept the request,
  //       the request falls through to the miss queue.
  arb1.io.in <> VecInit(io.tlb.map(_.req(0)))

  arb2.io.in(InArbPTWPort).valid := ptw.io.llptw.valid
  arb2.io.in(InArbPTWPort).bits.req_info := ptw.io.llptw.bits.req_info
  arb2.io.in(InArbPTWPort).bits.isHptwReq := false.B
  arb2.io.in(InArbPTWPort).bits.isLLptw := false.B
  arb2.io.in(InArbPTWPort).bits.hptwId := DontCare
  ptw.io.llptw.ready := arb2.io.in(InArbPTWPort).ready
  block_decoupled(missQueue.io.out, arb2.io.in(InArbMissQueuePort), Mux(missQueue.io.out.bits.isLLptw, !llptw.io.in.ready, !ptw.io.req.ready))

  arb2.io.in(InArbTlbPort).valid := arb1.io.out.valid
  arb2.io.in(InArbTlbPort).bits.req_info.vpn := arb1.io.out.bits.vpn
  arb2.io.in(InArbTlbPort).bits.req_info.s2xlate := arb1.io.out.bits.s2xlate
  arb2.io.in(InArbTlbPort).bits.req_info.source := arb1.io.chosen
  arb2.io.in(InArbTlbPort).bits.isHptwReq := false.B
  arb2.io.in(InArbTlbPort).bits.isLLptw := false.B
  arb2.io.in(InArbTlbPort).bits.hptwId := DontCare
  arb1.io.out.ready := arb2.io.in(InArbTlbPort).ready

  arb2.io.in(InArbHPTWPort).valid := hptw_req_arb.io.out.valid
  arb2.io.in(InArbHPTWPort).bits.req_info.vpn := hptw_req_arb.io.out.bits.gvpn
  arb2.io.in(InArbHPTWPort).bits.req_info.s2xlate := onlyStage2
  arb2.io.in(InArbHPTWPort).bits.req_info.source := hptw_req_arb.io.out.bits.source
  arb2.io.in(InArbHPTWPort).bits.isHptwReq := true.B
  arb2.io.in(InArbHPTWPort).bits.isLLptw := false.B
  arb2.io.in(InArbHPTWPort).bits.hptwId := hptw_req_arb.io.out.bits.id
  hptw_req_arb.io.out.ready := arb2.io.in(InArbHPTWPort).ready

  val hartId = p(XSCoreParamsKey).HartId
  if (l2tlbParams.enablePrefetch) {
    val prefetch = Module(new L2TlbPrefetch())
    val recv = cache.io.resp
    // NOTE: 1. a prefetch req does not generate another prefetch
    //       2. a req from the miss queue does not generate a prefetch
    //       3. a miss generates a prefetch; a hit on a prefetched entry also generates a prefetch
    prefetch.io.in.valid := recv.fire && !from_pre(recv.bits.req_info.source) && (!recv.bits.hit ||
      recv.bits.prefetch) && recv.bits.isFirst
    prefetch.io.in.bits.vpn := recv.bits.req_info.vpn
    prefetch.io.sfence := sfence_dup(0)
    prefetch.io.csr := csr_dup(0)
    arb2.io.in(InArbPrefetchPort) <> prefetch.io.out

    val isWriteL2TlbPrefetchTable = Constantin.createRecord(s"isWriteL2TlbPrefetchTable$hartId")
    val L2TlbPrefetchTable = ChiselDB.createTable(s"L2TlbPrefetch_hart$hartId", new L2TlbPrefetchDB)
    val L2TlbPrefetchDB = Wire(new L2TlbPrefetchDB)
    L2TlbPrefetchDB.vpn := prefetch.io.out.bits.req_info.vpn
    L2TlbPrefetchTable.log(L2TlbPrefetchDB, isWriteL2TlbPrefetchTable.orR && prefetch.io.out.fire, "L2TlbPrefetch", clock, reset)
  }
  arb2.io.out.ready := cache.io.req.ready

  val mq_arb = Module(new Arbiter(new L2TlbWithHptwIdBundle, 2))
  mq_arb.io.in(0).valid := cache.io.resp.valid && !cache.io.resp.bits.hit &&
    !from_pre(cache.io.resp.bits.req_info.source) && !cache.io.resp.bits.isHptwReq && // hptw reqs are not sent to the miss queue
    (cache.io.resp.bits.bypassed || (
      ((!cache.io.resp.bits.toFsm.l1Hit || cache.io.resp.bits.toFsm.stage1Hit) && !cache.io.resp.bits.isHptwReq && (cache.io.resp.bits.isFirst || !ptw.io.req.ready)) // send to ptw: the req is first, or ptw is busy
      || (cache.io.resp.bits.toFsm.l1Hit && !llptw.io.in.ready) // send to llptw: llptw is full
    ))
  mq_arb.io.in(0).bits.req_info := cache.io.resp.bits.req_info
  mq_arb.io.in(0).bits.isHptwReq := false.B
  mq_arb.io.in(0).bits.hptwId := DontCare
  mq_arb.io.in(0).bits.isLLptw := cache.io.resp.bits.toFsm.l1Hit
  mq_arb.io.in(1).bits.req_info := llptw.io.cache.bits
  mq_arb.io.in(1).bits.isHptwReq := false.B
  mq_arb.io.in(1).bits.hptwId := DontCare
  mq_arb.io.in(1).bits.isLLptw := false.B
  mq_arb.io.in(1).valid := llptw.io.cache.valid
  llptw.io.cache.ready := mq_arb.io.in(1).ready
  missQueue.io.in <> mq_arb.io.out
  missQueue.io.sfence := sfence_dup(6)
  missQueue.io.csr := csr_dup(5)

  blockmq.io.start := missQueue.io.out.fire
  blockmq.io.enable := ptw.io.req.fire
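  // A request enters the LLPTW (below) only when the page cache has already resolved every
  // level but the leaf (toFsm.l1Hit), the walk is not bypassed, and it is not an HPTW walk;
  // other misses go to the PTW state machine or fall back to the miss queue above.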
  llptw.io.in.valid := cache.io.resp.valid &&
    !cache.io.resp.bits.hit &&
    cache.io.resp.bits.toFsm.l1Hit &&
    !cache.io.resp.bits.bypassed &&
    !cache.io.resp.bits.isHptwReq
  llptw.io.in.bits.req_info := cache.io.resp.bits.req_info
  llptw.io.in.bits.ppn := cache.io.resp.bits.toFsm.ppn
  llptw.io.sfence := sfence_dup(1)
  llptw.io.csr := csr_dup(1)
  val llptw_stage1 = Reg(Vec(l2tlbParams.llptwsize, new PtwMergeResp()))
  when (llptw.io.in.fire) {
    llptw_stage1(llptw.io.mem.enq_ptr) := cache.io.resp.bits.stage1
  }

  cache.io.req.valid := arb2.io.out.valid
  cache.io.req.bits.req_info := arb2.io.out.bits.req_info
  cache.io.req.bits.isFirst := (arb2.io.chosen =/= InArbMissQueuePort.U && !arb2.io.out.bits.isHptwReq)
  cache.io.req.bits.isHptwReq := arb2.io.out.bits.isHptwReq
  cache.io.req.bits.hptwId := arb2.io.out.bits.hptwId
  cache.io.req.bits.bypassed.map(_ := false.B)
  cache.io.sfence := sfence_dup(2)
  cache.io.csr := csr_dup(2)
  cache.io.sfence_dup.zip(sfence_dup.drop(2).take(4)).map(s => s._1 := s._2)
  cache.io.csr_dup.zip(csr_dup.drop(2).take(3)).map(c => c._1 := c._2)
  cache.io.resp.ready := MuxCase(mq_arb.io.in(0).ready || ptw.io.req.ready, Seq(
    (!cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq) -> hptw.io.req.ready,
    (cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq) -> hptw_resp_arb.io.in(HptwRespArbCachePort).ready,
    cache.io.resp.bits.hit -> outReady(cache.io.resp.bits.req_info.source, outArbCachePort),
    (cache.io.resp.bits.toFsm.l1Hit && !cache.io.resp.bits.bypassed && llptw.io.in.ready) -> llptw.io.in.ready,
    (cache.io.resp.bits.bypassed || cache.io.resp.bits.isFirst) -> mq_arb.io.in(0).ready
  ))

  // NOTE: missQueue req has higher priority
  ptw.io.req.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && !cache.io.resp.bits.toFsm.l1Hit &&
    !cache.io.resp.bits.bypassed &&
    !cache.io.resp.bits.isFirst &&
    !cache.io.resp.bits.isHptwReq
  ptw.io.req.bits.req_info := cache.io.resp.bits.req_info
  if (EnableSv48) {
    ptw.io.req.bits.l3Hit.get := cache.io.resp.bits.toFsm.l3Hit.get
  }
  ptw.io.req.bits.l2Hit := cache.io.resp.bits.toFsm.l2Hit
  ptw.io.req.bits.ppn := cache.io.resp.bits.toFsm.ppn
  ptw.io.req.bits.stage1Hit := cache.io.resp.bits.toFsm.stage1Hit
  ptw.io.req.bits.stage1 := cache.io.resp.bits.stage1
  ptw.io.sfence := sfence_dup(7)
  ptw.io.csr := csr_dup(6)
  ptw.io.resp.ready := outReady(ptw.io.resp.bits.source, outArbFsmPort)

  hptw.io.req.valid := cache.io.resp.valid && !cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq
  hptw.io.req.bits.gvpn := cache.io.resp.bits.req_info.vpn
  hptw.io.req.bits.id := cache.io.resp.bits.toHptw.id
  hptw.io.req.bits.source := cache.io.resp.bits.req_info.source
  if (EnableSv48) {
    hptw.io.req.bits.l3Hit.get := cache.io.resp.bits.toHptw.l3Hit.get
  }
  hptw.io.req.bits.l2Hit := cache.io.resp.bits.toHptw.l2Hit
  hptw.io.req.bits.l1Hit := cache.io.resp.bits.toHptw.l1Hit
  hptw.io.req.bits.ppn := cache.io.resp.bits.toHptw.ppn
  hptw.io.req.bits.bypassed := cache.io.resp.bits.toHptw.bypassed
  hptw.io.sfence := sfence_dup(8)
  hptw.io.csr := csr_dup(7)

  // mem req
  def blockBytes_align(addr: UInt) = {
    Cat(addr(PAddrBits - 1, log2Up(l2tlbParams.blockBytes)), 0.U(log2Up(l2tlbParams.blockBytes).W))
  }
  def addr_low_from_vpn(vpn: UInt) = {
    vpn(log2Ceil(l2tlbParams.blockBytes) - log2Ceil(XLEN / 8) - 1, 0)
  }
  def addr_low_from_paddr(paddr: UInt) = {
    paddr(log2Up(l2tlbParams.blockBytes) - 1, log2Up(XLEN / 8))
  }
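  // The TileLink source ID space is partitioned by requester:
  //   0 .. llptwsize-1 -> LLPTW entries,  llptwsize -> PTW,  llptwsize+1 -> HPTW
  // (so MemReqWidth is expected to equal llptwsize + 2; see from_llptw/from_ptw/from_hptw below).
  // Worked example, assuming blockBytes = 64 and XLEN = 64: blockBytes_align(0x80001238) =
  // 0x80001200, and addr_low_from_paddr(0x80001238) = 7, i.e. the eighth 64-bit PTE in the block.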
  def from_llptw(id: UInt) = {
    id < l2tlbParams.llptwsize.U
  }
  def from_ptw(id: UInt) = {
    id === l2tlbParams.llptwsize.U
  }
  def from_hptw(id: UInt) = {
    id === l2tlbParams.llptwsize.U + 1.U
  }
  val waiting_resp = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  val flush_latch = RegInit(VecInit(Seq.fill(MemReqWidth)(false.B)))
  val hptw_bypassed = RegInit(false.B)
  for (i <- waiting_resp.indices) {
    assert(!flush_latch(i) || waiting_resp(i)) // while flush_latch waits for a mem resp, waiting_resp must be true
  }

  val llptw_out = llptw.io.out
  val llptw_mem = llptw.io.mem
  llptw_mem.flush_latch := flush_latch.take(l2tlbParams.llptwsize)
  llptw_mem.req_mask := waiting_resp.take(l2tlbParams.llptwsize)
  ptw.io.mem.mask := waiting_resp.apply(l2tlbParams.llptwsize)
  hptw.io.mem.mask := waiting_resp.apply(l2tlbParams.llptwsize + 1)

  val mem_arb = Module(new Arbiter(new L2TlbMemReqBundle(), 3))
  mem_arb.io.in(0) <> ptw.io.mem.req
  mem_arb.io.in(1) <> llptw_mem.req
  mem_arb.io.in(2) <> hptw.io.mem.req
  mem_arb.io.out.ready := mem.a.ready && !flush

  // // assert: mem should not be accessed twice at the same addr
  // val last_resp_vpn = RegEnable(cache.io.refill.bits.req_info_dup(0).vpn, cache.io.refill.valid)
  // val last_resp_s2xlate = RegEnable(cache.io.refill.bits.req_info_dup(0).s2xlate, cache.io.refill.valid)
  // val last_resp_level = RegEnable(cache.io.refill.bits.level_dup(0), cache.io.refill.valid)
  // val last_resp_v = RegInit(false.B)
  // val last_has_invalid = !Cat(cache.io.refill.bits.ptes.asTypeOf(Vec(blockBits/XLEN, UInt(XLEN.W))).map(a => a(0))).andR || cache.io.refill.bits.sel_pte_dup(0).asTypeOf(new PteBundle).isAf()
  // when (cache.io.refill.valid) { last_resp_v := !last_has_invalid }
  // when (flush) { last_resp_v := false.B }
  // XSError(last_resp_v && cache.io.refill.valid &&
  //   (cache.io.refill.bits.req_info_dup(0).vpn === last_resp_vpn) &&
  //   (cache.io.refill.bits.level_dup(0) === last_resp_level) &&
  //   (cache.io.refill.bits.req_info_dup(0).s2xlate === last_resp_s2xlate),
  //   "l2tlb should not access mem at the same addr twice")
  // // ATTENTION: this may wrongly assert when the last part of a ptes block is valid
  // // but the current part is invalid, so one more mem access happens.
  // // If this happens, remove the assert.
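  // req_addr_low records, per outstanding source ID, which XLEN-wide word of the refilled
  // block holds the PTE this walk asked for, so the right slice can be picked out of the
  // full cache-line response below.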
  val req_addr_low = Reg(Vec(MemReqWidth, UInt((log2Up(l2tlbParams.blockBytes) - log2Up(XLEN / 8)).W)))

  when (llptw.io.in.fire) {
    // when enqueuing the llptw, set req_addr_low to select the mem resp data part
    req_addr_low(llptw_mem.enq_ptr) := addr_low_from_vpn(llptw.io.in.bits.req_info.vpn)
  }
  when (mem_arb.io.out.fire) {
    req_addr_low(mem_arb.io.out.bits.id) := addr_low_from_paddr(mem_arb.io.out.bits.addr)
    waiting_resp(mem_arb.io.out.bits.id) := true.B
    hptw_bypassed := from_hptw(mem_arb.io.out.bits.id) && mem_arb.io.out.bits.hptw_bypassed
  }
  // mem read
  val memRead = edge.Get(
    fromSource = mem_arb.io.out.bits.id,
    // toAddress = memAddr(log2Up(CacheLineSize / 2 / 8) - 1, 0),
    toAddress = blockBytes_align(mem_arb.io.out.bits.addr),
    lgSize = log2Up(l2tlbParams.blockBytes).U
  )._2
  mem.a.bits := memRead
  mem.a.valid := mem_arb.io.out.valid && !flush
  mem.a.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.PTW.id.U)
  mem.d.ready := true.B
  // mem -> data buffer
  val refill_data = RegInit(VecInit.fill(blockBits / l1BusDataWidth)(0.U(l1BusDataWidth.W)))
  val refill_helper = edge.firstlastHelper(mem.d.bits, mem.d.fire)
  val mem_resp_done = refill_helper._3
  val mem_resp_from_llptw = from_llptw(mem.d.bits.source)
  val mem_resp_from_ptw = from_ptw(mem.d.bits.source)
  val mem_resp_from_hptw = from_hptw(mem.d.bits.source)
  when (mem.d.valid) {
    assert(mem.d.bits.source < MemReqWidth.U)
    refill_data(refill_helper._4) := mem.d.bits.data
  }
  // refill_data_tmp is a wire fork of refill_data, but one cycle earlier
  val refill_data_tmp = WireInit(refill_data)
  refill_data_tmp(refill_helper._4) := mem.d.bits.data

  // save only one pte for each id
  // (the miss queue may not be able to respond to the tlb with low latency; it should have the
  //  highest priority, but that is difficult to design into the cache)
  val resp_pte = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize + 1) { RegEnable(get_part(refill_data_tmp, req_addr_low(i)), 0.U.asTypeOf(get_part(refill_data_tmp, req_addr_low(i))), mem_resp_done && mem_resp_from_hptw) }
    else if (i == l2tlbParams.llptwsize) { RegEnable(get_part(refill_data_tmp, req_addr_low(i)), 0.U.asTypeOf(get_part(refill_data_tmp, req_addr_low(i))), mem_resp_done && mem_resp_from_ptw) }
    else { Mux(llptw_mem.buffer_it(i), get_part(refill_data, req_addr_low(i)), RegEnable(get_part(refill_data, req_addr_low(i)), 0.U.asTypeOf(get_part(refill_data, req_addr_low(i))), llptw_mem.buffer_it(i))) }
    // llptw cannot use refill_data_tmp, because the enq bypass's result takes effect in the next cycle
  ))

  // save eight ptes for each id for the sector tlb
  // (the miss queue may not be able to respond to the tlb with low latency; it should have the
  //  highest priority, but that is difficult to design into the cache)
  val resp_pte_sector = VecInit((0 until MemReqWidth).map(i =>
    if (i == l2tlbParams.llptwsize + 1) { RegEnable(refill_data_tmp, 0.U.asTypeOf(refill_data_tmp), mem_resp_done && mem_resp_from_hptw) }
    else if (i == l2tlbParams.llptwsize) { RegEnable(refill_data_tmp, 0.U.asTypeOf(refill_data_tmp), mem_resp_done && mem_resp_from_ptw) }
    else { Mux(llptw_mem.buffer_it(i), refill_data, RegEnable(refill_data, 0.U.asTypeOf(refill_data), llptw_mem.buffer_it(i))) }
    // llptw cannot use refill_data_tmp, because the enq bypass's result takes effect in the next cycle
  ))
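  // resp_pte holds one XLEN-wide PTE per source ID, while resp_pte_sector holds the entire
  // refilled block (tlbcontiguous PTEs) so the sector TLB can later merge contiguous mappings.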
  // mem -> llptw
  llptw_mem.resp.valid := mem_resp_done && mem_resp_from_llptw
  llptw_mem.resp.bits.id := DataHoldBypass(mem.d.bits.source, mem.d.valid)
  llptw_mem.resp.bits.value := DataHoldBypass(refill_data_tmp.asUInt, mem.d.valid)
  // mem -> ptw
  ptw.io.mem.resp.valid := mem_resp_done && mem_resp_from_ptw
  ptw.io.mem.resp.bits := resp_pte.apply(l2tlbParams.llptwsize)
  // mem -> hptw
  hptw.io.mem.resp.valid := mem_resp_done && mem_resp_from_hptw
  hptw.io.mem.resp.bits := resp_pte.apply(l2tlbParams.llptwsize + 1)
  // mem -> cache
  val refill_from_llptw = mem_resp_from_llptw
  val refill_from_ptw = mem_resp_from_ptw
  val refill_from_hptw = mem_resp_from_hptw
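  // A completed mem response refills the page cache only if no flush arrived while it was in
  // flight (flush and flush_latch are clear) and it does not belong to a bypassed HPTW walk
  // (hptw_bypassed), whose result is deliberately not cached.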
  val refill_level = Mux(refill_from_llptw, 0.U, Mux(refill_from_ptw, RegEnable(ptw.io.refill.level, 0.U, ptw.io.mem.req.fire), RegEnable(hptw.io.refill.level, 0.U, hptw.io.mem.req.fire)))
  val refill_valid = mem_resp_done && !flush && !flush_latch(mem.d.bits.source) && !hptw_bypassed

  cache.io.refill.valid := GatedValidRegNext(refill_valid, false.B)
  cache.io.refill.bits.ptes := refill_data.asUInt
  cache.io.refill.bits.req_info_dup.map(_ := RegEnable(Mux(refill_from_llptw, llptw_mem.refill, Mux(refill_from_ptw, ptw.io.refill.req_info, hptw.io.refill.req_info)), refill_valid))
  cache.io.refill.bits.level_dup.map(_ := RegEnable(refill_level, refill_valid))
  cache.io.refill.bits.levelOH(refill_level, refill_valid)
  cache.io.refill.bits.sel_pte_dup.map(_ := RegEnable(sel_data(refill_data_tmp.asUInt, req_addr_low(mem.d.bits.source)), refill_valid))

  if (env.EnableDifftest) {
    val difftest_ptw_addr = RegInit(VecInit(Seq.fill(MemReqWidth)(0.U(PAddrBits.W))))
    when (mem.a.valid) {
      difftest_ptw_addr(mem.a.bits.source) := mem.a.bits.address
    }

    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := io.hartId
    difftest.index := 2.U
    difftest.valid := cache.io.refill.valid
    difftest.addr := difftest_ptw_addr(RegEnable(mem.d.bits.source, mem.d.valid))
    difftest.data := refill_data.asTypeOf(difftest.data)
    difftest.idtfr := DontCare
  }

  if (env.EnableDifftest) {
    for (i <- 0 until PtwWidth) {
      val difftest = DifftestModule(new DiffL2TLBEvent)
      difftest.coreid := io.hartId
      difftest.valid := io.tlb(i).resp.fire && !io.tlb(i).resp.bits.s1.af && !io.tlb(i).resp.bits.s2.gaf
      difftest.index := i.U
      difftest.vpn := Cat(io.tlb(i).resp.bits.s1.entry.tag, 0.U(sectortlbwidth.W))
      difftest.pbmt := io.tlb(i).resp.bits.s1.entry.pbmt
      difftest.g_pbmt := io.tlb(i).resp.bits.s2.entry.pbmt
      for (j <- 0 until tlbcontiguous) {
        difftest.ppn(j) := Cat(io.tlb(i).resp.bits.s1.entry.ppn, io.tlb(i).resp.bits.s1.ppn_low(j))
        difftest.valididx(j) := io.tlb(i).resp.bits.s1.valididx(j)
        difftest.pteidx(j) := io.tlb(i).resp.bits.s1.pteidx(j)
      }
      difftest.perm := io.tlb(i).resp.bits.s1.entry.perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      difftest.level := io.tlb(i).resp.bits.s1.entry.level.getOrElse(0.U.asUInt)
      difftest.pf := io.tlb(i).resp.bits.s1.pf
      difftest.satp := Cat(io.csr.tlb.satp.mode, io.csr.tlb.satp.asid, io.csr.tlb.satp.ppn)
      difftest.vsatp := Cat(io.csr.tlb.vsatp.mode, io.csr.tlb.vsatp.asid, io.csr.tlb.vsatp.ppn)
      difftest.hgatp := Cat(io.csr.tlb.hgatp.mode, io.csr.tlb.hgatp.vmid, io.csr.tlb.hgatp.ppn)
      difftest.gvpn := io.tlb(i).resp.bits.s2.entry.tag
      difftest.g_perm := io.tlb(i).resp.bits.s2.entry.perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      difftest.g_level := io.tlb(i).resp.bits.s2.entry.level.getOrElse(0.U.asUInt)
      difftest.s2ppn := io.tlb(i).resp.bits.s2.entry.ppn
      difftest.gpf := io.tlb(i).resp.bits.s2.gpf
      difftest.s2xlate := io.tlb(i).resp.bits.s2xlate
    }
  }

  // pmp
  pmp_check(0).req <> ptw.io.pmp.req
  ptw.io.pmp.resp <> pmp_check(0).resp
  pmp_check(1).req <> llptw.io.pmp.req
  llptw.io.pmp.resp <> pmp_check(1).resp
  pmp_check(2).req <> hptw.io.pmp.req
  hptw.io.pmp.resp <> pmp_check(2).resp

  llptw_out.ready := outReady(llptw_out.bits.req_info.source, outArbMqPort)

  // hptw and page cache -> ptw and llptw
  val HptwRespArbCachePort = 0
  val HptwRespArbHptw = 1
  hptw_resp_arb.io.in(HptwRespArbCachePort).valid := cache.io.resp.valid && cache.io.resp.bits.hit && cache.io.resp.bits.isHptwReq
  hptw_resp_arb.io.in(HptwRespArbCachePort).bits.id := cache.io.resp.bits.toHptw.id
  hptw_resp_arb.io.in(HptwRespArbCachePort).bits.resp := cache.io.resp.bits.toHptw.resp
  hptw_resp_arb.io.in(HptwRespArbHptw).valid := hptw.io.resp.valid
  hptw_resp_arb.io.in(HptwRespArbHptw).bits.id := hptw.io.resp.bits.id
  hptw_resp_arb.io.in(HptwRespArbHptw).bits.resp := hptw.io.resp.bits.resp
  hptw.io.resp.ready := hptw_resp_arb.io.in(HptwRespArbHptw).ready

  ptw.io.hptw.resp.valid := hptw_resp_arb.io.out.valid && hptw_resp_arb.io.out.bits.id === FsmReqID.U
  ptw.io.hptw.resp.bits.h_resp := hptw_resp_arb.io.out.bits.resp
  llptw.io.hptw.resp.valid := hptw_resp_arb.io.out.valid && hptw_resp_arb.io.out.bits.id =/= FsmReqID.U
  llptw.io.hptw.resp.bits.id := hptw_resp_arb.io.out.bits.id
  llptw.io.hptw.resp.bits.h_resp := hptw_resp_arb.io.out.bits.resp
  hptw_resp_arb.io.out.ready := true.B

  // Timing: may need some optimization here, possibly even an extra cycle
  for (i <- 0 until PtwWidth) {
    mergeArb(i).in(outArbCachePort).valid := cache.io.resp.valid && cache.io.resp.bits.hit && cache.io.resp.bits.req_info.source === i.U && !cache.io.resp.bits.isHptwReq
    mergeArb(i).in(outArbCachePort).bits.s2xlate := cache.io.resp.bits.req_info.s2xlate
    mergeArb(i).in(outArbCachePort).bits.s1 := cache.io.resp.bits.stage1
    mergeArb(i).in(outArbCachePort).bits.s2 := cache.io.resp.bits.toHptw.resp
    mergeArb(i).in(outArbFsmPort).valid := ptw.io.resp.valid && ptw.io.resp.bits.source === i.U
    mergeArb(i).in(outArbFsmPort).bits.s2xlate := ptw.io.resp.bits.s2xlate
    mergeArb(i).in(outArbFsmPort).bits.s1 := ptw.io.resp.bits.resp
    mergeArb(i).in(outArbFsmPort).bits.s2 := ptw.io.resp.bits.h_resp
    mergeArb(i).in(outArbMqPort).valid := llptw_out.valid && llptw_out.bits.req_info.source === i.U
    mergeArb(i).in(outArbMqPort).bits.s2xlate := llptw_out.bits.req_info.s2xlate
    mergeArb(i).in(outArbMqPort).bits.s1 := Mux(
      llptw_out.bits.first_s2xlate_fault, llptw_stage1(llptw_out.bits.id),
      contiguous_pte_to_merge_ptwResp(
        resp_pte_sector(llptw_out.bits.id).asUInt, llptw_out.bits.req_info.vpn, llptw_out.bits.af,
        true, s2xlate = llptw_out.bits.req_info.s2xlate, mPBMTE = mPBMTE, hPBMTE = hPBMTE, gpf = llptw_out.bits.h_resp.gpf
      )
    )
    mergeArb(i).in(outArbMqPort).bits.s2 := llptw_out.bits.h_resp
    mergeArb(i).out.ready := outArb(i).in(0).ready
  }

  for (i <- 0 until PtwWidth) {
    outArb(i).in(0).valid := mergeArb(i).out.valid
    outArb(i).in(0).bits.s2xlate := mergeArb(i).out.bits.s2xlate
    outArb(i).in(0).bits.s1 := merge_ptwResp_to_sector_ptwResp(mergeArb(i).out.bits.s1)
    outArb(i).in(0).bits.s2 := mergeArb(i).out.bits.s2
  }

  // io.tlb.map(_.resp) <> outArb.map(_.out)
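  // Per TLB port, mergeArb picks the response source (page cache hit, PTW state machine, or
  // LLPTW), and outArb then compresses the merged PtwMergeResp into the sector-TLB
  // PtwSectorResp format via merge_ptwResp_to_sector_ptwResp below.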
  io.tlb.map(_.resp).zip(outArb.map(_.out)).map {
    case (resp, out) => resp <> out
  }

  // sfence
  when (flush) {
    for (i <- 0 until MemReqWidth) {
      when (waiting_resp(i)) {
        flush_latch(i) := true.B
      }
    }
  }
  // mem -> control signal
  // waiting_resp and flush_latch are reset when mem_resp_done
  when (mem_resp_done) {
    waiting_resp(mem.d.bits.source) := false.B
    flush_latch(mem.d.bits.source) := false.B
  }

  def block_decoupled[T <: Data](source: DecoupledIO[T], sink: DecoupledIO[T], block_signal: Bool) = {
    sink.valid := source.valid && !block_signal
    source.ready := sink.ready && !block_signal
    sink.bits := source.bits
  }

  def get_part(data: Vec[UInt], index: UInt): UInt = {
    val inner_data = data.asTypeOf(Vec(data.getWidth / XLEN, UInt(XLEN.W)))
    inner_data(index)
  }

  // not_super means this is a normal (non-super) page;
  // for a super page, valididx(i) is set all-true to make l1 tlb matching convenient
  def contiguous_pte_to_merge_ptwResp(pte: UInt, vpn: UInt, af: Bool, af_first: Boolean, s2xlate: UInt, mPBMTE: Bool, hPBMTE: Bool, not_super: Boolean = true, gpf: Bool): PtwMergeResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_merge_resp = Wire(new PtwMergeResp())
    val hasS2xlate = s2xlate =/= noS2xlate
    val pbmte = Mux(s2xlate === onlyStage1 || s2xlate === allStage, hPBMTE, mPBMTE)
    for (i <- 0 until tlbcontiguous) {
      val pte_in = pte(64 * i + 63, 64 * i).asTypeOf(new PteBundle())
      val ptw_resp = Wire(new PtwMergeEntry(tagLen = sectorvpnLen, hasPerm = true, hasLevel = true, hasNapot = true))
      ptw_resp.ppn := pte_in.getPPN()(ptePPNLen - 1, sectortlbwidth)
      ptw_resp.ppn_low := pte_in.getPPN()(sectortlbwidth - 1, 0)
      ptw_resp.level.map(_ := 0.U)
      ptw_resp.pbmt := pte_in.pbmt
      ptw_resp.n.map(_ := pte_in.n)
      ptw_resp.perm.map(_ := pte_in.getPerm())
      ptw_resp.tag := vpn(vpnLen - 1, sectortlbwidth)
      ptw_resp.pf := (if (af_first) !af else true.B) && (pte_in.isPf(0.U, pbmte) || !pte_in.isLeaf())
      ptw_resp.af := (if (!af_first) pte_in.isPf(0.U, pbmte) else true.B) && (af || (Mux(s2xlate === allStage, false.B, pte_in.isAf()) && !(hasS2xlate && gpf)))
      ptw_resp.v := !ptw_resp.pf
      ptw_resp.prefetch := DontCare
      ptw_resp.asid := Mux(hasS2xlate, vsatp.asid, satp.asid)
      ptw_resp.vmid.map(_ := hgatp.vmid)
      ptw_merge_resp.entry(i) := ptw_resp
    }
    ptw_merge_resp.pteidx := UIntToOH(vpn(sectortlbwidth - 1, 0)).asBools
    ptw_merge_resp.not_super := not_super.B
    ptw_merge_resp.not_merge := hasS2xlate
    ptw_merge_resp
  }
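  // Worked example (tlbcontiguous = 8, 64-bit PTEs): contiguous_pte_to_merge_ptwResp slices a
  // 512-bit refill into pte(63, 0) ... pte(511, 448), builds one PtwMergeEntry per slice, and
  // records which slice the original vpn selected in the one-hot pteidx.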
  def merge_ptwResp_to_sector_ptwResp(pte: PtwMergeResp): PtwSectorResp = {
    assert(tlbcontiguous == 8, "Only support tlbcontiguous = 8!")
    val ptw_sector_resp = Wire(new PtwSectorResp)
    ptw_sector_resp.entry.tag := pte.entry(OHToUInt(pte.pteidx)).tag
    ptw_sector_resp.entry.asid := pte.entry(OHToUInt(pte.pteidx)).asid
    ptw_sector_resp.entry.vmid.map(_ := pte.entry(OHToUInt(pte.pteidx)).vmid.getOrElse(0.U))
    ptw_sector_resp.entry.ppn := pte.entry(OHToUInt(pte.pteidx)).ppn
    ptw_sector_resp.entry.pbmt := pte.entry(OHToUInt(pte.pteidx)).pbmt
    ptw_sector_resp.entry.n.map(_ := pte.entry(OHToUInt(pte.pteidx)).n.getOrElse(0.U))
    ptw_sector_resp.entry.perm.map(_ := pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)))
    ptw_sector_resp.entry.level.map(_ := pte.entry(OHToUInt(pte.pteidx)).level.getOrElse(0.U(log2Up(Level + 1).W)))
    ptw_sector_resp.entry.prefetch := pte.entry(OHToUInt(pte.pteidx)).prefetch
    ptw_sector_resp.entry.v := pte.entry(OHToUInt(pte.pteidx)).v
    ptw_sector_resp.af := pte.entry(OHToUInt(pte.pteidx)).af
    ptw_sector_resp.pf := pte.entry(OHToUInt(pte.pteidx)).pf
    ptw_sector_resp.addr_low := OHToUInt(pte.pteidx)
    ptw_sector_resp.pteidx := pte.pteidx
    for (i <- 0 until tlbcontiguous) {
      val ppn_equal = pte.entry(i).ppn === pte.entry(OHToUInt(pte.pteidx)).ppn
      val pbmt_equal = pte.entry(i).pbmt === pte.entry(OHToUInt(pte.pteidx)).pbmt
      val n_equal = pte.entry(i).n.getOrElse(0.U) === pte.entry(OHToUInt(pte.pteidx)).n.getOrElse(0.U)
      val perm_equal = pte.entry(i).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt === pte.entry(OHToUInt(pte.pteidx)).perm.getOrElse(0.U.asTypeOf(new PtePermBundle)).asUInt
      val v_equal = pte.entry(i).v === pte.entry(OHToUInt(pte.pteidx)).v
      val af_equal = pte.entry(i).af === pte.entry(OHToUInt(pte.pteidx)).af
      val pf_equal = pte.entry(i).pf === pte.entry(OHToUInt(pte.pteidx)).pf
      ptw_sector_resp.valididx(i) := ((ppn_equal && pbmt_equal && n_equal && perm_equal && v_equal && af_equal && pf_equal) || !pte.not_super) && !pte.not_merge
      ptw_sector_resp.ppn_low(i) := pte.entry(i).ppn_low
    }
    ptw_sector_resp.valididx(OHToUInt(pte.pteidx)) := true.B
    ptw_sector_resp
  }

  def outReady(source: UInt, port: Int): Bool = {
    MuxLookup(source, true.B)((0 until PtwWidth).map(i => i.U -> mergeArb(i).in(port).ready))
  }

  // debug info
  for (i <- 0 until PtwWidth) {
    XSDebug(p"[io.tlb(${i.U})] ${io.tlb(i)}\n")
  }
  XSDebug(p"[sfence] ${io.sfence}\n")
  XSDebug(p"[io.csr.tlb] ${io.csr.tlb}\n")

  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"req_count${i}", io.tlb(i).req(0).fire)
    XSPerfAccumulate(s"req_blocked_count_${i}", io.tlb(i).req(0).valid && !io.tlb(i).req(0).ready)
  }
  XSPerfAccumulate(s"req_blocked_by_mq", arb1.io.out.valid && missQueue.io.out.valid)
  for (i <- 0 until (MemReqWidth + 1)) {
    XSPerfAccumulate(s"mem_req_util${i}", PopCount(waiting_resp) === i.U)
  }
  XSPerfAccumulate("mem_cycle", PopCount(waiting_resp) =/= 0.U)
  XSPerfAccumulate("mem_count", mem.a.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"llptw_ppn_af${i}", mergeArb(i).in(outArbMqPort).valid && mergeArb(i).in(outArbMqPort).bits.s1.entry(OHToUInt(mergeArb(i).in(outArbMqPort).bits.s1.pteidx)).af && !llptw_out.bits.af)
    XSPerfAccumulate(s"access_fault${i}", io.tlb(i).resp.fire && io.tlb(i).resp.bits.s1.af)
  }

  // print configs
  println(s"${l2tlbParams.name}: a ptw, a llptw with size ${l2tlbParams.llptwsize}, miss queue size ${MissQueueSize} l2:${l2tlbParams.l2Size} fa l1: nSets ${l2tlbParams.l1nSets} nWays ${l2tlbParams.l1nWays} l0: ${l2tlbParams.l0nSets} nWays ${l2tlbParams.l0nWays} blockBytes:${l2tlbParams.blockBytes}")

  val perfEvents = Seq(llptw, cache, ptw).flatMap(_.getPerfEvents)
  generatePerfEvent()
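  // The ChiselDB trace tables below record L1 TLB, page cache, PTW/LLPTW, and miss queue
  // traffic; each is gated by a Constantin record (isWrite*Table), which in XiangShan's
  // simulation flow can be toggled at run time without regenerating the RTL.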
  val isWriteL1TlbTable = Constantin.createRecord(s"isWriteL1TlbTable$hartId")
  val L1TlbTable = ChiselDB.createTable(s"L1Tlb_hart$hartId", new L1TlbDB)
  val ITlbReqDB, DTlbReqDB, ITlbRespDB, DTlbRespDB = Wire(new L1TlbDB)
  ITlbReqDB.vpn := io.tlb(0).req(0).bits.vpn
  DTlbReqDB.vpn := io.tlb(1).req(0).bits.vpn
  ITlbRespDB.vpn := io.tlb(0).resp.bits.s1.entry.tag
  DTlbRespDB.vpn := io.tlb(1).resp.bits.s1.entry.tag
  L1TlbTable.log(ITlbReqDB, isWriteL1TlbTable.orR && io.tlb(0).req(0).fire, "ITlbReq", clock, reset)
  L1TlbTable.log(DTlbReqDB, isWriteL1TlbTable.orR && io.tlb(1).req(0).fire, "DTlbReq", clock, reset)
  L1TlbTable.log(ITlbRespDB, isWriteL1TlbTable.orR && io.tlb(0).resp.fire, "ITlbResp", clock, reset)
  L1TlbTable.log(DTlbRespDB, isWriteL1TlbTable.orR && io.tlb(1).resp.fire, "DTlbResp", clock, reset)

  val isWritePageCacheTable = Constantin.createRecord(s"isWritePageCacheTable$hartId")
  val PageCacheTable = ChiselDB.createTable(s"PageCache_hart$hartId", new PageCacheDB)
  val PageCacheDB = Wire(new PageCacheDB)
  PageCacheDB.vpn := Cat(cache.io.resp.bits.stage1.entry(0).tag, OHToUInt(cache.io.resp.bits.stage1.pteidx))
  PageCacheDB.source := cache.io.resp.bits.req_info.source
  PageCacheDB.bypassed := cache.io.resp.bits.bypassed
  PageCacheDB.is_first := cache.io.resp.bits.isFirst
  PageCacheDB.prefetched := cache.io.resp.bits.stage1.entry(0).prefetch
  PageCacheDB.prefetch := cache.io.resp.bits.prefetch
  PageCacheDB.l2Hit := cache.io.resp.bits.toFsm.l2Hit
  PageCacheDB.l1Hit := cache.io.resp.bits.toFsm.l1Hit
  PageCacheDB.hit := cache.io.resp.bits.hit
  PageCacheTable.log(PageCacheDB, isWritePageCacheTable.orR && cache.io.resp.fire, "PageCache", clock, reset)

  val isWritePTWTable = Constantin.createRecord(s"isWritePTWTable$hartId")
  val PTWTable = ChiselDB.createTable(s"PTW_hart$hartId", new PTWDB)
  val PTWReqDB, PTWRespDB, LLPTWReqDB, LLPTWRespDB = Wire(new PTWDB)
  PTWReqDB.vpn := ptw.io.req.bits.req_info.vpn
  PTWReqDB.source := ptw.io.req.bits.req_info.source
  PTWRespDB.vpn := ptw.io.refill.req_info.vpn
  PTWRespDB.source := ptw.io.refill.req_info.source
  LLPTWReqDB.vpn := llptw.io.in.bits.req_info.vpn
  LLPTWReqDB.source := llptw.io.in.bits.req_info.source
  LLPTWRespDB.vpn := llptw.io.mem.refill.vpn
  LLPTWRespDB.source := llptw.io.mem.refill.source
  PTWTable.log(PTWReqDB, isWritePTWTable.orR && ptw.io.req.fire, "PTWReq", clock, reset)
  PTWTable.log(PTWRespDB, isWritePTWTable.orR && ptw.io.mem.resp.fire, "PTWResp", clock, reset)
  PTWTable.log(LLPTWReqDB, isWritePTWTable.orR && llptw.io.in.fire, "LLPTWReq", clock, reset)
  PTWTable.log(LLPTWRespDB, isWritePTWTable.orR && llptw.io.mem.resp.fire, "LLPTWResp", clock, reset)

  val isWriteL2TlbMissQueueTable = Constantin.createRecord(s"isWriteL2TlbMissQueueTable$hartId")
  val L2TlbMissQueueTable = ChiselDB.createTable(s"L2TlbMissQueue_hart$hartId", new L2TlbMissQueueDB)
  val L2TlbMissQueueInDB, L2TlbMissQueueOutDB = Wire(new L2TlbMissQueueDB)
  L2TlbMissQueueInDB.vpn := missQueue.io.in.bits.req_info.vpn
  L2TlbMissQueueOutDB.vpn := missQueue.io.out.bits.req_info.vpn
  L2TlbMissQueueTable.log(L2TlbMissQueueInDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.in.fire, "L2TlbMissQueueIn", clock, reset)
  L2TlbMissQueueTable.log(L2TlbMissQueueOutDB, isWriteL2TlbMissQueueTable.orR && missQueue.io.out.fire, "L2TlbMissQueueOut", clock, reset)
}

/** BlockHelper: block the miss queue so it does not send too many reqs to the cache.
  *   enable:  a req was accepted downstream (ptw req fire); release the block and keep blocking armed
  *   start:   a miss queue output fired; start blocking the miss queue's output
  *   block:   block the miss queue's output
  *   latency: the cache access latency of the last miss queue output
  */
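// Behavior sketch (with latency = 3, as instantiated in L2TLBImp): once `start` fires, `block`
// goes high and a counter runs; the block is released either when the counter reaches `latency`
// (the last req has passed the cache) or as soon as `enable` reports an accepted req. If the
// counter expires without `enable`, further blocking is disarmed until `enable` fires again.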
class BlockHelper(latency: Int)(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val enable = Input(Bool())
    val start = Input(Bool())
    val block = Output(Bool())
  })

  val count = RegInit(0.U(log2Ceil(latency).W))
  val valid = RegInit(false.B)
  val work = RegInit(true.B)

  io.block := valid

  when (io.start && work) { valid := true.B }
  when (valid) { count := count + 1.U }
  when (count === latency.U || io.enable) {
    valid := false.B
    work := io.enable
    count := 0.U
  }
}

class PTEHelper() extends ExtModule {
  val clock = IO(Input(Clock()))
  val enable = IO(Input(Bool()))
  val satp = IO(Input(UInt(64.W)))
  val vpn = IO(Input(UInt(64.W)))
  val pte = IO(Output(UInt(64.W)))
  val level = IO(Output(UInt(8.W)))
  val pf = IO(Output(UInt(8.W)))
}

class PTWDelayN[T <: Data](gen: T, n: Int, flush: Bool) extends Module {
  val io = IO(new Bundle() {
    val in = Input(gen)
    val out = Output(gen)
    val ptwflush = Input(flush.cloneType)
  })
  val out = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  val t = RegInit(VecInit(Seq.fill(n)(0.U.asTypeOf(gen))))
  out(0) := io.in
  if (n == 1) {
    io.out := out(0)
  } else {
    when (io.ptwflush) {
      for (i <- 0 until n) {
        t(i) := 0.U.asTypeOf(gen)
        out(i) := 0.U.asTypeOf(gen)
      }
      io.out := 0.U.asTypeOf(gen)
    } .otherwise {
      for (i <- 1 until n) {
        t(i - 1) := out(i - 1)
        out(i) := t(i - 1)
      }
      io.out := out(n - 1)
    }
  }
}

object PTWDelayN {
  def apply[T <: Data](in: T, n: Int, flush: Bool): T = {
    val delay = Module(new PTWDelayN(in.cloneType, n, flush))
    delay.io.in := in
    delay.io.ptwflush := flush
    delay.io.out
  }
}
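// FakePTW is a simulation-only walker used when coreParams.softPTW is set: it forwards each
// request to the external PTEHelper model above and returns the looked-up PTE after
// coreParams.softPTWDelay cycles, instead of instantiating the hardware L2TLB.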
class FakePTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new L2TLBIO)
  val flush = VecInit(Seq.fill(PtwWidth)(false.B))
  flush(0) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, itlbParams.fenceDelay)
  flush(1) := DelayN(io.sfence.valid || io.csr.tlb.satp.changed, ldtlbParams.fenceDelay)
  for (i <- 0 until PtwWidth) {
    val helper = Module(new PTEHelper())
    helper.clock := clock
    helper.satp := io.csr.tlb.satp.ppn

    if (coreParams.softPTWDelay == 1) {
      helper.enable := io.tlb(i).req(0).fire
      helper.vpn := io.tlb(i).req(0).bits.vpn
    } else {
      helper.enable := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay - 1, flush(i))
      helper.vpn := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay - 1, flush(i))
    }

    val pte = helper.pte.asTypeOf(new PteBundle)
    val level = helper.level
    val pf = helper.pf
    val empty = RegInit(true.B)
    when (io.tlb(i).req(0).fire) {
      empty := false.B
    } .elsewhen (io.tlb(i).resp.fire || flush(i)) {
      empty := true.B
    }

    io.tlb(i).req(0).ready := empty || io.tlb(i).resp.fire
    io.tlb(i).resp.valid := PTWDelayN(io.tlb(i).req(0).fire, coreParams.softPTWDelay, flush(i))
    assert(!io.tlb(i).resp.valid || io.tlb(i).resp.ready)
    io.tlb(i).resp.bits.s1.entry.tag := PTWDelayN(io.tlb(i).req(0).bits.vpn, coreParams.softPTWDelay, flush(i))
    io.tlb(i).resp.bits.s1.entry.pbmt := pte.pbmt
    io.tlb(i).resp.bits.s1.entry.ppn := pte.ppn
    io.tlb(i).resp.bits.s1.entry.perm.map(_ := pte.getPerm())
    io.tlb(i).resp.bits.s1.entry.level.map(_ := level)
    io.tlb(i).resp.bits.s1.pf := pf
    io.tlb(i).resp.bits.s1.af := DontCare // TODO: implement it
    io.tlb(i).resp.bits.s1.entry.v := !pf
    io.tlb(i).resp.bits.s1.entry.prefetch := DontCare
    io.tlb(i).resp.bits.s1.entry.asid := io.csr.tlb.satp.asid
  }
}

class L2TLBWrapper()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false
  val useSoftPTW = coreParams.softPTW
  val node = if (!useSoftPTW) TLIdentityNode() else null
  val ptw = if (!useSoftPTW) LazyModule(new L2TLB()) else null
  if (!useSoftPTW) {
    node := ptw.node
  }

  class L2TLBWrapperImp(wrapper: LazyModule) extends LazyModuleImp(wrapper) with HasPerfEvents {
    val io = IO(new L2TLBIO)
    val perfEvents = if (useSoftPTW) {
      val fake_ptw = Module(new FakePTW())
      io <> fake_ptw.io
      Seq()
    } else {
      io <> ptw.module.io
      ptw.module.getPerfEvents
    }
    generatePerfEvent()
  }

  lazy val module = new L2TLBWrapperImp(this)
}