/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** The page table walk is divided into two parts:
 * One, PTW: walks the non-leaf PDEs, one level at a time
 * Two, LLPTW: walks only the leaf PTEs (4KB pages), in parallel
 */


/** PTW : page table walker
 * a finite state machine
 * it only performs the 1GB and 2MB page walks,
 * or in other words, every level except the last (leaf) one
 **/
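// For reference, a minimal pure-Scala sketch of the Sv39 walk that PTW and
// LLPTW implement in hardware, assuming 9-bit VPN slices, a 12-bit page
// offset and 8-byte PTEs. The constants and names below are illustrative
// only; they are not the ones defined in HasPtwConst.
object Sv39WalkSketch {
  private val vpnnLen = 9   // bits per VPN slice (assumed)
  private val offLen = 12   // page-offset width (assumed)
  private val pteBytes = 8  // size of one PTE (assumed)

  /** The level-th 9-bit slice of a 27-bit VPN (level 0 is the highest). */
  def vpnn(vpn: Long, level: Int): Long =
    (vpn >> ((2 - level) * vpnnLen)) & ((1 << vpnnLen) - 1)

  /** Address of the PTE examined at `level`, given that level's table base PPN. */
  def pteAddr(basePpn: Long, vpn: Long, level: Int): Long =
    (basePpn << offLen) + vpnn(vpn, level) * pteBytes
}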
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l1Hit = Bool()
    val ppn = UInt(gvpnLen.W)
    val stage1Hit = Bool()
    val stage1 = new PtwMergeResp
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val s2xlate = UInt(2.W)
    val resp = new PtwMergeResp
    val h_resp = new HptwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: llptw was changed from "connect to llptw" to "connect to page cache"
  // to avoid a corner case that caused duplicate entries

  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(vpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val h_resp = Output(new HptwResp)
    }))
  }
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
}

class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)
  val sfence = io.sfence
  val mem = io.mem
  val req_s2xlate = Reg(UInt(2.W))
  val enableS2xlate = req_s2xlate =/= noS2xlate
  val onlyS1xlate = req_s2xlate === onlyStage1
  val onlyS2xlate = req_s2xlate === onlyStage2

  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)
  val hgatp = io.csr.hgatp
  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val s2xlate = enableS2xlate && !onlyS1xlate
  val level = RegInit(0.U(log2Up(Level).W))
  val af_level = RegInit(0.U(log2Up(Level).W)) // level to report on an access fault
  val ppn = Reg(UInt(gvpnLen.W))
  val vpn = Reg(UInt(vpnLen.W)) // vpn, or gvpn in the onlyS2xlate case
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val pte = mem.resp.bits.asTypeOf(new PteBundle().cloneType)

  // s/w handshake registers: s_* === false.B means "request still to be sent",
  // w_* === false.B means "response still outstanding"
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val s_hptw_req = RegInit(true.B)
  val w_hptw_resp = RegInit(true.B)
  val s_last_hptw_req = RegInit(true.B)
  val w_last_hptw_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)

  val idle = RegInit(true.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = idle === false.B && (s_pmp_check === false.B || mem_addr_update) && !finish

  val pageFault = pte.isPf(level)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val hptw_pageFault = RegInit(false.B)
  val hptw_accessFault = RegInit(false.B)
  val last_s2xlate = RegInit(false.B)
  val stage1Hit = RegEnable(io.req.bits.stage1Hit, io.req.fire)
  val stage1 = RegEnable(io.req.bits.stage1, io.req.fire)
  val hptw_resp_stage2 = Reg(Bool())

  val ppn_af = Mux(s2xlate, pte.isStage1Af(), pte.isAf()) // in two-stage translation, the stage-1 ppn is a gvpn for the host, so ppn_high need not be checked
  val find_pte = pte.isLeaf() || ppn_af || pageFault
  val to_find_pte = level === 1.U && find_pte === false.B
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire)

  val l1addr = MakeAddr(satp.ppn, getVpnn(vpn, 2))
  val l2addr = MakeAddr(Mux(l1Hit, ppn, pte.getPPN()), getVpnn(vpn, 1))
  val mem_addr = Mux(af_level === 0.U, l1addr, l2addr)

  val hptw_resp = RegEnable(io.hptw.resp.bits.h_resp, io.hptw.resp.fire)
  // gpaddr for the next stage-2 walk: when a stage-1 superpage leaf is found
  // (level 0: 1GB, level 1: 2MB), only the high PPN bits come from the PTE;
  // the untranslated low VPN bits pass straight through from the request
  val gpaddr = MuxCase(mem_addr, Seq(
    stage1Hit -> Cat(stage1.genPPN(), 0.U(offLen.W)),
    onlyS2xlate -> Cat(vpn, 0.U(offLen.W)),
    !s_last_hptw_req -> Cat(MuxLookup(level, pte.getPPN())(Seq(
      0.U -> Cat(pte.getPPN()(gvpnLen - 1, vpnnLen * 2), vpn(vpnnLen * 2 - 1, 0)),
      1.U -> Cat(pte.getPPN()(gvpnLen - 1, vpnnLen), vpn(vpnnLen - 1, 0))
    )),
    0.U(offLen.W))
  ))
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))

  io.req.ready := idle
  val ptw_resp = Wire(new PtwMergeResp)
  ptw_resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, Mux(accessFault, af_level, level), pte, vpn, satp.asid, hgatp.asid, vpn(sectortlbwidth - 1, 0), not_super = false)

  val normal_resp = idle === false.B && mem_addr_update && !last_s2xlate && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault) || onlyS2xlate)
  val stageHit_resp = idle === false.B && hptw_resp_stage2
  io.resp.valid := Mux(stage1Hit, stageHit_resp, normal_resp)
  io.resp.bits.source := source
  io.resp.bits.resp := Mux(stage1Hit, stage1, ptw_resp)
  io.resp.bits.h_resp := hptw_resp
  io.resp.bits.s2xlate := req_s2xlate

  io.llptw.valid := s_llptw_req === false.B && to_find_pte && !accessFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.req_info.s2xlate := req_s2xlate
  io.llptw.bits.ppn := DontCare

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := s_mem_req === false.B && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := Mux(s2xlate, hpaddr, mem_addr)
  mem.req.bits.id := FsmReqID.U(bMemID.W)
  mem.req.bits.hptw_bypassed := false.B

  io.refill.req_info.s2xlate := Mux(enableS2xlate, onlyStage1, req_s2xlate) // PTW refills the stage-1 PTE when s2xlate is enabled
  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  io.hptw.req.valid := !s_hptw_req || !s_last_hptw_req
  io.hptw.req.bits.id := FsmReqID.U(bMemID.W)
  io.hptw.req.bits.gvpn := get_pn(gpaddr)
  io.hptw.req.bits.source := source

  when (io.req.fire && io.req.bits.stage1Hit) {
    idle := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    s_hptw_req := false.B
    hptw_resp_stage2 := false.B
  }

  when (io.hptw.resp.fire && w_hptw_resp === false.B && stage1Hit) {
    w_hptw_resp := true.B
    hptw_resp_stage2 := true.B
  }

  when (io.resp.fire && stage1Hit) {
    idle := true.B
  }

  when (io.req.fire && !io.req.bits.stage1Hit) {
    val req = io.req.bits
    level := Mux(req.l1Hit, 1.U, 0.U)
    af_level := Mux(req.l1Hit, 1.U, 0.U)
    ppn := Mux(req.l1Hit, io.req.bits.ppn, satp.ppn)
    vpn := io.req.bits.req_info.vpn
    l1Hit := req.l1Hit
    accessFault := false.B
    idle := false.B
    hptw_pageFault := false.B
    req_s2xlate := io.req.bits.req_info.s2xlate
    when (io.req.bits.req_info.s2xlate =/= noS2xlate && io.req.bits.req_info.s2xlate =/= onlyStage1) {
      last_s2xlate := true.B
      s_hptw_req := false.B
    }.otherwise {
      s_pmp_check := false.B
    }
  }

  when (io.hptw.req.fire && s_hptw_req === false.B) {
    s_hptw_req := true.B
    w_hptw_resp := false.B
  }

  when (io.hptw.resp.fire && w_hptw_resp === false.B && !stage1Hit) {
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_hptw_resp := true.B
    when (onlyS2xlate) {
      mem_addr_update := true.B
      last_s2xlate := false.B
    }.otherwise {
      s_pmp_check := false.B
    }
  }

  when (io.hptw.req.fire && s_last_hptw_req === false.B) {
    w_last_hptw_resp := false.B
    s_last_hptw_req := true.B
  }

  when (io.hptw.resp.fire && w_last_hptw_resp === false.B) {
    hptw_pageFault := io.hptw.resp.bits.h_resp.gpf
    hptw_accessFault := io.hptw.resp.bits.h_resp.gaf
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when (sent_to_pmp && mem_addr_update === false.B) {
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when (accessFault && idle === false.B) {
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
    mem_addr_update := true.B
    last_s2xlate := false.B
  }

  when (mem.req.fire) {
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when (mem.resp.fire && w_mem_resp === false.B) {
    w_mem_resp := true.B
    af_level := af_level + 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
  }

  when (mem_addr_update) {
    when (level === 0.U && !onlyS2xlate && !(find_pte || accessFault)) {
      level := levelNext
      when (s2xlate) {
        s_hptw_req := false.B
      }.otherwise {
        s_mem_req := false.B
      }
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen (io.llptw.valid) {
      when (io.llptw.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        last_s2xlate := false.B
      }
      finish := true.B
    }.elsewhen (s2xlate && last_s2xlate === true.B) {
      when (accessFault || pageFault || ppn_af) {
        last_s2xlate := false.B
      }.otherwise {
        s_last_hptw_req := false.B
        mem_addr_update := false.B
      }
    }.elsewhen (io.resp.valid) {
      when (io.resp.fire) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }


  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
    s_hptw_req := true.B
    w_hptw_resp := true.B
    s_last_hptw_req := true.B
    w_last_hptw_resp := true.B
  }


  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire)
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("ptw_ppn_af", io.resp.fire && ppn_af)
  XSPerfAccumulate("mem_count", mem.req.fire)
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire, mem.resp.fire, true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(!idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count         ", io.req.fire                                     ),
    ("fsm_busy          ", !idle                                           ),
    ("fsm_idle          ", idle                                            ),
    ("resp_blocked      ", io.resp.valid && !io.resp.ready                 ),
    ("mem_count         ", mem.req.fire                                    ),
    ("mem_cycle         ", BoolStopWatch(mem.req.fire, mem.resp.fire, true)),
    ("mem_blocked       ", mem.req.valid && !mem.req.ready                 ),
  )
  generatePerfEvent()
}
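// A minimal, self-contained sketch (not part of the design) of the s_*/w_*
// handshake idiom used by PTW and HPTW above: s_x === false.B means "request
// x still has to be sent" and w_x === false.B means "response x is still
// outstanding". All names below are illustrative.
class SwHandshakeSketch extends Module {
  val io = IO(new Bundle {
    val start = Input(Bool())
    val req   = DecoupledIO(UInt(8.W))
    val resp  = Flipped(ValidIO(UInt(8.W)))
    val done  = Output(Bool())
  })
  val s_req  = RegInit(true.B) // true: nothing left to send
  val w_resp = RegInit(true.B) // true: nothing left to wait for
  when (io.start && s_req && w_resp) { s_req := false.B }  // arm a new request
  io.req.valid := !s_req
  io.req.bits := 0.U
  when (io.req.fire) { s_req := true.B; w_resp := false.B } // sent, now wait
  when (io.resp.valid && !w_resp) { w_resp := true.B }      // response arrived
  io.done := s_req && w_resp
}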
/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
 * the page walker that only performs the 4KB (last-level) page walk
 **/
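// For reference, a pure-Scala sketch of the duplicate-merging idea used by
// the LLPTW below (see dup_vec / dup_req_fire): two requests can share one
// memory access when their last-level PTEs live in the same refill block.
// The block size here (8 PTEs, i.e. a 512-bit block of 64-bit PTEs) is an
// assumption for illustration; the real check is the dup() helper from
// HasPtwConst.
object DupSketch {
  private val ptesPerBlock = 8 // assumed: 512-bit refill block / 64-bit PTEs
  def dup(vpnA: Long, vpnB: Long): Boolean =
    (vpnA / ptesPerBlock) == (vpnB / ptesPerBlock)
}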
class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(gvpnLen.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val h_resp = Output(new HptwResp)
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val value = Output(UInt(blockBits.W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
  val hptw = new Bundle {
    val req = DecoupledIO(new Bundle {
      val source = UInt(bSourceWidth.W)
      val id = UInt(log2Up(l2tlbParams.llptwsize).W)
      val gvpn = UInt(vpnLen.W)
    })
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
      val h_resp = Output(new HptwResp)
    }))
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(gvpnLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
  val hptw_resp = new HptwResp()
}


class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())
  val enableS2xlate = io.in.bits.req_info.s2xlate =/= noS2xlate
  val satp = Mux(enableS2xlate, io.csr.vsatp, io.csr.satp)

  val flush = io.sfence.valid || io.csr.satp.changed || io.csr.vsatp.changed || io.csr.hgatp.changed
  val entries = Reg(Vec(l2tlbParams.llptwsize, new LLPTWEntry()))
  val state_idle :: state_hptw_req :: state_hptw_resp :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_last_hptw_req :: state_last_hptw_resp :: state_cache :: Nil = Enum(10)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))

  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)
  val is_hptw_req = state.map(_ === state_hptw_req)
  val is_last_hptw_req = state.map(_ === state_last_hptw_req)
  val is_hptw_resp = state.map(_ === state_hptw_resp)
  val is_last_hptw_resp = state.map(_ === state_last_hptw_resp)

  val full = !ParallelOR(is_emptys).asBool
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
  val mem_arb = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  // process hptw requests serially
  val hyper_arb1 = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb1.io.in(i).bits := entries(i)
    hyper_arb1.io.in(i).valid := is_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }
  val hyper_arb2 = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    hyper_arb2.io.in(i).bits := entries(i)
    hyper_arb2.io.in(i).valid := is_last_hptw_req(i) && !(Cat(is_hptw_resp).orR) && !(Cat(is_last_hptw_resp).orR)
  }

  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate request handling
  // to_wait: an earlier duplicate is still accessing mem, so enter state_mem_waiting
  // to_cache: an earlier duplicate has just come back, so enter state_cache
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn) && io.in.bits.req_info.s2xlate === entries(i).req_info.s2xlate
  )
  val dup_req_fire = mem_arb.io.out.fire && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) && io.in.bits.req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate // dup with the entry whose mem req fires this cycle
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries that have already sent a mem req
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with the "mem_out" entry that just received its data
  val dup_vec_last_hptw = dup_vec.zipWithIndex.map{case (d, i) => d && (is_last_hptw_req(i) || is_last_hptw_resp(i))}
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with the entry whose data arrives next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp && entries(io.mem.resp.bits.id).req_info.s2xlate === noS2xlate
  val to_cache = Cat(dup_vec_having).orR || Cat(dup_vec_last_hptw).orR
  val to_hptw_req = io.in.bits.req_info.s2xlate === allStage
  val to_last_hptw_req = dup_wait_resp && entries(io.mem.resp.bits.id).req_info.s2xlate === allStage
  val last_hptw_req_id = io.mem.resp.bits.id
  val req_paddr = MakeAddr(io.in.bits.ppn(ppnLen-1, 0), getVpnn(io.in.bits.req_info.vpn, 0))
  val req_hpaddr = MakeAddr(entries(last_hptw_req_id).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(io.in.bits.req_info.vpn, 0))
  val index = Mux(entries(last_hptw_req_id).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
  val last_hptw_req_ppn = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))(index).getPPN()
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req but some entries already waiting, should not happen")

  XSError(io.in.fire && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflict with to mem")
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
  val enq_state_normal = MuxCase(state_addr_check, Seq(
    to_mem_out -> state_mem_out, // same as below, but the mem resp arrives this cycle
    to_last_hptw_req -> state_last_hptw_req,
    to_wait -> state_mem_waiting,
    to_cache -> state_cache,
    to_hptw_req -> state_hptw_req
  ))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
  when (io.in.fire) {
    // if a prefetch req does not need mem access, just drop it,
    // so at most 1 + FilterSize entries need to re-access the page cache,
    // and 2 + FilterSize is enough to avoid deadlock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := Mux(to_last_hptw_req, last_hptw_req_ppn, io.in.bits.ppn)
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    entries(enq_ptr).hptw_resp := Mux(to_last_hptw_req, entries(last_hptw_req_id).hptw_resp, Mux(to_wait, entries(wait_id).hptw_resp, entries(enq_ptr).hptw_resp))
    mem_resp_hit(enq_ptr) := to_mem_out || to_last_hptw_req
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = GatedValidRegNext(enq_state === state_addr_check && io.in.fire && !flush)

  val hasHptwResp = ParallelOR(state.map(_ === state_hptw_resp)).asBool
  val hptw_resp_ptr_reg = RegNext(io.hptw.resp.bits.id)
  val hptw_need_addr_check = RegNext(hasHptwResp && io.hptw.resp.fire && !flush) && state(hptw_resp_ptr_reg) === state_addr_check

  val ptes = io.mem.resp.bits.value.asTypeOf(Vec(blockBits / XLEN, new PteBundle()))
  val gpaddr = MakeGPAddr(io.in.bits.ppn, getVpnn(io.in.bits.req_info.vpn, 0))
  val hptw_resp = entries(hptw_resp_ptr_reg).hptw_resp
  val hpaddr = Cat(hptw_resp.genPPNS2(get_pn(gpaddr)), get_off(gpaddr))
  val addr = RegEnable(MakeAddr(io.in.bits.ppn(ppnLen - 1, 0), getVpnn(io.in.bits.req_info.vpn, 0)), io.in.fire)
  io.pmp.req.valid := need_addr_check || hptw_need_addr_check
  io.pmp.req.bits.addr := Mux(hptw_need_addr_check, hpaddr, addr)
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry duplicates
    // another entry and its state was already changed; when it duplicates the in-flight
    // requesting entry, it was set to mem_waiting above and ld must be false, so don't care
    val ptr = Mux(hptw_need_addr_check, hptw_resp_ptr_reg, enq_ptr_reg)
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(ptr).af := accessFault
    state(ptr) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && state(i) =/= state_mem_out && state(i) =/= state_last_hptw_req && state(i) =/= state_last_hptw_resp
        && entries(i).req_info.s2xlate === mem_arb.io.out.bits.req_info.s2xlate
        && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: "dup enq sets state to mem_waiting" -> "sending the req sets the other dup entries to mem_waiting"
        state(i) := state_mem_waiting
        entries(i).hptw_resp := entries(mem_arb.io.chosen).hptw_resp
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire) {
    state.indices.map{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        state(i) := Mux(entries(i).req_info.s2xlate === allStage, state_last_hptw_req, state_mem_out)
        mem_resp_hit(i) := true.B
        val req_paddr = MakeAddr(entries(i).ppn, getVpnn(entries(i).req_info.vpn, 0))
        val req_hpaddr = MakeAddr(entries(i).hptw_resp.genPPNS2(get_pn(req_paddr)), getVpnn(entries(i).req_info.vpn, 0))
        val index = Mux(entries(i).req_info.s2xlate === allStage, req_hpaddr, req_paddr)(log2Up(l2tlbParams.blockBytes)-1, log2Up(XLEN/8))
        entries(i).ppn := ptes(index).getPPN() // for the last stage-2 translation
      }
    }
  }

  when (hyper_arb1.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_req && entries(i).ppn === hyper_arb1.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb1.io.chosen === i.U) {
        state(i) := state_hptw_resp
        entries(i).wait_id := hyper_arb1.io.chosen
      }
    }
  }

  when (hyper_arb2.io.out.fire) {
    for (i <- state.indices) {
      when (state(i) === state_last_hptw_req && entries(i).ppn === hyper_arb2.io.out.bits.ppn && entries(i).req_info.s2xlate === allStage && hyper_arb2.io.chosen === i.U) {
        state(i) := state_last_hptw_resp
        entries(i).wait_id := hyper_arb2.io.chosen
      }
    }
  }

  when (io.hptw.resp.fire) {
    for (i <- state.indices) {
      when (state(i) === state_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        // redirect the entry that is waiting for this hptw resp
        val need_to_waiting_vec = state.indices.map(i => state(i) === state_mem_waiting && dup(entries(i).req_info.vpn, entries(io.hptw.resp.bits.id).req_info.vpn))
        val waiting_index = ParallelMux(need_to_waiting_vec zip entries.map(_.wait_id))
        state(i) := Mux(Cat(need_to_waiting_vec).orR, state_mem_waiting, state_addr_check)
        entries(i).hptw_resp := io.hptw.resp.bits.h_resp
        entries(i).wait_id := Mux(Cat(need_to_waiting_vec).orR, waiting_index, entries(i).wait_id)
        // TODO: also update entries carrying the same hptw req
      }
      when (state(i) === state_last_hptw_resp && io.hptw.resp.bits.id === entries(i).wait_id && io.hptw.resp.bits.h_resp.entry.tag === entries(i).ppn) {
        state(i) := state_mem_out
        entries(i).hptw_resp := io.hptw.resp.bits.h_resp
        // TODO: also update entries carrying the same hptw req
      }
    }
  }
  when (io.out.fire) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.map(a => when (a) { a := false.B })

  when (io.cache.fire) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire && io.cache.fire && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time at same entry")

  when (flush) {
    state.map(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af
  io.out.bits.h_resp := entries(mem_ptr).hptw_resp

  val hptw_req_arb = Module(new Arbiter(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val ppn = UInt(gvpnLen.W)
  }, 2))
  // first stage-2 translation
  hptw_req_arb.io.in(0).valid := hyper_arb1.io.out.valid
  hptw_req_arb.io.in(0).bits.source := hyper_arb1.io.out.bits.req_info.source
  hptw_req_arb.io.in(0).bits.ppn := hyper_arb1.io.out.bits.ppn
  hptw_req_arb.io.in(0).bits.id := hyper_arb1.io.chosen
  hyper_arb1.io.out.ready := hptw_req_arb.io.in(0).ready
  // last stage-2 translation
  hptw_req_arb.io.in(1).valid := hyper_arb2.io.out.valid
  hptw_req_arb.io.in(1).bits.source := hyper_arb2.io.out.bits.req_info.source
  hptw_req_arb.io.in(1).bits.ppn := hyper_arb2.io.out.bits.ppn
  hptw_req_arb.io.in(1).bits.id := hyper_arb2.io.chosen
  hyper_arb2.io.out.ready := hptw_req_arb.io.in(1).ready
  hptw_req_arb.io.out.ready := io.hptw.req.ready
  io.hptw.req.valid := hptw_req_arb.io.out.fire && !flush
  io.hptw.req.bits.gvpn := hptw_req_arb.io.out.bits.ppn
  io.hptw.req.bits.id := hptw_req_arb.io.out.bits.id
  io.hptw.req.bits.source := hptw_req_arb.io.out.bits.source

  io.mem.req.valid := mem_arb.io.out.valid && !flush
  val mem_paddr = MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  val mem_hpaddr = MakeAddr(mem_arb.io.out.bits.hptw_resp.genPPNS2(get_pn(mem_paddr)), getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.addr := Mux(mem_arb.io.out.bits.req_info.s2xlate === allStage, mem_hpaddr, mem_paddr)
  io.mem.req.bits.id := mem_arb.io.chosen
  io.mem.req.bits.hptw_bypassed := false.B
  mem_arb.io.out.ready := io.mem.req.ready
  val mem_refill_id = RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))
  io.mem.refill := entries(mem_refill_id).req_info
  io.mem.refill.s2xlate := Mux(entries(mem_refill_id).req_info.s2xlate === noS2xlate, noS2xlate, onlyStage1) // LLPTW refills the stage-1 PTE
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire)
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire)
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue time out no out ${i}")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount  ", io.in.fire                 ),
    ("tlbllptw_inblock  ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount ", io.mem.req.fire            ),
    ("tlbllptw_memcycle ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}
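// For reference, a pure-Scala sketch of the enqueue-state priority encoded by
// the MuxCase for enq_state_normal above (MuxCase picks the first condition
// that holds, in listed order). Names are illustrative.
object EnqStatePrioritySketch {
  def enqState(toMemOut: Boolean, toLastHptwReq: Boolean, toWait: Boolean,
               toCache: Boolean, toHptwReq: Boolean): String =
    if (toMemOut) "state_mem_out"             // dup's mem resp arrives this cycle
    else if (toLastHptwReq) "state_last_hptw_req"
    else if (toWait) "state_mem_waiting"      // dup's mem req is in flight
    else if (toCache) "state_cache"           // dup's result just came back
    else if (toHptwReq) "state_hptw_req"      // two-stage: translate the gpaddr first
    else "state_addr_check"                   // default: fresh walk, PMP-check first
}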
/*========================= HPTW ==============================*/

/** HPTW : Hypervisor Page Table Walker
 * the page walker that performs the virtual machine's stage-2 page walk:
 * guest physical address -> host physical address
 **/
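// For reference, a minimal pure-Scala sketch of what HPTW produces: the
// stage-2 walk maps a guest-physical page to a host-physical one, and the
// page-offset bits pass through untranslated. The 12-bit offset is an
// assumption for illustration.
object Stage2Sketch {
  private val offLen = 12 // assumed page-offset width
  def gpaToHpa(gpaddr: Long, hostPpn: Long): Long =
    (hostPpn << offLen) | (gpaddr & ((1L << offLen) - 1))
}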
class HPTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val id = UInt(log2Up(l2tlbParams.llptwsize).W)
    val gvpn = UInt(vpnLen.W)
    val ppn = UInt(ppnLen.W)
    val l1Hit = Bool()
    val l2Hit = Bool()
    val bypassed = Bool() // if bypassed, don't refill
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = Output(new HptwResp())
    val id = Output(UInt(bMemID.W))
  })

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class HPTW()(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new HPTWIO)
  val hgatp = io.csr.hgatp
  val sfence = io.sfence
  val flush = sfence.valid || hgatp.changed

  val level = RegInit(0.U(log2Up(Level).W))
  val gpaddr = Reg(UInt(GPAddrBits.W))
  val req_ppn = Reg(UInt(ppnLen.W))
  val vpn = gpaddr(GPAddrBits-1, offLen)
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val l2Hit = Reg(Bool())
  val bypassed = Reg(Bool())
  val pg_base = MakeGPAddr(hgatp.ppn, getGVpnn(vpn, 2.U)) // for l0
//  val pte = io.mem.resp.bits.MergeRespToPte()
  val pte = io.mem.resp.bits.asTypeOf(new PteBundle().cloneType)
  val ppn_l1 = Mux(l1Hit, req_ppn, pte.ppn)
  val ppn_l2 = Mux(l2Hit, req_ppn, pte.ppn)
  val ppn = Mux(level === 1.U, ppn_l1, ppn_l2) // for l1 and l2
  val p_pte = MakeAddr(ppn, getVpnn(vpn, 2.U - level))
  val mem_addr = Mux(level === 0.U, pg_base, p_pte)

  // s/w handshake registers, same convention as in PTW
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  val idle = RegInit(true.B)
  val mem_addr_update = RegInit(false.B)
  val finish = WireInit(false.B)

  val sent_to_pmp = !idle && (!s_pmp_check || mem_addr_update) && !finish
  val pageFault = pte.isPf(level) || (!pte.isLeaf() && level >= 2.U)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val ppn_af = pte.isAf()
  val find_pte = pte.isLeaf() || ppn_af || pageFault

  val resp_valid = !idle && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  val id = Reg(UInt(log2Up(l2tlbParams.llptwsize).W))
  val source = RegEnable(io.req.bits.source, io.req.fire)

  io.req.ready := idle
  val resp = Wire(new HptwResp())
  resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, level, pte, vpn, hgatp.asid)
  io.resp.valid := resp_valid
  io.resp.bits.id := id
  io.resp.bits.resp := resp
  io.resp.bits.source := source

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U
  io.pmp.req.bits.cmd := TlbCmd.read

  io.mem.req.valid := !s_mem_req && !io.mem.mask && !accessFault && s_pmp_check
  io.mem.req.bits.addr := mem_addr
  io.mem.req.bits.id := HptwReqId.U(bMemID.W)
  io.mem.req.bits.hptw_bypassed := bypassed

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source
  io.refill.req_info.s2xlate := onlyStage2
  when (idle) {
    when (io.req.fire) {
      bypassed := io.req.bits.bypassed
      level := Mux(io.req.bits.l2Hit, 2.U, Mux(io.req.bits.l1Hit, 1.U, 0.U))
      idle := false.B
      gpaddr := Cat(io.req.bits.gvpn, 0.U(offLen.W))
      accessFault := false.B
      s_pmp_check := false.B
      id := io.req.bits.id
      req_ppn := io.req.bits.ppn
      l1Hit := io.req.bits.l1Hit
      l2Hit := io.req.bits.l2Hit
    }
  }

  when (sent_to_pmp && !mem_addr_update) {
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when (accessFault && !idle) {
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when (io.mem.req.fire) {
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when (io.mem.resp.fire && !w_mem_resp) {
    w_mem_resp := true.B
    mem_addr_update := true.B
  }

  when (mem_addr_update) {
    when (!(find_pte || accessFault)) {
      level := levelNext
      s_mem_req := false.B
      mem_addr_update := false.B
    }.elsewhen (resp_valid) {
      when (io.resp.fire) {
        idle := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }
  when (flush) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
  }
}