/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import chisel3.internal.naming.chiselName
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import utility._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}

/** Page Table Walk is divided into two parts:
  * One,   PTW: walks the non-leaf page directory entries (pde), one request at a time
  * Two, LLPTW: walks only the leaf page table entries (pte, 4KB pages), in parallel
  */

/** PTW : page table walker
  * a finite state machine
  * only handles the 1GB and 2MB levels of a walk,
  * in other words, every level except the last (leaf) one
  **/
class PTWIO()(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val req = Flipped(DecoupledIO(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val l1Hit = Bool()
    val ppn = UInt(ppnLen.W)
  }))
  val resp = DecoupledIO(new Bundle {
    val source = UInt(bSourceWidth.W)
    val resp = new PtwResp
  })

  val llptw = DecoupledIO(new LLPTWInBundle())
  // NOTE: llptw now connects to the page cache instead of directly to the LLPTW,
  // to avoid a corner case that caused duplicate entries

  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(ValidIO(UInt(XLEN.W)))
    val mask = Input(Bool())
  }
  val pmp = new Bundle {
    val req = ValidIO(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }

  val refill = Output(new Bundle {
    val req_info = new L2TlbInnerBundle()
    val level = UInt(log2Up(Level).W)
  })
}

@chiselName
class PTW()(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new PTWIO)
  val sfence = io.sfence
  val mem = io.mem
  val satp = io.csr.satp
  val flush = io.sfence.valid || io.csr.satp.changed

  val level = RegInit(0.U(log2Up(Level).W))
  val af_level = RegInit(0.U(log2Up(Level).W)) // level to report on an access fault
  val ppn = Reg(UInt(ppnLen.W))
  val vpn = Reg(UInt(vpnLen.W))
  val levelNext = level + 1.U
  val l1Hit = Reg(Bool())
  val memPte = mem.resp.bits.asTypeOf(new PteBundle().cloneType)

  // s/w register
  val s_pmp_check = RegInit(true.B)
  val s_mem_req = RegInit(true.B)
  val s_llptw_req = RegInit(true.B)
  val w_mem_resp = RegInit(true.B)
  // for updating "level"
  val mem_addr_update = RegInit(false.B)

  val idle = RegInit(true.B)
  val finish = WireInit(false.B)
  val sent_to_pmp = idle === false.B && (s_pmp_check === false.B || mem_addr_update) && !finish
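
  // A rough reading of the walk flow encoded by the s_*/w_* flags above
  // (no fault, no l1Hit): req.fire -> PMP check -> mem.req for the level-0 pde
  // -> mem.resp -> not a leaf, so bump the level and repeat the PMP check and
  // mem.req -> the level-1 entry is either a leaf/huge page (answered through
  // io.resp) or a pointer to the last level (handed off to LLPTW via io.llptw).
  // The signals below classify the fetched pte: pageFault (invalid pte for this
  // level), accessFault (latched PMP/PMA denial for the pde/pte address) and
  // ppn_af (malformed ppn, as reported by memPte.isAf()).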
  val pageFault = memPte.isPf(level)
  val accessFault = RegEnable(io.pmp.resp.ld || io.pmp.resp.mmio, sent_to_pmp)

  val ppn_af = memPte.isAf()
  val find_pte = memPte.isLeaf() || ppn_af || pageFault
  val to_find_pte = level === 1.U && find_pte === false.B
  val source = RegEnable(io.req.bits.req_info.source, io.req.fire())

  val l1addr = MakeAddr(satp.ppn, getVpnn(vpn, 2))
  val l2addr = MakeAddr(Mux(l1Hit, ppn, memPte.ppn), getVpnn(vpn, 1))
  val mem_addr = Mux(af_level === 0.U, l1addr, l2addr)

  io.req.ready := idle

  io.resp.valid := idle === false.B && mem_addr_update && ((w_mem_resp && find_pte) || (s_pmp_check && accessFault))
  io.resp.bits.source := source
  io.resp.bits.resp.apply(pageFault && !accessFault && !ppn_af, accessFault || ppn_af, Mux(accessFault, af_level, level), memPte, vpn, satp.asid)

  io.llptw.valid := s_llptw_req === false.B && to_find_pte && !accessFault
  io.llptw.bits.req_info.source := source
  io.llptw.bits.req_info.vpn := vpn
  io.llptw.bits.ppn := memPte.ppn

  io.pmp.req.valid := DontCare // same cycle, do not use valid
  io.pmp.req.bits.addr := mem_addr
  io.pmp.req.bits.size := 3.U // TODO: fix it
  io.pmp.req.bits.cmd := TlbCmd.read

  mem.req.valid := s_mem_req === false.B && !mem.mask && !accessFault && s_pmp_check
  mem.req.bits.addr := mem_addr
  mem.req.bits.id := FsmReqID.U(bMemID.W)

  io.refill.req_info.vpn := vpn
  io.refill.level := level
  io.refill.req_info.source := source

  when (io.req.fire()){
    val req = io.req.bits
    level := Mux(req.l1Hit, 1.U, 0.U)
    af_level := Mux(req.l1Hit, 1.U, 0.U)
    ppn := Mux(req.l1Hit, io.req.bits.ppn, satp.ppn)
    vpn := io.req.bits.req_info.vpn
    l1Hit := req.l1Hit
    accessFault := false.B
    s_pmp_check := false.B
    idle := false.B
  }

  when(sent_to_pmp && mem_addr_update === false.B){
    s_mem_req := false.B
    s_pmp_check := true.B
  }

  when(accessFault && idle === false.B){
    s_pmp_check := true.B
    s_mem_req := true.B
    w_mem_resp := true.B
    s_llptw_req := true.B
    mem_addr_update := true.B
  }

  when (mem.req.fire()){
    s_mem_req := true.B
    w_mem_resp := false.B
  }

  when(mem.resp.fire() && w_mem_resp === false.B){
    w_mem_resp := true.B
    af_level := af_level + 1.U
    s_llptw_req := false.B
    mem_addr_update := true.B
  }

  when(mem_addr_update){
    when(level === 0.U && !(find_pte || accessFault)){
      level := levelNext
      s_mem_req := false.B
      s_llptw_req := true.B
      mem_addr_update := false.B
    }.elsewhen(io.llptw.valid){
      when(io.llptw.fire()) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
      }
      finish := true.B
    }.elsewhen(io.resp.valid){
      when(io.resp.fire()) {
        idle := true.B
        s_llptw_req := true.B
        mem_addr_update := false.B
        accessFault := false.B
      }
      finish := true.B
    }
  }
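
  // An sfence or a satp change abandons whatever walk is in flight: every
  // handshake flag is forced back to its idle value and accessFault is cleared,
  // so a stale fault cannot leak into the next request (reset block below).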
  when (sfence.valid) {
    idle := true.B
    s_pmp_check := true.B
    s_mem_req := true.B
    s_llptw_req := true.B
    w_mem_resp := true.B
    accessFault := false.B
    mem_addr_update := false.B
  }


  XSDebug(p"[ptw] level:${level} notFound:${pageFault}\n")

  // perf
  XSPerfAccumulate("fsm_count", io.req.fire())
  for (i <- 0 until PtwWidth) {
    XSPerfAccumulate(s"fsm_count_source${i}", io.req.fire() && io.req.bits.req_info.source === i.U)
  }
  XSPerfAccumulate("fsm_busy", !idle)
  XSPerfAccumulate("fsm_idle", idle)
  XSPerfAccumulate("resp_blocked", io.resp.valid && !io.resp.ready)
  XSPerfAccumulate("ptw_ppn_af", io.resp.fire && ppn_af)
  XSPerfAccumulate("mem_count", mem.req.fire())
  XSPerfAccumulate("mem_cycle", BoolStopWatch(mem.req.fire, mem.resp.fire(), true))
  XSPerfAccumulate("mem_blocked", mem.req.valid && !mem.req.ready)

  TimeOutAssert(!idle, timeOutThreshold, "page table walker time out")

  val perfEvents = Seq(
    ("fsm_count     ", io.req.fire()                                     ),
    ("fsm_busy      ", !idle                                             ),
    ("fsm_idle      ", idle                                              ),
    ("resp_blocked  ", io.resp.valid && !io.resp.ready                   ),
    ("mem_count     ", mem.req.fire()                                    ),
    ("mem_cycle     ", BoolStopWatch(mem.req.fire, mem.resp.fire(), true)),
    ("mem_blocked   ", mem.req.valid && !mem.req.ready                   ),
  )
  generatePerfEvent()
}

/*========================= LLPTW ==============================*/

/** LLPTW : Last Level Page Table Walker
  * the page walker that only performs the last-level (4KB) page walk.
  **/

class LLPTWInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = Output(new L2TlbInnerBundle())
  val ppn = Output(UInt(PAddrBits.W))
}

class LLPTWIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst {
  val in = Flipped(DecoupledIO(new LLPTWInBundle()))
  val out = DecoupledIO(new Bundle {
    val req_info = Output(new L2TlbInnerBundle())
    val id = Output(UInt(bMemID.W))
    val af = Output(Bool())
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(l2tlbParams.llptwsize).W))
    }))
    val enq_ptr = Output(UInt(log2Ceil(l2tlbParams.llptwsize).W))
    val buffer_it = Output(Vec(l2tlbParams.llptwsize, Bool()))
    val refill = Output(new L2TlbInnerBundle())
    val req_mask = Input(Vec(l2tlbParams.llptwsize, Bool()))
  }
  val cache = DecoupledIO(new L2TlbInnerBundle())
  val pmp = new Bundle {
    val req = Valid(new PMPReqBundle())
    val resp = Flipped(new PMPRespBundle())
  }
}

class LLPTWEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val req_info = new L2TlbInnerBundle()
  val ppn = UInt(ppnLen.W)
  val wait_id = UInt(log2Up(l2tlbParams.llptwsize).W)
  val af = Bool()
}


@chiselName
class LLPTW(implicit p: Parameters) extends XSModule with HasPtwConst with HasPerfEvents {
  val io = IO(new LLPTWIO())

  val flush = io.sfence.valid || io.csr.satp.changed
  val entries = Reg(Vec(l2tlbParams.llptwsize, new LLPTWEntry()))
  val state_idle :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: state_cache :: Nil = Enum(6)
  val state = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(state_idle)))

  val is_emptys = state.map(_ === state_idle)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache = state.map(_ === state_cache)

  val full = !ParallelOR(is_emptys).asBool()
  val enq_ptr = ParallelPriorityEncoder(is_emptys)

  val mem_ptr = ParallelPriorityEncoder(is_having) // TODO: optimize timing, bad: entries -> ptr -> entry
  val mem_arb = Module(new RRArbiter(new LLPTWEntry(), l2tlbParams.llptwsize))
  for (i <- 0 until l2tlbParams.llptwsize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }
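
  // Entry lifecycle, roughly: idle -> addr_check -> mem_req -> mem_waiting -> mem_out -> idle,
  // with state_cache as a side exit for entries whose pte was just fetched on behalf of a
  // duplicate vpn; those re-query the page cache through io.cache instead of issuing another
  // mem request. cache_ptr below selects such an entry.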
  val cache_ptr = ParallelMux(is_cache, (0 until l2tlbParams.llptwsize).map(_.U(log2Up(l2tlbParams.llptwsize).W)))

  // duplicate req
  // to_wait: an earlier duplicate is accessing mem; just wait for it (state_mem_waiting)
  // to_cache: an earlier duplicate's data has just come back; re-query the page cache (state_cache)
  val dup_vec = state.indices.map(i =>
    dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn)
  )
  val dup_req_fire = mem_arb.io.out.fire() && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) // dup with the entry whose mem req fires now
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entries, whose mem req was already sent
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with the "mem_out" entry that just received its data
  val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id)))
  val dup_wait_resp = io.mem.resp.fire() && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with the entry whose data comes back next cycle
  val to_wait = Cat(dup_vec_wait).orR || dup_req_fire
  val to_mem_out = dup_wait_resp
  val to_cache = Cat(dup_vec_having).orR
  XSError(RegNext(dup_req_fire && Cat(dup_vec_wait).orR, init = false.B), "mem req but some entries already waiting, should not happen")

  XSError(io.in.fire() && ((to_mem_out && to_cache) || (to_wait && to_cache)), "llptw enq, to cache conflict with to mem")
  val mem_resp_hit = RegInit(VecInit(Seq.fill(l2tlbParams.llptwsize)(false.B)))
  val enq_state_normal = Mux(to_mem_out, state_mem_out, // like the wait case below, but the mem resp arrives right now
    Mux(to_wait, state_mem_waiting,
    Mux(to_cache, state_cache, state_addr_check)))
  val enq_state = Mux(from_pre(io.in.bits.req_info.source) && enq_state_normal =/= state_addr_check, state_idle, enq_state_normal)
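
  // Enqueue state selection, as encoded by the Muxes above:
  //   a mem resp for the same vpn arrives this cycle        -> state_mem_out
  //   a duplicate is already waiting on (or firing to) mem  -> state_mem_waiting
  //   a duplicate's data has just come back                 -> state_cache
  //   otherwise                                             -> state_addr_check (PMP first, then mem)
  // Prefetch requests that would not reach state_addr_check are simply dropped (state_idle).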
  when (io.in.fire()) {
    // if a prefetch req does not need mem access, just drop it.
    // so there will be at most 1 + FilterSize entries that need to re-access the page cache,
    // and 2 + FilterSize is enough to avoid dead-lock
    state(enq_ptr) := enq_state
    entries(enq_ptr).req_info := io.in.bits.req_info
    entries(enq_ptr).ppn := io.in.bits.ppn
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
    entries(enq_ptr).af := false.B
    mem_resp_hit(enq_ptr) := to_mem_out
  }

  val enq_ptr_reg = RegNext(enq_ptr)
  val need_addr_check = RegNext(enq_state === state_addr_check && io.in.fire() && !flush)
  val last_enq_vpn = RegEnable(io.in.bits.req_info.vpn, io.in.fire())

  io.pmp.req.valid := need_addr_check
  io.pmp.req.bits.addr := RegEnable(MakeAddr(io.in.bits.ppn, getVpnn(io.in.bits.req_info.vpn, 0)), io.in.fire())
  io.pmp.req.bits.cmd := TlbCmd.read
  io.pmp.req.bits.size := 3.U // TODO: fix it
  val pmp_resp_valid = io.pmp.req.valid // same cycle
  when (pmp_resp_valid) {
    // NOTE: if the pmp resp arrives but the state is no longer addr_check, the entry duplicates
    //       another entry and its state has already been changed.
    //       When it duplicates the entry whose req is firing, it was set to mem_waiting above,
    //       and ld must be false, so don't care.
    val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio
    entries(enq_ptr_reg).af := accessFault
    state(enq_ptr_reg) := Mux(accessFault, state_mem_out, state_mem_req)
  }

  when (mem_arb.io.out.fire()) {
    for (i <- state.indices) {
      when (state(i) =/= state_idle && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) {
        // NOTE: "dup enq set state to mem_wait" -> "sending req sets other dup entries to mem_wait"
        state(i) := state_mem_waiting
        entries(i).wait_id := mem_arb.io.chosen
      }
    }
  }
  when (io.mem.resp.fire()) {
    state.indices.map{i =>
      when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) {
        state(i) := state_mem_out
        mem_resp_hit(i) := true.B
      }
    }
  }
  when (io.out.fire()) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  mem_resp_hit.map(a => when (a) { a := false.B } )

  when (io.cache.fire) {
    state(cache_ptr) := state_idle
  }
  XSError(io.out.fire && io.cache.fire && (mem_ptr === cache_ptr), "mem resp and cache fire at the same time at same entry")

  when (flush) {
    state.map(_ := state_idle)
  }

  io.in.ready := !full

  io.out.valid := ParallelOR(is_having).asBool()
  io.out.bits.req_info := entries(mem_ptr).req_info
  io.out.bits.id := mem_ptr
  io.out.bits.af := entries(mem_ptr).af
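
  // Mem request wiring (below): the arbiter winner's leaf pte address is built from its saved
  // ppn and the level-0 vpn slice, and the chosen entry index is reused as the mem request id,
  // so the response can be matched back to the waiting entries through their wait_id.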
  io.mem.req.valid := mem_arb.io.out.valid && !flush
  io.mem.req.bits.addr := MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0))
  io.mem.req.bits.id := mem_arb.io.chosen
  mem_arb.io.out.ready := io.mem.req.ready
  io.mem.refill := entries(RegNext(io.mem.resp.bits.id(log2Up(l2tlbParams.llptwsize)-1, 0))).req_info
  io.mem.buffer_it := mem_resp_hit
  io.mem.enq_ptr := enq_ptr

  io.cache.valid := Cat(is_cache).orR
  io.cache.bits := ParallelMux(is_cache, entries.map(_.req_info))

  XSPerfAccumulate("llptw_in_count", io.in.fire())
  XSPerfAccumulate("llptw_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire() && enq_state === i.U)
  }
  for (i <- 0 until (l2tlbParams.llptwsize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire())
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)
  XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready)

  for (i <- 0 until l2tlbParams.llptwsize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue time out no out ${i}")
  }

  val perfEvents = Seq(
    ("tlbllptw_incount  ", io.in.fire()               ),
    ("tlbllptw_inblock  ", io.in.valid && !io.in.ready),
    ("tlbllptw_memcount ", io.mem.req.fire()          ),
    ("tlbllptw_memcycle ", PopCount(is_waiting)       ),
  )
  generatePerfEvent()
}