1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.cache.mmu 18 19import chipsalliance.rocketchip.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import chisel3.internal.naming.chiselName 23import xiangshan._ 24import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants} 25import utils._ 26import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp} 27import freechips.rocketchip.tilelink._ 28import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle} 29 30/* Miss Queue dont care about duplicate req, which is done by PtwFilter 31 * PtwMissQueue is just a Queue inside Chisel with flush 32 */ 33 34class L2TlbMQEntry(implicit p: Parameters) extends XSBundle with HasPtwConst { 35 val req_info = new L2TlbInnerBundle() 36 val ppn = UInt(ppnLen.W) 37 val wait_id = UInt(log2Up(MSHRSize).W) 38 val af = Bool() 39} 40 41class L2TlbMQInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst { 42 val req_info = Output(new L2TlbInnerBundle()) 43 val l3 = Valid(Output(UInt(PAddrBits.W))) 44} 45 46class L2TlbMQCacheBundle(implicit p: Parameters) extends L2TlbInnerBundle with HasPtwConst 47 48class L2TlbMQIO(implicit p: Parameters) extends MMUIOBaseBundle with HasPtwConst { 49 val in = Flipped(Decoupled(new L2TlbMQInBundle())) 50 val cache = Decoupled(new L2TlbMQCacheBundle()) 51 val fsm_done = Input(Bool()) 52 val out = DecoupledIO(new Bundle { 53 val req_info = Output(new L2TlbInnerBundle()) 54 val id = Output(UInt(bMemID.W)) 55 val af = Output(Bool()) 56 }) 57 val mem = new Bundle { 58 val req = DecoupledIO(new L2TlbMemReqBundle()) 59 val resp = Flipped(Valid(new Bundle { 60 val id = Output(UInt(log2Up(MSHRSize).W)) 61 })) 62 val enq_ptr = Output(UInt(log2Ceil(MSHRSize).W)) 63 val buffer_it = Output(Vec(MSHRSize, Bool())) 64 val refill = Output(new L2TlbInnerBundle()) 65 val req_mask = Input(Vec(MSHRSize, Bool())) 66 } 67 val pmp = new Bundle { 68 val req = Valid(new PMPReqBundle()) 69 val resp = Flipped(new PMPRespBundle()) 70 } 71} 72 73@chiselName 74class L2TlbMissQueue(implicit p: Parameters) extends XSModule with HasPtwConst { 75 require(MSHRSize >= (2 + l2tlbParams.filterSize)) 76 77 val io = IO(new L2TlbMQIO()) 78 79 val entries = Reg(Vec(MSHRSize, new L2TlbMQEntry())) 80 val state_idle :: state_cache_high :: state_cache_low :: state_addr_check :: state_mem_req :: state_mem_waiting :: state_mem_out :: Nil = Enum(7) 81 val state = RegInit(VecInit(Seq.fill(MSHRSize)(state_idle))) 82 val is_emptys = state.map(_ === state_idle) 83 val is_caches_high = state.map(_ === state_cache_high) 84 val is_caches_low = state.map(_ === state_cache_low) 85 val is_mems = state.map(_ === state_mem_req) 86 val is_waiting = state.map(_ === state_mem_waiting) 87 val is_having = state.map(_ === state_mem_out) 88 89 val full = !ParallelOR(is_emptys).asBool() 90 val enq_ptr = ParallelPriorityEncoder(is_emptys) 91 val cache_high_ptr = ParallelPriorityEncoder(is_caches_high) 92 val cache_low_ptr = ParallelPriorityEncoder(is_caches_low) 93 94 val cache_arb = Module(new RRArbiter(new L2TlbMQCacheBundle(), 2)) 95 cache_arb.io.in(0).valid := Cat(is_caches_high).orR && io.fsm_done // fsm busy, required l1/l2 pte is not ready 96 cache_arb.io.in(0).bits := entries(cache_high_ptr).req_info 97 cache_arb.io.in(1).valid := Cat(is_caches_low).orR 98 cache_arb.io.in(1).bits := entries(cache_low_ptr).req_info 99 cache_arb.io.out.ready := io.cache.ready 100 val cache_ptr = Mux(cache_arb.io.chosen === 0.U, cache_high_ptr, cache_low_ptr) 101 102 val mem_ptr = ParallelPriorityEncoder(is_having) 103 val mem_arb = Module(new RRArbiter(new L2TlbMQEntry(), MSHRSize)) 104 for (i <- 0 until MSHRSize) { 105 mem_arb.io.in(i).bits := entries(i) 106 mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i) 107 } 108 109 // duplicate req 110 // to_wait: wait for the last to access mem, set to mem_resp 111 // to_cache: the last is back just right now, set to mem_cache 112 def dup(vpn1: UInt, vpn2: UInt): Bool = { 113 dropL3SectorBits(vpn1) === dropL3SectorBits(vpn2) 114 } 115 val dup_vec = state.indices.map(i => 116 dup(io.in.bits.req_info.vpn, entries(i).req_info.vpn) 117 ) 118 val dup_req_fire = mem_arb.io.out.fire() && dup(io.in.bits.req_info.vpn, mem_arb.io.out.bits.req_info.vpn) // dup with the req fire entry 119 val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // dup with "mem_waiting" entres, sending mem req already 120 val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && is_having(i)} // dup with the "mem_out" entry recv the data just now 121 val wait_id = Mux(dup_req_fire, mem_arb.io.chosen, ParallelMux(dup_vec_wait zip entries.map(_.wait_id))) 122 val dup_wait_resp = io.mem.resp.fire() && VecInit(dup_vec_wait)(io.mem.resp.bits.id) // dup with the entry that data coming next cycle 123 val to_wait = Cat(dup_vec_wait).orR || dup_req_fire 124 val to_mem_out = dup_wait_resp 125 val to_cache_low = Cat(dup_vec_having).orR 126 assert(RegNext(!(dup_req_fire && Cat(dup_vec_wait).orR), init = true.B), "mem req but some entries already waiting, should not happed") 127 128 val mem_resp_hit = RegInit(VecInit(Seq.fill(MSHRSize)(false.B))) 129 val enq_state = Mux(to_mem_out, state_mem_out, // same to the blew, but the mem resp now 130 Mux(to_cache_low, state_cache_low, // same to the below, but the mem resp last cycle 131 Mux(to_wait, state_mem_waiting, // wait for the prev mem resp 132 Mux(io.in.bits.l3.valid, state_addr_check, state_cache_high)))) 133 when (io.in.fire()) { 134 // if prefetch req does not need mem access, just give it up. 135 // so there will be at most 1 + FilterSize entries that needs re-access page cache 136 // so 2 + FilterSize is enough to avoid dead-lock 137 state(enq_ptr) := Mux(from_pre(io.in.bits.req_info.source) && enq_state =/= state_addr_check, state_idle, enq_state) 138 entries(enq_ptr).req_info := io.in.bits.req_info 139 entries(enq_ptr).ppn := io.in.bits.l3.bits 140 entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr) 141 entries(enq_ptr).af := false.B 142 mem_resp_hit(enq_ptr) := to_mem_out 143 } 144 when (mem_arb.io.out.fire()) { 145 for (i <- state.indices) { 146 when (state(i) =/= state_idle && dup(entries(i).req_info.vpn, mem_arb.io.out.bits.req_info.vpn)) { 147 // NOTE: "dup enq set state to mem_wait" -> "sending req set other dup entries to mem_wait" 148 state(i) := state_mem_waiting 149 entries(i).wait_id := mem_arb.io.chosen 150 } 151 } 152 } 153 when (io.mem.resp.fire()) { 154 state.indices.map{i => 155 when (state(i) === state_mem_waiting && io.mem.resp.bits.id === entries(i).wait_id) { 156 state(i) := state_mem_out 157 mem_resp_hit(i) := true.B 158 } 159 } 160 } 161 when (io.out.fire()) { 162 assert(state(mem_ptr) === state_mem_out) 163 state(mem_ptr) := state_idle 164 } 165 when (io.cache.fire()) { 166 state(cache_ptr) := state_idle 167 } 168 169 mem_resp_hit.map(a => when (a) { a := false.B } ) 170 171 val enq_ptr_reg = RegNext(enq_ptr) 172 173 io.pmp.req.valid := RegNext(enq_state === state_addr_check) 174 io.pmp.req.bits.addr := MakeAddr(entries(enq_ptr_reg).ppn, getVpnn(entries(enq_ptr_reg).req_info.vpn, 0)) 175 io.pmp.req.bits.cmd := TlbCmd.read 176 io.pmp.req.bits.size := 3.U // TODO: fix it 177 val pmp_resp_valid = io.pmp.req.valid // same cycle 178 when (pmp_resp_valid && (state(enq_ptr_reg) === state_addr_check) && 179 !(mem_arb.io.out.fire && dup(entries(enq_ptr_reg).req_info.vpn, mem_arb.io.out.bits.req_info.vpn))) { 180 // NOTE: when pmp resp but state is not addr check, then the entry is dup with other entry, the state was changed before 181 // when dup with the req-ing entry, set to mem_waiting (above codes), and the ld must be false, so dontcare 182 val accessFault = io.pmp.resp.ld || io.pmp.resp.mmio 183 entries(enq_ptr_reg).af := accessFault 184 state(enq_ptr_reg) := Mux(accessFault, state_mem_out, state_mem_req) 185 } 186 187 val flush = io.sfence.valid || io.csr.satp.changed 188 when (flush) { 189 state.map(_ := state_idle) 190 } 191 192 io.in.ready := !full 193 io.cache.valid := cache_arb.io.out.valid 194 io.cache.bits := cache_arb.io.out.bits 195 196 io.out.valid := ParallelOR(is_having).asBool() 197 io.out.bits.req_info := entries(mem_ptr).req_info 198 io.out.bits.id := mem_ptr 199 io.out.bits.af := entries(mem_ptr).af 200 201 io.mem.req.valid := mem_arb.io.out.valid && !flush 202 io.mem.req.bits.addr := MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.req_info.vpn, 0)) 203 io.mem.req.bits.id := mem_arb.io.chosen 204 mem_arb.io.out.ready := io.mem.req.ready 205 io.mem.refill := entries(RegNext(io.mem.resp.bits.id(log2Up(MSHRSize)-1, 0))).req_info 206 io.mem.buffer_it := mem_resp_hit 207 io.mem.enq_ptr := enq_ptr 208 209 XSPerfAccumulate("mq_in_count", io.in.fire()) 210 XSPerfAccumulate("mq_in_block", io.in.valid && !io.in.ready) 211 for (i <- 0 until 7) { 212 XSPerfAccumulate(s"enq_state${i}", io.in.fire() && enq_state === i.U) 213 } 214 for (i <- 0 until (MSHRSize + 1)) { 215 XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U) 216 XSPerfAccumulate(s"cache_high_util${i}", PopCount(is_caches_high) === i.U) 217 XSPerfAccumulate(s"cache_low_util${i}", PopCount(is_caches_low) === i.U) 218 XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U) 219 XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U) 220 } 221 XSPerfAccumulate("mem_count", io.mem.req.fire()) 222 XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U) 223 XSPerfAccumulate("blocked_in", io.in.valid && !io.in.ready) 224 225 for (i <- 0 until MSHRSize) { 226 TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue time out no out ${i}") 227 } 228 229 val perfinfo = IO(new Bundle(){ 230 val perfEvents = Output(new PerfEventsBundle(4)) 231 }) 232 val perfEvents = Seq( 233 ("tlbmissq_incount ", io.in.fire() ), 234 ("tlbmissq_inblock ", io.in.valid && !io.in.ready), 235 ("tlbmissq_memcount ", io.mem.req.fire() ), 236 ("tlbmissq_memcycle ", PopCount(is_waiting) ), 237 ) 238 239 for (((perf_out,(perf_name,perf)),i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) { 240 perf_out.incr_step := RegNext(perf) 241 } 242} 243