/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.frontend.icache

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import difftest._
import freechips.rocketchip.tilelink._
import utils._
import xiangshan.cache.mmu._
import xiangshan.frontend._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import huancun.PreferCacheKey
import xiangshan.XSCoreParamsKey
import xiangshan.SoftIfetchPrefetchBundle
import utility._

abstract class IPrefetchBundle(implicit p: Parameters) extends ICacheBundle
abstract class IPrefetchModule(implicit p: Parameters) extends ICacheModule

class IPrefetchReq(implicit p: Parameters) extends IPrefetchBundle {
  val startAddr     : UInt   = UInt(VAddrBits.W)
  val nextlineStart : UInt   = UInt(VAddrBits.W)
  val ftqIdx        : FtqPtr = new FtqPtr
  val isSoftPrefetch: Bool   = Bool()
  def crossCacheline: Bool   = startAddr(blockOffBits - 1) === 1.U

  def fromFtqICacheInfo(info: FtqICacheInfo): IPrefetchReq = {
    this.startAddr      := info.startAddr
    this.nextlineStart  := info.nextlineStart
    this.ftqIdx         := info.ftqIdx
    this.isSoftPrefetch := false.B
    this
  }

  def fromSoftPrefetch(req: SoftIfetchPrefetchBundle): IPrefetchReq = {
    this.startAddr      := req.vaddr
    this.nextlineStart  := req.vaddr + (1 << blockOffBits).U
    this.ftqIdx         := DontCare
    this.isSoftPrefetch := true.B
    this
  }
}

class IPrefetchIO(implicit p: Parameters) extends IPrefetchBundle {
  // control
  val csr_pf_enable     = Input(Bool())
  val csr_parity_enable = Input(Bool())
  val flush             = Input(Bool())

  val req            = Flipped(Decoupled(new IPrefetchReq))
  val flushFromBpu   = Flipped(new BpuFlushInfo)
  val itlb           = Vec(PortNumber, new TlbRequestIO)
  val pmp            = Vec(PortNumber, new ICachePMPBundle)
  val metaRead       = new ICacheMetaReqBundle
  val MSHRReq        = DecoupledIO(new ICacheMissReq)
  val MSHRResp       = Flipped(ValidIO(new ICacheMissResp))
  val wayLookupWrite = DecoupledIO(new WayLookupInfo)
}

class IPrefetchPipe(implicit p: Parameters) extends IPrefetchModule
{
  val io: IPrefetchIO = IO(new IPrefetchIO)

  val (toITLB, fromITLB) = (io.itlb.map(_.req), io.itlb.map(_.resp))
  val (toPMP,  fromPMP)  = (io.pmp.map(_.req), io.pmp.map(_.resp))
  val (toMeta, fromMeta) = (io.metaRead.toIMeta, io.metaRead.fromIMeta)
  val (toMSHR, fromMSHR) = (io.MSHRReq, io.MSHRResp)
  val toWayLookup = io.wayLookupWrite

  val s0_fire, s1_fire, s2_fire            = WireInit(false.B)
  val s0_discard, s2_discard               = WireInit(false.B)
  val s0_ready, s1_ready, s2_ready         = WireInit(false.B)
  val s0_flush, s1_flush, s2_flush         = WireInit(false.B)
  val from_bpu_s0_flush, from_bpu_s1_flush = WireInit(false.B)
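  /* Pipeline overview (informal summary of the stage banners below):
   *   s0: accept a request from FTQ or a software prefetch, send requests to ITLB and meta SRAM
   *   s1: collect ITLB / meta responses, resolve the hit waymask, write WayLookup
   *   s2: drop lines that already hit (in SRAM or via an in-flight MSHR refill) and send the rest to missUnit
   * The s*_fire / s*_ready / s*_flush wires above implement the handshake between these stages.
   */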
  /**
    ******************************************************************************
    * IPrefetch Stage 0
    * - 1. receive ftq req
    * - 2. send req to ITLB
    * - 3. send req to Meta SRAM
    ******************************************************************************
    */
  val s0_valid = io.req.valid

  /**
    ******************************************************************************
    * receive ftq req
    ******************************************************************************
    */
  val s0_req_vaddr      = VecInit(Seq(io.req.bits.startAddr, io.req.bits.nextlineStart))
  val s0_req_ftqIdx     = io.req.bits.ftqIdx
  val s0_isSoftPrefetch = io.req.bits.isSoftPrefetch
  val s0_doubleline     = io.req.bits.crossCacheline
  val s0_req_vSetIdx    = s0_req_vaddr.map(get_idx)

  from_bpu_s0_flush := !s0_isSoftPrefetch && (io.flushFromBpu.shouldFlushByStage2(s0_req_ftqIdx) ||
                                              io.flushFromBpu.shouldFlushByStage3(s0_req_ftqIdx))
  s0_flush := io.flush || from_bpu_s0_flush || s1_flush

  val s0_can_go = s1_ready && toITLB(0).ready && toITLB(1).ready && toMeta.ready
  io.req.ready := s0_can_go

  s0_fire := s0_valid && s0_can_go && !s0_flush

  /**
    ******************************************************************************
    * IPrefetch Stage 1
    * - 1. Receive resp from ITLB
    * - 2. Receive resp from IMeta and check
    * - 3. Monitor the requests from missUnit to write to SRAM.
    * - 4. Write wayLookup
    ******************************************************************************
    */
  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = s1_flush, lastFlush = false.B)

  val s1_req_vaddr      = RegEnable(s0_req_vaddr, 0.U.asTypeOf(s0_req_vaddr), s0_fire)
  val s1_isSoftPrefetch = RegEnable(s0_isSoftPrefetch, 0.U.asTypeOf(s0_isSoftPrefetch), s0_fire)
  val s1_doubleline     = RegEnable(s0_doubleline, 0.U.asTypeOf(s0_doubleline), s0_fire)
  val s1_req_ftqIdx     = RegEnable(s0_req_ftqIdx, 0.U.asTypeOf(s0_req_ftqIdx), s0_fire)
  val s1_req_vSetIdx    = VecInit(s1_req_vaddr.map(get_idx))

  val m_idle :: m_itlbResend :: m_metaResend :: m_enqWay :: m_enterS2 :: Nil = Enum(5)
  val state      = RegInit(m_idle)
  val next_state = WireDefault(state)
  val s0_fire_r  = RegNext(s0_fire)
  dontTouch(state)
  dontTouch(next_state)
  state := next_state

  /**
    ******************************************************************************
    * resend itlb req if miss
    ******************************************************************************
    */
  val s1_wait_itlb = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach { i =>
    when(s1_flush) {
      s1_wait_itlb(i) := false.B
    }.elsewhen(RegNext(s0_fire) && fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := true.B
    }.elsewhen(s1_wait_itlb(i) && !fromITLB(i).bits.miss) {
      s1_wait_itlb(i) := false.B
    }
  }
  val s1_need_itlb    = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && fromITLB(0).bits.miss,
                                    (RegNext(s0_fire) || s1_wait_itlb(1)) && fromITLB(1).bits.miss && s1_doubleline))
  val tlb_valid_pulse = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && !fromITLB(0).bits.miss,
                                    (RegNext(s0_fire) || s1_wait_itlb(1)) && !fromITLB(1).bits.miss && s1_doubleline))
  val tlb_valid_latch = VecInit((0 until PortNumber).map(i => ValidHoldBypass(tlb_valid_pulse(i), s1_fire, flush = s1_flush)))
  val itlb_finish     = tlb_valid_latch(0) && (!s1_doubleline || tlb_valid_latch(1))
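  /* ITLB requests are issued speculatively from s0 on the first pass; if a port misses,
   * s1 keeps re-issuing that port (s1_need_itlb) until the translation returns.
   * tlb_valid_latch then holds the per-port completion until s1 fires or is flushed, so
   * itlb_finish only depends on the ports actually in use (port 1 only for doubleline requests).
   */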
  for (i <- 0 until PortNumber) {
    toITLB(i).valid             := s1_need_itlb(i) || (s0_valid && (if (i == 0) true.B else s0_doubleline))
    toITLB(i).bits              := DontCare
    toITLB(i).bits.size         := 3.U
    toITLB(i).bits.vaddr        := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.debug.pc     := Mux(s1_need_itlb(i), s1_req_vaddr(i), s0_req_vaddr(i))
    toITLB(i).bits.cmd          := TlbCmd.exec
    toITLB(i).bits.no_translate := false.B
  }
  fromITLB.foreach(_.ready := true.B)
  io.itlb.foreach(_.req_kill := false.B)

  /**
    ******************************************************************************
    * Receive resp from ITLB
    ******************************************************************************
    */
  val s1_req_paddr_wire = VecInit(fromITLB.map(_.bits.paddr(0)))
  val s1_req_paddr_reg = VecInit((0 until PortNumber).map( i =>
    RegEnable(s1_req_paddr_wire(i), 0.U(PAddrBits.W), tlb_valid_pulse(i))
  ))
  val s1_req_paddr = VecInit((0 until PortNumber).map( i =>
    Mux(tlb_valid_pulse(i), s1_req_paddr_wire(i), s1_req_paddr_reg(i))
  ))
  val s1_req_gpaddr_tmp = VecInit((0 until PortNumber).map( i =>
    ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.gpaddr(0)), data = fromITLB(i).bits.gpaddr(0))
  ))
  val s1_itlb_exception = VecInit((0 until PortNumber).map( i =>
    ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U(ExceptionType.width.W), data = ExceptionType.fromTlbResp(fromITLB(i).bits))
  ))
  val s1_itlb_pbmt = VecInit((0 until PortNumber).map( i =>
    ResultHoldBypass(valid = tlb_valid_pulse(i), init = 0.U.asTypeOf(fromITLB(i).bits.pbmt(0)), data = fromITLB(i).bits.pbmt(0))
  ))
  val s1_itlb_exception_gpf = VecInit(s1_itlb_exception.map(_ === ExceptionType.gpf))

  /* Select gpaddr with the first gpf
   * Note: the backend wants the base guest physical address of a fetch block
   *       for port(i), its base gpaddr is actually (gpaddr - i * blocksize)
   * see GPAMem: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/backend/GPAMem.scala#L33-L34
   * see also: https://github.com/OpenXiangShan/XiangShan/blob/344cf5d55568dd40cd658a9ee66047a505eeb504/src/main/scala/xiangshan/frontend/IFU.scala#L374-L375
   */
  val s1_req_gpaddr = PriorityMuxDefault(
    s1_itlb_exception_gpf zip (0 until PortNumber).map(i => s1_req_gpaddr_tmp(i) - (i << blockOffBits).U),
    0.U.asTypeOf(s1_req_gpaddr_tmp(0))
  )
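  /* Illustrative example (assuming a 64-byte cacheline, i.e. blockOffBits = 6):
   * if only port 1 reports a guest page fault with gpaddr = 0x8000_1040, the value
   * forwarded via WayLookup is 0x8000_1040 - 0x40 = 0x8000_1000, i.e. the base guest
   * physical address of the fetch block, as the backend expects (see the links above).
   */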
  /**
    ******************************************************************************
    * resend metaArray read req when itlb miss finishes
    ******************************************************************************
    */
  val s1_need_meta = ((state === m_itlbResend) && itlb_finish) || (state === m_metaResend)
  toMeta.valid             := s1_need_meta || s0_valid
  toMeta.bits              := DontCare
  toMeta.bits.isDoubleLine := Mux(s1_need_meta, s1_doubleline, s0_doubleline)

  for (i <- 0 until PortNumber) {
    toMeta.bits.vSetIdx(i) := Mux(s1_need_meta, s1_req_vSetIdx(i), s0_req_vSetIdx(i))
  }

  /**
    ******************************************************************************
    * Receive resp from IMeta and check
    ******************************************************************************
    */
  val s1_req_ptags = VecInit(s1_req_paddr.map(get_phy_tag))

  val s1_meta_ptags  = fromMeta.tags
  val s1_meta_valids = fromMeta.entryValid
  // If an error is found in any way, tag_eq_vec is unreliable, so we do not use the waymask, but take .orR directly
  val s1_meta_corrupt = VecInit(fromMeta.errors.map(_.asUInt.orR))

  def get_waymask(paddrs: Vec[UInt]): Vec[UInt] = {
    val ptags         = paddrs.map(get_phy_tag)
    val tag_eq_vec    = VecInit((0 until PortNumber).map( p => VecInit((0 until nWays).map( w => s1_meta_ptags(p)(w) === ptags(p)))))
    val tag_match_vec = VecInit((0 until PortNumber).map( k => VecInit(tag_eq_vec(k).zipWithIndex.map{ case(way_tag_eq, w) => way_tag_eq && s1_meta_valids(k)(w)})))
    val waymasks      = VecInit(tag_match_vec.map(_.asUInt))
    waymasks
  }

  val s1_SRAM_waymasks = VecInit((0 until PortNumber).map(i =>
    Mux(tlb_valid_pulse(i), get_waymask(s1_req_paddr_wire)(i), get_waymask(s1_req_paddr_reg)(i))))

  /**
    ******************************************************************************
    * update waymask according to MSHR update data
    ******************************************************************************
    */
  def update_waymask(mask: UInt, vSetIdx: UInt, ptag: UInt): UInt = {
    require(mask.getWidth == nWays)
    val new_mask  = WireInit(mask)
    val valid     = fromMSHR.valid && !fromMSHR.bits.corrupt
    val vset_same = fromMSHR.bits.vSetIdx === vSetIdx
    val ptag_same = getPhyTagFromBlk(fromMSHR.bits.blkPaddr) === ptag
    val way_same  = fromMSHR.bits.waymask === mask
    when(valid && vset_same) {
      when(ptag_same) {
        new_mask := fromMSHR.bits.waymask
      }.elsewhen(way_same) {
        new_mask := 0.U
      }
    }
    new_mask
  }

  val s1_SRAM_valid = s0_fire_r || RegNext(s1_need_meta && toMeta.ready)
  val s1_MSHR_valid = fromMSHR.valid && !fromMSHR.bits.corrupt
  val s1_waymasks   = WireInit(VecInit(Seq.fill(PortNumber)(0.U(nWays.W))))
  val s1_waymasks_r = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_SRAM_valid || s1_MSHR_valid)
  (0 until PortNumber).foreach{ i =>
    val old_waymask = Mux(s1_SRAM_valid, s1_SRAM_waymasks(i), s1_waymasks_r(i))
    s1_waymasks(i) := update_waymask(old_waymask, s1_req_vSetIdx(i), s1_req_ptags(i))
  }
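  /* Informally, update_waymask keeps the resolved waymask coherent with in-flight refills:
   *   - if the MSHR response fills the same set with the same ptag, adopt its waymask (it becomes a hit);
   *   - if it fills the same set into the very way we were about to hit but with a different ptag
   *     (same waymask), clear the mask so the line is treated as a miss again.
   * s1_waymasks_r holds the last resolved value across any cycles s1 spends waiting.
   */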
  /**
    ******************************************************************************
    * send enqueue req to WayLookup
    ******************************************************************************
    */
  // Disallow enqueuing wayLookup when an SRAM write occurs.
  toWayLookup.valid := ((state === m_enqWay) || ((state === m_idle) && itlb_finish)) &&
    !s1_flush && !fromMSHR.valid && !s1_isSoftPrefetch // do not enqueue soft prefetch
  toWayLookup.bits.vSetIdx := s1_req_vSetIdx
  toWayLookup.bits.waymask := s1_waymasks
  toWayLookup.bits.ptag    := s1_req_ptags
  toWayLookup.bits.gpaddr  := s1_req_gpaddr
  (0 until PortNumber).foreach { i =>
    val excpValid = (if (i == 0) true.B else s1_doubleline) // the exception in the first line is always valid; in the second line it is valid only for doubleline requests
    // Send s1_itlb_exception to WayLookup (instead of s1_exception_out) for better timing. PMP will be checked again in mainPipe.
    toWayLookup.bits.itlb_exception(i) := Mux(excpValid, s1_itlb_exception(i), ExceptionType.none)
    toWayLookup.bits.itlb_pbmt(i)      := Mux(excpValid, s1_itlb_pbmt(i), Pbmt.pma)
    toWayLookup.bits.meta_corrupt(i)   := excpValid && s1_meta_corrupt(i)
  }

  val s1_waymasks_vec = s1_waymasks.map(_.asTypeOf(Vec(nWays, Bool())))
  when(toWayLookup.fire) {
    assert(PopCount(s1_waymasks_vec(0)) <= 1.U && (PopCount(s1_waymasks_vec(1)) <= 1.U || !s1_doubleline),
      "Multiple hit in main pipe, port0:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x port1:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x ",
      PopCount(s1_waymasks_vec(0)) > 1.U, s1_req_ptags(0), get_idx(s1_req_vaddr(0)), s1_req_vaddr(0),
      PopCount(s1_waymasks_vec(1)) > 1.U && s1_doubleline, s1_req_ptags(1), get_idx(s1_req_vaddr(1)), s1_req_vaddr(1))
  }

  /**
    ******************************************************************************
    * PMP check
    ******************************************************************************
    */
  toPMP.zipWithIndex.foreach { case (p, i) =>
    // if itlb has an exception, paddr can be invalid, therefore the pmp check can be skipped
    p.valid     := s1_valid // && s1_itlb_exception === ExceptionType.none
    p.bits.addr := s1_req_paddr(i)
    p.bits.size := 3.U // TODO
    p.bits.cmd  := TlbCmd.exec
  }
  val s1_pmp_exception = VecInit(fromPMP.map(ExceptionType.fromPMPResp))
  val s1_pmp_mmio      = VecInit(fromPMP.map(_.mmio))

  // also raise af when meta array corruption is detected, to cancel the prefetch
  val s1_meta_exception = VecInit(s1_meta_corrupt.map(ExceptionType.fromECC(io.csr_parity_enable, _)))

  // merge s1 itlb/pmp/meta exceptions; itlb has the highest priority, pmp next, meta lowest
  val s1_exception_out = ExceptionType.merge(
    s1_itlb_exception,
    s1_pmp_exception,
    s1_meta_exception
  )

  // merge pmp mmio and itlb pbmt
  val s1_mmio = VecInit((s1_pmp_mmio zip s1_itlb_pbmt).map{ case (mmio, pbmt) =>
    mmio || Pbmt.isUncache(pbmt)
  })
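  /* Rough intent of the s1 states (the switch below gives the exact transitions):
   *   m_idle       - nothing pending; s1 may complete in a single cycle
   *   m_itlbResend - waiting for an ITLB miss to be resolved
   *   m_metaResend - ITLB is done, but the meta SRAM port was busy and must be read again
   *   m_enqWay     - waiting to enqueue into WayLookup (hardware prefetch only)
   *   m_enterS2    - everything is done, waiting for s2 to become ready
   */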
  /**
    ******************************************************************************
    * state machine
    ******************************************************************************
    */

  switch(state) {
    is(m_idle) {
      when(s1_valid) {
        when(!itlb_finish) {
          next_state := m_itlbResend
        }.elsewhen(!toWayLookup.fire && !s1_isSoftPrefetch) { // itlb_finish
          next_state := m_enqWay
        }.elsewhen(!s2_ready) { // itlb_finish && (toWayLookup.fire || s1_isSoftPrefetch)
          next_state := m_enterS2
        } // .otherwise { next_state := m_idle }
      } // .otherwise { next_state := m_idle }  // !s1_valid
    }
    is(m_itlbResend) {
      when(itlb_finish) {
        when(!toMeta.ready) {
          next_state := m_metaResend
        }.elsewhen(!s1_isSoftPrefetch) { // toMeta.ready
          next_state := m_enqWay
        }.elsewhen(!s2_ready) { // toMeta.ready && s1_isSoftPrefetch
          next_state := m_enterS2
        }.otherwise { // toMeta.ready && s1_isSoftPrefetch && s2_ready
          next_state := m_idle
        }
      } // .otherwise { next_state := m_itlbResend }  // !itlb_finish
    }
    is(m_metaResend) {
      when(toMeta.ready) {
        when(!s1_isSoftPrefetch) {
          next_state := m_enqWay
        }.elsewhen(!s2_ready) { // s1_isSoftPrefetch
          next_state := m_enterS2
        }.otherwise { // s1_isSoftPrefetch && s2_ready
          next_state := m_idle
        }
      } // .otherwise { next_state := m_metaResend }  // !toMeta.ready
    }
    is(m_enqWay) {
      // sanity check: soft prefetch never enqueues WayLookup, so it must not reach this state
      assert(!s1_isSoftPrefetch, "Soft prefetch enters m_enqWay")
      when(toWayLookup.fire && !s2_ready) {
        next_state := m_enterS2
      }.elsewhen(toWayLookup.fire && s2_ready) {
        next_state := m_idle
      }
    }
    is(m_enterS2) {
      when(s2_ready) {
        next_state := m_idle
      }
    }
  }

  when(s1_flush) {
    next_state := m_idle
  }

  /** Stage 1 control */
  from_bpu_s1_flush := s1_valid && !s1_isSoftPrefetch && io.flushFromBpu.shouldFlushByStage3(s1_req_ftqIdx)
  s1_flush := io.flush || from_bpu_s1_flush

  s1_ready := next_state === m_idle
  s1_fire  := (next_state === m_idle) && s1_valid && !s1_flush // used to clear s1_valid & tlb_valid_latch
  val s1_real_fire = s1_fire && io.csr_pf_enable // the real "s1 fire": s1 actually enters s2
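  /* Note: when prefetching is disabled via csr_pf_enable, s1 still "fires" so that s1_valid
   * and the tlb valid latches are cleared, but s1_real_fire stays low, so the request never
   * enters s2 and no miss request is sent to missUnit.
   */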
  /**
    ******************************************************************************
    * IPrefetch Stage 2
    * - 1. Monitor the requests from missUnit to write to SRAM.
    * - 2. send req to missUnit
    ******************************************************************************
    */
  val s2_valid = generatePipeControl(lastFire = s1_real_fire, thisFire = s2_fire, thisFlush = s2_flush, lastFlush = false.B)

  val s2_req_vaddr      = RegEnable(s1_req_vaddr, 0.U.asTypeOf(s1_req_vaddr), s1_real_fire)
  val s2_isSoftPrefetch = RegEnable(s1_isSoftPrefetch, 0.U.asTypeOf(s1_isSoftPrefetch), s1_real_fire)
  val s2_doubleline     = RegEnable(s1_doubleline, 0.U.asTypeOf(s1_doubleline), s1_real_fire)
  val s2_req_paddr      = RegEnable(s1_req_paddr, 0.U.asTypeOf(s1_req_paddr), s1_real_fire)
  val s2_exception      = RegEnable(s1_exception_out, 0.U.asTypeOf(s1_exception_out), s1_real_fire) // includes itlb/pmp/meta exceptions
  val s2_mmio           = RegEnable(s1_mmio, 0.U.asTypeOf(s1_mmio), s1_real_fire)
  val s2_waymasks       = RegEnable(s1_waymasks, 0.U.asTypeOf(s1_waymasks), s1_real_fire)

  val s2_req_vSetIdx = s2_req_vaddr.map(get_idx)
  val s2_req_ptags   = s2_req_paddr.map(get_phy_tag)

  /**
    ******************************************************************************
    * Monitor the requests from missUnit to write to SRAM
    ******************************************************************************
    */

  /* NOTE: If fromMSHR.bits.corrupt, we should set s2_MSHR_hits to false.B and send the prefetch request again.
   * This is the opposite of how mainPipe handles fromMSHR.bits.corrupt,
   * where s2_MSHR_hits should be set to true.B and an error sent to the IFU.
   */
  val s2_MSHR_match = VecInit((0 until PortNumber).map(i =>
    (s2_req_vSetIdx(i) === fromMSHR.bits.vSetIdx) &&
    (s2_req_ptags(i) === getPhyTagFromBlk(fromMSHR.bits.blkPaddr)) &&
    s2_valid && fromMSHR.valid && !fromMSHR.bits.corrupt
  ))
  val s2_MSHR_hits = (0 until PortNumber).map(i => ValidHoldBypass(s2_MSHR_match(i), s2_fire || s2_flush))

  val s2_SRAM_hits = s2_waymasks.map(_.orR)
  val s2_hits = VecInit((0 until PortNumber).map(i => s2_MSHR_hits(i) || s2_SRAM_hits(i)))

  /* s2_exception includes itlb pf/gpf/af, pmp af and meta corruption (af), none of which should be prefetched
   * mmio should not be prefetched either
   * also, if a previous port has an exception, the latter port should not be prefetched
   */
  val s2_miss = VecInit((0 until PortNumber).map { i =>
    !s2_hits(i) && (if (i == 0) true.B else s2_doubleline) &&
      s2_exception.take(i + 1).map(_ === ExceptionType.none).reduce(_ && _) &&
      s2_mmio.take(i + 1).map(!_).reduce(_ && _)
  })

  /**
    ******************************************************************************
    * send req to missUnit
    ******************************************************************************
    */
  val toMSHRArbiter = Module(new Arbiter(new ICacheMissReq, PortNumber))

  // To avoid sending duplicate requests.
  val has_send = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
  (0 until PortNumber).foreach{ i =>
    when(s1_real_fire) {
      has_send(i) := false.B
    }.elsewhen(toMSHRArbiter.io.in(i).fire) {
      has_send(i) := true.B
    }
  }

  (0 until PortNumber).map{ i =>
    toMSHRArbiter.io.in(i).valid         := s2_valid && s2_miss(i) && !has_send(i)
    toMSHRArbiter.io.in(i).bits.blkPaddr := getBlkAddr(s2_req_paddr(i))
    toMSHRArbiter.io.in(i).bits.vSetIdx  := s2_req_vSetIdx(i)
  }

  toMSHR <> toMSHRArbiter.io.out

  s2_flush := io.flush

  val s2_finish = (0 until PortNumber).map(i => has_send(i) || !s2_miss(i) || toMSHRArbiter.io.in(i).fire).reduce(_ && _)
  s2_ready := s2_finish || !s2_valid
  s2_fire  := s2_valid && s2_finish && !s2_flush
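  /* s2 is considered finished once every required port has either hit, already sent its miss
   * request in an earlier cycle (has_send), or is sending it in the current cycle; only then
   * can s2 retire the current request and accept a new one from s1.
   */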
  /** PerfAccumulate */
  // the number of bpu flushes
  XSPerfAccumulate("bpu_s0_flush", from_bpu_s0_flush)
  XSPerfAccumulate("bpu_s1_flush", from_bpu_s1_flush)
  // the number of prefetch requests received from ftq or the backend (software prefetch)
  // XSPerfAccumulate("prefetch_req_receive", io.req.fire)
  XSPerfAccumulate("prefetch_req_receive_hw", io.req.fire && !io.req.bits.isSoftPrefetch)
  XSPerfAccumulate("prefetch_req_receive_sw", io.req.fire && io.req.bits.isSoftPrefetch)
  // the number of prefetch requests sent to missUnit
  // XSPerfAccumulate("prefetch_req_send", toMSHR.fire)
  XSPerfAccumulate("prefetch_req_send_hw", toMSHR.fire && !s2_isSoftPrefetch)
  XSPerfAccumulate("prefetch_req_send_sw", toMSHR.fire && s2_isSoftPrefetch)
  XSPerfAccumulate("to_missUnit_stall", toMSHR.valid && !toMSHR.ready)
  /**
   * Count the number of requests that are filtered for various reasons.
   * The number of prefetch discards reported by the performance counters may be
   * slightly larger than the number actually discarded, because a cancelled
   * request can have multiple reasons at the same time.
   */
  // discard prefetch request by flush
  // XSPerfAccumulate("fdip_prefetch_discard_by_tlb_except", p1_discard && p1_tlb_except)
  // // discard prefetch request by hit icache SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_hit_cache", p2_discard && p1_meta_hit)
  // // discard prefetch request by hit write SRAM
  // XSPerfAccumulate("fdip_prefetch_discard_by_p1_monoitor", p1_discard && p1_monitor_hit)
  // // discard prefetch request by pmp except or mmio
  // XSPerfAccumulate("fdip_prefetch_discard_by_pmp", p2_discard && p2_pmp_except)
  // // discard prefetch request by hit mainPipe info
  // // XSPerfAccumulate("fdip_prefetch_discard_by_mainPipe", p2_discard && p2_mainPipe_hit)
}