/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chisel3._
import chisel3.util._
import coupledL2.VaddrKey
import difftest._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink._
import huancun.{AliasKey, DirtyKey, PrefetchKey}
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.mem.AddPipelineReg
import xiangshan.mem.prefetch._
import xiangshan.mem.trace._

class MissReqWoStoreData(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val pf_source = UInt(L1PfSourceBits.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val way_en = UInt(DCacheWays.W)
  val pc = UInt(VAddrBits.W)

  // store
  val full_overwrite = Bool()

  // which word does amo work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val replace_coh = new ClientMetadata
  val replace_tag = UInt(tagBits.W)
  val id = UInt(reqIdWidth.W)

  val replace_pf = UInt(L1PfSourceBits.W)

  // For now, a miss queue entry req is actually valid when req.valid && !cancel
  // * req.valid is fast to generate
  // * cancel is slow to generate, it will not be used until the last moment
  //
  // cancel may come from the following sources:
  // 1. miss req blocked by writeback queue:
  //    a writeback req of the same address is in progress
  // 2. pmp check failed
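  //
  // Downstream logic keeps this split: a miss queue entry only treats an
  // incoming req as effective when io.req.valid && !io.req.bits.cancel
  // (see primary_fire / secondary_fire in MissEntry below).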
  val cancel = Bool() // cancel is slow to generate, it will cancel the miss req's valid

  // Req source decode
  // Note that req source is NOT cmd type
  // For instance, a req which isFromPrefetch may have R or W cmd
  def isFromLoad = source === LOAD_SOURCE.U
  def isFromStore = source === STORE_SOURCE.U
  def isFromAMO = source === AMO_SOURCE.U
  def isFromPrefetch = source >= DCACHE_PREFETCH_SOURCE.U
  def isPrefetchWrite = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFW
  def isPrefetchRead = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFR
  def hit = req_coh.isValid()
}

class MissReqStoreData(implicit p: Parameters) extends DCacheBundle {
  // store data and store mask will be written to miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)
}

class MissReq(implicit p: Parameters) extends MissReqWoStoreData {
  // store data and store mask will be written to miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  def toMissReqStoreData(): MissReqStoreData = {
    val out = Wire(new MissReqStoreData)
    out.store_data := store_data
    out.store_mask := store_mask
    out
  }

  def toMissReqWoStoreData(): MissReqWoStoreData = {
    val out = Wire(new MissReqWoStoreData)
    out.source := source
    out.replace_pf := replace_pf
    out.pf_source := pf_source
    out.cmd := cmd
    out.addr := addr
    out.vaddr := vaddr
    out.way_en := way_en
    out.full_overwrite := full_overwrite
    out.word_idx := word_idx
    out.amo_data := amo_data
    out.amo_mask := amo_mask
    out.req_coh := req_coh
    out.replace_coh := replace_coh
    out.replace_tag := replace_tag
    out.id := id
    out.cancel := cancel
    out.pc := pc
    out
  }
}

class MissResp(implicit p: Parameters) extends DCacheBundle {
  val id = UInt(log2Up(cfg.nMissEntries).W)
  // cache miss request is handled by miss queue, either merged or newly allocated
  val handled = Bool()
  // cache req missed, merged into one of miss queue entries
  // i.e. !miss_merged means this access is the first miss for this cacheline
  val merged = Bool()
  val repl_way_en = UInt(DCacheWays.W)
}


/**
 * miss queue enq logic: enq is now split into 2 cycles
 * +---------------------------------------------------------------+  pipeline reg   +-------------------------+
 * + s0: enq source arbiter, judge mshr alloc or merge             +    +-------+    + s1: real alloc or merge +
 * +                       +-----+       primary_fire?    ->       +    | alloc |    +                         +
 * + mainpipe  -> req0 ->  |     |       secondary_fire?  ->       +    | merge |    +                         +
 * + loadpipe0 -> req1 ->  | arb |  -> req                ->       + -> | req   | -> +                         +
 * + loadpipe1 -> req2 ->  |     |       mshr id          ->       +    | id    |    +                         +
 * +                       +-----+                                 +    +-------+    +                         +
 * +---------------------------------------------------------------+                 +-------------------------+
 */
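// In s0 the arbitrated req, the alloc/merge decision and the target mshr id
// are latched into miss_req_pipe_reg; in s1 the selected MissEntry consumes
// them (see the io.miss_req_pipe_reg handling in MissEntry below).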

// a pipeline reg between MissReq and MissEntry
class MissReqPipeRegBundle(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheBundle {
  val req = new MissReq
  // this request is about to merge into an existing mshr
  val merge = Bool()
  // this request is about to allocate a new mshr
  val alloc = Bool()
  val mshr_id = UInt(log2Up(cfg.nMissEntries).W)

  def reg_valid(): Bool = {
    (merge || alloc)
  }

  def matched(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    block_match && reg_valid() && !(req.isFromPrefetch)
  }

  def prefetch_late_en(new_req: MissReqWoStoreData, new_req_valid: Bool): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    new_req_valid && alloc && block_match && (req.isFromPrefetch) && !(new_req.isFromPrefetch)
  }

  def reject_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled: sbuffer should avoid this situation,
    // as stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore

    val set_match = addr_to_dcache_set(req.vaddr) === addr_to_dcache_set(new_req.vaddr)
    val way_match = req.way_en === new_req.way_en
    Mux(
      alloc,
      Mux(
        block_match,
        !alias_match || !(merge_load || merge_store),
        set_match && way_match
      ),
      false.B
    )
  }

  def merge_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled: sbuffer should avoid this situation,
    // as stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore
    Mux(
      alloc,
      block_match && alias_match && (merge_load || merge_store),
      false.B
    )
  }
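
  // To illustrate the two cases above: a new req to the same block merges
  // only if its vaddr alias bits match and the source combination is allowed;
  // a new req to a different block that maps to the same set and picks the
  // same way must be rejected, since it would compete with this req for that way.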

  // send out acquire as soon as possible
  // if a new store miss req is about to merge into this pipe reg, don't send acquire now
  def can_send_acquire(valid: Bool, new_req: MissReq): Bool = {
    alloc && !(valid && merge_req(new_req) && new_req.isFromStore)
  }

  def get_acquire(l2_pf_store_only: Bool): TLBundleA = {
    val acquire = Wire(new TLBundleA(edge.bundle))
    val grow_param = req.req_coh.onAccess(req.cmd)._2
    val acquireBlock = edge.AcquireBlock(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
    val acquirePerm = edge.AcquirePerm(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
    acquire := Mux(req.full_overwrite, acquirePerm, acquireBlock)
    // resolve cache alias by L2
    acquire.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
    // pass vaddr to l2
    acquire.user.lift(VaddrKey).foreach(_ := req.vaddr(VAddrBits - 1, blockOffBits))
    // trigger prefetch
    acquire.user.lift(PrefetchKey).foreach(_ := Mux(l2_pf_store_only, req.isFromStore, true.B))
    // req source
    when(req.isFromLoad) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromStore) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromAMO) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }

    acquire
  }
}

class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    // MSHR update request, MSHR state and addr will be updated when req.fire
    val req = Flipped(ValidIO(new MissReqWoStoreData))
    // pipeline reg
    val miss_req_pipe_reg = Input(new MissReqPipeRegBundle(edge))
    // allocate this entry for new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it cannot merge the new req
    val secondary_reject = Output(Bool())
    // way selected for replacing, used to support plru update
    val repl_way_en = Output(UInt(DCacheWays.W))

    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    // send refill info to load queue
    val refill_to_ldq = ValidIO(new Refill)

    // refill pipe
    val refill_pipe_req = DecoupledIO(new RefillPipeReq)
    val refill_pipe_resp = Input(Bool())

    // replace pipe
    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Input(Bool())

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val debug_early_replace = ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    })

    val req_handled_by_this_entry = Output(Bool())

    val forwardInfo = Output(new MissEntryForwardIO)
    val l2_pf_store_only = Input(Bool())

    // whether the pipeline reg has sent out an acquire
    val acquire_fired_by_pipe_reg = Input(Bool())
    val memSetPattenDetected = Input(Bool())

    val perf_pending_prefetch = Output(Bool())
    val perf_pending_normal = Output(Bool())

    val rob_head_query = new DCacheBundle {
      val vaddr = Input(UInt(VAddrBits.W))
      val query_valid = Input(Bool())

      val resp = Output(Bool())

      def hit(e_vaddr: UInt): Bool = {
        require(e_vaddr.getWidth == VAddrBits)
        query_valid && vaddr(VAddrBits - 1, DCacheLineOffset) === e_vaddr(VAddrBits - 1, DCacheLineOffset)
      }
    }

    val latency_monitor = new DCacheBundle {
      val load_miss_refilling = Output(Bool())
      val store_miss_refilling = Output(Bool())
      val amo_miss_refilling = Output(Bool())
      val pf_miss_refilling = Output(Bool())
    }

    val prefetch_info = new DCacheBundle {
      val late_prefetch = Output(Bool())
    }
    val nMaxPrefetchEntry = Input(UInt(64.W))
    val matched = Output(Bool())
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReqWoStoreData)
  val req_primary_fire = Reg(new MissReqWoStoreData) // for perf use
  val req_store_mask = Reg(UInt(cfg.blockBytes.W))
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)

  val miss_req_pipe_reg_bits = io.miss_req_pipe_reg.req

  val input_req_is_prefetch = isPrefetch(miss_req_pipe_reg_bits.cmd)
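
  // scheduling flags: a false s_* flag means the corresponding request still
  // has to be sent; a false w_* flag means the corresponding response is
  // still outstanding; all of them reset to true, and allocation clears the
  // ones relevant to the incoming req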
  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_replace_req = RegInit(true.B)
  val s_refill = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_replace_resp = RegInit(true.B)
  val w_refill_resp = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)

  val release_entry = s_grantack && w_refill_resp && w_mainpipe_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantfirst

  val error = RegInit(false.B)
  val prefetch = RegInit(false.B)
  val access = RegInit(false.B)

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  // val full_overwrite = req.isFromStore && req_store_mask.andR
  val full_overwrite = Reg(Bool())

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  // refill data with store data, this reg will be used to store:
  // 1. store data (if needed), before l2 refill data
  // 2. store data and l2 refill data merged result (i.e. the new cacheline
  //    that will be written to the data array)
  val refill_and_store_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  // raw data refilled to l1 by l2
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))

  // allocate current miss queue entry for a miss req
  val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel)
  // merge miss req to current miss queue entry
  val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel)

  val req_handled_by_this_entry = primary_fire || secondary_fire

  // for perf use
  val secondary_fired = RegInit(false.B)

  io.perf_pending_prefetch := req_valid && prefetch && !secondary_fired
  io.perf_pending_normal := req_valid && (!prefetch || secondary_fired)

  io.rob_head_query.resp := io.rob_head_query.hit(req.vaddr) && req_valid

  io.req_handled_by_this_entry := req_handled_by_this_entry

  when (release_entry && req_valid) {
    req_valid := false.B
  }

  when (io.miss_req_pipe_reg.alloc) {
    assert(RegNext(primary_fire), "after 1 cycle of primary_fire, entry will be allocated")
    req_valid := true.B

    req := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req_primary_fire := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)

    s_acquire := io.acquire_fired_by_pipe_reg
    s_grantack := false.B

    w_grantfirst := false.B
    w_grantlast := false.B

    when(miss_req_pipe_reg_bits.isFromStore) {
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
    }
    full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite

    when (!miss_req_pipe_reg_bits.isFromAMO) {
      s_refill := false.B
      w_refill_resp := false.B
    }

    when (!miss_req_pipe_reg_bits.hit && miss_req_pipe_reg_bits.replace_coh.isValid() && !miss_req_pipe_reg_bits.isFromAMO) {
      s_replace_req := false.B
      w_replace_resp := false.B
    }

    when (miss_req_pipe_reg_bits.isFromAMO) {
      s_mainpipe_req := false.B
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := miss_req_pipe_reg_bits.isFromLoad
    error := false.B
    prefetch := input_req_is_prefetch && !io.miss_req_pipe_reg.prefetch_late_en(io.req.bits, io.req.valid)
    access := false.B
    secondary_fired := false.B
  }

  when (io.miss_req_pipe_reg.merge) {
    assert(RegNext(secondary_fire) || RegNext(RegNext(primary_fire)), "after 1 cycle of secondary_fire or 2 cycles of primary_fire, entry will be merged")
    assert(miss_req_pipe_reg_bits.req_coh.state <= req.req_coh.state || (prefetch && !access))
    assert(!(miss_req_pipe_reg_bits.isFromAMO || req.isFromAMO))
    // use the most up-to-date meta
    req.req_coh := miss_req_pipe_reg_bits.req_coh

    assert(!miss_req_pipe_reg_bits.isFromPrefetch, "can not merge a prefetch req, late prefetch should always be ignored!")

    when (miss_req_pipe_reg_bits.isFromStore) {
      req := miss_req_pipe_reg_bits
      req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)
      // keep this entry's own way_en and replace meta
      req.way_en := req.way_en
      req.replace_coh := req.replace_coh
      req.replace_tag := req.replace_tag
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
      full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite
      assert(is_alias_match(req.vaddr, miss_req_pipe_reg_bits.vaddr), "alias bits should be the same when merging store")
    }

    should_refill_data := should_refill_data_reg || miss_req_pipe_reg_bits.isFromLoad
    should_refill_data_reg := should_refill_data
    when (!input_req_is_prefetch) {
      access := true.B // when merging a non-prefetch req, set the access bit
    }
    secondary_fired := true.B
  }

  when (io.mem_acquire.fire) {
    s_acquire := true.B
  }

  // merge data refilled by l2 and store data, update miss queue entry, gen refill_req
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
  // merge refilled data and store data (if needed)
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
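  // e.g. with old_data = 0x01234567, new_data = 0xdeadbeef and byte mask
  // wmask = 0b0001: full_wmask = 0x000000ff and the result is 0x012345ef,
  // i.e. only the bytes selected by wmask are taken from new_data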
  for (i <- 0 until blockRows) {
    // new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    new_data(i) := refill_and_store_data(i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isFromStore, req_store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }

  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
  when (io.mem_grant.fire) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
      for (i <- 0 until beatRows) {
        val idx = (refill_count << log2Floor(beatRows)) + i.U
        val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
        refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    error := io.mem_grant.bits.denied || io.mem_grant.bits.corrupt || error

    refill_data_raw(refill_count) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }

  when (io.mem_finish.fire) {
    s_grantack := true.B
  }

  when (io.replace_pipe_req.fire) {
    s_replace_req := true.B
  }

  when (io.replace_pipe_resp) {
    w_replace_resp := true.B
  }

  when (io.refill_pipe_req.fire) {
    s_refill := true.B
  }

  when (io.refill_pipe_resp) {
    w_refill_resp := true.B
  }

  when (io.main_pipe_req.fire) {
    s_mainpipe_req := true.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

  def before_req_sent_can_merge(new_req: MissReqWoStoreData): Bool = {
    acquire_not_sent && (req.isFromLoad || req.isFromPrefetch) && (new_req.isFromLoad || new_req.isFromStore)
  }

  def before_data_refill_can_merge(new_req: MissReqWoStoreData): Bool = {
    data_not_refilled && (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
  }

  // Note that late prefetch will be ignored

  def should_merge(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    block_match && alias_match &&
    (
      before_req_sent_can_merge(new_req) ||
      before_data_refill_can_merge(new_req)
    )
  }

  // a store can be merged before io.mem_acquire fires
  // a store cannot be merged in the same cycle that io.mem_acquire fires
  // a load can be merged before io.mem_grant fires
  //
  // TODO: merge store if possible? mem_acquire may need to be re-issued,
  // but sbuffer entry can be freed
  def should_reject(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val set_match = set === addr_to_dcache_set(new_req.vaddr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)

    req_valid &&
      Mux(
        block_match,
        (!before_req_sent_can_merge(new_req) && !before_data_refill_can_merge(new_req)) || !alias_match,
        set_match && new_req.way_en === req.way_en
      )
  }
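
  // the last nMaxPrefetchEntry entries (largest ids) may always accept
  // prefetch reqs; the other entries refuse prefetch reqs unless a memset
  // pattern has been detected (see memSetPattenDetected in MissQueue below)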
  // req_valid will be updated 1 cycle after primary_fire, so next cycle, this entry cannot accept a new req
  when(RegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) {
    // can accept prefetch req
    io.primary_ready := !req_valid && !RegNext(primary_fire)
  }.otherwise {
    // cannot accept prefetch req except when a memset pattern is detected
    io.primary_ready := !req_valid && (!io.req.bits.isFromPrefetch || io.memSetPattenDetected) && !RegNext(primary_fire)
  }
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)
  io.repl_way_en := req.way_en

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U))

  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_and_store_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  // when granted data is all ready, wake up lq's miss load
  io.refill_to_ldq.valid := RegNext(!w_grantlast && io.mem_grant.fire)
  io.refill_to_ldq.bits.addr := RegNext(req.addr + (refill_count << refillOffBits))
  io.refill_to_ldq.bits.data := refill_data_splited(RegNext(refill_count))
  io.refill_to_ldq.bits.error := RegNext(io.mem_grant.bits.corrupt || io.mem_grant.bits.denied)
  io.refill_to_ldq.bits.refill_done := RegNext(refill_done && io.mem_grant.fire)
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt
  io.refill_to_ldq.bits.id := io.id

  // if the entry has a pending merge req, wait for it
  // Note: now, only wait for store, because store may acquire T
  io.mem_acquire.valid := !s_acquire && !(io.miss_req_pipe_reg.merge && miss_req_pipe_reg_bits.isFromStore)
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
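  // a full overwrite does not need the old data, so a permission-only upgrade
  // (AcquirePerm) is enough; otherwise fetch the whole block (AcquireBlock)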
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // resolve cache alias by L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
  // pass vaddr to l2
  io.mem_acquire.bits.user.lift(VaddrKey).foreach(_ := req.vaddr(VAddrBits - 1, blockOffBits))
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := Mux(io.l2_pf_store_only, req.isFromStore, true.B))
  // req source
  when(prefetch && !secondary_fired) {
    io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
  }.otherwise {
    when(req.isFromStore) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromLoad) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromAMO) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }
  }
  require(nSets <= 256)

  io.mem_grant.ready := !w_grantlast && s_acquire

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire)
  assert(RegNext(!io.mem_grant.fire || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  io.replace_pipe_req.valid := !s_replace_req
  val replace = io.replace_pipe_req.bits
  replace := DontCare
  replace.miss := false.B
  replace.miss_id := io.id
  replace.miss_dirty := false.B
  replace.probe := false.B
  replace.probe_need_data := false.B
  replace.source := LOAD_SOURCE.U
  replace.vaddr := req.vaddr // only untag bits are needed
  replace.addr := Cat(req.replace_tag, 0.U(pgUntagBits.W)) // only tag bits are needed
  replace.store_mask := 0.U
  replace.replace := true.B
  replace.replace_way_en := req.way_en
  replace.error := false.B

  io.refill_pipe_req.valid := !s_refill && w_replace_resp && w_grantlast
  val refill = io.refill_pipe_req.bits
  refill.source := req.source
  refill.vaddr := req.vaddr
  refill.addr := req.addr
  refill.way_en := req.way_en
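  // write the whole line when the grant carried data or a load needs the
  // refilled data; otherwise only write the banks touched by the store mask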
  refill.wmask := Mux(
    hasData || req.isFromLoad,
    ~0.U(DCacheBanks.W),
    VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, req_store_mask).orR)).asUInt
  )
  refill.data := refill_and_store_data.asTypeOf((new RefillPipeReq).data)
  refill.miss_id := io.id
  refill.id := req.id
  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B) -> Branch,
      Cat(rd, toB, true.B)  -> Branch,
      Cat(rd, toT, false.B) -> Trunk,
      Cat(rd, toT, true.B)  -> Dirty,
      Cat(wi, toT, false.B) -> Trunk,
      Cat(wi, toT, true.B)  -> Dirty,
      Cat(wr, toT, false.B) -> Dirty,
      Cat(wr, toT, true.B)  -> Dirty))
  }
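  // e.g. a read miss (rd) granted toT with a dirty echo installs the line as
  // Dirty, the same grant without the dirty hint installs Trunk, and any
  // write (wr) granted toT installs Dirty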
  refill.meta.coh := ClientMetadata(missCohGen(req.cmd, grant_param, isDirty))
  refill.error := error
  refill.prefetch := req.pf_source
  refill.access := access
  refill.alias := req.vaddr(13, 12) // TODO
  assert(!io.refill_pipe_req.valid || (refill.meta.coh =/= ClientMetadata(Nothing)), "refill modifies meta to Nothing, should not happen")

  io.main_pipe_req.valid := !s_mainpipe_req && w_grantlast
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.miss_param := grant_param
  io.main_pipe_req.bits.miss_dirty := isDirty
  io.main_pipe_req.bits.miss_way_en := req.way_en
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.store_data := refill_and_store_data.asUInt
  io.main_pipe_req.bits.store_mask := ~0.U(blockBytes.W)
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.error := error
  io.main_pipe_req.bits.id := req.id

  io.block_addr.valid := req_valid && w_grantlast && !w_refill_resp
  io.block_addr.bits := req.addr

  io.debug_early_replace.valid := BoolStopWatch(io.replace_pipe_resp, io.refill_pipe_req.fire)
  io.debug_early_replace.bits.idx := addr_to_dcache_set(req.vaddr)
  io.debug_early_replace.bits.tag := req.replace_tag

  io.forwardInfo.apply(req_valid, req.addr, refill_and_store_data, w_grantfirst, w_grantlast)

  io.matched := req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && !prefetch
  io.prefetch_info.late_prefetch := io.req.valid && !(io.req.bits.isFromPrefetch) && req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && prefetch

  when(io.prefetch_info.late_prefetch) {
    prefetch := false.B
  }

  // refill latency monitor
  val start_counting = RegNext(io.mem_acquire.fire) || (RegNextN(primary_fire, 2) && s_acquire)
  io.latency_monitor.load_miss_refilling  := req_valid && req_primary_fire.isFromLoad     && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.store_miss_refilling := req_valid && req_primary_fire.isFromStore    && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.amo_miss_refilling   := req_valid && req_primary_fire.isFromAMO      && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.pf_miss_refilling    := req_valid && req_primary_fire.isFromPrefetch && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)

  XSPerfAccumulate("miss_req_primary", primary_fire)
  XSPerfAccumulate("miss_req_merged", secondary_fire)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire, io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("penalty_from_grant_to_refill", !w_refill_resp && w_grantlast)
  XSPerfAccumulate("prefetch_req_primary", primary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("prefetch_req_merged", secondary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("can_not_send_acquire_because_of_merging_store", !s_acquire && io.miss_req_pipe_reg.merge && miss_req_pipe_reg_bits.isFromStore)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(RegNext(RegNext(primary_fire)), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = primary_fire && io.req.bits.isFromLoad
  val refill_finished = RegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(start_counting, RegNext(io.mem_grant.fire && refill_done))
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}

class MissQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule with HasPerfEvents {
  val io = IO(new Bundle {
    val hartId = Input(UInt(8.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val resp = Output(new MissResp)
    val refill_to_ldq = ValidIO(new Refill)

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val refill_pipe_req = DecoupledIO(new RefillPipeReq)
    val refill_pipe_req_dup = Vec(nDupStatus, DecoupledIO(new RefillPipeReqCtrl))
    val refill_pipe_resp = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))

    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new AtomicsResp))

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    val full = Output(Bool())

    // only for performance counter
    // This is valid when an mshr has finished replacing a block (w_replace_resp),
    // but hasn't received Grant from L2 (!w_grantlast)
    val debug_early_replace = Vec(cfg.nMissEntries, ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    }))

    // forward missqueue
    val forward = Vec(LoadPipelineWidth, new LduToMissqueueForwardIO)
    val l2_pf_store_only = Input(Bool())

    val memSetPattenDetected = Output(Bool())
    val lqEmpty = Input(Bool())

    val prefetch_info = new Bundle {
      val naive = new Bundle {
        val late_miss_prefetch = Output(Bool())
      }

      val fdp = new Bundle {
        val late_miss_prefetch = Output(Bool())
        val prefetch_monitor_cnt = Output(Bool())
        val total_prefetch = Output(Bool())
      }
    }

    val bloom_filter_query = new Bundle {
      val set = ValidIO(new BloomQueryBundle(BLOOM_FILTER_ENTRY_NUM))
      val clr = ValidIO(new BloomQueryBundle(BLOOM_FILTER_ENTRY_NUM))
    }

    val mq_enq_cancel = Output(Bool())

    val debugTopDown = new DCacheTopDownIO
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge)))

  val miss_req_pipe_reg = RegInit(0.U.asTypeOf(new MissReqPipeRegBundle(edge)))
  val acquire_from_pipereg = Wire(chiselTypeOf(io.mem_acquire))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

  val merge = Cat(secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.req.bits))).orR
  val reject = Cat(secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.req.bits))).orR
  val alloc = !reject && !merge && Cat(primary_ready_vec).orR
  val accept = alloc || merge

  val req_mshr_handled_vec = entries.map(_.io.req_handled_by_this_entry)
  // merged to pipeline reg
  val req_pipeline_reg_handled = miss_req_pipe_reg.merge_req(io.req.bits)
  assert(PopCount(Seq(req_pipeline_reg_handled, VecInit(req_mshr_handled_vec).asUInt.orR)) <= 1.U, "miss req will either go to mshr or pipeline reg")
  assert(PopCount(req_mshr_handled_vec) <= 1.U, "Only one mshr can handle a req")
  io.resp.id := Mux(!req_pipeline_reg_handled, OHToUInt(req_mshr_handled_vec), miss_req_pipe_reg.mshr_id)
  io.resp.handled := Cat(req_mshr_handled_vec).orR || req_pipeline_reg_handled
  io.resp.merged := merge
  io.resp.repl_way_en := Mux(!req_pipeline_reg_handled, Mux1H(secondary_ready_vec, entries.map(_.io.repl_way_en)), miss_req_pipe_reg.req.way_en)

  // MissQueue enq logic is now split into 2 cycles (see the diagram above)
  miss_req_pipe_reg.req := io.req.bits
  miss_req_pipe_reg.alloc := alloc && io.req.valid && !io.req.bits.cancel
  miss_req_pipe_reg.merge := merge && io.req.valid && !io.req.bits.cancel
  miss_req_pipe_reg.mshr_id := io.resp.id

  assert(PopCount(Seq(alloc && io.req.valid, merge && io.req.valid)) <= 1.U, "allocate and merge a mshr in same cycle!")
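
  // memset pattern detection: count back-to-back allocations/merges whose
  // source is not a load; once the count reaches Threshold while the load
  // queue is empty, raise memSetPattenDetected so that prefetch reqs may
  // also occupy normal mshr entries (see primary_ready in MissEntry)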
  val source_except_load_cnt = RegInit(0.U(10.W))
  when(VecInit(req_mshr_handled_vec).asUInt.orR || req_pipeline_reg_handled) {
    when(io.req.bits.isFromLoad) {
      source_except_load_cnt := 0.U
    }.otherwise {
      when(io.req.bits.isFromStore) {
        source_except_load_cnt := source_except_load_cnt + 1.U
      }
    }
  }
  val Threshold = 8
  val memSetPattenDetected = RegNext((source_except_load_cnt >= Threshold.U) && io.lqEmpty)

  io.memSetPattenDetected := memSetPattenDetected

  val forwardInfo_vec = VecInit(entries.map(_.io.forwardInfo))
  (0 until LoadPipelineWidth).map(i => {
    val id = io.forward(i).mshrid
    val req_valid = io.forward(i).valid
    val paddr = io.forward(i).paddr

    val (forward_mshr, forwardData) = forwardInfo_vec(id).forward(req_valid, paddr)
    io.forward(i).forward_result_valid := forwardInfo_vec(id).check(req_valid, paddr)
    io.forward(i).forward_mshr := forward_mshr
    io.forward(i).forwardData := forwardData
  })

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req, while another mshr wants to reject it.
  // That is, a coming req has the same paddr as that of mshr_0 (merge),
  // while it has the same set and the same way as mshr_1 (reject).
  // In this situation, the coming req should be merged by mshr_0
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.map(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }

  io.mem_grant.ready := false.B

  val nMaxPrefetchEntry = WireInit(Constantin.createRecord("nMaxPrefetchEntry" + p(XSCoreParamsKey).HartId.toString, initValue = 14.U))
  entries.zipWithIndex.foreach {
    case (e, i) =>
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR

      e.io.hartId := io.hartId
      e.io.id := i.U
      e.io.l2_pf_store_only := io.l2_pf_store_only
      e.io.req.valid := io.req.valid
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits.toMissReqWoStoreData()
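
      // grant beats are routed to the entry whose id matches the TileLink
      // source field; every other entry sees an invalid grant this cycle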
      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      when(miss_req_pipe_reg.reg_valid() && miss_req_pipe_reg.mshr_id === i.U) {
        e.io.miss_req_pipe_reg := miss_req_pipe_reg
      }.otherwise {
        e.io.miss_req_pipe_reg := DontCare
        e.io.miss_req_pipe_reg.merge := false.B
        e.io.miss_req_pipe_reg.alloc := false.B
      }

      e.io.acquire_fired_by_pipe_reg := acquire_from_pipereg.fire

      e.io.refill_pipe_resp := io.refill_pipe_resp.valid && io.refill_pipe_resp.bits === i.U
      e.io.replace_pipe_resp := io.replace_pipe_resp.valid && io.replace_pipe_resp.bits === i.U
      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U

      e.io.memSetPattenDetected := memSetPattenDetected
      e.io.nMaxPrefetchEntry := nMaxPrefetchEntry

      io.debug_early_replace(i) := e.io.debug_early_replace
      e.io.main_pipe_req.ready := io.main_pipe_req.ready
  }

  io.req.ready := accept
  io.mq_enq_cancel := io.req.bits.cancel
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  acquire_from_pipereg.valid := miss_req_pipe_reg.can_send_acquire(io.req.valid, io.req.bits)
  acquire_from_pipereg.bits := miss_req_pipe_reg.get_acquire(io.l2_pf_store_only)

  XSPerfAccumulate("acquire_fire_from_pipereg", acquire_from_pipereg.fire)
  XSPerfAccumulate("pipereg_valid", miss_req_pipe_reg.reg_valid())
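
  // TLArbiter.lowest prioritizes the lowest-index source, so an acquire
  // pending in the enq pipeline reg takes precedence over per-entry acquires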
  val acquire_sources = Seq(acquire_from_pipereg) ++ entries.map(_.io.mem_acquire)
  TLArbiter.lowest(edge, io.mem_acquire, acquire_sources:_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  // arbiter_with_pipereg_N_dup(entries.map(_.io.refill_pipe_req), io.refill_pipe_req,
  //   io.refill_pipe_req_dup,
  //   Some("refill_pipe_req"))
  val out_refill_pipe_req = Wire(Decoupled(new RefillPipeReq))
  val out_refill_pipe_req_ctrl = Wire(Decoupled(new RefillPipeReqCtrl))
  out_refill_pipe_req_ctrl.valid := out_refill_pipe_req.valid
  out_refill_pipe_req_ctrl.bits := out_refill_pipe_req.bits.getCtrl
  out_refill_pipe_req.ready := out_refill_pipe_req_ctrl.ready
  arbiter(entries.map(_.io.refill_pipe_req), out_refill_pipe_req, Some("refill_pipe_req"))
  for (dup <- io.refill_pipe_req_dup) {
    AddPipelineReg(out_refill_pipe_req_ctrl, dup, false.B)
  }
  AddPipelineReg(out_refill_pipe_req, io.refill_pipe_req, false.B)

  arbiter_with_pipereg(entries.map(_.io.replace_pipe_req), io.replace_pipe_req, Some("replace_pipe_req"))

  // amo's main pipe req out
  val main_pipe_req_vec = entries.map(_.io.main_pipe_req)
  io.main_pipe_req.valid := VecInit(main_pipe_req_vec.map(_.valid)).asUInt.orR
  io.main_pipe_req.bits := Mux1H(main_pipe_req_vec.map(_.valid), main_pipe_req_vec.map(_.bits))
  assert(PopCount(VecInit(main_pipe_req_vec.map(_.valid))) <= 1.U, "multi main pipe req")

  io.probe_block := Cat(probe_block_vec).orR

  io.full := ~Cat(entries.map(_.io.primary_ready)).andR // overridden by the last connection to io.full below

  // prefetch related
  io.prefetch_info.naive.late_miss_prefetch := io.req.valid && io.req.bits.isPrefetchRead && (miss_req_pipe_reg.matched(io.req.bits) || Cat(entries.map(_.io.matched)).orR)

  io.prefetch_info.fdp.late_miss_prefetch := (miss_req_pipe_reg.prefetch_late_en(io.req.bits.toMissReqWoStoreData(), io.req.valid) || Cat(entries.map(_.io.prefetch_info.late_prefetch)).orR)
  io.prefetch_info.fdp.prefetch_monitor_cnt := io.refill_pipe_req.fire
  io.prefetch_info.fdp.total_prefetch := alloc && io.req.valid && !io.req.bits.cancel && isFromL1Prefetch(io.req.bits.pf_source)

  io.bloom_filter_query.set.valid := alloc && io.req.valid && !io.req.bits.cancel && !isFromL1Prefetch(io.req.bits.replace_pf) && io.req.bits.replace_coh.isValid() && isFromL1Prefetch(io.req.bits.pf_source)
  io.bloom_filter_query.set.bits.addr := io.bloom_filter_query.set.bits.get_addr(Cat(io.req.bits.replace_tag, get_untag(io.req.bits.vaddr))) // the evicted block address

  io.bloom_filter_query.clr.valid := io.refill_pipe_req.fire && isFromL1Prefetch(io.refill_pipe_req.bits.prefetch)
  io.bloom_filter_query.clr.bits.addr := io.bloom_filter_query.clr.bits.get_addr(io.refill_pipe_req.bits.addr)

  // L1MissTrace Chisel DB
  val debug_miss_trace = Wire(new L1MissTrace)
  debug_miss_trace.vaddr := io.req.bits.vaddr
  debug_miss_trace.paddr := io.req.bits.addr
  debug_miss_trace.source := io.req.bits.source
  debug_miss_trace.pc := io.req.bits.pc

  val isWriteL1MissQMissTable = WireInit(Constantin.createRecord("isWriteL1MissQMissTable" + p(XSCoreParamsKey).HartId.toString))
  val table = ChiselDB.createTable("L1MissQMissTrace_hart" + p(XSCoreParamsKey).HartId.toString, new L1MissTrace)
  table.log(debug_miss_trace, isWriteL1MissQMissTable.orR && io.req.valid && !io.req.bits.cancel && alloc, "MissQueue", clock, reset)

  // Difftest
  if (env.EnableDifftest) {
    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := io.hartId
    difftest.index := 1.U
    difftest.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.addr := io.refill_to_ldq.bits.addr
    difftest.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.data)
    difftest.idtfr := DontCare
  }

  // Perf count
  XSPerfAccumulate("miss_req", io.req.fire && !io.req.bits.cancel)
  XSPerfAccumulate("miss_req_allocate", io.req.fire && !io.req.bits.cancel && alloc)
XSPerfAccumulate("miss_req_load_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromLoad) 1070 XSPerfAccumulate("miss_req_store_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromStore) 1071 XSPerfAccumulate("miss_req_amo_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromAMO) 1072 XSPerfAccumulate("miss_req_merge_load", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromLoad) 1073 XSPerfAccumulate("miss_req_reject_load", io.req.valid && !io.req.bits.cancel && reject && io.req.bits.isFromLoad) 1074 XSPerfAccumulate("probe_blocked_by_miss", io.probe_block) 1075 XSPerfAccumulate("prefetch_primary_fire", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch) 1076 XSPerfAccumulate("prefetch_secondary_fire", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromPrefetch) 1077 XSPerfAccumulate("memSetPattenDetected", memSetPattenDetected) 1078 val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W)) 1079 val num_valids = PopCount(~Cat(primary_ready_vec).asUInt) 1080 when (num_valids > max_inflight) { 1081 max_inflight := num_valids 1082 } 1083 // max inflight (average) = max_inflight_total / cycle cnt 1084 XSPerfAccumulate("max_inflight", max_inflight) 1085 QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U) 1086 io.full := num_valids === cfg.nMissEntries.U 1087 XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1) 1088 1089 XSPerfHistogram("L1DMLP_CPUData", PopCount(VecInit(entries.map(_.io.perf_pending_normal)).asUInt), true.B, 0, cfg.nMissEntries, 1) 1090 XSPerfHistogram("L1DMLP_Prefetch", PopCount(VecInit(entries.map(_.io.perf_pending_prefetch)).asUInt), true.B, 0, cfg.nMissEntries, 1) 1091 XSPerfHistogram("L1DMLP_Total", num_valids, true.B, 0, cfg.nMissEntries, 1) 1092 1093 XSPerfAccumulate("miss_load_refill_latency", PopCount(entries.map(_.io.latency_monitor.load_miss_refilling))) 1094 XSPerfAccumulate("miss_store_refill_latency", PopCount(entries.map(_.io.latency_monitor.store_miss_refilling))) 1095 XSPerfAccumulate("miss_amo_refill_latency", PopCount(entries.map(_.io.latency_monitor.amo_miss_refilling))) 1096 XSPerfAccumulate("miss_pf_refill_latency", PopCount(entries.map(_.io.latency_monitor.pf_miss_refilling))) 1097 1098 val rob_head_miss_in_dcache = VecInit(entries.map(_.io.rob_head_query.resp)).asUInt.orR 1099 1100 entries.foreach { 1101 case e => { 1102 e.io.rob_head_query.query_valid := io.debugTopDown.robHeadVaddr.valid 1103 e.io.rob_head_query.vaddr := io.debugTopDown.robHeadVaddr.bits 1104 } 1105 } 1106 1107 io.debugTopDown.robHeadMissInDCache := rob_head_miss_in_dcache 1108 1109 val perfValidCount = RegNext(PopCount(entries.map(entry => (!entry.io.primary_ready)))) 1110 val perfEvents = Seq( 1111 ("dcache_missq_req ", io.req.fire), 1112 ("dcache_missq_1_4_valid", (perfValidCount < (cfg.nMissEntries.U/4.U))), 1113 ("dcache_missq_2_4_valid", (perfValidCount > (cfg.nMissEntries.U/4.U)) & (perfValidCount <= (cfg.nMissEntries.U/2.U))), 1114 ("dcache_missq_3_4_valid", (perfValidCount > (cfg.nMissEntries.U/2.U)) & (perfValidCount <= (cfg.nMissEntries.U*3.U/4.U))), 1115 ("dcache_missq_4_4_valid", (perfValidCount > (cfg.nMissEntries.U*3.U/4.U))), 1116 ) 1117 generatePerfEvent() 1118} 1119