1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.cache 18 19import chisel3._ 20import chisel3.util._ 21import coupledL2.VaddrKey 22import coupledL2.IsKeywordKey 23import difftest._ 24import freechips.rocketchip.tilelink.ClientStates._ 25import freechips.rocketchip.tilelink.MemoryOpCategories._ 26import freechips.rocketchip.tilelink.TLPermissions._ 27import freechips.rocketchip.tilelink._ 28import huancun.{AliasKey, DirtyKey, PrefetchKey} 29import org.chipsalliance.cde.config.Parameters 30import utility._ 31import utils._ 32import xiangshan._ 33import xiangshan.mem.AddPipelineReg 34import xiangshan.mem.prefetch._ 35import xiangshan.mem.trace._ 36import xiangshan.mem.LqPtr 37 38class MissReqWoStoreData(implicit p: Parameters) extends DCacheBundle { 39 val source = UInt(sourceTypeWidth.W) 40 val pf_source = UInt(L1PfSourceBits.W) 41 val cmd = UInt(M_SZ.W) 42 val addr = UInt(PAddrBits.W) 43 val vaddr = UInt(VAddrBits.W) 44 val pc = UInt(VAddrBits.W) 45 46 val lqIdx = new LqPtr 47 // store 48 val full_overwrite = Bool() 49 50 // which word does amo work on? 51 val word_idx = UInt(log2Up(blockWords).W) 52 val amo_data = UInt(DataBits.W) 53 val amo_mask = UInt((DataBits / 8).W) 54 55 val req_coh = new ClientMetadata 56 val id = UInt(reqIdWidth.W) 57 58 // For now, miss queue entry req is actually valid when req.valid && !cancel 59 // * req.valid is fast to generate 60 // * cancel is slow to generate, it will not be used until the last moment 61 // 62 // cancel may come from the following sources: 63 // 1. miss req blocked by writeback queue: 64 // a writeback req of the same address is in progress 65 // 2. pmp check failed 66 val cancel = Bool() // cancel is slow to generate, it will cancel missreq.valid 67 68 // Req source decode 69 // Note that req source is NOT cmd type 70 // For instance, a req which isFromPrefetch may have R or W cmd 71 def isFromLoad = source === LOAD_SOURCE.U 72 def isFromStore = source === STORE_SOURCE.U 73 def isFromAMO = source === AMO_SOURCE.U 74 def isFromPrefetch = source >= DCACHE_PREFETCH_SOURCE.U 75 def isPrefetchWrite = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFW 76 def isPrefetchRead = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFR 77 def hit = req_coh.isValid() 78} 79 80class MissReqStoreData(implicit p: Parameters) extends DCacheBundle { 81 // store data and store mask will be written to miss queue entry 82 // 1 cycle after req.fire() and meta write 83 val store_data = UInt((cfg.blockBytes * 8).W) 84 val store_mask = UInt(cfg.blockBytes.W) 85} 86 87class MissQueueRefillInfo(implicit p: Parameters) extends MissReqStoreData { 88 // refill_info for mainpipe req awake 89 val miss_param = UInt(TLPermissions.bdWidth.W) 90 val miss_dirty = Bool() 91 val error = Bool() 92} 93 94class MissReq(implicit p: Parameters) extends MissReqWoStoreData { 95 // store data and store mask will be written to miss queue entry 96 // 1 cycle after req.fire() and meta write 97 val store_data = UInt((cfg.blockBytes * 8).W) 98 val store_mask = UInt(cfg.blockBytes.W) 99 100 def toMissReqStoreData(): MissReqStoreData = { 101 val out = Wire(new MissReqStoreData) 102 out.store_data := store_data 103 out.store_mask := store_mask 104 out 105 } 106 107 def toMissReqWoStoreData(): MissReqWoStoreData = { 108 val out = Wire(new MissReqWoStoreData) 109 out.source := source 110 out.pf_source := pf_source 111 out.cmd := cmd 112 out.addr := addr 113 out.vaddr := vaddr 114 out.full_overwrite := full_overwrite 115 out.word_idx := word_idx 116 out.amo_data := amo_data 117 out.amo_mask := amo_mask 118 out.req_coh := req_coh 119 out.id := id 120 out.cancel := cancel 121 out.pc := pc 122 out.lqIdx := lqIdx 123 out 124 } 125} 126 127class MissResp(implicit p: Parameters) extends DCacheBundle { 128 val id = UInt(log2Up(cfg.nMissEntries).W) 129 // cache miss request is handled by miss queue, either merged or newly allocated 130 val handled = Bool() 131 // cache req missed, merged into one of miss queue entries 132 // i.e. !miss_merged means this access is the first miss for this cacheline 133 val merged = Bool() 134} 135 136 137/** 138 * miss queue enq logic: enq is now splited into 2 cycles 139 * +---------------------------------------------------------------------+ pipeline reg +-------------------------+ 140 * + s0: enq source arbiter, judge mshr alloc or merge + +-------+ + s1: real alloc or merge + 141 * + +-----+ primary_fire? -> + | alloc | + + 142 * + mainpipe -> req0 -> | | secondary_fire? -> + | merge | + + 143 * + loadpipe0 -> req1 -> | arb | -> req -> + -> | req | -> + + 144 * + loadpipe1 -> req2 -> | | mshr id -> + | id | + + 145 * + +-----+ + +-------+ + + 146 * +---------------------------------------------------------------------+ +-------------------------+ 147 */ 148 149// a pipeline reg between MissReq and MissEntry 150class MissReqPipeRegBundle(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheBundle 151 with HasCircularQueuePtrHelper 152 { 153 val req = new MissReq 154 // this request is about to merge to an existing mshr 155 val merge = Bool() 156 // this request is about to allocate a new mshr 157 val alloc = Bool() 158 val cancel = Bool() 159 val mshr_id = UInt(log2Up(cfg.nMissEntries).W) 160 161 def reg_valid(): Bool = { 162 (merge || alloc) 163 } 164 165 def matched(new_req: MissReq): Bool = { 166 val block_match = get_block(req.addr) === get_block(new_req.addr) 167 block_match && reg_valid() && !(req.isFromPrefetch) 168 } 169 170 def prefetch_late_en(new_req: MissReqWoStoreData, new_req_valid: Bool): Bool = { 171 val block_match = get_block(req.addr) === get_block(new_req.addr) 172 new_req_valid && alloc && block_match && (req.isFromPrefetch) && !(new_req.isFromPrefetch) 173 } 174 175 def reject_req(new_req: MissReq): Bool = { 176 val block_match = get_block(req.addr) === get_block(new_req.addr) 177 val alias_match = is_alias_match(req.vaddr, new_req.vaddr) 178 val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad 179 // store merge to a store is disabled, sbuffer should avoid this situation, as store to same address should preserver their program order to match memory model 180 val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore 181 182 val set_match = addr_to_dcache_set(req.vaddr) === addr_to_dcache_set(new_req.vaddr) 183 184 Mux( 185 alloc, 186 block_match && (!alias_match || !(merge_load || merge_store)), 187 false.B 188 ) 189 } 190 191 def merge_req(new_req: MissReq): Bool = { 192 val block_match = get_block(req.addr) === get_block(new_req.addr) 193 val alias_match = is_alias_match(req.vaddr, new_req.vaddr) 194 val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad 195 // store merge to a store is disabled, sbuffer should avoid this situation, as store to same address should preserver their program order to match memory model 196 val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore 197 Mux( 198 alloc, 199 block_match && alias_match && (merge_load || merge_store), 200 false.B 201 ) 202 } 203 204 def merge_isKeyword(new_req: MissReq): Bool = { 205 val load_merge_load = merge_req(new_req) && req.isFromLoad && new_req.isFromLoad 206 val store_merge_load = merge_req(new_req) && req.isFromStore && new_req.isFromLoad 207 val load_merge_load_use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx) 208 val use_new_req_isKeyword = (load_merge_load && load_merge_load_use_new_req_isKeyword) || store_merge_load 209 Mux ( 210 use_new_req_isKeyword, 211 new_req.vaddr(5).asBool, 212 req.vaddr(5).asBool 213 ) 214 } 215 216 def isKeyword(): Bool= { 217 val alloc_isKeyword = Mux( 218 alloc, 219 Mux( 220 req.isFromLoad, 221 req.vaddr(5).asBool, 222 false.B), 223 false.B) 224 Mux( 225 merge_req(req), 226 merge_isKeyword(req), 227 alloc_isKeyword 228 ) 229 } 230 // send out acquire as soon as possible 231 // if a new store miss req is about to merge into this pipe reg, don't send acquire now 232 def can_send_acquire(valid: Bool, new_req: MissReq): Bool = { 233 alloc && !(valid && merge_req(new_req) && new_req.isFromStore) 234 } 235 236 def get_acquire(l2_pf_store_only: Bool): TLBundleA = { 237 val acquire = Wire(new TLBundleA(edge.bundle)) 238 val grow_param = req.req_coh.onAccess(req.cmd)._2 239 val acquireBlock = edge.AcquireBlock( 240 fromSource = mshr_id, 241 toAddress = get_block_addr(req.addr), 242 lgSize = (log2Up(cfg.blockBytes)).U, 243 growPermissions = grow_param 244 )._2 245 val acquirePerm = edge.AcquirePerm( 246 fromSource = mshr_id, 247 toAddress = get_block_addr(req.addr), 248 lgSize = (log2Up(cfg.blockBytes)).U, 249 growPermissions = grow_param 250 )._2 251 acquire := Mux(req.full_overwrite, acquirePerm, acquireBlock) 252 // resolve cache alias by L2 253 acquire.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12)) 254 // pass vaddr to l2 255 acquire.user.lift(VaddrKey).foreach(_ := req.vaddr(VAddrBits - 1, blockOffBits)) 256 257 // miss req pipe reg pass keyword to L2, is priority 258 acquire.echo.lift(IsKeywordKey).foreach(_ := isKeyword()) 259 260 // trigger prefetch 261 acquire.user.lift(PrefetchKey).foreach(_ := Mux(l2_pf_store_only, req.isFromStore, true.B)) 262 // req source 263 when(req.isFromLoad) { 264 acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U) 265 }.elsewhen(req.isFromStore) { 266 acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U) 267 }.elsewhen(req.isFromAMO) { 268 acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U) 269 }.otherwise { 270 acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U) 271 } 272 273 acquire 274 } 275 276 def block_match(release_addr: UInt): Bool = { 277 reg_valid() && get_block(req.addr) === get_block(release_addr) 278 } 279} 280 281class MissEntry(edge: TLEdgeOut, reqNum: Int)(implicit p: Parameters) extends DCacheModule 282 with HasCircularQueuePtrHelper 283 { 284 val io = IO(new Bundle() { 285 val hartId = Input(UInt(hartIdLen.W)) 286 // MSHR ID 287 val id = Input(UInt(log2Up(cfg.nMissEntries).W)) 288 // client requests 289 // MSHR update request, MSHR state and addr will be updated when req.fire 290 val req = Flipped(ValidIO(new MissReqWoStoreData)) 291 val wbq_block_miss_req = Input(Bool()) 292 // pipeline reg 293 val miss_req_pipe_reg = Input(new MissReqPipeRegBundle(edge)) 294 // allocate this entry for new req 295 val primary_valid = Input(Bool()) 296 // this entry is free and can be allocated to new reqs 297 val primary_ready = Output(Bool()) 298 // this entry is busy, but it can merge the new req 299 val secondary_ready = Output(Bool()) 300 // this entry is busy and it can not merge the new req 301 val secondary_reject = Output(Bool()) 302 // way selected for replacing, used to support plru update 303 // bus 304 val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle)) 305 val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle))) 306 val mem_finish = DecoupledIO(new TLBundleE(edge.bundle)) 307 308 val queryME = Vec(reqNum, Flipped(new DCacheMEQueryIOBundle)) 309 310 // send refill info to load queue, useless now 311 val refill_to_ldq = ValidIO(new Refill) 312 313 // replace pipe 314 val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache 315 316 // main pipe: amo miss 317 val main_pipe_req = DecoupledIO(new MainPipeReq) 318 val main_pipe_resp = Input(Bool()) 319 val main_pipe_refill_resp = Input(Bool()) 320 val main_pipe_replay = Input(Bool()) 321 322 // for main pipe s2 323 val refill_info = ValidIO(new MissQueueRefillInfo) 324 325 val block_addr = ValidIO(UInt(PAddrBits.W)) 326 327 val req_addr = ValidIO(UInt(PAddrBits.W)) 328 329 val req_handled_by_this_entry = Output(Bool()) 330 331 val forwardInfo = Output(new MissEntryForwardIO) 332 val l2_pf_store_only = Input(Bool()) 333 334 // whether the pipeline reg has send out an acquire 335 val acquire_fired_by_pipe_reg = Input(Bool()) 336 val memSetPattenDetected = Input(Bool()) 337 338 val perf_pending_prefetch = Output(Bool()) 339 val perf_pending_normal = Output(Bool()) 340 341 val rob_head_query = new DCacheBundle { 342 val vaddr = Input(UInt(VAddrBits.W)) 343 val query_valid = Input(Bool()) 344 345 val resp = Output(Bool()) 346 347 def hit(e_vaddr: UInt): Bool = { 348 require(e_vaddr.getWidth == VAddrBits) 349 query_valid && vaddr(VAddrBits - 1, DCacheLineOffset) === e_vaddr(VAddrBits - 1, DCacheLineOffset) 350 } 351 } 352 353 val latency_monitor = new DCacheBundle { 354 val load_miss_refilling = Output(Bool()) 355 val store_miss_refilling = Output(Bool()) 356 val amo_miss_refilling = Output(Bool()) 357 val pf_miss_refilling = Output(Bool()) 358 } 359 360 val prefetch_info = new DCacheBundle { 361 val late_prefetch = Output(Bool()) 362 } 363 val nMaxPrefetchEntry = Input(UInt(64.W)) 364 val matched = Output(Bool()) 365 }) 366 367 assert(!RegNext(io.primary_valid && !io.primary_ready)) 368 369 val req = Reg(new MissReqWoStoreData) 370 val req_primary_fire = Reg(new MissReqWoStoreData) // for perf use 371 val req_store_mask = Reg(UInt(cfg.blockBytes.W)) 372 val req_valid = RegInit(false.B) 373 val set = addr_to_dcache_set(req.vaddr) 374 // initial keyword 375 val isKeyword = RegInit(false.B) 376 377 val miss_req_pipe_reg_bits = io.miss_req_pipe_reg.req 378 379 val input_req_is_prefetch = isPrefetch(miss_req_pipe_reg_bits.cmd) 380 381 val s_acquire = RegInit(true.B) 382 val s_grantack = RegInit(true.B) 383 val s_mainpipe_req = RegInit(true.B) 384 385 val w_grantfirst = RegInit(true.B) 386 val w_grantlast = RegInit(true.B) 387 val w_mainpipe_resp = RegInit(true.B) 388 val w_refill_resp = RegInit(true.B) 389 val w_l2hint = RegInit(true.B) 390 391 val mainpipe_req_fired = RegInit(true.B) 392 393 val release_entry = s_grantack && w_mainpipe_resp && w_refill_resp 394 395 val acquire_not_sent = !s_acquire && !io.mem_acquire.ready 396 val data_not_refilled = !w_grantfirst 397 398 val error = RegInit(false.B) 399 val prefetch = RegInit(false.B) 400 val access = RegInit(false.B) 401 402 val should_refill_data_reg = Reg(Bool()) 403 val should_refill_data = WireInit(should_refill_data_reg) 404 405 val should_replace = RegInit(false.B) 406 407 val full_overwrite = Reg(Bool()) 408 409 val (_, _, refill_done, refill_count) = edge.count(io.mem_grant) 410 val grant_param = Reg(UInt(TLPermissions.bdWidth.W)) 411 412 // refill data with store data, this reg will be used to store: 413 // 1. store data (if needed), before l2 refill data 414 // 2. store data and l2 refill data merged result (i.e. new cacheline taht will be write to data array) 415 val refill_and_store_data = Reg(Vec(blockRows, UInt(rowBits.W))) 416 // raw data refilled to l1 by l2 417 val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W))) 418 419 // allocate current miss queue entry for a miss req 420 val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel && !io.wbq_block_miss_req) 421 val primary_accept = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel) 422 // merge miss req to current miss queue entry 423 val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel && !io.wbq_block_miss_req) 424 val secondary_accept = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel) 425 426 val req_handled_by_this_entry = primary_accept || secondary_accept 427 428 // for perf use 429 val secondary_fired = RegInit(false.B) 430 431 io.perf_pending_prefetch := req_valid && prefetch && !secondary_fired 432 io.perf_pending_normal := req_valid && (!prefetch || secondary_fired) 433 434 io.rob_head_query.resp := io.rob_head_query.hit(req.vaddr) && req_valid 435 436 io.req_handled_by_this_entry := req_handled_by_this_entry 437 438 when (release_entry && req_valid) { 439 req_valid := false.B 440 } 441 442 when (io.miss_req_pipe_reg.alloc && !io.miss_req_pipe_reg.cancel) { 443 assert(RegNext(primary_fire), "after 1 cycle of primary_fire, entry will be allocated") 444 req_valid := true.B 445 446 req := miss_req_pipe_reg_bits.toMissReqWoStoreData() 447 req_primary_fire := miss_req_pipe_reg_bits.toMissReqWoStoreData() 448 req.addr := get_block_addr(miss_req_pipe_reg_bits.addr) 449 //only load miss need keyword 450 isKeyword := Mux(miss_req_pipe_reg_bits.isFromLoad, miss_req_pipe_reg_bits.vaddr(5).asBool,false.B) 451 452 s_acquire := io.acquire_fired_by_pipe_reg 453 s_grantack := false.B 454 s_mainpipe_req := false.B 455 456 w_grantfirst := false.B 457 w_grantlast := false.B 458 w_l2hint := false.B 459 mainpipe_req_fired := false.B 460 461 when(miss_req_pipe_reg_bits.isFromStore) { 462 req_store_mask := miss_req_pipe_reg_bits.store_mask 463 for (i <- 0 until blockRows) { 464 refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i) 465 } 466 } 467 full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite 468 469 when (!miss_req_pipe_reg_bits.isFromAMO) { 470 w_refill_resp := false.B 471 } 472 473 when (miss_req_pipe_reg_bits.isFromAMO) { 474 w_mainpipe_resp := false.B 475 } 476 477 should_refill_data_reg := miss_req_pipe_reg_bits.isFromLoad 478 error := false.B 479 prefetch := input_req_is_prefetch && !io.miss_req_pipe_reg.prefetch_late_en(io.req.bits, io.req.valid) 480 access := false.B 481 secondary_fired := false.B 482 } 483 484 when (io.miss_req_pipe_reg.merge && !io.miss_req_pipe_reg.cancel) { 485 assert(RegNext(secondary_fire) || RegNext(RegNext(primary_fire)), "after 1 cycle of secondary_fire or 2 cycle of primary_fire, entry will be merged") 486 assert(miss_req_pipe_reg_bits.req_coh.state <= req.req_coh.state || (prefetch && !access)) 487 assert(!(miss_req_pipe_reg_bits.isFromAMO || req.isFromAMO)) 488 // use the most uptodate meta 489 req.req_coh := miss_req_pipe_reg_bits.req_coh 490 491 isKeyword := Mux( 492 before_req_sent_can_merge(miss_req_pipe_reg_bits), 493 before_req_sent_merge_iskeyword(miss_req_pipe_reg_bits), 494 isKeyword) 495 assert(!miss_req_pipe_reg_bits.isFromPrefetch, "can not merge a prefetch req, late prefetch should always be ignored!") 496 497 when (miss_req_pipe_reg_bits.isFromStore) { 498 req := miss_req_pipe_reg_bits 499 req.addr := get_block_addr(miss_req_pipe_reg_bits.addr) 500 req_store_mask := miss_req_pipe_reg_bits.store_mask 501 for (i <- 0 until blockRows) { 502 refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i) 503 } 504 full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite 505 assert(is_alias_match(req.vaddr, miss_req_pipe_reg_bits.vaddr), "alias bits should be the same when merging store") 506 } 507 508 should_refill_data := should_refill_data_reg || miss_req_pipe_reg_bits.isFromLoad 509 should_refill_data_reg := should_refill_data 510 when (!input_req_is_prefetch) { 511 access := true.B // when merge non-prefetch req, set access bit 512 } 513 secondary_fired := true.B 514 } 515 516 when (io.mem_acquire.fire) { 517 s_acquire := true.B 518 } 519 520 // merge data refilled by l2 and store data, update miss queue entry, gen refill_req 521 val new_data = Wire(Vec(blockRows, UInt(rowBits.W))) 522 val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W))) 523 // merge refilled data and store data (if needed) 524 def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = { 525 val full_wmask = FillInterleaved(8, wmask) 526 (~full_wmask & old_data | full_wmask & new_data) 527 } 528 for (i <- 0 until blockRows) { 529 // new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i) 530 new_data(i) := refill_and_store_data(i) 531 // we only need to merge data for Store 532 new_mask(i) := Mux(req.isFromStore, req_store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U) 533 } 534 535 val hasData = RegInit(true.B) 536 val isDirty = RegInit(false.B) 537 when (io.mem_grant.fire) { 538 w_grantfirst := true.B 539 grant_param := io.mem_grant.bits.param 540 when (edge.hasData(io.mem_grant.bits)) { 541 // GrantData 542 when (isKeyword) { 543 for (i <- 0 until beatRows) { 544 val idx = ((refill_count << log2Floor(beatRows)) + i.U) ^ 4.U 545 val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i) 546 refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx)) 547 } 548 } 549 .otherwise{ 550 for (i <- 0 until beatRows) { 551 val idx = (refill_count << log2Floor(beatRows)) + i.U 552 val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i) 553 refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx)) 554 } 555 } 556 w_grantlast := w_grantlast || refill_done 557 hasData := true.B 558 }.otherwise { 559 // Grant 560 assert(full_overwrite) 561 for (i <- 0 until blockRows) { 562 refill_and_store_data(i) := new_data(i) 563 } 564 w_grantlast := true.B 565 hasData := false.B 566 } 567 568 error := io.mem_grant.bits.denied || io.mem_grant.bits.corrupt || error 569 570 refill_data_raw(refill_count ^ isKeyword) := io.mem_grant.bits.data 571 isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B) 572 } 573 574 when (io.mem_finish.fire) { 575 s_grantack := true.B 576 } 577 578 when (io.main_pipe_req.fire) { 579 s_mainpipe_req := true.B 580 mainpipe_req_fired := true.B 581 } 582 583 when (io.main_pipe_replay) { 584 s_mainpipe_req := false.B 585 } 586 587 when (io.main_pipe_resp) { 588 w_mainpipe_resp := true.B 589 } 590 591 when(io.main_pipe_refill_resp) { 592 w_refill_resp := true.B 593 } 594 595 when (io.l2_hint.valid) { 596 w_l2hint := true.B 597 } 598 599 def before_req_sent_can_merge(new_req: MissReqWoStoreData): Bool = { 600 // acquire_not_sent && (new_req.isFromLoad || new_req.isFromStore) 601 602 // Since most acquire requests have been issued from pipe_reg, 603 // the number of such merge situations is currently small, 604 // So dont Merge anything for better timing. 605 false.B 606 } 607 608 def before_data_refill_can_merge(new_req: MissReqWoStoreData): Bool = { 609 data_not_refilled && new_req.isFromLoad 610 } 611 612 // Note that late prefetch will be ignored 613 614 def should_merge(new_req: MissReqWoStoreData): Bool = { 615 val block_match = get_block(req.addr) === get_block(new_req.addr) 616 val alias_match = is_alias_match(req.vaddr, new_req.vaddr) 617 block_match && alias_match && 618 ( 619 before_req_sent_can_merge(new_req) || 620 before_data_refill_can_merge(new_req) 621 ) 622 } 623 624 def before_req_sent_merge_iskeyword(new_req: MissReqWoStoreData): Bool = { 625 val need_check_isKeyword = acquire_not_sent && req.isFromLoad && new_req.isFromLoad && should_merge(new_req) 626 val use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx) 627 Mux( 628 need_check_isKeyword, 629 Mux( 630 use_new_req_isKeyword, 631 new_req.vaddr(5).asBool, 632 req.vaddr(5).asBool 633 ), 634 isKeyword 635 ) 636 } 637 638 // store can be merged before io.mem_acquire.fire 639 // store can not be merged the cycle that io.mem_acquire.fire 640 // load can be merged before io.mem_grant.fire 641 // 642 // TODO: merge store if possible? mem_acquire may need to be re-issued, 643 // but sbuffer entry can be freed 644 def should_reject(new_req: MissReqWoStoreData): Bool = { 645 val block_match = get_block(req.addr) === get_block(new_req.addr) 646 val set_match = set === addr_to_dcache_set(new_req.vaddr) 647 val alias_match = is_alias_match(req.vaddr, new_req.vaddr) 648 649 req_valid && Mux( 650 block_match, 651 (!before_req_sent_can_merge(new_req) && !before_data_refill_can_merge(new_req)) || !alias_match, 652 false.B 653 ) 654 } 655 656 // req_valid will be updated 1 cycle after primary_fire, so next cycle, this entry cannot accept a new req 657 when(GatedValidRegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) { 658 // can accept prefetch req 659 io.primary_ready := !req_valid && !GatedValidRegNext(primary_fire) 660 }.otherwise { 661 // cannot accept prefetch req except when a memset patten is detected 662 io.primary_ready := !req_valid && (!io.req.bits.isFromPrefetch || io.memSetPattenDetected) && !GatedValidRegNext(primary_fire) 663 } 664 io.secondary_ready := should_merge(io.req.bits) 665 io.secondary_reject := should_reject(io.req.bits) 666 667 // generate primary_ready & secondary_(ready | reject) for each miss request 668 for (i <- 0 until reqNum) { 669 when(GatedValidRegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) { 670 io.queryME(i).primary_ready := !req_valid && !GatedValidRegNext(primary_fire) 671 }.otherwise { 672 io.queryME(i).primary_ready := !req_valid && !GatedValidRegNext(primary_fire) && 673 (!io.queryME(i).req.bits.isFromPrefetch || io.memSetPattenDetected) 674 } 675 io.queryME(i).secondary_ready := should_merge(io.queryME(i).req.bits) 676 io.queryME(i).secondary_reject := should_reject(io.queryME(i).req.bits) 677 } 678 679 // should not allocate, merge or reject at the same time 680 assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U || !io.req.valid)) 681 682 val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => { 683 val data = refill_and_store_data.asUInt 684 data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth) 685 }))) 686 // when granted data is all ready, wakeup lq's miss load 687 val refill_to_ldq_en = !w_grantlast && io.mem_grant.fire 688 io.refill_to_ldq.valid := GatedValidRegNext(refill_to_ldq_en) 689 io.refill_to_ldq.bits.addr := RegEnable(req.addr + ((refill_count ^ isKeyword) << refillOffBits), refill_to_ldq_en) 690 io.refill_to_ldq.bits.data := refill_data_splited(RegEnable(refill_count ^ isKeyword, refill_to_ldq_en)) 691 io.refill_to_ldq.bits.error := RegEnable(io.mem_grant.bits.corrupt || io.mem_grant.bits.denied, refill_to_ldq_en) 692 io.refill_to_ldq.bits.refill_done := RegEnable(refill_done && io.mem_grant.fire, refill_to_ldq_en) 693 io.refill_to_ldq.bits.hasdata := hasData 694 io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt 695 io.refill_to_ldq.bits.id := io.id 696 697 // if the entry has a pending merge req, wait for it 698 // Note: now, only wait for store, because store may acquire T 699 io.mem_acquire.valid := !s_acquire && !(io.miss_req_pipe_reg.merge && !io.miss_req_pipe_reg.cancel && miss_req_pipe_reg_bits.isFromStore) 700 val grow_param = req.req_coh.onAccess(req.cmd)._2 701 val acquireBlock = edge.AcquireBlock( 702 fromSource = io.id, 703 toAddress = req.addr, 704 lgSize = (log2Up(cfg.blockBytes)).U, 705 growPermissions = grow_param 706 )._2 707 val acquirePerm = edge.AcquirePerm( 708 fromSource = io.id, 709 toAddress = req.addr, 710 lgSize = (log2Up(cfg.blockBytes)).U, 711 growPermissions = grow_param 712 )._2 713 io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock) 714 // resolve cache alias by L2 715 io.mem_acquire.bits.user.lift(AliasKey).foreach( _ := req.vaddr(13, 12)) 716 // pass vaddr to l2 717 io.mem_acquire.bits.user.lift(VaddrKey).foreach( _ := req.vaddr(VAddrBits-1, blockOffBits)) 718 // pass keyword to L2 719 io.mem_acquire.bits.echo.lift(IsKeywordKey).foreach(_ := isKeyword) 720 // trigger prefetch 721 io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := Mux(io.l2_pf_store_only, req.isFromStore, true.B)) 722 // req source 723 when(prefetch && !secondary_fired) { 724 io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U) 725 }.otherwise { 726 when(req.isFromStore) { 727 io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U) 728 }.elsewhen(req.isFromLoad) { 729 io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U) 730 }.elsewhen(req.isFromAMO) { 731 io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U) 732 }.otherwise { 733 io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U) 734 } 735 } 736 require(nSets <= 256) 737 738 // io.mem_grant.ready := !w_grantlast && s_acquire 739 io.mem_grant.ready := true.B 740 assert(!(io.mem_grant.valid && !(!w_grantlast && s_acquire)), "dcache should always be ready for mem_grant now") 741 742 val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire) 743 assert(RegNext(!io.mem_grant.fire || edge.isRequest(io.mem_grant.bits))) 744 io.mem_finish.valid := !s_grantack && w_grantfirst 745 io.mem_finish.bits := grantack 746 747 // Send mainpipe_req when receive hint from L2 or receive data without hint 748 io.main_pipe_req.valid := !s_mainpipe_req && (w_l2hint || w_grantlast) 749 io.main_pipe_req.bits := DontCare 750 io.main_pipe_req.bits.miss := true.B 751 io.main_pipe_req.bits.miss_id := io.id 752 io.main_pipe_req.bits.probe := false.B 753 io.main_pipe_req.bits.source := req.source 754 io.main_pipe_req.bits.cmd := req.cmd 755 io.main_pipe_req.bits.vaddr := req.vaddr 756 io.main_pipe_req.bits.addr := req.addr 757 io.main_pipe_req.bits.word_idx := req.word_idx 758 io.main_pipe_req.bits.amo_data := req.amo_data 759 io.main_pipe_req.bits.amo_mask := req.amo_mask 760 io.main_pipe_req.bits.id := req.id 761 io.main_pipe_req.bits.pf_source := req.pf_source 762 io.main_pipe_req.bits.access := access 763 764 io.block_addr.valid := req_valid && w_grantlast 765 io.block_addr.bits := req.addr 766 767 io.req_addr.valid := req_valid 768 io.req_addr.bits := req.addr 769 770 io.refill_info.valid := req_valid && w_grantlast 771 io.refill_info.bits.store_data := refill_and_store_data.asUInt 772 io.refill_info.bits.store_mask := ~0.U(blockBytes.W) 773 io.refill_info.bits.miss_param := grant_param 774 io.refill_info.bits.miss_dirty := isDirty 775 io.refill_info.bits.error := error 776 777 XSPerfAccumulate("miss_refill_mainpipe_req", io.main_pipe_req.fire) 778 XSPerfAccumulate("miss_refill_without_hint", io.main_pipe_req.fire && !mainpipe_req_fired && !w_l2hint) 779 XSPerfAccumulate("miss_refill_replay", io.main_pipe_replay) 780 781 val w_grantfirst_forward_info = Mux(isKeyword, w_grantlast, w_grantfirst) 782 val w_grantlast_forward_info = Mux(isKeyword, w_grantfirst, w_grantlast) 783 io.forwardInfo.apply(req_valid, req.addr, refill_and_store_data, w_grantfirst_forward_info, w_grantlast_forward_info) 784 785 io.matched := req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && !prefetch 786 io.prefetch_info.late_prefetch := io.req.valid && !(io.req.bits.isFromPrefetch) && req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && prefetch 787 788 when(io.prefetch_info.late_prefetch) { 789 prefetch := false.B 790 } 791 792 // refill latency monitor 793 val start_counting = GatedValidRegNext(io.mem_acquire.fire) || (GatedValidRegNextN(primary_fire, 2) && s_acquire) 794 io.latency_monitor.load_miss_refilling := req_valid && req_primary_fire.isFromLoad && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true) 795 io.latency_monitor.store_miss_refilling := req_valid && req_primary_fire.isFromStore && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true) 796 io.latency_monitor.amo_miss_refilling := req_valid && req_primary_fire.isFromAMO && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true) 797 io.latency_monitor.pf_miss_refilling := req_valid && req_primary_fire.isFromPrefetch && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true) 798 799 XSPerfAccumulate("miss_req_primary", primary_fire) 800 XSPerfAccumulate("miss_req_merged", secondary_fire) 801 XSPerfAccumulate("load_miss_penalty_to_use", 802 should_refill_data && 803 BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true) 804 ) 805 XSPerfAccumulate("penalty_between_grantlast_and_release", 806 BoolStopWatch(!RegNext(w_grantlast) && w_grantlast, release_entry, true) 807 ) 808 XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire, io.main_pipe_resp)) 809 XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready) 810 XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid) 811 XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready) 812 XSPerfAccumulate("prefetch_req_primary", primary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U) 813 XSPerfAccumulate("prefetch_req_merged", secondary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U) 814 XSPerfAccumulate("can_not_send_acquire_because_of_merging_store", !s_acquire && io.miss_req_pipe_reg.merge && io.miss_req_pipe_reg.cancel && miss_req_pipe_reg_bits.isFromStore) 815 816 val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(GatedValidRegNextN(primary_fire, 2), release_entry) 817 XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true) 818 XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false) 819 820 val load_miss_begin = primary_fire && io.req.bits.isFromLoad 821 val refill_finished = GatedValidRegNext(!w_grantlast && refill_done) && should_refill_data 822 val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not real refill finish time 823 XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true) 824 XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false) 825 826 val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(start_counting, GatedValidRegNext(io.mem_grant.fire && refill_done)) 827 XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true) 828 XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false) 829} 830 831class MissQueue(edge: TLEdgeOut, reqNum: Int)(implicit p: Parameters) extends DCacheModule 832 with HasPerfEvents 833 { 834 val io = IO(new Bundle { 835 val hartId = Input(UInt(hartIdLen.W)) 836 val req = Flipped(DecoupledIO(new MissReq)) 837 val resp = Output(new MissResp) 838 val refill_to_ldq = ValidIO(new Refill) 839 840 val queryMQ = Vec(reqNum, Flipped(new DCacheMQQueryIOBundle)) 841 842 val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle)) 843 val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle))) 844 val mem_finish = DecoupledIO(new TLBundleE(edge.bundle)) 845 846 val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache 847 848 val main_pipe_req = DecoupledIO(new MainPipeReq) 849 val main_pipe_resp = Flipped(ValidIO(new MainPipeResp)) 850 851 val mainpipe_info = Input(new MainPipeInfoToMQ) 852 val refill_info = ValidIO(new MissQueueRefillInfo) 853 854 // block probe 855 val probe_addr = Input(UInt(PAddrBits.W)) 856 val probe_block = Output(Bool()) 857 858 // block replace when release an addr valid in mshr 859 val replace_addr = Flipped(ValidIO(UInt(PAddrBits.W))) 860 val replace_block = Output(Bool()) 861 862 // req blocked by wbq 863 val wbq_block_miss_req = Input(Bool()) 864 865 val full = Output(Bool()) 866 867 // forward missqueue 868 val forward = Vec(LoadPipelineWidth, new LduToMissqueueForwardIO) 869 val l2_pf_store_only = Input(Bool()) 870 871 val memSetPattenDetected = Output(Bool()) 872 val lqEmpty = Input(Bool()) 873 874 val prefetch_info = new Bundle { 875 val naive = new Bundle { 876 val late_miss_prefetch = Output(Bool()) 877 } 878 879 val fdp = new Bundle { 880 val late_miss_prefetch = Output(Bool()) 881 val prefetch_monitor_cnt = Output(Bool()) 882 val total_prefetch = Output(Bool()) 883 } 884 } 885 886 val mq_enq_cancel = Output(Bool()) 887 888 val debugTopDown = new DCacheTopDownIO 889 }) 890 891 // 128KBL1: FIXME: provide vaddr for l2 892 893 val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge, reqNum))) 894 895 val miss_req_pipe_reg = RegInit(0.U.asTypeOf(new MissReqPipeRegBundle(edge))) 896 val acquire_from_pipereg = Wire(chiselTypeOf(io.mem_acquire)) 897 898 val primary_ready_vec = entries.map(_.io.primary_ready) 899 val secondary_ready_vec = entries.map(_.io.secondary_ready) 900 val secondary_reject_vec = entries.map(_.io.secondary_reject) 901 val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr } 902 903 val merge = ParallelORR(Cat(secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.req.bits)))) 904 val reject = ParallelORR(Cat(secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.req.bits)))) 905 val alloc = !reject && !merge && ParallelORR(Cat(primary_ready_vec)) 906 val accept = alloc || merge 907 908 // generate req_ready for each miss request for better timing 909 for (i <- 0 until reqNum) { 910 val _primary_ready_vec = entries.map(_.io.queryME(i).primary_ready) 911 val _secondary_ready_vec = entries.map(_.io.queryME(i).secondary_ready) 912 val _secondary_reject_vec = entries.map(_.io.queryME(i).secondary_reject) 913 val _merge = ParallelORR(Cat(_secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.queryMQ(i).req.bits)))) 914 val _reject = ParallelORR(Cat(_secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.queryMQ(i).req.bits)))) 915 val _alloc = !_reject && !_merge && ParallelORR(Cat(_primary_ready_vec)) 916 val _accept = _alloc || _merge 917 918 io.queryMQ(i).ready := _accept 919 } 920 921 val req_mshr_handled_vec = entries.map(_.io.req_handled_by_this_entry) 922 // merged to pipeline reg 923 val req_pipeline_reg_handled = miss_req_pipe_reg.merge_req(io.req.bits) && io.req.valid 924 assert(PopCount(Seq(req_pipeline_reg_handled, VecInit(req_mshr_handled_vec).asUInt.orR)) <= 1.U, "miss req will either go to mshr or pipeline reg") 925 assert(PopCount(req_mshr_handled_vec) <= 1.U, "Only one mshr can handle a req") 926 io.resp.id := Mux(!req_pipeline_reg_handled, OHToUInt(req_mshr_handled_vec), miss_req_pipe_reg.mshr_id) 927 io.resp.handled := Cat(req_mshr_handled_vec).orR || req_pipeline_reg_handled 928 io.resp.merged := merge 929 930 /* MissQueue enq logic is now splitted into 2 cycles 931 * 932 */ 933 when(io.req.valid){ 934 miss_req_pipe_reg.req := io.req.bits 935 } 936 // miss_req_pipe_reg.req := io.req.bits 937 miss_req_pipe_reg.alloc := alloc && io.req.valid && !io.req.bits.cancel && !io.wbq_block_miss_req 938 miss_req_pipe_reg.merge := merge && io.req.valid && !io.req.bits.cancel && !io.wbq_block_miss_req 939 miss_req_pipe_reg.cancel := io.wbq_block_miss_req 940 miss_req_pipe_reg.mshr_id := io.resp.id 941 942 assert(PopCount(Seq(alloc && io.req.valid, merge && io.req.valid)) <= 1.U, "allocate and merge a mshr in same cycle!") 943 944 val source_except_load_cnt = RegInit(0.U(10.W)) 945 when(VecInit(req_mshr_handled_vec).asUInt.orR || req_pipeline_reg_handled) { 946 when(io.req.bits.isFromLoad) { 947 source_except_load_cnt := 0.U 948 }.otherwise { 949 when(io.req.bits.isFromStore) { 950 source_except_load_cnt := source_except_load_cnt + 1.U 951 } 952 } 953 } 954 val Threshold = 8 955 val memSetPattenDetected = GatedValidRegNext((source_except_load_cnt >= Threshold.U) && io.lqEmpty) 956 957 io.memSetPattenDetected := memSetPattenDetected 958 959 val forwardInfo_vec = VecInit(entries.map(_.io.forwardInfo)) 960 (0 until LoadPipelineWidth).map(i => { 961 val id = io.forward(i).mshrid 962 val req_valid = io.forward(i).valid 963 val paddr = io.forward(i).paddr 964 965 val (forward_mshr, forwardData) = forwardInfo_vec(id).forward(req_valid, paddr) 966 io.forward(i).forward_result_valid := forwardInfo_vec(id).check(req_valid, paddr) 967 io.forward(i).forward_mshr := forward_mshr 968 io.forward(i).forwardData := forwardData 969 }) 970 971 assert(RegNext(PopCount(secondary_ready_vec) <= 1.U || !io.req.valid)) 972// assert(RegNext(PopCount(secondary_reject_vec) <= 1.U)) 973 // It is possible that one mshr wants to merge a req, while another mshr wants to reject it. 974 // That is, a coming req has the same paddr as that of mshr_0 (merge), 975 // while it has the same set and the same way as mshr_1 (reject). 976 // In this situation, the coming req should be merged by mshr_0 977// assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U)) 978 979 def select_valid_one[T <: Bundle]( 980 in: Seq[DecoupledIO[T]], 981 out: DecoupledIO[T], 982 name: Option[String] = None): Unit = { 983 984 if (name.nonEmpty) { out.suggestName(s"${name.get}_select") } 985 out.valid := Cat(in.map(_.valid)).orR 986 out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits)) 987 in.map(_.ready := out.ready) 988 assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U)) 989 } 990 991 io.mem_grant.ready := false.B 992 993 val nMaxPrefetchEntry = Constantin.createRecord(s"nMaxPrefetchEntry${p(XSCoreParamsKey).HartId}", initValue = 14) 994 entries.zipWithIndex.foreach { 995 case (e, i) => 996 val former_primary_ready = if(i == 0) 997 false.B 998 else 999 Cat((0 until i).map(j => entries(j).io.primary_ready)).orR 1000 1001 e.io.hartId := io.hartId 1002 e.io.id := i.U 1003 e.io.l2_pf_store_only := io.l2_pf_store_only 1004 e.io.req.valid := io.req.valid 1005 e.io.wbq_block_miss_req := io.wbq_block_miss_req 1006 e.io.primary_valid := io.req.valid && 1007 !merge && 1008 !reject && 1009 !former_primary_ready && 1010 e.io.primary_ready 1011 e.io.req.bits := io.req.bits.toMissReqWoStoreData() 1012 1013 e.io.mem_grant.valid := false.B 1014 e.io.mem_grant.bits := DontCare 1015 when (io.mem_grant.bits.source === i.U) { 1016 e.io.mem_grant <> io.mem_grant 1017 } 1018 1019 when(miss_req_pipe_reg.reg_valid() && miss_req_pipe_reg.mshr_id === i.U) { 1020 e.io.miss_req_pipe_reg := miss_req_pipe_reg 1021 }.otherwise { 1022 e.io.miss_req_pipe_reg := DontCare 1023 e.io.miss_req_pipe_reg.merge := false.B 1024 e.io.miss_req_pipe_reg.alloc := false.B 1025 } 1026 1027 e.io.acquire_fired_by_pipe_reg := acquire_from_pipereg.fire 1028 1029 e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U 1030 e.io.main_pipe_replay := io.mainpipe_info.s2_valid && io.mainpipe_info.s2_replay_to_mq && io.mainpipe_info.s2_miss_id === i.U 1031 e.io.main_pipe_refill_resp := io.mainpipe_info.s3_valid && io.mainpipe_info.s3_refill_resp && io.mainpipe_info.s3_miss_id === i.U 1032 1033 e.io.memSetPattenDetected := memSetPattenDetected 1034 e.io.nMaxPrefetchEntry := nMaxPrefetchEntry 1035 1036 e.io.main_pipe_req.ready := io.main_pipe_req.ready 1037 1038 for (j <- 0 until reqNum) { 1039 e.io.queryME(j).req.valid := io.queryMQ(j).req.valid 1040 e.io.queryME(j).req.bits := io.queryMQ(j).req.bits.toMissReqWoStoreData() 1041 } 1042 1043 when(io.l2_hint.bits.sourceId === i.U) { 1044 e.io.l2_hint <> io.l2_hint 1045 } .otherwise { 1046 e.io.l2_hint.valid := false.B 1047 e.io.l2_hint.bits := DontCare 1048 } 1049 } 1050 1051 io.req.ready := accept 1052 io.mq_enq_cancel := io.req.bits.cancel 1053 io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR 1054 io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits)) 1055 1056 io.refill_info.valid := VecInit(entries.zipWithIndex.map{ case(e,i) => e.io.refill_info.valid && io.mainpipe_info.s2_valid && io.mainpipe_info.s2_miss_id === i.U}).asUInt.orR 1057 io.refill_info.bits := Mux1H(entries.zipWithIndex.map{ case(e,i) => (io.mainpipe_info.s2_miss_id === i.U) -> e.io.refill_info.bits }) 1058 1059 acquire_from_pipereg.valid := miss_req_pipe_reg.can_send_acquire(io.req.valid, io.req.bits) 1060 acquire_from_pipereg.bits := miss_req_pipe_reg.get_acquire(io.l2_pf_store_only) 1061 1062 XSPerfAccumulate("acquire_fire_from_pipereg", acquire_from_pipereg.fire) 1063 XSPerfAccumulate("pipereg_valid", miss_req_pipe_reg.reg_valid()) 1064 1065 val acquire_sources = Seq(acquire_from_pipereg) ++ entries.map(_.io.mem_acquire) 1066 TLArbiter.lowest(edge, io.mem_acquire, acquire_sources:_*) 1067 TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*) 1068 1069 // amo's main pipe req out 1070 fastArbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req")) 1071 1072 io.probe_block := Cat(probe_block_vec).orR 1073 1074 io.replace_block := io.replace_addr.valid && Cat(entries.map(e => e.io.req_addr.valid && e.io.req_addr.bits === io.replace_addr.bits) ++ Seq(miss_req_pipe_reg.block_match(io.replace_addr.bits))).orR 1075 1076 io.full := ~Cat(entries.map(_.io.primary_ready)).andR 1077 1078 // prefetch related 1079 io.prefetch_info.naive.late_miss_prefetch := io.req.valid && io.req.bits.isPrefetchRead && (miss_req_pipe_reg.matched(io.req.bits) || Cat(entries.map(_.io.matched)).orR) 1080 1081 io.prefetch_info.fdp.late_miss_prefetch := (miss_req_pipe_reg.prefetch_late_en(io.req.bits.toMissReqWoStoreData(), io.req.valid) || Cat(entries.map(_.io.prefetch_info.late_prefetch)).orR) 1082 io.prefetch_info.fdp.prefetch_monitor_cnt := io.main_pipe_req.fire 1083 io.prefetch_info.fdp.total_prefetch := alloc && io.req.valid && !io.req.bits.cancel && isFromL1Prefetch(io.req.bits.pf_source) 1084 1085 // L1MissTrace Chisel DB 1086 val debug_miss_trace = Wire(new L1MissTrace) 1087 debug_miss_trace.vaddr := io.req.bits.vaddr 1088 debug_miss_trace.paddr := io.req.bits.addr 1089 debug_miss_trace.source := io.req.bits.source 1090 debug_miss_trace.pc := io.req.bits.pc 1091 1092 val isWriteL1MissQMissTable = Constantin.createRecord(s"isWriteL1MissQMissTable${p(XSCoreParamsKey).HartId}") 1093 val table = ChiselDB.createTable(s"L1MissQMissTrace_hart${p(XSCoreParamsKey).HartId}", new L1MissTrace) 1094 table.log(debug_miss_trace, isWriteL1MissQMissTable.orR && io.req.valid && !io.req.bits.cancel && alloc, "MissQueue", clock, reset) 1095 1096 // Difftest 1097 if (env.EnableDifftest) { 1098 val difftest = DifftestModule(new DiffRefillEvent, dontCare = true) 1099 difftest.coreid := io.hartId 1100 difftest.index := 1.U 1101 difftest.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done 1102 difftest.addr := io.refill_to_ldq.bits.addr 1103 difftest.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.data) 1104 difftest.idtfr := DontCare 1105 } 1106 1107 // Perf count 1108 XSPerfAccumulate("miss_req", io.req.fire && !io.req.bits.cancel) 1109 XSPerfAccumulate("miss_req_allocate", io.req.fire && !io.req.bits.cancel && alloc) 1110 XSPerfAccumulate("miss_req_load_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromLoad) 1111 XSPerfAccumulate("miss_req_store_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromStore) 1112 XSPerfAccumulate("miss_req_amo_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromAMO) 1113 XSPerfAccumulate("miss_req_prefetch_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch) 1114 XSPerfAccumulate("miss_req_merge_load", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromLoad) 1115 XSPerfAccumulate("miss_req_reject_load", io.req.valid && !io.req.bits.cancel && reject && io.req.bits.isFromLoad) 1116 XSPerfAccumulate("probe_blocked_by_miss", io.probe_block) 1117 XSPerfAccumulate("prefetch_primary_fire", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch) 1118 XSPerfAccumulate("prefetch_secondary_fire", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromPrefetch) 1119 XSPerfAccumulate("memSetPattenDetected", memSetPattenDetected) 1120 val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W)) 1121 val num_valids = PopCount(~Cat(primary_ready_vec).asUInt) 1122 when (num_valids > max_inflight) { 1123 max_inflight := num_valids 1124 } 1125 // max inflight (average) = max_inflight_total / cycle cnt 1126 XSPerfAccumulate("max_inflight", max_inflight) 1127 QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U) 1128 io.full := num_valids === cfg.nMissEntries.U 1129 XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1) 1130 1131 XSPerfHistogram("L1DMLP_CPUData", PopCount(VecInit(entries.map(_.io.perf_pending_normal)).asUInt), true.B, 0, cfg.nMissEntries, 1) 1132 XSPerfHistogram("L1DMLP_Prefetch", PopCount(VecInit(entries.map(_.io.perf_pending_prefetch)).asUInt), true.B, 0, cfg.nMissEntries, 1) 1133 XSPerfHistogram("L1DMLP_Total", num_valids, true.B, 0, cfg.nMissEntries, 1) 1134 1135 XSPerfAccumulate("miss_load_refill_latency", PopCount(entries.map(_.io.latency_monitor.load_miss_refilling))) 1136 XSPerfAccumulate("miss_store_refill_latency", PopCount(entries.map(_.io.latency_monitor.store_miss_refilling))) 1137 XSPerfAccumulate("miss_amo_refill_latency", PopCount(entries.map(_.io.latency_monitor.amo_miss_refilling))) 1138 XSPerfAccumulate("miss_pf_refill_latency", PopCount(entries.map(_.io.latency_monitor.pf_miss_refilling))) 1139 1140 val rob_head_miss_in_dcache = VecInit(entries.map(_.io.rob_head_query.resp)).asUInt.orR 1141 1142 entries.foreach { 1143 case e => { 1144 e.io.rob_head_query.query_valid := io.debugTopDown.robHeadVaddr.valid 1145 e.io.rob_head_query.vaddr := io.debugTopDown.robHeadVaddr.bits 1146 } 1147 } 1148 1149 io.debugTopDown.robHeadMissInDCache := rob_head_miss_in_dcache 1150 1151 val perfValidCount = RegNext(PopCount(entries.map(entry => (!entry.io.primary_ready)))) 1152 val perfEvents = Seq( 1153 ("dcache_missq_req ", io.req.fire), 1154 ("dcache_missq_1_4_valid", (perfValidCount < (cfg.nMissEntries.U/4.U))), 1155 ("dcache_missq_2_4_valid", (perfValidCount > (cfg.nMissEntries.U/4.U)) & (perfValidCount <= (cfg.nMissEntries.U/2.U))), 1156 ("dcache_missq_3_4_valid", (perfValidCount > (cfg.nMissEntries.U/2.U)) & (perfValidCount <= (cfg.nMissEntries.U*3.U/4.U))), 1157 ("dcache_missq_4_4_valid", (perfValidCount > (cfg.nMissEntries.U*3.U/4.U))), 1158 ) 1159 generatePerfEvent() 1160}