xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala (revision 399766d91deff47155ba413cb78039ffb1cd66ee)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
*
*
* Acknowledgement
*
* This implementation is inspired by several key papers:
* [1] David Kroft. "[Lockup-free instruction fetch/prefetch cache organization.]
* (https://dl.acm.org/doi/10.5555/800052.801868)" 8th Annual Symposium on Computer Architecture (ISCA). 1981.
***************************************************************************************/

package xiangshan.cache

import chisel3._
import chisel3.util._
import coupledL2.VaddrKey
import coupledL2.IsKeywordKey
import difftest._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink._
import huancun.{AliasKey, DirtyKey, PrefetchKey}
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.mem.AddPipelineReg
import xiangshan.mem.prefetch._
import xiangshan.mem.trace._
import xiangshan.mem.LqPtr

class MissReqWoStoreData(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val pf_source = UInt(L1PfSourceBits.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val pc = UInt(VAddrBits.W)

  val lqIdx = new LqPtr
  // store
  val full_overwrite = Bool()

  // which word does amo work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val id = UInt(reqIdWidth.W)

  // For now, a miss queue entry req is actually valid when req.valid && !cancel
  // * req.valid is fast to generate
  // * cancel is slow to generate, and it will not be used until the last moment
  //
  // cancel may come from the following sources:
  // 1. miss req blocked by writeback queue:
  //      a writeback req of the same address is in progress
  // 2. pmp check failed
  val cancel = Bool() // cancel is slow to generate, it will cancel missreq.valid

  // Req source decode
  // Note that req source is NOT cmd type
  // For instance, a req which isFromPrefetch may have R or W cmd
  def isFromLoad = source === LOAD_SOURCE.U
  def isFromStore = source === STORE_SOURCE.U
  def isFromAMO = source === AMO_SOURCE.U
  def isFromPrefetch = source >= DCACHE_PREFETCH_SOURCE.U
  def isPrefetchWrite = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFW
  def isPrefetchRead = source === DCACHE_PREFETCH_SOURCE.U && cmd === MemoryOpConstants.M_PFR
  def hit = req_coh.isValid()
}
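
// Source-decode sketch (hypothetical request values, not part of the design):
// `source` and `cmd` are orthogonal, as the comment above notes. For example,
// a prefetch-read req would carry source = DCACHE_PREFETCH_SOURCE with
// cmd = M_PFR, so isFromPrefetch and isPrefetchRead both hold, while a
// prefetch-write req (cmd = M_PFW) shares isFromPrefetch but decodes as
// isPrefetchWrite instead.
//   val r = Wire(new MissReqWoStoreData)
//   r.source := DCACHE_PREFETCH_SOURCE.U
//   r.cmd    := MemoryOpConstants.M_PFR
//   // now r.isFromPrefetch === true.B and r.isPrefetchRead === true.B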

class MissReqStoreData(implicit p: Parameters) extends DCacheBundle {
  // store data and store mask will be written to miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)
}

class MissQueueRefillInfo(implicit p: Parameters) extends MissReqStoreData {
  // refill_info used to wake up the main pipe req
  val miss_param = UInt(TLPermissions.bdWidth.W)
  val miss_dirty = Bool()
  val error      = Bool()
}

class MissReq(implicit p: Parameters) extends MissReqWoStoreData {
  // store data and store mask will be written to miss queue entry
  // 1 cycle after req.fire() and meta write
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  def toMissReqStoreData(): MissReqStoreData = {
    val out = Wire(new MissReqStoreData)
    out.store_data := store_data
    out.store_mask := store_mask
    out
  }

  def toMissReqWoStoreData(): MissReqWoStoreData = {
    val out = Wire(new MissReqWoStoreData)
    out.source := source
    out.pf_source := pf_source
    out.cmd := cmd
    out.addr := addr
    out.vaddr := vaddr
    out.full_overwrite := full_overwrite
    out.word_idx := word_idx
    out.amo_data := amo_data
    out.amo_mask := amo_mask
    out.req_coh := req_coh
    out.id := id
    out.cancel := cancel
    out.pc := pc
    out.lqIdx := lqIdx
    out
  }
}

class MissResp(implicit p: Parameters) extends DCacheBundle {
  val id = UInt(log2Up(cfg.nMissEntries).W)
  // cache miss request is handled by miss queue, either merged or newly allocated
  val handled = Bool()
  // cache req missed, merged into one of the miss queue entries
  // i.e. !merged means this access is the first miss for this cacheline
  val merged = Bool()
}
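
// Response-semantics sketch (illustrative scenario, not part of the design):
//   first load miss to line X  -> handled = 1, merged = 0 (new MSHR allocated)
//   second load miss to line X -> handled = 1, merged = 1 (merged into that MSHR)
//   req not accepted by the queue -> handled = 0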


/**
  * miss queue enq logic: enq is now split into 2 cycles
  *  +---------------------------------------------------------------------+    pipeline reg  +-------------------------+
  *  +         s0: enq source arbiter, judge mshr alloc or merge           +     +-------+    + s1: real alloc or merge +
  *  +                      +-----+          primary_fire?       ->        +     | alloc |    +                         +
  *  + mainpipe  -> req0 -> |     |          secondary_fire?     ->        +     | merge |    +                         +
  *  + loadpipe0 -> req1 -> | arb | -> req                       ->        +  -> | req   | -> +                         +
  *  + loadpipe1 -> req2 -> |     |          mshr id             ->        +     | id    |    +                         +
  *  +                      +-----+                                        +     +-------+    +                         +
  *  +---------------------------------------------------------------------+                  +-------------------------+
  */
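
// A minimal two-stage enqueue sketch matching the diagram above (standalone
// illustration with hypothetical names, not the actual wiring below):
//   val s1_req   = RegEnable(s0_req, s0_valid)               // "pipeline reg"
//   val s1_alloc = RegNext(s0_valid && can_alloc, false.B)
//   val s1_merge = RegNext(s0_valid && can_merge, false.B)
//   val s1_id    = RegEnable(selected_mshr_id, s0_valid)
// In s1, the MSHR selected by s1_id performs the real alloc/merge, so the s0
// arbitration and match logic never sits on the MSHR update path.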

// a pipeline reg between MissReq and MissEntry
class MissReqPipeRegBundle(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheBundle
 with HasCircularQueuePtrHelper
 {
  val req           = new MissReq
  // this request is about to merge to an existing mshr
  val merge         = Bool()
  // this request is about to allocate a new mshr
  val alloc         = Bool()
  val cancel        = Bool()
  val mshr_id       = UInt(log2Up(cfg.nMissEntries).W)

  def reg_valid(): Bool = {
    (merge || alloc)
  }

  def matched(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    block_match && reg_valid() && !(req.isFromPrefetch)
  }

  def prefetch_late_en(new_req: MissReqWoStoreData, new_req_valid: Bool): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    new_req_valid && alloc && block_match && (req.isFromPrefetch) && !(new_req.isFromPrefetch)
  }

  def reject_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled; the sbuffer should avoid this situation,
    // as stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore

    val set_match = addr_to_dcache_set(req.vaddr) === addr_to_dcache_set(new_req.vaddr)

    Mux(
        alloc,
        block_match && (!alias_match || !(merge_load || merge_store)),
        false.B
      )
  }

  def merge_req(new_req: MissReq): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    val merge_load = (req.isFromLoad || req.isFromStore || req.isFromPrefetch) && new_req.isFromLoad
    // merging a store into a store is disabled; the sbuffer should avoid this situation,
    // as stores to the same address must preserve their program order to match the memory model
    val merge_store = (req.isFromLoad || req.isFromPrefetch) && new_req.isFromStore
    Mux(
        alloc,
        block_match && alias_match && (merge_load || merge_store),
        false.B
      )
  }

  def merge_isKeyword(new_req: MissReq): Bool = {
    val load_merge_load  = merge_req(new_req) && req.isFromLoad  && new_req.isFromLoad
    val store_merge_load = merge_req(new_req) && req.isFromStore && new_req.isFromLoad
    val load_merge_load_use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx)
    val use_new_req_isKeyword = (load_merge_load && load_merge_load_use_new_req_isKeyword) || store_merge_load
    Mux(
      use_new_req_isKeyword,
        new_req.vaddr(5).asBool,
        req.vaddr(5).asBool
      )
  }

  def isKeyword(): Bool = {
    val alloc_isKeyword = Mux(
                           alloc,
                           Mux(
                            req.isFromLoad,
                            req.vaddr(5).asBool,
                            false.B),
                            false.B)
    Mux(
      merge_req(req),
      merge_isKeyword(req),
      alloc_isKeyword
    )
  }
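
  // Keyword (critical-word-first) sketch: vaddr(5) selects which 32B half of
  // the 64B cacheline should be refilled first (hypothetical addresses below,
  // assuming the 64B block size used here):
  //   vaddr = 0x...00 -> vaddr(5) = 0 -> lower half (bytes  0..31) is critical
  //   vaddr = 0x...20 -> vaddr(5) = 1 -> upper half (bytes 32..63) is critical
  // When a load merges with a load, the isAfter(lqIdx) check lets the older
  // load in the load queue decide the keyword, so the critical word tracks the
  // load that will consume the data first.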
  // send out acquire as soon as possible
  // if a new store miss req is about to merge into this pipe reg, don't send acquire now
  def can_send_acquire(valid: Bool, new_req: MissReq): Bool = {
    alloc && !(valid && merge_req(new_req) && new_req.isFromStore)
  }

  def get_acquire(l2_pf_store_only: Bool): TLBundleA = {
    val acquire = Wire(new TLBundleA(edge.bundle))
    val grow_param = req.req_coh.onAccess(req.cmd)._2
    val acquireBlock = edge.AcquireBlock(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
    val acquirePerm = edge.AcquirePerm(
      fromSource = mshr_id,
      toAddress = get_block_addr(req.addr),
      lgSize = (log2Up(cfg.blockBytes)).U,
      growPermissions = grow_param
    )._2
    acquire := Mux(req.full_overwrite, acquirePerm, acquireBlock)
    // resolve cache alias by L2
    acquire.user.lift(AliasKey).foreach(_ := req.vaddr(13, 12))
    // pass vaddr to l2
    acquire.user.lift(VaddrKey).foreach(_ := req.vaddr(VAddrBits - 1, blockOffBits))

    // the miss req pipe reg passes the keyword to L2; it takes priority
    acquire.echo.lift(IsKeywordKey).foreach(_ := isKeyword())

    // trigger prefetch
    acquire.user.lift(PrefetchKey).foreach(_ := Mux(l2_pf_store_only, req.isFromStore, true.B))
    // req source
    when(req.isFromLoad) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromStore) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromAMO) {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      acquire.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }

    acquire
  }
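
  // Acquire-selection sketch (illustrative coherence cases, hedged; the grow
  // permissions come from req_coh.onAccess):
  //   full-overwrite store -> AcquirePerm  (no data needed; the whole line
  //                                         will be overwritten by the store)
  //   anything else        -> AcquireBlock (data is fetched from L2)
  // e.g. a load miss from Nothing grows NtoB, a store miss from Nothing grows
  // NtoT, and a store hitting a Branch copy grows BtoT.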

  def block_match(release_addr: UInt): Bool = {
    reg_valid() && get_block(req.addr) === get_block(release_addr)
  }
}

class MissEntry(edge: TLEdgeOut, reqNum: Int)(implicit p: Parameters) extends DCacheModule
  with HasCircularQueuePtrHelper
 {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    // MSHR update request, MSHR state and addr will be updated when req.fire
    val req = Flipped(ValidIO(new MissReqWoStoreData))
    val wbq_block_miss_req = Input(Bool())
    // pipeline reg
    val miss_req_pipe_reg = Input(new MissReqPipeRegBundle(edge))
    // allocate this entry for new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it can not merge the new req
    val secondary_reject = Output(Bool())
    // way selected for replacing, used to support plru update
    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val queryME = Vec(reqNum, Flipped(new DCacheMEQueryIOBundle))

    // send refill info to load queue, currently unused
    val refill_to_ldq = ValidIO(new Refill)

    // replace pipe
    val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())
    val main_pipe_refill_resp = Input(Bool())
    val main_pipe_replay = Input(Bool())

    // for main pipe s2
    val refill_info = ValidIO(new MissQueueRefillInfo)

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val req_addr = ValidIO(UInt(PAddrBits.W))

    val req_handled_by_this_entry = Output(Bool())

    val forwardInfo = Output(new MissEntryForwardIO)
    val l2_pf_store_only = Input(Bool())

    // whether the pipeline reg has sent out an acquire
    val acquire_fired_by_pipe_reg = Input(Bool())
    val memSetPattenDetected = Input(Bool())

    val perf_pending_prefetch = Output(Bool())
    val perf_pending_normal   = Output(Bool())

    val rob_head_query = new DCacheBundle {
      val vaddr = Input(UInt(VAddrBits.W))
      val query_valid = Input(Bool())

      val resp = Output(Bool())

      def hit(e_vaddr: UInt): Bool = {
        require(e_vaddr.getWidth == VAddrBits)
        query_valid && vaddr(VAddrBits - 1, DCacheLineOffset) === e_vaddr(VAddrBits - 1, DCacheLineOffset)
      }
    }

    val latency_monitor = new DCacheBundle {
      val load_miss_refilling  = Output(Bool())
      val store_miss_refilling = Output(Bool())
      val amo_miss_refilling   = Output(Bool())
      val pf_miss_refilling    = Output(Bool())
    }

    val prefetch_info = new DCacheBundle {
      val late_prefetch = Output(Bool())
    }
    val nMaxPrefetchEntry = Input(UInt(64.W))
    val matched = Output(Bool())
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReqWoStoreData)
  val req_primary_fire = Reg(new MissReqWoStoreData) // for perf use
  val req_store_mask = Reg(UInt(cfg.blockBytes.W))
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)
  // initial keyword
  val isKeyword = RegInit(false.B)

  val miss_req_pipe_reg_bits = io.miss_req_pipe_reg.req

  val input_req_is_prefetch = isPrefetch(miss_req_pipe_reg_bits.cmd)

  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)
  val w_refill_resp = RegInit(true.B)
  val w_l2hint = RegInit(true.B)

  val mainpipe_req_fired = RegInit(true.B)

  val release_entry = s_grantack && w_mainpipe_resp && w_refill_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantfirst

  val error = RegInit(false.B)
  val prefetch = RegInit(false.B)
  val access = RegInit(false.B)

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  val should_replace = RegInit(false.B)

  val full_overwrite = Reg(Bool())

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  // refill data merged with store data; this reg is used to store:
  // 1. store data (if needed), before L2 refills the data
  // 2. the merged result of store data and L2 refill data
  //    (i.e. the new cacheline that will be written to the data array)
  val refill_and_store_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  // raw data refilled to l1 by l2
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))

  // allocate current miss queue entry for a miss req
  val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel && !io.wbq_block_miss_req)
  val primary_accept = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel)
  // merge miss req to current miss queue entry
  val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel && !io.wbq_block_miss_req)
  val secondary_accept = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel)

  val req_handled_by_this_entry = primary_accept || secondary_accept

  // for perf use
  val secondary_fired = RegInit(false.B)

  io.perf_pending_prefetch := req_valid && prefetch && !secondary_fired
  io.perf_pending_normal   := req_valid && (!prefetch || secondary_fired)

  io.rob_head_query.resp   := io.rob_head_query.hit(req.vaddr) && req_valid

  io.req_handled_by_this_entry := req_handled_by_this_entry

  when (release_entry && req_valid) {
    req_valid := false.B
  }

  when (io.miss_req_pipe_reg.alloc && !io.miss_req_pipe_reg.cancel) {
    assert(RegNext(primary_fire), "after 1 cycle of primary_fire, entry will be allocated")
    req_valid := true.B

    req := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req_primary_fire := miss_req_pipe_reg_bits.toMissReqWoStoreData()
    req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)
    // only load misses need the keyword
    isKeyword := Mux(miss_req_pipe_reg_bits.isFromLoad, miss_req_pipe_reg_bits.vaddr(5).asBool, false.B)

    s_acquire := io.acquire_fired_by_pipe_reg
    s_grantack := false.B
    s_mainpipe_req := false.B

    w_grantfirst := false.B
    w_grantlast := false.B
    w_l2hint := false.B
    mainpipe_req_fired := false.B

    when(miss_req_pipe_reg_bits.isFromStore) {
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
    }
    full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite

    when (!miss_req_pipe_reg_bits.isFromAMO) {
      w_refill_resp := false.B
    }

    when (miss_req_pipe_reg_bits.isFromAMO) {
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := miss_req_pipe_reg_bits.isFromLoad
    error := false.B
    prefetch := input_req_is_prefetch && !io.miss_req_pipe_reg.prefetch_late_en(io.req.bits, io.req.valid)
    access := false.B
    secondary_fired := false.B
  }

  when (io.miss_req_pipe_reg.merge && !io.miss_req_pipe_reg.cancel) {
    assert(RegNext(secondary_fire) || RegNext(RegNext(primary_fire)), "after 1 cycle of secondary_fire or 2 cycles of primary_fire, entry will be merged")
    assert(miss_req_pipe_reg_bits.req_coh.state <= req.req_coh.state || (prefetch && !access))
    assert(!(miss_req_pipe_reg_bits.isFromAMO || req.isFromAMO))
    // use the most up-to-date meta
    req.req_coh := miss_req_pipe_reg_bits.req_coh

    isKeyword := Mux(
      before_req_sent_can_merge(miss_req_pipe_reg_bits),
      before_req_sent_merge_iskeyword(miss_req_pipe_reg_bits),
      isKeyword)
    assert(!miss_req_pipe_reg_bits.isFromPrefetch, "can not merge a prefetch req, late prefetch should always be ignored!")

    when (miss_req_pipe_reg_bits.isFromStore) {
      req := miss_req_pipe_reg_bits
      req.addr := get_block_addr(miss_req_pipe_reg_bits.addr)
      req_store_mask := miss_req_pipe_reg_bits.store_mask
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := miss_req_pipe_reg_bits.store_data(rowBits * (i + 1) - 1, rowBits * i)
      }
      full_overwrite := miss_req_pipe_reg_bits.isFromStore && miss_req_pipe_reg_bits.full_overwrite
      assert(is_alias_match(req.vaddr, miss_req_pipe_reg_bits.vaddr), "alias bits should be the same when merging store")
    }

    should_refill_data := should_refill_data_reg || miss_req_pipe_reg_bits.isFromLoad
    should_refill_data_reg := should_refill_data
    when (!input_req_is_prefetch) {
      access := true.B // when merging a non-prefetch req, set the access bit
    }
    secondary_fired := true.B
  }

  when (io.mem_acquire.fire) {
    s_acquire := true.B
  }

  // merge data refilled by l2 and store data, update miss queue entry, gen refill_req
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
  // merge refilled data and store data (if needed)
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
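  // Worked example for mergePutData above (hypothetical 16-bit values for
  // brevity; the real datapath is rowBits wide):
  //   old_data = 0xAABB, new_data = 0x1122, wmask = 0b01
  //   full_wmask = FillInterleaved(8, 0b01) = 0x00FF
  //   result = (~0x00FF & 0xAABB) | (0x00FF & 0x1122) = 0xAA22
  // i.e. byte 0 comes from the store data, byte 1 from the refill.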
  for (i <- 0 until blockRows) {
    // new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    new_data(i) := refill_and_store_data(i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isFromStore, req_store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }

  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
  when (io.mem_grant.fire) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
      when (isKeyword) {
        for (i <- 0 until beatRows) {
          val idx = ((refill_count << log2Floor(beatRows)) + i.U) ^ 4.U
          val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
          refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
        }
      }
      .otherwise {
        for (i <- 0 until beatRows) {
          val idx = (refill_count << log2Floor(beatRows)) + i.U
          val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
          refill_and_store_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
        }
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_and_store_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    error := io.mem_grant.bits.denied || io.mem_grant.bits.corrupt || error

    refill_data_raw(refill_count ^ isKeyword) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }
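  // Beat-reordering sketch for critical-word-first refill (assuming a 64B line
  // delivered as two beats with beatRows rows per beat, so that the `^ 4.U`
  // above flips between the two halves of blockRows = 8):
  //   isKeyword = 0: beat 0 -> rows 0..3, beat 1 -> rows 4..7 (natural order)
  //   isKeyword = 1: beat 0 -> rows 4..7, beat 1 -> rows 0..3 (halves swapped)
  // refill_data_raw(refill_count ^ isKeyword) applies the same swap at beat
  // granularity, so the raw buffer ends up in natural address order even when
  // L2 returns the critical half first.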

  when (io.mem_finish.fire) {
    s_grantack := true.B
  }

  when (io.main_pipe_req.fire) {
    s_mainpipe_req := true.B
    mainpipe_req_fired := true.B
  }

  when (io.main_pipe_replay) {
    s_mainpipe_req := false.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

  when(io.main_pipe_refill_resp) {
    w_refill_resp := true.B
  }

  when (io.l2_hint.valid) {
    w_l2hint := true.B
  }

  def before_req_sent_can_merge(new_req: MissReqWoStoreData): Bool = {
    // acquire_not_sent && (new_req.isFromLoad || new_req.isFromStore)

    // Since most acquire requests have already been issued from the pipe_reg,
    // the number of such merge situations is currently small,
    // so don't merge anything here, for better timing.
    false.B
  }

  def before_data_refill_can_merge(new_req: MissReqWoStoreData): Bool = {
    data_not_refilled && new_req.isFromLoad
  }

  // Note that late prefetch will be ignored

  def should_merge(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)
    block_match && alias_match &&
    (
      before_req_sent_can_merge(new_req) ||
      before_data_refill_can_merge(new_req)
    )
  }

  def before_req_sent_merge_iskeyword(new_req: MissReqWoStoreData): Bool = {
    val need_check_isKeyword = acquire_not_sent && req.isFromLoad && new_req.isFromLoad && should_merge(new_req)
    val use_new_req_isKeyword = isAfter(req.lqIdx, new_req.lqIdx)
    Mux(
      need_check_isKeyword,
      Mux(
        use_new_req_isKeyword,
        new_req.vaddr(5).asBool,
        req.vaddr(5).asBool
      ),
      isKeyword
      )
  }

  // a store can be merged before io.mem_acquire fires
  // a store can not be merged in the cycle that io.mem_acquire fires
  // a load can be merged before io.mem_grant fires
  //
  // TODO: merge store if possible? mem_acquire may need to be re-issued,
  // but the sbuffer entry can be freed
  def should_reject(new_req: MissReqWoStoreData): Bool = {
    val block_match = get_block(req.addr) === get_block(new_req.addr)
    val set_match = set === addr_to_dcache_set(new_req.vaddr)
    val alias_match = is_alias_match(req.vaddr, new_req.vaddr)

    req_valid && Mux(
        block_match,
        (!before_req_sent_can_merge(new_req) && !before_data_refill_can_merge(new_req)) || !alias_match,
        false.B
      )
  }
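
  // Merge/reject scenario sketch (illustrative, assuming this entry holds a
  // load miss to block X whose data has not been refilled yet):
  //   new load  to block X, alias matches -> merge  (before_data_refill_can_merge)
  //   new store to block X                -> reject (store merge is only allowed
  //                                          before the acquire is sent, and that
  //                                          path is disabled above)
  //   new req   to block X, alias differs -> reject (!alias_match)
  //   new req   to another block          -> neither merge nor reject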

  // req_valid will be updated 1 cycle after primary_fire, so next cycle, this entry cannot accept a new req
  when(GatedValidRegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) {
    // can accept prefetch req
    io.primary_ready := !req_valid && !GatedValidRegNext(primary_fire)
  }.otherwise {
    // cannot accept prefetch req except when a memset pattern is detected
    io.primary_ready := !req_valid && (!io.req.bits.isFromPrefetch || io.memSetPattenDetected) && !GatedValidRegNext(primary_fire)
  }
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // generate primary_ready & secondary_(ready | reject) for each miss request
  for (i <- 0 until reqNum) {
    when(GatedValidRegNext(io.id >= ((cfg.nMissEntries).U - io.nMaxPrefetchEntry))) {
      io.queryME(i).primary_ready := !req_valid && !GatedValidRegNext(primary_fire)
    }.otherwise {
      io.queryME(i).primary_ready := !req_valid && !GatedValidRegNext(primary_fire) &&
                                    (!io.queryME(i).req.bits.isFromPrefetch || io.memSetPattenDetected)
    }
    io.queryME(i).secondary_ready  := should_merge(io.queryME(i).req.bits)
    io.queryME(i).secondary_reject := should_reject(io.queryME(i).req.bits)
  }

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U || !io.req.valid))
  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_and_store_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  // when all granted data is ready, wake up lq's miss load
  val refill_to_ldq_en = !w_grantlast && io.mem_grant.fire
  io.refill_to_ldq.valid := GatedValidRegNext(refill_to_ldq_en)
  io.refill_to_ldq.bits.addr := RegEnable(req.addr + ((refill_count ^ isKeyword) << refillOffBits), refill_to_ldq_en)
  io.refill_to_ldq.bits.data := refill_data_splited(RegEnable(refill_count ^ isKeyword, refill_to_ldq_en))
  io.refill_to_ldq.bits.error := RegEnable(io.mem_grant.bits.corrupt || io.mem_grant.bits.denied, refill_to_ldq_en)
  io.refill_to_ldq.bits.refill_done := RegEnable(refill_done && io.mem_grant.fire, refill_to_ldq_en)
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt
  io.refill_to_ldq.bits.id := io.id

  // if the entry has a pending merge req, wait for it
  // Note: for now, only wait for store, because a store may acquire T
  io.mem_acquire.valid := !s_acquire && !(io.miss_req_pipe_reg.merge && !io.miss_req_pipe_reg.cancel && miss_req_pipe_reg_bits.isFromStore)
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // resolve cache alias by L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach( _ := req.vaddr(13, 12))
  // pass vaddr to l2
  io.mem_acquire.bits.user.lift(VaddrKey).foreach( _ := req.vaddr(VAddrBits-1, blockOffBits))
  // pass keyword to L2
  io.mem_acquire.bits.echo.lift(IsKeywordKey).foreach(_ := isKeyword)
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := Mux(io.l2_pf_store_only, req.isFromStore, true.B))
  // req source
  when(prefetch && !secondary_fired) {
    io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
  }.otherwise {
    when(req.isFromStore) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUStoreData.id.U)
    }.elsewhen(req.isFromLoad) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPULoadData.id.U)
    }.elsewhen(req.isFromAMO) {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.CPUAtomicData.id.U)
    }.otherwise {
      io.mem_acquire.bits.user.lift(ReqSourceKey).foreach(_ := MemReqSource.L1DataPrefetch.id.U)
    }
  }
  require(nSets <= 256)

  // io.mem_grant.ready := !w_grantlast && s_acquire
  io.mem_grant.ready := true.B
  assert(!(io.mem_grant.valid && !(!w_grantlast && s_acquire)), "dcache should always be ready for mem_grant now")

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire)
  assert(RegNext(!io.mem_grant.fire || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  // send mainpipe_req when receiving a hint from L2, or when receiving data without a hint
  io.main_pipe_req.valid := !s_mainpipe_req && (w_l2hint || w_grantlast)
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.id := req.id
  io.main_pipe_req.bits.pf_source := req.pf_source
  io.main_pipe_req.bits.access := access

  io.block_addr.valid := req_valid && w_grantlast
  io.block_addr.bits := req.addr

  io.req_addr.valid := req_valid
  io.req_addr.bits := req.addr

  io.refill_info.valid := req_valid && w_grantlast
  io.refill_info.bits.store_data := refill_and_store_data.asUInt
  io.refill_info.bits.store_mask := ~0.U(blockBytes.W)
  io.refill_info.bits.miss_param := grant_param
  io.refill_info.bits.miss_dirty := isDirty
  io.refill_info.bits.error      := error

  XSPerfAccumulate("miss_refill_mainpipe_req", io.main_pipe_req.fire)
  XSPerfAccumulate("miss_refill_without_hint", io.main_pipe_req.fire && !mainpipe_req_fired && !w_l2hint)
  XSPerfAccumulate("miss_refill_replay", io.main_pipe_replay)

  val w_grantfirst_forward_info = Mux(isKeyword, w_grantlast, w_grantfirst)
  val w_grantlast_forward_info = Mux(isKeyword, w_grantfirst, w_grantlast)
  io.forwardInfo.apply(req_valid, req.addr, refill_and_store_data, w_grantfirst_forward_info, w_grantlast_forward_info)

  io.matched := req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && !prefetch
  io.prefetch_info.late_prefetch := io.req.valid && !(io.req.bits.isFromPrefetch) && req_valid && (get_block(req.addr) === get_block(io.req.bits.addr)) && prefetch

  when(io.prefetch_info.late_prefetch) {
    prefetch := false.B
  }

  // refill latency monitor
  val start_counting = GatedValidRegNext(io.mem_acquire.fire) || (GatedValidRegNextN(primary_fire, 2) && s_acquire)
  io.latency_monitor.load_miss_refilling  := req_valid && req_primary_fire.isFromLoad     && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.store_miss_refilling := req_valid && req_primary_fire.isFromStore    && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.amo_miss_refilling   := req_valid && req_primary_fire.isFromAMO      && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)
  io.latency_monitor.pf_miss_refilling    := req_valid && req_primary_fire.isFromPrefetch && BoolStopWatch(start_counting, io.mem_grant.fire && !refill_done, true, true)

  XSPerfAccumulate("miss_req_primary", primary_fire)
  XSPerfAccumulate("miss_req_merged", secondary_fire)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("penalty_between_grantlast_and_release",
    BoolStopWatch(!RegNext(w_grantlast) && w_grantlast, release_entry, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire, io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("prefetch_req_primary", primary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("prefetch_req_merged", secondary_fire && io.req.bits.source === DCACHE_PREFETCH_SOURCE.U)
  XSPerfAccumulate("can_not_send_acquire_because_of_merging_store", !s_acquire && io.miss_req_pipe_reg.merge && io.miss_req_pipe_reg.cancel && miss_req_pipe_reg_bits.isFromStore)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(GatedValidRegNextN(primary_fire, 2), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = primary_fire && io.req.bits.isFromLoad
  val refill_finished = GatedValidRegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(start_counting, GatedValidRegNext(io.mem_grant.fire && refill_done))
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}

class MissQueue(edge: TLEdgeOut, reqNum: Int)(implicit p: Parameters) extends DCacheModule
  with HasPerfEvents
  {
  val io = IO(new Bundle {
    val hartId = Input(UInt(hartIdLen.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val resp = Output(new MissResp)
    val refill_to_ldq = ValidIO(new Refill)

    val queryMQ = Vec(reqNum, Flipped(new DCacheMQQueryIOBundle))

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val l2_hint = Input(Valid(new L2ToL1Hint())) // Hint from L2 Cache

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new MainPipeResp))

    val mainpipe_info = Input(new MainPipeInfoToMQ)
    val refill_info = ValidIO(new MissQueueRefillInfo)

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    // block replace when releasing an addr that is valid in the mshr
    val replace_addr = Flipped(ValidIO(UInt(PAddrBits.W)))
    val replace_block = Output(Bool())

    // req blocked by wbq
    val wbq_block_miss_req = Input(Bool())

    val full = Output(Bool())

    // forward missqueue
    val forward = Vec(LoadPipelineWidth, new LduToMissqueueForwardIO)
    val l2_pf_store_only = Input(Bool())

    val memSetPattenDetected = Output(Bool())
    val lqEmpty = Input(Bool())

    val prefetch_info = new Bundle {
      val naive = new Bundle {
        val late_miss_prefetch = Output(Bool())
      }

      val fdp = new Bundle {
        val late_miss_prefetch = Output(Bool())
        val prefetch_monitor_cnt = Output(Bool())
        val total_prefetch = Output(Bool())
      }
    }

    val mq_enq_cancel = Output(Bool())

    val debugTopDown = new DCacheTopDownIO
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge, reqNum)))

  val miss_req_pipe_reg = RegInit(0.U.asTypeOf(new MissReqPipeRegBundle(edge)))
  val acquire_from_pipereg = Wire(chiselTypeOf(io.mem_acquire))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

  val merge = ParallelORR(Cat(secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.req.bits))))
  val reject = ParallelORR(Cat(secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.req.bits))))
  val alloc = !reject && !merge && ParallelORR(Cat(primary_ready_vec))
  val accept = alloc || merge

  // generate req_ready for each miss request for better timing
  for (i <- 0 until reqNum) {
    val _primary_ready_vec = entries.map(_.io.queryME(i).primary_ready)
    val _secondary_ready_vec = entries.map(_.io.queryME(i).secondary_ready)
    val _secondary_reject_vec = entries.map(_.io.queryME(i).secondary_reject)
    val _merge = ParallelORR(Cat(_secondary_ready_vec ++ Seq(miss_req_pipe_reg.merge_req(io.queryMQ(i).req.bits))))
    val _reject = ParallelORR(Cat(_secondary_reject_vec ++ Seq(miss_req_pipe_reg.reject_req(io.queryMQ(i).req.bits))))
    val _alloc = !_reject && !_merge && ParallelORR(Cat(_primary_ready_vec))
    val _accept = _alloc || _merge

    io.queryMQ(i).ready := _accept
  }

  val req_mshr_handled_vec = entries.map(_.io.req_handled_by_this_entry)
  // merged to pipeline reg
  val req_pipeline_reg_handled = miss_req_pipe_reg.merge_req(io.req.bits) && io.req.valid
  assert(PopCount(Seq(req_pipeline_reg_handled, VecInit(req_mshr_handled_vec).asUInt.orR)) <= 1.U, "miss req will either go to mshr or pipeline reg")
  assert(PopCount(req_mshr_handled_vec) <= 1.U, "Only one mshr can handle a req")
  io.resp.id := Mux(!req_pipeline_reg_handled, OHToUInt(req_mshr_handled_vec), miss_req_pipe_reg.mshr_id)
  io.resp.handled := Cat(req_mshr_handled_vec).orR || req_pipeline_reg_handled
  io.resp.merged := merge

  /* MissQueue enq logic is now split into 2 cycles
   *
   */
  when(io.req.valid){
    miss_req_pipe_reg.req     := io.req.bits
  }
  // miss_req_pipe_reg.req     := io.req.bits
  miss_req_pipe_reg.alloc   := alloc && io.req.valid && !io.req.bits.cancel && !io.wbq_block_miss_req
  miss_req_pipe_reg.merge   := merge && io.req.valid && !io.req.bits.cancel && !io.wbq_block_miss_req
  miss_req_pipe_reg.cancel  := io.wbq_block_miss_req
  miss_req_pipe_reg.mshr_id := io.resp.id

  assert(PopCount(Seq(alloc && io.req.valid, merge && io.req.valid)) <= 1.U, "allocate and merge a mshr in the same cycle!")

  val source_except_load_cnt = RegInit(0.U(10.W))
  when(VecInit(req_mshr_handled_vec).asUInt.orR || req_pipeline_reg_handled) {
    when(io.req.bits.isFromLoad) {
      source_except_load_cnt := 0.U
    }.otherwise {
      when(io.req.bits.isFromStore) {
        source_except_load_cnt := source_except_load_cnt + 1.U
      }
    }
  }
  val Threshold = 8
  val memSetPattenDetected = GatedValidRegNext((source_except_load_cnt >= Threshold.U) && io.lqEmpty)

  io.memSetPattenDetected := memSetPattenDetected

  val forwardInfo_vec = VecInit(entries.map(_.io.forwardInfo))
  (0 until LoadPipelineWidth).map(i => {
    val id = io.forward(i).mshrid
    val req_valid = io.forward(i).valid
    val paddr = io.forward(i).paddr

    val (forward_mshr, forwardData) = forwardInfo_vec(id).forward(req_valid, paddr)
    io.forward(i).forward_result_valid := forwardInfo_vec(id).check(req_valid, paddr)
    io.forward(i).forward_mshr := forward_mshr
    io.forward(i).forwardData := forwardData
  })

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U || !io.req.valid))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req, while another mshr wants to reject it.
  // That is, a coming req has the same paddr as that of mshr_0 (merge),
  // while it has the same set and the same way as mshr_1 (reject).
  // In this situation, the coming req should be merged by mshr_0
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.map(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }
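
  // Usage sketch for select_valid_one (hypothetical sources; the helper
  // assumes at most one input is valid per cycle, which its assert checks):
  //   val out = Wire(DecoupledIO(new TLBundleE(edge.bundle)))
  //   select_valid_one(Seq(srcA, srcB), out, Some("grantack"))
  // All inputs share the same ready signal, and out.bits is a one-hot
  // ParallelMux of whichever input is valid.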

  io.mem_grant.ready := false.B

  val nMaxPrefetchEntry = Constantin.createRecord(s"nMaxPrefetchEntry${p(XSCoreParamsKey).HartId}", initValue = 14)
  entries.zipWithIndex.foreach {
    case (e, i) =>
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR

      e.io.hartId := io.hartId
      e.io.id := i.U
      e.io.l2_pf_store_only := io.l2_pf_store_only
      e.io.req.valid := io.req.valid
      e.io.wbq_block_miss_req := io.wbq_block_miss_req
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits.toMissReqWoStoreData()

      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      when(miss_req_pipe_reg.reg_valid() && miss_req_pipe_reg.mshr_id === i.U) {
        e.io.miss_req_pipe_reg := miss_req_pipe_reg
      }.otherwise {
        e.io.miss_req_pipe_reg       := DontCare
        e.io.miss_req_pipe_reg.merge := false.B
        e.io.miss_req_pipe_reg.alloc := false.B
      }

      e.io.acquire_fired_by_pipe_reg := acquire_from_pipereg.fire

      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U
      e.io.main_pipe_replay := io.mainpipe_info.s2_valid && io.mainpipe_info.s2_replay_to_mq && io.mainpipe_info.s2_miss_id === i.U
      e.io.main_pipe_refill_resp := io.mainpipe_info.s3_valid && io.mainpipe_info.s3_refill_resp && io.mainpipe_info.s3_miss_id === i.U

      e.io.memSetPattenDetected := memSetPattenDetected
      e.io.nMaxPrefetchEntry := nMaxPrefetchEntry

      e.io.main_pipe_req.ready := io.main_pipe_req.ready

      for (j <- 0 until reqNum) {
        e.io.queryME(j).req.valid := io.queryMQ(j).req.valid
        e.io.queryME(j).req.bits  := io.queryMQ(j).req.bits.toMissReqWoStoreData()
      }

      when(io.l2_hint.bits.sourceId === i.U) {
        e.io.l2_hint <> io.l2_hint
      } .otherwise {
        e.io.l2_hint.valid := false.B
        e.io.l2_hint.bits := DontCare
      }
  }

  io.req.ready := accept
  io.mq_enq_cancel := io.req.bits.cancel
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  io.refill_info.valid := VecInit(entries.zipWithIndex.map{ case(e,i) => e.io.refill_info.valid && io.mainpipe_info.s2_valid && io.mainpipe_info.s2_miss_id === i.U}).asUInt.orR
  io.refill_info.bits := Mux1H(entries.zipWithIndex.map{ case(e,i) => (io.mainpipe_info.s2_miss_id === i.U) -> e.io.refill_info.bits })

  acquire_from_pipereg.valid := miss_req_pipe_reg.can_send_acquire(io.req.valid, io.req.bits)
  acquire_from_pipereg.bits := miss_req_pipe_reg.get_acquire(io.l2_pf_store_only)

  XSPerfAccumulate("acquire_fire_from_pipereg", acquire_from_pipereg.fire)
  XSPerfAccumulate("pipereg_valid", miss_req_pipe_reg.reg_valid())

  val acquire_sources = Seq(acquire_from_pipereg) ++ entries.map(_.io.mem_acquire)
  TLArbiter.lowest(edge, io.mem_acquire, acquire_sources:_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  // amo's main pipe req out
  fastArbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  io.replace_block := io.replace_addr.valid && Cat(entries.map(e => e.io.req_addr.valid && e.io.req_addr.bits === io.replace_addr.bits) ++ Seq(miss_req_pipe_reg.block_match(io.replace_addr.bits))).orR

  io.full := ~Cat(entries.map(_.io.primary_ready)).andR

  // prefetch related
  io.prefetch_info.naive.late_miss_prefetch := io.req.valid && io.req.bits.isPrefetchRead && (miss_req_pipe_reg.matched(io.req.bits) || Cat(entries.map(_.io.matched)).orR)

  io.prefetch_info.fdp.late_miss_prefetch := (miss_req_pipe_reg.prefetch_late_en(io.req.bits.toMissReqWoStoreData(), io.req.valid) || Cat(entries.map(_.io.prefetch_info.late_prefetch)).orR)
  io.prefetch_info.fdp.prefetch_monitor_cnt := io.main_pipe_req.fire
  io.prefetch_info.fdp.total_prefetch := alloc && io.req.valid && !io.req.bits.cancel && isFromL1Prefetch(io.req.bits.pf_source)

  // L1MissTrace Chisel DB
  val debug_miss_trace = Wire(new L1MissTrace)
  debug_miss_trace.vaddr := io.req.bits.vaddr
  debug_miss_trace.paddr := io.req.bits.addr
  debug_miss_trace.source := io.req.bits.source
  debug_miss_trace.pc := io.req.bits.pc

  val isWriteL1MissQMissTable = Constantin.createRecord(s"isWriteL1MissQMissTable${p(XSCoreParamsKey).HartId}")
  val table = ChiselDB.createTable(s"L1MissQMissTrace_hart${p(XSCoreParamsKey).HartId}", new L1MissTrace)
  table.log(debug_miss_trace, isWriteL1MissQMissTable.orR && io.req.valid && !io.req.bits.cancel && alloc, "MissQueue", clock, reset)

  // Difftest
  if (env.EnableDifftest) {
    val difftest = DifftestModule(new DiffRefillEvent, dontCare = true)
    difftest.coreid := io.hartId
    difftest.index := 1.U
    difftest.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.addr := io.refill_to_ldq.bits.addr
    difftest.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.data)
    difftest.idtfr := DontCare
  }

  // Perf count
  XSPerfAccumulate("miss_req", io.req.fire && !io.req.bits.cancel)
  XSPerfAccumulate("miss_req_allocate", io.req.fire && !io.req.bits.cancel && alloc)
  XSPerfAccumulate("miss_req_load_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromLoad)
  XSPerfAccumulate("miss_req_store_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromStore)
  XSPerfAccumulate("miss_req_amo_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromAMO)
  XSPerfAccumulate("miss_req_prefetch_allocate", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && !io.req.bits.cancel && reject && io.req.bits.isFromLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  XSPerfAccumulate("prefetch_primary_fire", io.req.fire && !io.req.bits.cancel && alloc && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("prefetch_secondary_fire", io.req.fire && !io.req.bits.cancel && merge && io.req.bits.isFromPrefetch)
  XSPerfAccumulate("memSetPattenDetected", memSetPattenDetected)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // max inflight (average) = max_inflight_total / cycle cnt
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)

  XSPerfHistogram("L1DMLP_CPUData", PopCount(VecInit(entries.map(_.io.perf_pending_normal)).asUInt), true.B, 0, cfg.nMissEntries, 1)
  XSPerfHistogram("L1DMLP_Prefetch", PopCount(VecInit(entries.map(_.io.perf_pending_prefetch)).asUInt), true.B, 0, cfg.nMissEntries, 1)
  XSPerfHistogram("L1DMLP_Total", num_valids, true.B, 0, cfg.nMissEntries, 1)

  XSPerfAccumulate("miss_load_refill_latency", PopCount(entries.map(_.io.latency_monitor.load_miss_refilling)))
  XSPerfAccumulate("miss_store_refill_latency", PopCount(entries.map(_.io.latency_monitor.store_miss_refilling)))
  XSPerfAccumulate("miss_amo_refill_latency", PopCount(entries.map(_.io.latency_monitor.amo_miss_refilling)))
  XSPerfAccumulate("miss_pf_refill_latency", PopCount(entries.map(_.io.latency_monitor.pf_miss_refilling)))

  val rob_head_miss_in_dcache = VecInit(entries.map(_.io.rob_head_query.resp)).asUInt.orR

  entries.foreach {
    case e => {
      e.io.rob_head_query.query_valid := io.debugTopDown.robHeadVaddr.valid
      e.io.rob_head_query.vaddr := io.debugTopDown.robHeadVaddr.bits
    }
  }

  io.debugTopDown.robHeadMissInDCache := rob_head_miss_in_dcache

  val perfValidCount = RegNext(PopCount(entries.map(entry => (!entry.io.primary_ready))))
  val perfEvents = Seq(
    ("dcache_missq_req      ", io.req.fire),
    ("dcache_missq_1_4_valid", (perfValidCount < (cfg.nMissEntries.U/4.U))),
    ("dcache_missq_2_4_valid", (perfValidCount > (cfg.nMissEntries.U/4.U)) & (perfValidCount <= (cfg.nMissEntries.U/2.U))),
    ("dcache_missq_3_4_valid", (perfValidCount > (cfg.nMissEntries.U/2.U)) & (perfValidCount <= (cfg.nMissEntries.U*3.U/4.U))),
    ("dcache_missq_4_4_valid", (perfValidCount > (cfg.nMissEntries.U*3.U/4.U))),
  )
  generatePerfEvent()
}