xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala (revision 1b5e3cda2e8bbc4254b900b0321cbc4d396ef041)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import difftest._
import huancun.{AliasKey, DirtyKey, PreferCacheKey, PrefetchKey}

class MissReq(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val way_en = UInt(DCacheWays.W)

  // store
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  // which word does amo work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val replace_coh = new ClientMetadata
  val replace_tag = UInt(tagBits.W)
  val id = UInt(reqIdWidth.W)

  // For now, miss queue entry req is actually valid when req.valid && !cancel
  // * req.valid is fast to generate
  // * cancel is slow to generate, it will not be used until the last moment
  //
  // cancel may come from the following sources:
  // 1. miss req blocked by writeback queue:
  //      a writeback req of the same address is in progress
  // 2. pmp check failed
  val cancel = Bool() // cancel is slow to generate, it will cancel missreq.valid

  def isLoad = source === LOAD_SOURCE.U
  def isStore = source === STORE_SOURCE.U
  def isAMO = source === AMO_SOURCE.U
  def hit = req_coh.isValid()
}

class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle() {
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    val req    = Flipped(ValidIO(new MissReq))
    // allocate this entry for new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it can not merge the new req
    val secondary_reject = Output(Bool())

    val refill_to_ldq = ValidIO(new Refill)

    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    // refill pipe
    val refill_pipe_req = DecoupledIO(new RefillPipeReq)

    // replace pipe
    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Input(Bool())

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val debug_early_replace = ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    })
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReq)
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)

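  // Schedule / wait bits: s_* tracks whether a task still has to be issued and w_* tracks
  // whether a response is still outstanding. A bit being false means the action is pending;
  // true means it is done or not needed. All bits reset to true, so a free entry has no work.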
  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_replace_req = RegInit(true.B)
  val s_refill = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_replace_resp = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)

  val release_entry = s_grantack && s_refill && w_mainpipe_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantlast

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  val full_overwrite = req.isStore && req.store_mask.andR

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  val grant_beats = RegInit(0.U(beatBits.W))

  when (release_entry && req_valid) {
    req_valid := false.B
  }

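  // Allocation (primary) path: a free entry is claimed by a new request. cancel is checked
  // here as well because it arrives late (see the comment on MissReq.cancel above).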
  val primary_fire = WireInit(io.req.valid && io.primary_ready && io.primary_valid && !io.req.bits.cancel)
  when (primary_fire) {
    req_valid := true.B
    req := io.req.bits
    req.addr := get_block_addr(io.req.bits.addr)

    s_acquire := false.B
    s_grantack := false.B

    w_grantfirst := false.B
    w_grantlast := false.B

    when (!io.req.bits.isAMO) {
      s_refill := false.B
    }

    when (!io.req.bits.hit && io.req.bits.replace_coh.isValid() && !io.req.bits.isAMO) {
      s_replace_req := false.B
      w_replace_resp := false.B
    }

    when (io.req.bits.isAMO) {
      s_mainpipe_req := false.B
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := io.req.bits.isLoad
    grant_beats := 0.U
  }

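  // Merge (secondary) path: the entry keeps its original schedule, but refreshes the request
  // metadata. For a merged store the whole request is overwritten, while the way and replace
  // info chosen at allocation time are kept (hence the self-assignments below).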
  val secondary_fire = WireInit(io.req.valid && io.secondary_ready && !io.req.bits.cancel)
  when (secondary_fire) {
    assert(io.req.bits.req_coh.state <= req.req_coh.state)
    assert(!(io.req.bits.isAMO || req.isAMO))
    // use the most up-to-date meta
    req.req_coh := io.req.bits.req_coh

    when (io.req.bits.isStore) {
      req := io.req.bits
      req.addr := get_block_addr(io.req.bits.addr)
      req.way_en := req.way_en
      req.replace_coh := req.replace_coh
      req.replace_tag := req.replace_tag
    }

    should_refill_data := should_refill_data_reg || io.req.bits.isLoad
    should_refill_data_reg := should_refill_data
  }

  when (io.mem_acquire.fire()) {
    s_acquire := true.B
  }

  val refill_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
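  // Byte-granular merge: FillInterleaved(8, wmask) expands each mask bit into a byte-wide mask,
  // so for example wmask = "b01" takes byte 0 from new_data and keeps byte 1 from old_data.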
  for (i <- 0 until blockRows) {
    new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isStore, req.store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }
  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
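  // Two kinds of responses on channel D: GrantData carries refill beats that are merged with
  // pending store data row by row; a data-less Grant (reply to AcquirePerm) is only legal when
  // the store fully overwrites the block, so the line is then built from store data alone.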
  when (io.mem_grant.fire()) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
      for (i <- 0 until beatRows) {
        val idx = (refill_count << log2Floor(beatRows)) + i.U
        val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
        refill_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
      grant_beats := grant_beats + 1.U
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    refill_data_raw(refill_count) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }

  when (io.mem_finish.fire()) {
    s_grantack := true.B
  }

  when (io.replace_pipe_req.fire()) {
    s_replace_req := true.B
  }

  when (io.replace_pipe_resp) {
    w_replace_resp := true.B
  }

  when (io.refill_pipe_req.fire()) {
    s_refill := true.B
  }

  when (io.main_pipe_req.fire()) {
    s_mainpipe_req := true.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

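  // Merge / reject policy: a request to the same block can be merged either while the Acquire
  // has not left the entry yet (loads and stores can join a load entry), or while refill beats
  // are still arriving and the beat the new load needs has not been received yet. Otherwise a
  // same-block request is rejected, as is any request that targets the same set and way (it
  // would race with this entry's replace / refill).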
  def before_read_sent_can_merge(new_req: MissReq): Bool = {
    acquire_not_sent && req.isLoad && (new_req.isLoad || new_req.isStore)
  }

  def before_data_refill_can_merge(new_req: MissReq): Bool = {
    data_not_refilled && (req.isLoad || req.isStore) && new_req.isLoad
  }

  def should_merge(new_req: MissReq): Bool = {
    val block_match = req.addr === get_block_addr(new_req.addr)
    val beat_match = new_req.addr(blockOffBits - 1, beatOffBits) >= grant_beats
    block_match &&
    (before_read_sent_can_merge(new_req) ||
      beat_match && before_data_refill_can_merge(new_req))
  }

  def should_reject(new_req: MissReq): Bool = {
    val block_match = req.addr === get_block_addr(new_req.addr)
    val beat_match = new_req.addr(blockOffBits - 1, beatOffBits) >= grant_beats
    val set_match = set === addr_to_dcache_set(new_req.vaddr)

    req_valid &&
      Mux(
        block_match,
        !before_read_sent_can_merge(new_req) &&
          !(beat_match && before_data_refill_can_merge(new_req)),
        set_match && new_req.way_en === req.way_en
      )
  }

  io.primary_ready := !req_valid
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U))

  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  io.refill_to_ldq.valid := RegNext(!w_grantlast && io.mem_grant.fire()) && should_refill_data
  io.refill_to_ldq.bits.addr := RegNext(req.addr + (refill_count << refillOffBits))
  io.refill_to_ldq.bits.data := refill_data_splited(RegNext(refill_count))
  io.refill_to_ldq.bits.refill_done := RegNext(refill_done && io.mem_grant.fire())
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt

  io.mem_acquire.valid := !s_acquire
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // resolve cache alias by L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach( _ := req.vaddr(13, 12))
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := true.B)
  // prefer not to cache data in L2 by default
  io.mem_acquire.bits.user.lift(PreferCacheKey).foreach(_ := false.B)
  require(nSets <= 256)
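  // Assuming 64-byte blocks, nSets <= 256 keeps the set index within vaddr(13, 6), so the two
  // alias bits vaddr(13, 12) passed to L2 via AliasKey above are enough to disambiguate
  // virtual-index aliases; this require guards that assumption.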

  io.mem_grant.ready := !w_grantlast && s_acquire

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire())
  assert(RegNext(!io.mem_grant.fire() || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  io.replace_pipe_req.valid := !s_replace_req
  val replace = io.replace_pipe_req.bits
  replace := DontCare
  replace.miss := false.B
  replace.miss_id := io.id
  replace.miss_dirty := false.B
  replace.probe := false.B
  replace.probe_need_data := false.B
  replace.source := LOAD_SOURCE.U
  replace.vaddr := req.vaddr // only untag bits are needed
  replace.addr := Cat(req.replace_tag, 0.U(pgUntagBits.W)) // only tag bits are needed
  replace.store_mask := 0.U
  replace.replace := true.B
  replace.replace_way_en := req.way_en

  io.refill_pipe_req.valid := !s_refill && w_replace_resp && w_grantlast
  val refill = io.refill_pipe_req.bits
  refill.source := req.source
  refill.addr := req.addr
  refill.way_en := req.way_en
  refill.wmask := Mux(
    hasData || req.isLoad,
    ~0.U(DCacheBanks.W),
    VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, req.store_mask).orR)).asUInt
  )
  refill.data := refill_data.asTypeOf((new RefillPipeReq).data)
  refill.miss_id := io.id
  refill.id := req.id
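  // New coherence state after the miss: combine the access type (read / write-intent / write),
  // the permission granted by L2 (toB / toT) and the Dirty hint echoed by L2 into the
  // ClientMetadata that the refill pipe will write back.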
  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B)  -> Branch,
      Cat(rd, toB, true.B)   -> Branch,
      Cat(rd, toT, false.B)  -> Trunk,
      Cat(rd, toT, true.B)   -> Dirty,
      Cat(wi, toT, false.B)  -> Trunk,
      Cat(wi, toT, true.B)   -> Dirty,
      Cat(wr, toT, false.B)  -> Dirty,
      Cat(wr, toT, true.B)   -> Dirty))
  }
  refill.meta.coh := ClientMetadata(missCohGen(req.cmd, grant_param, isDirty))
  refill.alias := req.vaddr(13, 12) // TODO

  io.main_pipe_req.valid := !s_mainpipe_req && w_grantlast
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.miss_param := grant_param
  io.main_pipe_req.bits.miss_dirty := isDirty
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.store_data := refill_data.asUInt
  io.main_pipe_req.bits.store_mask := ~0.U(blockBytes.W)
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.id := req.id

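  // Publish the block address while refilled data is still waiting to enter the refill pipe,
  // so that probes to this block are held off (see probe_block in MissQueue below).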
  io.block_addr.valid := req_valid && w_grantlast && !s_refill
  io.block_addr.bits := req.addr

  io.debug_early_replace.valid := BoolStopWatch(io.replace_pipe_resp, io.refill_pipe_req.fire())
  io.debug_early_replace.bits.idx := addr_to_dcache_set(req.vaddr)
  io.debug_early_replace.bits.tag := req.replace_tag

  XSPerfAccumulate("miss_req_primary", primary_fire)
  XSPerfAccumulate("miss_req_merged", secondary_fire)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(primary_fire, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire(), io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("penalty_from_grant_to_refill", !s_refill && w_grantlast)
  XSPerfAccumulate("soft_prefetch_number", primary_fire && io.req.bits.source === SOFT_PREFETCH.U)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(RegNext(primary_fire), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = primary_fire && io.req.bits.isLoad
  val refill_finished = RegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(io.mem_acquire.fire(), io.mem_grant.fire() && refill_done)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}

class MissQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle {
    val hartId = Input(UInt(8.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val refill_to_ldq = ValidIO(new Refill)

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val refill_pipe_req = DecoupledIO(new RefillPipeReq)

    val replace_pipe_req = DecoupledIO(new MainPipeReq)
    val replace_pipe_resp = Flipped(ValidIO(UInt(log2Up(cfg.nMissEntries).W)))

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new AtomicsResp))

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    val full = Output(Bool())

    // only for performance counter
    // This is valid when an mshr has finished replacing a block (w_replace_resp),
    // but hasn't received Grant from L2 (!w_grantlast)
    val debug_early_replace = Vec(cfg.nMissEntries, ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    }))
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge)))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

  val merge = Cat(secondary_ready_vec).orR
  val reject = Cat(secondary_reject_vec).orR
  val alloc = !reject && !merge && Cat(primary_ready_vec).orR
  val accept = alloc || merge

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req while another mshr wants to reject it.
  // That is, an incoming req may have the same paddr as that of mshr_0 (merge),
  // while it has the same set and the same way as mshr_1 (reject).
  // In this situation, the incoming req should be merged by mshr_0.
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.map(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }
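  // Note: select_valid_one assumes at most one input is valid in a given cycle (checked by the
  // assert) and broadcasts the same ready signal to every input.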

  io.mem_grant.ready := false.B

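  // Per-entry wiring. Allocation is prioritized towards the lowest-index free entry via
  // former_primary_ready, so primary_valid is asserted for at most one entry per cycle.
  // Channel D grants are demultiplexed by source id: only the entry whose MSHR id matches
  // io.mem_grant.bits.source overrides the default mem_grant.ready := false.B above.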
  entries.zipWithIndex.foreach {
    case (e, i) =>
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR

      e.io.id := i.U
      e.io.req.valid := io.req.valid
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits

      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      e.io.replace_pipe_resp := io.replace_pipe_resp.valid && io.replace_pipe_resp.bits === i.U
      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U

      io.debug_early_replace(i) := e.io.debug_early_replace
  }

  io.req.ready := accept
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  TLArbiter.lowest(edge, io.mem_acquire, entries.map(_.io.mem_acquire):_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  arbiter(entries.map(_.io.refill_pipe_req), io.refill_pipe_req, Some("refill_pipe_req"))
  arbiter(entries.map(_.io.replace_pipe_req), io.replace_pipe_req, Some("replace_pipe_req"))
  arbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  io.full := ~Cat(entries.map(_.io.primary_ready)).andR
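  // NOTE: overridden below by io.full := num_valids === cfg.nMissEntries.U (Chisel last-connect
  // semantics), which reports full only when every miss entry is occupied.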

  if (env.EnableDifftest) {
    val difftest = Module(new DifftestRefillEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.io.addr := io.refill_to_ldq.bits.addr
    difftest.io.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.io.data)
  }

  XSPerfAccumulate("miss_req", io.req.fire())
  XSPerfAccumulate("miss_req_allocate", io.req.fire() && alloc)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire() && merge && io.req.bits.isLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && reject && io.req.bits.isLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // max inflight (average) = max_inflight_total / cycle cnt
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)
  val perfinfo = IO(new Bundle(){
    val perfEvents = Output(new PerfEventsBundle(5))
  })
  val perfEvents = Seq(
    ("dcache_missq_req          ", io.req.fire()),
    ("dcache_missq_1/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) < (cfg.nMissEntries.U/4.U))),
    ("dcache_missq_2/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U/4.U)) & (PopCount(entries.map(entry => (!entry.io.primary_ready))) <= (cfg.nMissEntries.U/2.U))),
    ("dcache_missq_3/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U/2.U)) & (PopCount(entries.map(entry => (!entry.io.primary_ready))) <= (cfg.nMissEntries.U*3.U/4.U))),
    ("dcache_missq_4/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U*3.U/4.U))),
  )

  for (((perf_out,(perf_name,perf)),i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) {
    perf_out.incr_step := RegNext(perf)
  }
}