xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala (revision 5668a921eb594c3ea72da43594b3fb54e05959a3)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import difftest._
import huancun.{AliasKey, DirtyKey, PreferCacheKey, PrefetchKey}

class MissReq(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val way_en = UInt(DCacheWays.W)

  // store
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  // which word does amo work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val replace_coh = new ClientMetadata
  val replace_tag = UInt(tagBits.W)
  val id = UInt(reqIdWidth.W)

  def isLoad = source === LOAD_SOURCE.U
  def isStore = source === STORE_SOURCE.U
  def isAMO = source === AMO_SOURCE.U
  def hit = req_coh.isValid()
}

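// One MSHR (miss status holding register). A MissEntry tracks a single cache
// miss from allocation until release: it issues the TileLink Acquire, collects
// Grant beats, sends GrantAck, asks the replace pipe to evict the victim if
// needed, and finally requests the refill pipe (or, for AMOs, the main pipe)
// to write the block into the cache.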
class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle() {
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    // allocate this entry for new req
    val primary_valid = Input(Bool())
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it can not merge the new req
    val secondary_reject = Output(Bool())
    val req    = Flipped(ValidIO(new MissReq))
    val refill_to_ldq = ValidIO(new Refill)
    // TODO: bypass refill data to load pipe

    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    // refill pipe
    val refill_pipe_req = DecoupledIO(new RefillPipeReq)

    // replace pipe
    val replace_pipe_req = DecoupledIO(new ReplacePipeReq)
    val replace_pipe_resp = Input(Bool())

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val debug_early_replace = ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    })
  })

  assert(!RegNext(io.primary_valid && !io.primary_ready))

  val req = Reg(new MissReq)
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)

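  // Schedule / wait flags. s_* means "this task has been sent", w_* means
  // "the corresponding response has been received". All of them reset to
  // true (nothing pending) and are pulled low when the entry is allocated,
  // depending on the request type.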
  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_replace_req = RegInit(true.B)
  val s_refill = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_replace_resp = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)

  val release_entry = s_grantack && s_refill && w_mainpipe_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantlast

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  val full_overwrite = req.isStore && req.store_mask.andR

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  val grant_beats = RegInit(0.U(beatBits.W))

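  // Primary allocation: a new miss is accepted into this (free) entry.
  // The schedule/wait flags selected here determine what the entry will do:
  // loads and stores go through the refill pipe, AMOs through the main pipe,
  // and a miss with a valid victim block additionally triggers the replace pipe.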
  when (io.req.valid && io.primary_ready && io.primary_valid) {
    req_valid := true.B
    req := io.req.bits
    req.addr := get_block_addr(io.req.bits.addr)

    s_acquire := false.B
    s_grantack := false.B

    w_grantfirst := false.B
    w_grantlast := false.B

    when (!io.req.bits.isAMO) {
      s_refill := false.B
    }

    when (!io.req.bits.hit && io.req.bits.replace_coh.isValid() && !io.req.bits.isAMO) {
      s_replace_req := false.B
      w_replace_resp := false.B
    }

    when (io.req.bits.isAMO) {
      s_mainpipe_req := false.B
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := io.req.bits.isLoad
    grant_beats := 0.U
  }.elsewhen (release_entry) {
    req_valid := false.B
  }

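  // Secondary allocation (merge): a later load/store to the same block is
  // folded into this entry instead of allocating a new one. For a merged
  // store the whole req is refreshed from the new request, while way_en,
  // replace_coh and replace_tag are re-assigned from the old register so the
  // victim chosen at primary allocation is kept.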
  when (io.req.valid && io.secondary_ready) {
    assert(io.req.bits.req_coh.state <= req.req_coh.state)
    assert(!(io.req.bits.isAMO || req.isAMO))
    // use the most up-to-date meta
    req.req_coh := io.req.bits.req_coh

    when (io.req.bits.isStore) {
      req := io.req.bits
      req.addr := get_block_addr(io.req.bits.addr)
      req.way_en := req.way_en
      req.replace_coh := req.replace_coh
      req.replace_tag := req.replace_tag
    }

    should_refill_data := should_refill_data_reg || io.req.bits.isLoad
    should_refill_data_reg := should_refill_data
  }

  when (io.mem_acquire.fire()) {
    s_acquire := true.B
  }

  val refill_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
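  // mergePutData picks, byte by byte, either the refilled data or the data of
  // a pending store: wmask is a per-byte mask, expanded to a per-bit mask with
  // FillInterleaved(8, _), so masked bytes come from new_data and the rest
  // from old_data.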
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
  for (i <- 0 until blockRows) {
    new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isStore, req.store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }
  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
  when (io.mem_grant.fire()) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
      for (i <- 0 until beatRows) {
        val idx = (refill_count << log2Floor(beatRows)) + i.U
        val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
        refill_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
      grant_beats := grant_beats + 1.U
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    refill_data_raw(refill_count) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }

  when (io.mem_finish.fire()) {
    s_grantack := true.B
  }

  when (io.replace_pipe_req.fire()) {
    s_replace_req := true.B
  }

  when (io.replace_pipe_resp) {
    w_replace_resp := true.B
  }

  when (io.refill_pipe_req.fire()) {
    s_refill := true.B
  }

  when (io.main_pipe_req.fire()) {
    s_mainpipe_req := true.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

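  // Merge / reject policy for a request that hits the same block.
  // A load or store can merge as long as the Acquire has not left this entry;
  // once data starts to refill, only loads can still merge, and only if the
  // beat they need has not arrived yet (beat_match). Everything else on the
  // same block is rejected, as is any request that picked the same set and
  // way as this entry.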
  def before_read_sent_can_merge(new_req: MissReq): Bool = {
    acquire_not_sent && req.isLoad && (new_req.isLoad || new_req.isStore)
  }

  def before_data_refill_can_merge(new_req: MissReq): Bool = {
    data_not_refilled && (req.isLoad || req.isStore) && new_req.isLoad
  }

  def should_merge(new_req: MissReq): Bool = {
    val block_match = req.addr === get_block_addr(new_req.addr)
    val beat_match = new_req.addr(blockOffBits - 1, beatOffBits) >= grant_beats
    block_match &&
    (before_read_sent_can_merge(new_req) ||
      beat_match && before_data_refill_can_merge(new_req))
  }

  def should_reject(new_req: MissReq): Bool = {
    val block_match = req.addr === get_block_addr(new_req.addr)
    val beat_match = new_req.addr(blockOffBits - 1, beatOffBits) >= grant_beats
    val set_match = set === addr_to_dcache_set(new_req.vaddr)

    req_valid &&
      Mux(
        block_match,
        !before_read_sent_can_merge(new_req) &&
          !(beat_match && before_data_refill_can_merge(new_req)),
        set_match && new_req.way_en === req.way_en
      )
  }

  io.primary_ready := !req_valid
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U))

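  // Forward refill data to the load queue: the block is sliced into
  // l1BusDataWidth words and each Grant beat is reported one cycle after it
  // arrives, so loads waiting on this miss can pick the data up early.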
  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  io.refill_to_ldq.valid := RegNext(!w_grantlast && io.mem_grant.fire()) && should_refill_data
  io.refill_to_ldq.bits.addr := RegNext(req.addr + (refill_count << refillOffBits))
  io.refill_to_ldq.bits.data := refill_data_splited(RegNext(refill_count))
  io.refill_to_ldq.bits.refill_done := RegNext(refill_done && io.mem_grant.fire())
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt

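  // Send the Acquire on channel A. A store that overwrites the whole block
  // (full_overwrite) only needs permissions, so AcquirePerm is used and no
  // data is transferred; otherwise AcquireBlock fetches the block.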
  io.mem_acquire.valid := !s_acquire
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // resolve cache alias by L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach( _ := req.vaddr(13, 12))
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := true.B)
  // prefer not to cache data in L2 by default
  io.mem_acquire.bits.user.lift(PreferCacheKey).foreach(_ := false.B)
  require(nSets <= 256)

  io.mem_grant.ready := !w_grantlast && s_acquire

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire())
  assert(RegNext(!io.mem_grant.fire() || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  io.replace_pipe_req.valid := !s_replace_req
  val replace = io.replace_pipe_req.bits
  replace.miss_id := io.id
  replace.way_en := req.way_en
  replace.vaddr := req.vaddr
  replace.tag := req.replace_tag

  io.refill_pipe_req.valid := !s_refill && w_replace_resp && w_grantlast
  val refill = io.refill_pipe_req.bits
  refill.source := req.source
  refill.addr := req.addr
  refill.way_en := req.way_en
  refill.wmask := Mux(
    hasData || req.isLoad,
    ~0.U(DCacheBanks.W),
    VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, req.store_mask).orR)).asUInt
  )
  refill.data := refill_data.asTypeOf((new RefillPipeReq).data)
  refill.miss_id := io.id
  refill.id := req.id
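  // missCohGen derives the coherence state the refilled block will end up in,
  // from the access type (read / write-intent / write), the permission granted
  // by L2 (toB / toT) and whether L2 reports the block as dirty.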
  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B)  -> Branch,
      Cat(rd, toB, true.B)   -> Branch,
      Cat(rd, toT, false.B)  -> Trunk,
      Cat(rd, toT, true.B)   -> Dirty,
      Cat(wi, toT, false.B)  -> Trunk,
      Cat(wi, toT, true.B)   -> Dirty,
      Cat(wr, toT, false.B)  -> Dirty,
      Cat(wr, toT, true.B)   -> Dirty))
  }
  refill.meta.coh := ClientMetadata(missCohGen(req.cmd, grant_param, isDirty))
  refill.alias := req.vaddr(13, 12) // TODO

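  // AMO misses are not refilled through the refill pipe. Once the data has
  // arrived, the whole request is replayed through the main pipe, which
  // performs the atomic operation and writes the block itself.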
  io.main_pipe_req.valid := !s_mainpipe_req && w_grantlast
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.miss_param := grant_param
  io.main_pipe_req.bits.miss_dirty := isDirty
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.store_data := refill_data.asUInt
  io.main_pipe_req.bits.store_mask := ~0.U(blockBytes.W)
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.id := req.id

  io.block_addr.valid := req_valid && w_grantlast && !s_refill
  io.block_addr.bits := req.addr

  io.debug_early_replace.valid := BoolStopWatch(io.replace_pipe_resp, io.refill_pipe_req.fire())
  io.debug_early_replace.bits.idx := addr_to_dcache_set(req.vaddr)
  io.debug_early_replace.bits.tag := req.replace_tag

  XSPerfAccumulate("miss_req_primary", io.req.valid && io.primary_ready && io.primary_valid)
  XSPerfAccumulate("miss_req_merged", io.req.valid && io.secondary_ready)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(io.req.valid && io.primary_ready && io.primary_valid, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire(), io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("penalty_from_grant_to_refill", !s_refill && w_grantlast)
  XSPerfAccumulate("soft_prefetch_number", io.req.valid && io.primary_ready && io.primary_valid && io.req.bits.source === SOFT_PREFETCH.U)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(RegNext(io.req.valid && io.primary_ready && io.primary_valid), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 20, 100, 10, true, false)

  val load_miss_begin = io.req.valid && io.primary_ready && io.primary_valid && io.req.bits.isLoad
  val refill_finished = RegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 20, 100, 10, true, false)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(io.mem_acquire.fire(), io.mem_grant.fire() && refill_done)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 20, 1, true, true)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 20, 100, 10, true, false)
}

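// The miss queue holds cfg.nMissEntries MissEntry MSHRs. An incoming MissReq
// is either merged into an entry that already tracks the same block,
// rejected (io.req stays not-ready and the request must be retried), or
// allocated into the free entry with the lowest index. TileLink A/E channels
// are arbitrated across entries, and D-channel Grants are routed back to the
// entry selected by the TileLink source id.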
class MissQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle {
    val hartId = Input(UInt(8.W))
    val req = Flipped(DecoupledIO(new MissReq))
    val refill_to_ldq = ValidIO(new Refill)

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val refill_pipe_req = DecoupledIO(new RefillPipeReq)

    val replace_pipe_req = DecoupledIO(new ReplacePipeReq)
    val replace_pipe_resp = Flipped(Vec(numReplaceRespPorts, ValidIO(new ReplacePipeResp)))

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new AtomicsResp))

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    val full = Output(Bool())

    // only for performance counter
    // This is valid when an mshr has finished replacing a block (w_replace_resp),
    // but hasn't received Grant from L2 (!w_grantlast)
    val debug_early_replace = Vec(cfg.nMissEntries, ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    }))
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge)))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

  val merge = Cat(secondary_ready_vec).orR
  // val merge_idx = PriorityEncoder(secondary_ready_vec)

  val reject = Cat(secondary_reject_vec).orR

  val alloc = !reject && !merge && Cat(primary_ready_vec).orR
  // val alloc_idx = PriorityEncoder(primary_ready_vec)

  val accept = alloc || merge
  // val entry_idx = Mux(alloc, alloc_idx, merge_idx)

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req, while another mshr wants to reject it.
  // That is, a coming req has the same paddr as that of mshr_0 (merge),
  // while it has the same set and the same way as mshr_1 (reject).
  // In this situation, the coming req should be merged by mshr_0
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

  def arbiter[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new Arbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    out <> arb.io.out
  }

  def rrArbiter[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new RRArbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    out <> arb.io.out
  }

  def select_valid_one[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {

    if (name.nonEmpty) { out.suggestName(s"${name.get}_select") }
    out.valid := Cat(in.map(_.valid)).orR
    out.bits := ParallelMux(in.map(_.valid) zip in.map(_.bits))
    in.map(_.ready := out.ready)
    assert(!RegNext(out.valid && PopCount(Cat(in.map(_.valid))) > 1.U))
  }

  io.mem_grant.ready := false.B

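  // Per-entry wiring. primary_valid is made one-hot by masking out entries
  // that have a lower-indexed free entry (former_primary_ready), so a new
  // miss always allocates into the free entry with the smallest index.
  // D-channel Grants are demultiplexed to the entry whose id matches the
  // TileLink source field.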
  entries.zipWithIndex.foreach {
    case (e, i) =>
      val former_primary_ready = if(i == 0)
        false.B
      else
        Cat((0 until i).map(j => entries(j).io.primary_ready)).orR

      e.io.id := i.U
      e.io.req.valid := io.req.valid
      e.io.primary_valid := io.req.valid &&
        !merge &&
        !reject &&
        !former_primary_ready &&
        e.io.primary_ready
      e.io.req.bits := io.req.bits

      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      e.io.replace_pipe_resp := Cat(io.replace_pipe_resp.map { case r => r.valid && r.bits.miss_id === i.U }).orR
      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U

      io.debug_early_replace(i) := e.io.debug_early_replace
  }

  io.req.ready := accept
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  TLArbiter.lowest(edge, io.mem_acquire, entries.map(_.io.mem_acquire):_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  arbiter(entries.map(_.io.refill_pipe_req), io.refill_pipe_req, Some("refill_pipe_req"))
  arbiter(entries.map(_.io.replace_pipe_req), io.replace_pipe_req, Some("replace_pipe_req"))
  arbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  io.full := ~Cat(entries.map(_.io.primary_ready)).andR

  if (env.EnableDifftest) {
    val difftest = Module(new DifftestRefillEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.io.addr := io.refill_to_ldq.bits.addr
    difftest.io.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.io.data)
  }

  XSPerfAccumulate("miss_req", io.req.fire())
  XSPerfAccumulate("miss_req_allocate", io.req.fire() && alloc)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire() && merge && io.req.bits.isLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && reject && io.req.bits.isLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // max inflight (average) = max_inflight_total / cycle cnt
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)
  val perfinfo = IO(new Bundle(){
    val perfEvents = Output(new PerfEventsBundle(5))
  })
  val perfEvents = Seq(
    ("dcache_missq_req          ", io.req.fire()                                                                                                                                                                       ),
    ("dcache_missq_1/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) < (cfg.nMissEntries.U/4.U))                                                                                              ),
    ("dcache_missq_2/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U/4.U)) & (PopCount(entries.map(entry => (!entry.io.primary_ready))) <= (cfg.nMissEntries.U/2.U))    ),
    ("dcache_missq_3/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U/2.U)) & (PopCount(entries.map(entry => (!entry.io.primary_ready))) <= (cfg.nMissEntries.U*3.U/4.U))),
    ("dcache_missq_4/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U*3.U/4.U))                                                                                          ),
  )

  for (((perf_out,(perf_name,perf)),i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) {
    perf_out.incr_step := RegNext(perf)
  }
}
599