xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/mainpipe/MissQueue.scala (revision a273862e37f1d43bee748f2a6353320a2f52f6f4)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.tilelink.ClientStates._
import freechips.rocketchip.tilelink.MemoryOpCategories._
import freechips.rocketchip.tilelink.TLPermissions._
import freechips.rocketchip.tilelink.TLMessages._
import difftest._
import huancun.{AliasKey, DirtyKey, PreferCacheKey, PrefetchKey}

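// A miss request sent to the miss queue by the load, store, or atomics pipelines.
// Besides the physical/virtual address and the memory command, it carries the
// victim way and its coherence state, plus any store/AMO data that has to be
// merged into the refilled block.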
class MissReq(implicit p: Parameters) extends DCacheBundle {
  val source = UInt(sourceTypeWidth.W)
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val way_en = UInt(DCacheWays.W)

  // store
  val store_data = UInt((cfg.blockBytes * 8).W)
  val store_mask = UInt(cfg.blockBytes.W)

  // which word does amo work on?
  val word_idx = UInt(log2Up(blockWords).W)
  val amo_data = UInt(DataBits.W)
  val amo_mask = UInt((DataBits / 8).W)

  val req_coh = new ClientMetadata
  val replace_coh = new ClientMetadata
  val replace_tag = UInt(tagBits.W)
  val id = UInt(reqIdWidth.W)

  def isLoad = source === LOAD_SOURCE.U
  def isStore = source === STORE_SOURCE.U
  def isAMO = source === AMO_SOURCE.U
  def hit = req_coh.isValid()
}

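// One MSHR. It tracks a single outstanding cache miss: it acquires the block
// (and permissions) from L2 over TileLink, evicts the victim through the
// replace pipe, writes the refilled block back through the refill pipe (or
// hands an AMO miss to the main pipe), and finally acknowledges the grant.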
class MissEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle() {
    // MSHR ID
    val id = Input(UInt(log2Up(cfg.nMissEntries).W))
    // client requests
    // this entry is free and can be allocated to new reqs
    val primary_ready = Output(Bool())
    // this entry is busy, but it can merge the new req
    val secondary_ready = Output(Bool())
    // this entry is busy and it cannot merge the new req
    val secondary_reject = Output(Bool())
    val req    = Flipped(ValidIO(new MissReq))
    val refill_to_ldq = ValidIO(new Refill)
    // TODO: bypass refill data to load pipe

    // bus
    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    // refill pipe
    val refill_pipe_req = DecoupledIO(new RefillPipeReq)

    // replace pipe
    val replace_pipe_req = DecoupledIO(new ReplacePipeReq)
    val replace_pipe_resp = Input(Bool())

    // main pipe: amo miss
    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Input(Bool())

    val block_addr = ValidIO(UInt(PAddrBits.W))

    val debug_early_replace = ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    })
  })

  val req = Reg(new MissReq)
  val req_valid = RegInit(false.B)
  val set = addr_to_dcache_set(req.vaddr)

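  // Schedule flags: s_* marks a request this entry still has to send,
  // w_* marks a response it is still waiting for. A flag is cleared (set to
  // false) when the corresponding step becomes pending and set back to true
  // once the step completes; everything defaults to true (nothing to do).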
  val s_acquire = RegInit(true.B)
  val s_grantack = RegInit(true.B)
  val s_replace_req = RegInit(true.B)
  val s_refill = RegInit(true.B)
  val s_mainpipe_req = RegInit(true.B)

  val w_grantfirst = RegInit(true.B)
  val w_grantlast = RegInit(true.B)
  val w_replace_resp = RegInit(true.B)
  val w_mainpipe_resp = RegInit(true.B)

  val release_entry = s_grantack && s_refill && w_mainpipe_resp

  val acquire_not_sent = !s_acquire && !io.mem_acquire.ready
  val data_not_refilled = !w_grantlast

  val should_refill_data_reg = Reg(Bool())
  val should_refill_data = WireInit(should_refill_data_reg)

  val full_overwrite = req.isStore && req.store_mask.andR

  val (_, _, refill_done, refill_count) = edge.count(io.mem_grant)
  val grant_param = Reg(UInt(TLPermissions.bdWidth.W))

  val grant_beats = RegInit(0.U(beatBits.W))

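  // Primary allocation: capture the request and schedule the acquire and
  // grant-ack steps; loads and stores also schedule a refill, and a miss with
  // a valid victim schedules the replace-pipe eviction. AMO misses are handed
  // to the main pipe instead of the refill pipe.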
  when (io.req.valid && io.primary_ready) {
    req_valid := true.B
    req := io.req.bits
    req.addr := get_block_addr(io.req.bits.addr)

    s_acquire := false.B
    s_grantack := false.B

    w_grantfirst := false.B
    w_grantlast := false.B

    when (!io.req.bits.isAMO) {
      s_refill := false.B
    }

    when (!io.req.bits.hit && io.req.bits.replace_coh.isValid() && !io.req.bits.isAMO) {
      s_replace_req := false.B
      w_replace_resp := false.B
    }

    when (io.req.bits.isAMO) {
      s_mainpipe_req := false.B
      w_mainpipe_resp := false.B
    }

    should_refill_data_reg := io.req.bits.isLoad
    grant_beats := 0.U
  }.elsewhen (release_entry) {
    req_valid := false.B
  }

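  // Secondary allocation (merge): a later load/store to the same block reuses
  // this entry. A merged store overwrites the recorded request but keeps the
  // victim way and replace metadata chosen at primary allocation, and a merged
  // load makes the entry forward refill data to the load queue.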
  when (io.req.valid && io.secondary_ready) {
    assert(io.req.bits.req_coh.state <= req.req_coh.state)
    assert(!(io.req.bits.isAMO || req.isAMO))
    // use the most up-to-date meta
    req.req_coh := io.req.bits.req_coh

    when (io.req.bits.isStore) {
      req := io.req.bits
      req.addr := get_block_addr(io.req.bits.addr)
      req.way_en := req.way_en
      req.replace_coh := req.replace_coh
      req.replace_tag := req.replace_tag
    }

    should_refill_data := should_refill_data_reg || io.req.bits.isLoad
    should_refill_data_reg := should_refill_data
  }

  when (io.mem_acquire.fire()) {
    s_acquire := true.B
  }

  val refill_data = Reg(Vec(blockRows, UInt(rowBits.W)))
  val refill_data_raw = Reg(Vec(blockBytes/beatBytes, UInt(beatBits.W)))
  val new_data = Wire(Vec(blockRows, UInt(rowBits.W)))
  val new_mask = Wire(Vec(blockRows, UInt(rowBytes.W)))
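  // Merge pending store bytes into refilled data: for every byte selected by
  // wmask take new_data, otherwise keep old_data.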
  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
    val full_wmask = FillInterleaved(8, wmask)
    (~full_wmask & old_data | full_wmask & new_data)
  }
  for (i <- 0 until blockRows) {
    new_data(i) := req.store_data(rowBits * (i + 1) - 1, rowBits * i)
    // we only need to merge data for Store
    new_mask(i) := Mux(req.isStore, req.store_mask(rowBytes * (i + 1) - 1, rowBytes * i), 0.U)
  }
  val hasData = RegInit(true.B)
  val isDirty = RegInit(false.B)
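  // Grant handling: a GrantData response delivers the block beat by beat and is
  // merged with any pending store data; a data-less Grant (permissions only) is
  // legal only when the store fully overwrites the block.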
  when (io.mem_grant.fire()) {
    w_grantfirst := true.B
    grant_param := io.mem_grant.bits.param
    when (edge.hasData(io.mem_grant.bits)) {
      // GrantData
      for (i <- 0 until beatRows) {
        val idx = (refill_count << log2Floor(beatRows)) + i.U
        val grant_row = io.mem_grant.bits.data(rowBits * (i + 1) - 1, rowBits * i)
        refill_data(idx) := mergePutData(grant_row, new_data(idx), new_mask(idx))
      }
      w_grantlast := w_grantlast || refill_done
      hasData := true.B
      grant_beats := grant_beats + 1.U
    }.otherwise {
      // Grant
      assert(full_overwrite)
      for (i <- 0 until blockRows) {
        refill_data(i) := new_data(i)
      }
      w_grantlast := true.B
      hasData := false.B
    }

    refill_data_raw(refill_count) := io.mem_grant.bits.data
    isDirty := io.mem_grant.bits.echo.lift(DirtyKey).getOrElse(false.B)
  }

  when (io.mem_finish.fire()) {
    s_grantack := true.B
  }

  when (io.replace_pipe_req.fire()) {
    s_replace_req := true.B
  }

  when (io.replace_pipe_resp) {
    w_replace_resp := true.B
  }

  when (io.refill_pipe_req.fire()) {
    s_refill := true.B
  }

  when (io.main_pipe_req.fire()) {
    s_mainpipe_req := true.B
  }

  when (io.main_pipe_resp) {
    w_mainpipe_resp := true.B
  }

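  // Merge windows: a new load/store can merge into a load miss whose Acquire
  // has not been sent yet; after that, only loads can merge, and only for beats
  // that have not been refilled yet.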
  def before_read_sent_can_merge(new_req: MissReq): Bool = {
    acquire_not_sent && req.isLoad && (new_req.isLoad || new_req.isStore)
  }

  def before_data_refill_can_merge(new_req: MissReq): Bool = {
    data_not_refilled && (req.isLoad || req.isStore) && new_req.isLoad
  }

  def should_merge(new_req: MissReq): Bool = {
    val block_match = req.addr === get_block_addr(new_req.addr)
    val beat_match = new_req.addr(blockOffBits - 1, beatOffBits) >= grant_beats
    block_match &&
    (before_read_sent_can_merge(new_req) ||
      beat_match && before_data_refill_can_merge(new_req))
  }

  def should_reject(new_req: MissReq): Bool = {
    val block_match = req.addr === get_block_addr(new_req.addr)
    val beat_match = new_req.addr(blockOffBits - 1, beatOffBits) >= grant_beats
    val set_match = set === addr_to_dcache_set(new_req.vaddr)

    req_valid &&
      Mux(
        block_match,
        !before_read_sent_can_merge(new_req) &&
          !(beat_match && before_data_refill_can_merge(new_req)),
        set_match && new_req.way_en === req.way_en
      )
  }

  io.primary_ready := !req_valid
  io.secondary_ready := should_merge(io.req.bits)
  io.secondary_reject := should_reject(io.req.bits)

  // should not allocate, merge or reject at the same time
  assert(RegNext(PopCount(Seq(io.primary_ready, io.secondary_ready, io.secondary_reject)) <= 1.U))

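  // Forward refill data to the load queue one bus beat at a time, one cycle
  // after the corresponding grant beat arrives.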
  val refill_data_splited = WireInit(VecInit(Seq.tabulate(cfg.blockBytes * 8 / l1BusDataWidth)(i => {
    val data = refill_data.asUInt
    data((i + 1) * l1BusDataWidth - 1, i * l1BusDataWidth)
  })))
  io.refill_to_ldq.valid := RegNext(!w_grantlast && io.mem_grant.fire()) && should_refill_data
  io.refill_to_ldq.bits.addr := RegNext(req.addr + (refill_count << refillOffBits))
  io.refill_to_ldq.bits.data := refill_data_splited(RegNext(refill_count))
  io.refill_to_ldq.bits.refill_done := RegNext(refill_done && io.mem_grant.fire())
  io.refill_to_ldq.bits.hasdata := hasData
  io.refill_to_ldq.bits.data_raw := refill_data_raw.asUInt

  io.mem_acquire.valid := !s_acquire
  val grow_param = req.req_coh.onAccess(req.cmd)._2
  val acquireBlock = edge.AcquireBlock(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
  val acquirePerm = edge.AcquirePerm(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = (log2Up(cfg.blockBytes)).U,
    growPermissions = grow_param
  )._2
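  // A store that overwrites the whole block needs no data from L2,
  // so request permissions only (AcquirePerm) instead of the data (AcquireBlock).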
  io.mem_acquire.bits := Mux(full_overwrite, acquirePerm, acquireBlock)
  // resolve cache alias by L2
  io.mem_acquire.bits.user.lift(AliasKey).foreach( _ := req.vaddr(13, 12))
  // trigger prefetch
  io.mem_acquire.bits.user.lift(PrefetchKey).foreach(_ := true.B)
  // prefer not to cache data in L2 by default
  io.mem_acquire.bits.user.lift(PreferCacheKey).foreach(_ := false.B)
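  // nSets <= 256 presumably keeps the untranslated index within vaddr[13:0]
  // (64B blocks), so vaddr(13, 12) above are the only alias bits L2 needs.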
  require(nSets <= 256)

  io.mem_grant.ready := !w_grantlast && s_acquire

  val grantack = RegEnable(edge.GrantAck(io.mem_grant.bits), io.mem_grant.fire())
  assert(RegNext(!io.mem_grant.fire() || edge.isRequest(io.mem_grant.bits)))
  io.mem_finish.valid := !s_grantack && w_grantfirst
  io.mem_finish.bits := grantack

  io.replace_pipe_req.valid := !s_replace_req
  val replace = io.replace_pipe_req.bits
  replace.miss_id := io.id
  replace.way_en := req.way_en
  replace.vaddr := req.vaddr
  replace.tag := req.replace_tag

  io.refill_pipe_req.valid := !s_refill && w_replace_resp && w_grantlast
  val refill = io.refill_pipe_req.bits
  refill.source := req.source
  refill.addr := req.addr
  refill.way_en := req.way_en
  refill.wmask := Mux(
    hasData || req.isLoad,
    ~0.U(DCacheBanks.W),
    VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, req.store_mask).orR)).asUInt
  )
  refill.data := refill_data.asTypeOf((new RefillPipeReq).data)
  refill.miss_id := io.id
  refill.id := req.id
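  // New coherence state after the miss: derived from the request's memory
  // command, the permission granted by L2, and whether L2 passed the block
  // back dirty.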
  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
    val c = categorize(cmd)
    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
      //(effect param) -> (next)
      Cat(rd, toB, false.B)  -> Branch,
      Cat(rd, toB, true.B)   -> Branch,
      Cat(rd, toT, false.B)  -> Trunk,
      Cat(rd, toT, true.B)   -> Dirty,
      Cat(wi, toT, false.B)  -> Trunk,
      Cat(wi, toT, true.B)   -> Dirty,
      Cat(wr, toT, false.B)  -> Dirty,
      Cat(wr, toT, true.B)   -> Dirty))
  }
  refill.meta.coh := ClientMetadata(missCohGen(req.cmd, grant_param, isDirty))
  refill.alias := req.vaddr(13, 12) // TODO

  io.main_pipe_req.valid := !s_mainpipe_req && w_grantlast
  io.main_pipe_req.bits := DontCare
  io.main_pipe_req.bits.miss := true.B
  io.main_pipe_req.bits.miss_id := io.id
  io.main_pipe_req.bits.miss_param := grant_param
  io.main_pipe_req.bits.miss_dirty := isDirty
  io.main_pipe_req.bits.probe := false.B
  io.main_pipe_req.bits.source := req.source
  io.main_pipe_req.bits.cmd := req.cmd
  io.main_pipe_req.bits.vaddr := req.vaddr
  io.main_pipe_req.bits.addr := req.addr
  io.main_pipe_req.bits.store_data := refill_data.asUInt
  io.main_pipe_req.bits.store_mask := ~0.U(blockBytes.W)
  io.main_pipe_req.bits.word_idx := req.word_idx
  io.main_pipe_req.bits.amo_data := req.amo_data
  io.main_pipe_req.bits.amo_mask := req.amo_mask
  io.main_pipe_req.bits.id := req.id

  io.block_addr.valid := req_valid && w_grantlast && !s_refill
  io.block_addr.bits := req.addr

  io.debug_early_replace.valid := BoolStopWatch(io.replace_pipe_resp, io.refill_pipe_req.fire())
  io.debug_early_replace.bits.idx := addr_to_dcache_set(req.vaddr)
  io.debug_early_replace.bits.tag := req.replace_tag

  XSPerfAccumulate("miss_req_primary", io.req.valid && io.primary_ready)
  XSPerfAccumulate("miss_req_merged", io.req.valid && io.secondary_ready)
  XSPerfAccumulate("load_miss_penalty_to_use",
    should_refill_data &&
      BoolStopWatch(io.req.valid && io.primary_ready, io.refill_to_ldq.valid, true)
  )
  XSPerfAccumulate("main_pipe_penalty", BoolStopWatch(io.main_pipe_req.fire(), io.main_pipe_resp))
  XSPerfAccumulate("penalty_blocked_by_channel_A", io.mem_acquire.valid && !io.mem_acquire.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", s_acquire && !w_grantlast && !io.mem_grant.valid)
  XSPerfAccumulate("penalty_waiting_for_channel_E", io.mem_finish.valid && !io.mem_finish.ready)
  XSPerfAccumulate("penalty_from_grant_to_refill", !s_refill && w_grantlast)
  XSPerfAccumulate("soft_prefetch_number", io.req.valid && io.primary_ready && io.req.bits.source === SOFT_PREFETCH.U)

  val (mshr_penalty_sample, mshr_penalty) = TransactionLatencyCounter(RegNext(io.req.valid && io.primary_ready), release_entry)
  XSPerfHistogram("miss_penalty", mshr_penalty, mshr_penalty_sample, 0, 100, 10)

  val load_miss_begin = io.req.valid && io.primary_ready && io.req.bits.isLoad
  val refill_finished = RegNext(!w_grantlast && refill_done) && should_refill_data
  val (load_miss_penalty_sample, load_miss_penalty) = TransactionLatencyCounter(load_miss_begin, refill_finished) // not real refill finish time
  XSPerfHistogram("load_miss_penalty_to_use", load_miss_penalty, load_miss_penalty_sample, 0, 100, 10)

  val (a_to_d_penalty_sample, a_to_d_penalty) = TransactionLatencyCounter(io.mem_acquire.fire(), io.mem_grant.fire() && refill_done)
  XSPerfHistogram("a_to_d_penalty", a_to_d_penalty, a_to_d_penalty_sample, 0, 100, 10)
}

class MissQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle {
    val req = Flipped(DecoupledIO(new MissReq))
    val refill_to_ldq = ValidIO(new Refill)

    val mem_acquire = DecoupledIO(new TLBundleA(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))
    val mem_finish = DecoupledIO(new TLBundleE(edge.bundle))

    val refill_pipe_req = DecoupledIO(new RefillPipeReq)

    val replace_pipe_req = DecoupledIO(new ReplacePipeReq)
    val replace_pipe_resp = Flipped(Vec(numReplaceRespPorts, ValidIO(new ReplacePipeResp)))

    val main_pipe_req = DecoupledIO(new MainPipeReq)
    val main_pipe_resp = Flipped(ValidIO(new AtomicsResp))

    // block probe
    val probe_addr = Input(UInt(PAddrBits.W))
    val probe_block = Output(Bool())

    val full = Output(Bool())

    // only for performance counter
    // This is valid when an mshr has finished replacing a block (w_replace_resp),
    // but hasn't received Grant from L2 (!w_grantlast)
    val debug_early_replace = Vec(cfg.nMissEntries, ValidIO(new Bundle() {
      // info about the block that has been replaced
      val idx = UInt(idxBits.W) // vaddr
      val tag = UInt(tagBits.W) // paddr
    }))
  })

  // 128KBL1: FIXME: provide vaddr for l2

  val entries = Seq.fill(cfg.nMissEntries)(Module(new MissEntry(edge)))

  val primary_ready_vec = entries.map(_.io.primary_ready)
  val secondary_ready_vec = entries.map(_.io.secondary_ready)
  val secondary_reject_vec = entries.map(_.io.secondary_reject)
  val probe_block_vec = entries.map { case e => e.io.block_addr.valid && e.io.block_addr.bits === io.probe_addr }

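  // Dispatch a new miss request: merge it into the entry that claims it
  // (secondary_ready), reject it if any entry conflicts (secondary_reject),
  // otherwise allocate the lowest-indexed free entry.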
  val merge = Cat(secondary_ready_vec).orR
  val merge_idx = PriorityEncoder(secondary_ready_vec)

  val reject = Cat(secondary_reject_vec).orR

  val alloc = !reject && !merge && Cat(primary_ready_vec).orR
  val alloc_idx = PriorityEncoder(primary_ready_vec)

  val accept = alloc || merge
  val entry_idx = Mux(alloc, alloc_idx, merge_idx)

  assert(RegNext(PopCount(secondary_ready_vec) <= 1.U))
//  assert(RegNext(PopCount(secondary_reject_vec) <= 1.U))
  // It is possible that one mshr wants to merge a req, while another mshr wants to reject it.
  // That is, a coming req has the same paddr as that of mshr_0 (merge),
  // while it has the same set and the same way as mshr_1 (reject).
  // In this situation, the coming req should be merged by mshr_0
//  assert(RegNext(PopCount(Seq(merge, reject)) <= 1.U))

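  // Round-robin arbitration helper: connects a sequence of per-entry request
  // ports to a single downstream port through an RRArbiter.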
  def rrArbiter[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new RRArbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    out <> arb.io.out
  }

  io.mem_grant.ready := false.B

  entries.zipWithIndex.foreach {
    case (e, i) =>
      e.io.id := i.U
      e.io.req.valid := entry_idx === i.U && accept && io.req.valid
      e.io.req.bits := io.req.bits

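      // Route each Grant beat to the entry whose MSHR id matches the TileLink
      // source field; the bulk connection below also drives io.mem_grant.ready
      // (which defaults to false above) from the selected entry.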
      e.io.mem_grant.valid := false.B
      e.io.mem_grant.bits := DontCare
      when (io.mem_grant.bits.source === i.U) {
        e.io.mem_grant <> io.mem_grant
      }

      e.io.replace_pipe_resp := Cat(io.replace_pipe_resp.map { case r => r.valid && r.bits.miss_id === i.U }).orR
      e.io.main_pipe_resp := io.main_pipe_resp.valid && io.main_pipe_resp.bits.ack_miss_queue && io.main_pipe_resp.bits.miss_id === i.U

      io.debug_early_replace(i) := e.io.debug_early_replace
  }

  io.req.ready := accept
  io.refill_to_ldq.valid := Cat(entries.map(_.io.refill_to_ldq.valid)).orR
  io.refill_to_ldq.bits := ParallelMux(entries.map(_.io.refill_to_ldq.valid) zip entries.map(_.io.refill_to_ldq.bits))

  TLArbiter.lowest(edge, io.mem_acquire, entries.map(_.io.mem_acquire):_*)
  TLArbiter.lowest(edge, io.mem_finish, entries.map(_.io.mem_finish):_*)

  rrArbiter(entries.map(_.io.refill_pipe_req), io.refill_pipe_req, Some("refill_pipe_req"))
  rrArbiter(entries.map(_.io.replace_pipe_req), io.replace_pipe_req, Some("replace_pipe_req"))
  rrArbiter(entries.map(_.io.main_pipe_req), io.main_pipe_req, Some("main_pipe_req"))

  io.probe_block := Cat(probe_block_vec).orR

  io.full := ~Cat(entries.map(_.io.primary_ready)).andR

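  // In simulation, report every completed refill (with data) to difftest
  // for co-simulation checking.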
  if (!env.FPGAPlatform) {
    val difftest = Module(new DifftestRefillEvent)
    difftest.io.clock := clock
    difftest.io.coreid := hardId.U
    difftest.io.valid := io.refill_to_ldq.valid && io.refill_to_ldq.bits.hasdata && io.refill_to_ldq.bits.refill_done
    difftest.io.addr := io.refill_to_ldq.bits.addr
    difftest.io.data := io.refill_to_ldq.bits.data_raw.asTypeOf(difftest.io.data)
  }

  XSPerfAccumulate("miss_req", io.req.fire())
  XSPerfAccumulate("miss_req_allocate", io.req.fire() && alloc)
  XSPerfAccumulate("miss_req_merge_load", io.req.fire() && merge && io.req.bits.isLoad)
  XSPerfAccumulate("miss_req_reject_load", io.req.valid && reject && io.req.bits.isLoad)
  XSPerfAccumulate("probe_blocked_by_miss", io.probe_block)
  val max_inflight = RegInit(0.U((log2Up(cfg.nMissEntries) + 1).W))
  val num_valids = PopCount(~Cat(primary_ready_vec).asUInt)
  when (num_valids > max_inflight) {
    max_inflight := num_valids
  }
  // max inflight (average) = max_inflight_total / cycle cnt
  XSPerfAccumulate("max_inflight", max_inflight)
  QueuePerf(cfg.nMissEntries, num_valids, num_valids === cfg.nMissEntries.U)
  io.full := num_valids === cfg.nMissEntries.U
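  // Note: last connection wins in Chisel, so this overrides the earlier
  // io.full assignment above.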
  XSPerfHistogram("num_valids", num_valids, true.B, 0, cfg.nMissEntries, 1)
  val perfinfo = IO(new Bundle(){
    val perfEvents = Output(new PerfEventsBundle(5))
  })
  val perfEvents = Seq(
    ("dcache_missq_req          ", io.req.fire()),
    ("dcache_missq_1/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) < (cfg.nMissEntries.U/4.U))),
    ("dcache_missq_2/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U/4.U)) & (PopCount(entries.map(entry => (!entry.io.primary_ready))) <= (cfg.nMissEntries.U/2.U))),
    ("dcache_missq_3/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U/2.U)) & (PopCount(entries.map(entry => (!entry.io.primary_ready))) <= (cfg.nMissEntries.U*3.U/4.U))),
    ("dcache_missq_4/4_valid    ", (PopCount(entries.map(entry => (!entry.io.primary_ready))) > (cfg.nMissEntries.U*3.U/4.U))),
  )

  for (((perf_out,(perf_name,perf)),i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) {
    perf_out.incr_step := RegNext(perf)
  }
}