// xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/loadpipe/LoadPipe.scala (revision 1b46b9591920008655d659ac88cd0250db769664)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/
17package xiangshan.cache
18
19import chipsalliance.rocketchip.config.Parameters
20import chisel3._
21import chisel3.util._
22import freechips.rocketchip.tilelink.ClientMetadata
23import utils.{HasPerfEvents, XSDebug, XSPerfAccumulate}
24import utility.{ParallelPriorityMux, OneHot}
25import xiangshan.L1CacheErrorInfo
26import xiangshan.cache.wpu._
27
/** Load pipeline of the L1 DCache (one instance per load port).
  *
  * Pipeline overview:
  *  - s0: accept a request from the LSU, fire tag/meta array reads
  *  - s1: tag compare (optionally short-cut by the data way predictor),
  *        issue the banked data array read, pick a replacement way
  *  - s2: resolve hit / miss / nack, allocate in the miss queue on miss,
  *        send the response back to the LSU
  *  - s3: report ECC errors, return (delayed) data, update replacement
  *        state and the per-line access flag
  *
  * Only load reads and software-prefetch read/write commands are accepted
  * (asserted in s0).
  *
  * @param id index of this load pipe
  */
class LoadPipe(id: Int)(implicit p: Parameters) extends DCacheModule with HasPerfEvents {
  val io = IO(new DCacheBundle {
    // incoming requests
    val lsu = Flipped(new DCacheLoadIO)
    // data way predictor (DWPU) request / response / update channels
    val dwpu = Flipped(new DwpuBaseIO(nWays = nWays, nPorts = 1))
    val load128Req = Input(Bool())
    // req got nacked in stage 0?
    val nack      = Input(Bool())

    // meta and data array read port
    val meta_read = DecoupledIO(new MetaReadReq)
    val meta_resp = Input(Vec(nWays, new Meta))
    val extra_meta_resp = Input(Vec(nWays, new DCacheExtraMeta))

    val tag_read = DecoupledIO(new TagReadReq)
    val tag_resp = Input(Vec(nWays, UInt(encTagBits.W)))
    val vtag_update = Flipped(DecoupledIO(new TagWriteReq))

    val banked_data_read = DecoupledIO(new L1BankedDataReadReqWithMask)
    val is128Req = Output(Bool())
    val banked_data_resp = Input(Vec(VLEN/DCacheSRAMRowBits, new L1BankedDataReadResult()))
    val read_error_delayed = Input(Vec(VLEN/DCacheSRAMRowBits, Bool()))

    // access bit update
    val access_flag_write = DecoupledIO(new FlagMetaWriteReq)

    // banked data read conflict
    val bank_conflict_slow = Input(Bool())

    // send miss request to miss queue
    val miss_req    = DecoupledIO(new MissReq)
    val miss_resp   = Input(new MissResp)

    // update state vec in replacement algo
    val replace_access = ValidIO(new ReplacementAccessBundle)
    // find the way to be replaced
    val replace_way = new ReplacementWayReqIO

    // load fast wakeup should be disabled when data read is not ready
    val disable_ld_fast_wakeup = Input(Bool())

    // ecc error
    val error = Output(new L1CacheErrorInfo())

    // // debug_ls_info
    // val debug_s2_cache_miss = Bool()
  })

  // the meta array read port is expected to always be ready for this pipe
  assert(RegNext(io.meta_read.ready))

  val s1_ready = Wire(Bool())
  val s2_ready = Wire(Bool())
  // LSU requests
  // if you got nacked, you can directly pass down
  val not_nacked_ready = io.meta_read.ready && io.tag_read.ready && s1_ready
  val nacked_ready     = true.B

  // Pipeline
  // --------------------------------------------------------------------------------
  // stage 0
  // --------------------------------------------------------------------------------
  // read tag

  // ready can wait for valid
  io.lsu.req.ready := (!io.nack && not_nacked_ready) || (io.nack && nacked_ready)
  io.meta_read.valid := io.lsu.req.fire() && !io.nack
  io.tag_read.valid := io.lsu.req.fire() && !io.nack

  val s0_valid = io.lsu.req.fire()
  val s0_req = io.lsu.req.bits
  val s0_fire = s0_valid && s1_ready
  val s0_vaddr = s0_req.vaddr
  val s0_replayCarry = s0_req.replayCarry
  val s0_load128Req = io.load128Req
  // a 128-bit load reads two adjacent data banks, so its bank one-hot is the
  // 64-bit one-hot OR-ed with itself shifted by one bank position
  val s0_bank_oh_64 = UIntToOH(addr_to_dcache_bank(s0_vaddr))
  val s0_bank_oh_128 = (s0_bank_oh_64 << 1.U).asUInt | s0_bank_oh_64.asUInt
  val s0_bank_oh = Mux(s0_load128Req, s0_bank_oh_128, s0_bank_oh_64)
  assert(RegNext(!(s0_valid && (s0_req.cmd =/= MemoryOpConstants.M_XRD && s0_req.cmd =/= MemoryOpConstants.M_PFR && s0_req.cmd =/= MemoryOpConstants.M_PFW))), "LoadPipe only accepts load req / softprefetch read or write!")
  dump_pipeline_reqs("LoadPipe s0", s0_valid, s0_req)

  // wpu
  // val dwpu = Module(new DCacheWpuWrapper)
  // req in s0
  if(dwpuParam.enWPU){
    io.dwpu.req(0).bits.vaddr := s0_vaddr
    io.dwpu.req(0).bits.replayCarry := s0_replayCarry
    io.dwpu.req(0).valid := s0_valid
  }else{
    io.dwpu.req(0).valid := false.B
    io.dwpu.req(0).bits := DontCare
  }


  val meta_read = io.meta_read.bits
  val tag_read = io.tag_read.bits

  // Tag read for new requests: read all ways, select after tag compare in s1
  meta_read.idx := get_idx(io.lsu.req.bits.vaddr)
  meta_read.way_en := ~0.U(nWays.W)
  // meta_read.tag := DontCare

  tag_read.idx := get_idx(io.lsu.req.bits.vaddr)
  tag_read.way_en := ~0.U(nWays.W)

  // --------------------------------------------------------------------------------
  // stage 1
  // --------------------------------------------------------------------------------
  // tag match, read data

  val s1_valid = RegInit(false.B)
  val s1_req = RegEnable(s0_req, s0_fire)
  // in stage 1, load unit gets the physical address
  val s1_paddr_dup_lsu = io.lsu.s1_paddr_dup_lsu
  val s1_paddr_dup_dcache = io.lsu.s1_paddr_dup_dcache
  val s1_load128Req = RegEnable(s0_load128Req, s0_fire)
  // LSU may update the address from io.lsu.s1_paddr, which affects the bank read enable only.
  val s1_vaddr = Cat(s1_req.vaddr(VAddrBits - 1, blockOffBits), io.lsu.s1_paddr_dup_lsu(blockOffBits - 1, 0))
  val s1_bank_oh = RegEnable(s0_bank_oh, s0_fire)
  val s1_nack = RegNext(io.nack)
  val s1_nack_data = !io.banked_data_read.ready
  val s1_fire = s1_valid && s2_ready
  s1_ready := !s1_valid || s1_fire

  when (s0_fire) { s1_valid := true.B }
  .elsewhen (s1_fire) { s1_valid := false.B }

  dump_pipeline_reqs("LoadPipe s1", s1_valid, s1_req)

  // tag check
  val meta_resp = io.meta_resp
  // strip the ECC bits before comparing tags
  val tag_resp = io.tag_resp.map(r => r(tagBits - 1, 0))
  def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))

  // resp in s1
  // duplicated tag-match vectors for dcache- and lsu-side fanout
  val s1_tag_match_way_dup_dc = wayMap((w: Int) => tag_resp(w) === get_tag(s1_paddr_dup_dcache) && meta_resp(w).coh.isValid()).asUInt
  val s1_tag_match_way_dup_lsu = wayMap((w: Int) => tag_resp(w) === get_tag(s1_paddr_dup_lsu) && meta_resp(w).coh.isValid()).asUInt
  val s1_wpu_pred_valid = RegEnable(io.dwpu.resp(0).valid, s0_fire)
  val s1_wpu_pred_way_en = RegEnable(io.dwpu.resp(0).bits.s0_pred_way_en, s0_fire)

  // lookup update
  io.dwpu.lookup_upd(0).valid := s1_valid
  io.dwpu.lookup_upd(0).bits.vaddr := s1_vaddr
  io.dwpu.lookup_upd(0).bits.s1_real_way_en := s1_tag_match_way_dup_dc
  io.dwpu.lookup_upd(0).bits.s1_pred_way_en := s1_wpu_pred_way_en
  // replace / tag write
  io.vtag_update.ready := true.B
  // dwpu.io.tagwrite_upd.valid := io.vtag_update.valid
  // dwpu.io.tagwrite_upd.bits.vaddr := io.vtag_update.bits.vaddr
  // dwpu.io.tagwrite_upd.bits.s1_real_way_en := io.vtag_update.bits.way_en

  val s1_direct_map_way_num = get_direct_map_way(s1_req.vaddr)
  if(dwpuParam.enCfPred || !env.FPGAPlatform){
    /* method1: record the pc */
    // if (!env.FPGAPlatform){
    //    io.dwpu.cfpred(0).s0_vaddr := io.lsu.s0_pc
    //    io.dwpu.cfpred(0).s1_vaddr := io.lsu.s1_pc
    // }

    /* method2: record the vaddr */
    io.dwpu.cfpred(0).s0_vaddr := s0_vaddr
    io.dwpu.cfpred(0).s1_vaddr := s1_vaddr
    // whether direct_map_way miss with valid tag value
    io.dwpu.cfpred(0).s1_dm_hit := wayMap((w: Int) => w.U === s1_direct_map_way_num && tag_resp(w) === get_tag(s1_paddr_dup_lsu) && meta_resp(w).coh.isValid()).asUInt.orR
  }else{
    io.dwpu.cfpred(0) := DontCare
  }

  // when WPU is enabled and made a prediction, the data array is read using
  // the predicted way; a wrong prediction is detected here and forces a replay
  val s1_pred_tag_match_way_dup_dc = Wire(UInt(nWays.W))
  val s1_wpu_pred_fail = Wire(Bool())
  val s1_wpu_pred_fail_and_real_hit = Wire(Bool())
  if (dwpuParam.enWPU) {
    when(s1_wpu_pred_valid) {
      s1_pred_tag_match_way_dup_dc := s1_wpu_pred_way_en
    }.otherwise {
      s1_pred_tag_match_way_dup_dc := s1_tag_match_way_dup_dc
    }
    s1_wpu_pred_fail := s1_valid && s1_tag_match_way_dup_dc =/= s1_pred_tag_match_way_dup_dc
    s1_wpu_pred_fail_and_real_hit := s1_wpu_pred_fail && s1_tag_match_way_dup_dc.orR
  } else {
    s1_pred_tag_match_way_dup_dc := s1_tag_match_way_dup_dc
    s1_wpu_pred_fail := false.B
    s1_wpu_pred_fail_and_real_hit := false.B
  }

  val s1_tag_match_dup_dc = s1_tag_match_way_dup_dc.orR
  val s1_tag_match_dup_lsu = s1_tag_match_way_dup_lsu.orR
  assert(RegNext(!s1_valid || PopCount(s1_tag_match_way_dup_dc) <= 1.U), "tag should not match with more than 1 way")

  val s1_fake_meta = Wire(new Meta)
  // s1_fake_meta.tag := get_tag(s1_paddr_dup_dcache)
  s1_fake_meta.coh := ClientMetadata.onReset
  val s1_fake_tag = get_tag(s1_paddr_dup_dcache)

  // when there are no tag match, we give it a Fake Meta
  // this simplifies our logic in s2 stage
  val s1_hit_meta = Mux(s1_tag_match_dup_dc, Mux1H(s1_tag_match_way_dup_dc, wayMap((w: Int) => meta_resp(w))), s1_fake_meta)
  val s1_hit_coh = s1_hit_meta.coh
  val s1_hit_error = Mux(s1_tag_match_dup_dc, Mux1H(s1_tag_match_way_dup_dc, wayMap((w: Int) => io.extra_meta_resp(w).error)), false.B)
  val s1_hit_prefetch = Mux(s1_tag_match_dup_dc, Mux1H(s1_tag_match_way_dup_dc, wayMap((w: Int) => io.extra_meta_resp(w).prefetch)), false.B)
  val s1_hit_access = Mux(s1_tag_match_dup_dc, Mux1H(s1_tag_match_way_dup_dc, wayMap((w: Int) => io.extra_meta_resp(w).access)), false.B)

  // replacement way selection: prefer an invalid way over the way chosen by
  // the replacement policy
  io.replace_way.set.valid := RegNext(s0_fire)
  io.replace_way.set.bits := get_idx(s1_vaddr)
  io.replace_way.dmWay := get_direct_map_way(s1_vaddr)
  val s1_invalid_vec = wayMap(w => !meta_resp(w).coh.isValid())
  val s1_have_invalid_way = s1_invalid_vec.asUInt.orR
  val s1_invalid_way_en = ParallelPriorityMux(s1_invalid_vec.zipWithIndex.map(x => x._1 -> UIntToOH(x._2.U(nWays.W))))
  val s1_repl_way_en_oh = Mux(s1_have_invalid_way, s1_invalid_way_en, UIntToOH(io.replace_way.way))
  val s1_repl_way_en_enc = OHToUInt(s1_repl_way_en_oh)
  val s1_repl_tag = Mux1H(s1_repl_way_en_oh, wayMap(w => tag_resp(w)))
  val s1_repl_coh = Mux1H(s1_repl_way_en_oh, wayMap(w => meta_resp(w).coh))
  val s1_repl_extra_meta = Mux1H(s1_repl_way_en_oh, wayMap(w => io.extra_meta_resp(w)))

  val s1_need_replacement = !s1_tag_match_dup_dc
  val s1_way_en = Mux(s1_need_replacement, s1_repl_way_en_oh, s1_tag_match_way_dup_dc)
  val s1_coh = Mux(s1_need_replacement, s1_repl_coh, s1_hit_coh)
  val s1_tag = Mux(s1_need_replacement, s1_repl_tag, get_tag(s1_paddr_dup_dcache))

  XSPerfAccumulate("load_has_invalid_way_but_select_valid_way", io.replace_way.set.valid && wayMap(w => !meta_resp(w).coh.isValid()).asUInt.orR && s1_need_replacement && s1_repl_coh.isValid())
  XSPerfAccumulate("load_using_replacement", io.replace_way.set.valid && s1_need_replacement)

  // data read: use the (possibly predicted) way enable
  io.banked_data_read.valid := s1_fire && !s1_nack
  io.banked_data_read.bits.addr := s1_vaddr
  io.banked_data_read.bits.way_en := s1_pred_tag_match_way_dup_dc
  io.banked_data_read.bits.bankMask := s1_bank_oh
  io.is128Req := s1_load128Req

  // get s1_will_send_miss_req in lpad_s1
  val s1_has_permission = s1_hit_coh.onAccess(s1_req.cmd)._1
  val s1_new_hit_coh = s1_hit_coh.onAccess(s1_req.cmd)._3
  // a "hit" requires matching tag, sufficient permission and no coherence
  // state upgrade (otherwise the miss queue must handle it)
  val s1_hit = s1_tag_match_dup_dc && s1_has_permission && s1_hit_coh === s1_new_hit_coh
  val s1_will_send_miss_req = s1_valid && !s1_nack && !s1_nack_data && !s1_hit

  // check ecc error
  val s1_encTag = Mux1H(s1_tag_match_way_dup_dc, wayMap((w: Int) => io.tag_resp(w)))
  val s1_flag_error = Mux(s1_need_replacement, false.B, s1_hit_error) // error reported by exist dcache error bit

  // --------------------------------------------------------------------------------
  // stage 2
  // --------------------------------------------------------------------------------
  // return data

  // val s2_valid = RegEnable(next = s1_valid && !io.lsu.s1_kill, init = false.B, enable = s1_fire)
  val s2_valid = RegInit(false.B)
  val s2_req = RegEnable(s1_req, s1_fire)
  val s2_load128Req = RegEnable(s1_load128Req, s1_fire)
  val s2_paddr = RegEnable(s1_paddr_dup_dcache, s1_fire)
  val s2_vaddr = RegEnable(s1_vaddr, s1_fire)
  val s2_bank_oh = RegEnable(s1_bank_oh, s1_fire)
  val s2_bank_oh_dup_0 = RegEnable(s1_bank_oh, s1_fire)
  val s2_wpu_pred_fail = RegEnable(s1_wpu_pred_fail, s1_fire)
  val s2_real_way_en = RegEnable(s1_tag_match_way_dup_dc, s1_fire)
  val s2_pred_way_en = RegEnable(s1_pred_tag_match_way_dup_dc, s1_fire)
  val s2_dm_way_num = RegEnable(s1_direct_map_way_num, s1_fire)
  val s2_wpu_pred_fail_and_real_hit = RegEnable(s1_wpu_pred_fail_and_real_hit, s1_fire)

  // s2 never blocks: the load pipeline is expected to always drain
  s2_ready := true.B

  val s2_fire = s2_valid

  when (s1_fire) { s2_valid := !io.lsu.s1_kill }
  .elsewhen(io.lsu.resp.fire()) { s2_valid := false.B }

  dump_pipeline_reqs("LoadPipe s2", s2_valid, s2_req)


  // hit, miss, nack, permission checking
  // dcache side tag match
  val s2_tag_match_way = RegEnable(s1_tag_match_way_dup_dc, s1_fire)
  val s2_tag_match = RegEnable(s1_tag_match_dup_dc, s1_fire)

  // lsu side tag match
  val s2_hit_dup_lsu = RegNext(s1_tag_match_dup_lsu)

  io.lsu.s2_hit := s2_hit_dup_lsu && !s2_wpu_pred_fail

  val s2_hit_meta = RegEnable(s1_hit_meta, s1_fire)
  val s2_hit_coh = RegEnable(s1_hit_coh, s1_fire)
  val s2_has_permission = s2_hit_coh.onAccess(s2_req.cmd)._1 // for write prefetch
  val s2_new_hit_coh = s2_hit_coh.onAccess(s2_req.cmd)._3 // for write prefetch

  val s2_way_en = RegEnable(s1_way_en, s1_fire)
  val s2_repl_coh = RegEnable(s1_repl_coh, s1_fire)
  val s2_repl_tag = RegEnable(s1_repl_tag, s1_fire)
  val s2_repl_extra_meta = RegEnable(s1_repl_extra_meta, s1_fire) // not used for now
  val s2_encTag = RegEnable(s1_encTag, s1_fire)

  // when req got nacked, upper levels should replay this request
  // nacked or not
  val s2_nack_hit = RegEnable(s1_nack, s1_fire)
  // can no allocate mshr for load miss
  val s2_nack_no_mshr = io.miss_req.valid && !io.miss_req.ready
  // Bank conflict on data arrays
  val s2_nack_data = RegEnable(!io.banked_data_read.ready, s1_fire)
  val s2_nack = s2_nack_hit || s2_nack_no_mshr || s2_nack_data
  // s2 miss merged
  val s2_miss_merged = io.miss_req.fire && !io.miss_req.bits.cancel && io.miss_resp.merged

  val s2_bank_addr = addr_to_dcache_bank(s2_paddr)
  dontTouch(s2_bank_addr)

  val s2_instrtype = s2_req.instrtype

  val s2_tag_error = dcacheParameters.tagCode.decode(s2_encTag).error // error reported by tag ecc check
  val s2_flag_error = RegEnable(s1_flag_error, s1_fire)

  val s2_hit_prefetch = RegEnable(s1_hit_prefetch, s1_fire)
  val s2_hit_access = RegEnable(s1_hit_access, s1_fire)

  // a WPU misprediction invalidates the data read in s1, so it cannot count as a hit
  val s2_hit = s2_tag_match && s2_has_permission && s2_hit_coh === s2_new_hit_coh && !s2_wpu_pred_fail

  // only dump these signals when they are actually valid
  dump_pipeline_valids("LoadPipe s2", "s2_hit", s2_valid && s2_hit)
  dump_pipeline_valids("LoadPipe s2", "s2_nack", s2_valid && s2_nack)
  dump_pipeline_valids("LoadPipe s2", "s2_nack_hit", s2_valid && s2_nack_hit)
  dump_pipeline_valids("LoadPipe s2", "s2_nack_no_mshr", s2_valid && s2_nack_no_mshr)

  val s2_can_send_miss_req = RegEnable(s1_will_send_miss_req, s1_fire)

  // send load miss to miss queue
  io.miss_req.valid := s2_valid && s2_can_send_miss_req
  io.miss_req.bits := DontCare
  io.miss_req.bits.source := s2_instrtype
  io.miss_req.bits.cmd := s2_req.cmd
  io.miss_req.bits.addr := get_block_addr(s2_paddr)
  io.miss_req.bits.vaddr := s2_vaddr
  io.miss_req.bits.way_en := s2_way_en
  io.miss_req.bits.req_coh := s2_hit_coh
  io.miss_req.bits.replace_coh := s2_repl_coh
  io.miss_req.bits.replace_tag := s2_repl_tag
  io.miss_req.bits.cancel := io.lsu.s2_kill || s2_tag_error
  io.miss_req.bits.pc := io.lsu.s2_pc

  // send back response
  val resp = Wire(ValidIO(new DCacheWordResp))
  resp.valid := s2_valid
  resp.bits := DontCare
  // resp.bits.data := s2_word_decoded
  // resp.bits.data := banked_data_resp_word.raw_data
  // * on miss or nack, upper level should replay request
  // but if we successfully sent the request to miss queue
  // upper level does not need to replay request
  // they can sit in load queue and wait for refill
  //
  // * report a miss if bank conflict is detected
  val real_miss = !s2_real_way_en.orR
  // io.debug_s2_cache_miss := real_miss
  resp.bits.miss := real_miss
  io.lsu.s2_first_hit := s2_req.isFirstIssue && s2_hit
  // load pipe need replay when there is a bank conflict or wpu predict fail
  resp.bits.replay := DontCare
  resp.bits.replayCarry.valid := (resp.bits.miss && (!io.miss_req.fire() || s2_nack)) || io.bank_conflict_slow || s2_wpu_pred_fail
  resp.bits.replayCarry.real_way_en := s2_real_way_en
  resp.bits.meta_prefetch := s2_hit_prefetch
  resp.bits.meta_access := s2_hit_access
  resp.bits.tag_error := s2_tag_error // report tag_error in load s2
  resp.bits.mshr_id := io.miss_resp.id
  resp.bits.handled := io.miss_req.fire && !io.miss_req.bits.cancel && io.miss_resp.handled
  resp.bits.debug_robIdx := s2_req.debug_robIdx
  // debug info
  // NOTE: a second, identical connection to io.lsu.s2_first_hit used to live
  // here; it was redundant (same expression as above) and has been removed
  io.lsu.debug_s2_real_way_num := OneHot.OHToUIntStartOne(s2_real_way_en)
  if(dwpuParam.enWPU) {
    io.lsu.debug_s2_pred_way_num := OneHot.OHToUIntStartOne(s2_pred_way_en)
  }else{
    io.lsu.debug_s2_pred_way_num := 0.U
  }
  if(dwpuParam.enWPU && dwpuParam.enCfPred || !env.FPGAPlatform){
    io.lsu.debug_s2_dm_way_num :=  s2_dm_way_num + 1.U
  }else{
    io.lsu.debug_s2_dm_way_num := 0.U
  }


  XSPerfAccumulate("dcache_read_bank_conflict", io.bank_conflict_slow && s2_valid)
  XSPerfAccumulate("dcache_read_from_prefetched_line", s2_valid && s2_hit_prefetch && !resp.bits.miss)
  XSPerfAccumulate("dcache_first_read_from_prefetched_line", s2_valid && s2_hit_prefetch && !resp.bits.miss && !s2_hit_access)

  io.lsu.resp.valid := resp.valid
  io.lsu.resp.bits := resp.bits
  assert(RegNext(!(resp.valid && !io.lsu.resp.ready)), "lsu should be ready in s2")

  when (resp.valid) {
    resp.bits.dump()
  }

  io.lsu.debug_s1_hit_way := s1_tag_match_way_dup_dc
  io.lsu.s1_disable_fast_wakeup := io.disable_ld_fast_wakeup
  io.lsu.s2_bank_conflict := io.bank_conflict_slow
  io.lsu.s2_wpu_pred_fail := s2_wpu_pred_fail_and_real_hit
  io.lsu.s2_mq_nack       := (resp.bits.miss && (!io.miss_req.fire() || s2_nack))
  assert(RegNext(s1_ready && s2_ready), "load pipeline should never be blocked")

  // --------------------------------------------------------------------------------
  // stage 3
  // --------------------------------------------------------------------------------
  // report ecc error and get selected dcache data

  val s3_valid = RegNext(s2_valid)
  val s3_load128Req = RegEnable(s2_load128Req, s2_fire)
  val s3_vaddr = RegEnable(s2_vaddr, s2_fire)
  val s3_paddr = RegEnable(s2_paddr, s2_fire)
  val s3_hit = RegEnable(s2_hit, s2_fire)
  val s3_tag_match_way = RegEnable(s2_tag_match_way, s2_fire)

  // for a 64-bit load the single data bank result is replicated to both halves
  val s3_data128bit = Cat(io.banked_data_resp(1).raw_data, io.banked_data_resp(0).raw_data)
  val s3_data64bit = Fill(2, io.banked_data_resp(0).raw_data)
  val s3_banked_data_resp_word = Mux(s3_load128Req, s3_data128bit, s3_data64bit)
  val s3_data_error = Mux(s3_load128Req, io.read_error_delayed.asUInt.orR, io.read_error_delayed(0)) && s3_hit
  val s3_tag_error = RegEnable(s2_tag_error, s2_fire)
  val s3_flag_error = RegEnable(s2_flag_error, s2_fire)
  val s3_error = s3_tag_error || s3_flag_error || s3_data_error

  // error_delayed signal will be used to update uop.exception 1 cycle after load writeback
  resp.bits.error_delayed := s3_error && (s3_hit || s3_tag_error) && s3_valid
  resp.bits.data_delayed := s3_banked_data_resp_word
  resp.bits.replacementUpdated := io.replace_access.valid

  // report tag / data / l2 error (with paddr) to bus error unit
  io.error := 0.U.asTypeOf(new L1CacheErrorInfo())
  io.error.report_to_beu := (s3_tag_error || s3_data_error) && s3_valid
  io.error.paddr := s3_paddr
  io.error.source.tag := s3_tag_error
  io.error.source.data := s3_data_error
  io.error.source.l2 := s3_flag_error
  io.error.opType.load := true.B
  // report tag error / l2 corrupted to CACHE_ERROR csr
  io.error.valid := s3_error && s3_valid

  // update plru in s3
  val s3_miss_merged = RegNext(s2_miss_merged)
  val first_update = RegNext(RegNext(RegNext(!io.lsu.replacementUpdated)))
  val hit_update_replace_en  = RegNext(s2_valid) && RegNext(!resp.bits.miss)
  val miss_update_replace_en = RegNext(io.miss_req.fire) && RegNext(!io.miss_req.bits.cancel) && RegNext(io.miss_resp.handled)

  if (!cfg.updateReplaceOn2ndmiss) {
    // replacement is only updated on 1st miss
    // io.replace_access.valid := RegNext(RegNext(
    //   RegNext(io.meta_read.fire()) && s1_valid && !io.lsu.s1_kill) &&
    //   !s2_nack_no_mshr &&
    //   !s2_miss_merged
    // )
    io.replace_access.valid := (hit_update_replace_en || (miss_update_replace_en && !s3_miss_merged)) && first_update
    io.replace_access.bits.set := RegNext(RegNext(get_idx(s1_req.vaddr)))
    io.replace_access.bits.way := RegNext(RegNext(Mux(s1_tag_match_dup_dc, OHToUInt(s1_tag_match_way_dup_dc), s1_repl_way_en_enc)))
  } else {
    // replacement is updated on both 1st and 2nd miss
    // timing is worse than !cfg.updateReplaceOn2ndmiss
    // io.replace_access.valid := RegNext(RegNext(
    //   RegNext(io.meta_read.fire()) && s1_valid && !io.lsu.s1_kill) &&
    //   !s2_nack_no_mshr &&
    //   // replacement is updated on 2nd miss only when this req is firstly issued
    //   (!s2_miss_merged || s2_req.isFirstIssue)
    // )
    io.replace_access.valid := (hit_update_replace_en || miss_update_replace_en) && first_update
    io.replace_access.bits.set := RegNext(RegNext(get_idx(s1_req.vaddr)))
    io.replace_access.bits.way := RegNext(
      Mux(
        RegNext(s1_tag_match_dup_dc),
        RegNext(OHToUInt(s1_tag_match_way_dup_dc)), // if hit, access hit way in plru
        Mux( // if miss
          !s2_miss_merged,
          RegNext(s1_repl_way_en_enc), // 1st fire: access new selected replace way
          OHToUInt(io.miss_resp.repl_way_en) // 2nd fire: access replace way selected at miss queue allocate time
        )
      )
    )
  }

  // update access bit
  io.access_flag_write.valid := s3_valid && s3_hit
  io.access_flag_write.bits.idx := get_idx(s3_vaddr)
  io.access_flag_write.bits.way_en := s3_tag_match_way
  io.access_flag_write.bits.flag := true.B

  // --------------------------------------------------------------------------------
  // Debug logging functions

  /** Dump a pipeline request when `valid` is asserted (simulation-only log). */
  def dump_pipeline_reqs(pipeline_stage_name: String, valid: Bool,
    req: DCacheWordReq ) = {
      when (valid) {
        XSDebug(s"$pipeline_stage_name: ")
        req.dump()
      }
  }

  /** Log a named pipeline condition when `valid` is asserted (simulation-only log). */
  def dump_pipeline_valids(pipeline_stage_name: String, signal_name: String, valid: Bool) = {
    when (valid) {
      XSDebug(s"$pipeline_stage_name $signal_name\n")
    }
  }

  // performance counters
  XSPerfAccumulate("load_req", io.lsu.req.fire())
  XSPerfAccumulate("load_s1_kill", s1_fire && io.lsu.s1_kill)
  XSPerfAccumulate("load_hit_way", s1_fire && s1_tag_match_dup_dc)
  XSPerfAccumulate("load_replay", io.lsu.resp.fire() && resp.bits.replay)
  XSPerfAccumulate("load_replay_for_dcache_data_nack", io.lsu.resp.fire() && resp.bits.replay && s2_nack_data)
  XSPerfAccumulate("load_replay_for_dcache_no_mshr", io.lsu.resp.fire() && resp.bits.replay && s2_nack_no_mshr)
  XSPerfAccumulate("load_replay_for_dcache_conflict", io.lsu.resp.fire() && resp.bits.replay && io.bank_conflict_slow)
  XSPerfAccumulate("load_replay_for_dcache_wpu_pred_fail", io.lsu.resp.fire() && resp.bits.replay && s2_wpu_pred_fail)
  XSPerfAccumulate("load_hit", io.lsu.resp.fire() && !real_miss)
  XSPerfAccumulate("load_miss", io.lsu.resp.fire() && real_miss)
  XSPerfAccumulate("load_succeed", io.lsu.resp.fire() && !resp.bits.miss && !resp.bits.replay)
  XSPerfAccumulate("load_miss_or_conflict", io.lsu.resp.fire() && resp.bits.miss)
  XSPerfAccumulate("actual_ld_fast_wakeup", s1_fire && s1_tag_match_dup_dc && !io.disable_ld_fast_wakeup)
  XSPerfAccumulate("ideal_ld_fast_wakeup", io.banked_data_read.fire() && s1_tag_match_dup_dc)

  val perfEvents = Seq(
    ("load_req                 ", io.lsu.req.fire()                                               ),
    ("load_replay              ", io.lsu.resp.fire() && resp.bits.replay                          ),
    ("load_replay_for_data_nack", io.lsu.resp.fire() && resp.bits.replay && s2_nack_data          ),
    ("load_replay_for_no_mshr  ", io.lsu.resp.fire() && resp.bits.replay && s2_nack_no_mshr       ),
    ("load_replay_for_conflict ", io.lsu.resp.fire() && resp.bits.replay && io.bank_conflict_slow ),
  )
  generatePerfEvent()
}
545