xref: /XiangShan/src/main/scala/xiangshan/mem/pipeline/LoadUnit.scala (revision 8a020714df826c6ac860308700137855ecd6ba07)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.mem
18
19import org.chipsalliance.cde.config.Parameters
20import chisel3._
21import chisel3.util._
22import utils._
23import utility._
24import xiangshan.ExceptionNO._
25import xiangshan._
26import xiangshan.backend.Bundles.{DynInst, MemExuInput, MemExuOutput}
27import xiangshan.backend.fu.PMPRespBundle
28import xiangshan.backend.fu.FuConfig._
29import xiangshan.backend.ctrlblock.{DebugLsInfoBundle, LsTopdownInfo}
30import xiangshan.backend.rob.RobPtr
32import xiangshan.backend.fu.util.SdtrigExt
33
34import xiangshan.cache._
35import xiangshan.cache.wpu.ReplayCarry
36import xiangshan.cache.mmu._
37import xiangshan.mem.mdp._
38
39class LoadToLsqReplayIO(implicit p: Parameters) extends XSBundle
40  with HasDCacheParameters
41  with HasTlbConst
42{
43  // mshr refill index
44  val mshr_id         = UInt(log2Up(cfg.nMissEntries).W)
45  // get full data from store queue and sbuffer
46  val full_fwd        = Bool()
47  // wait for data from store inst's store queue index
48  val data_inv_sq_idx = new SqPtr
49  // wait for address from store queue index
50  val addr_inv_sq_idx = new SqPtr
51  // replay carry
52  val rep_carry       = new ReplayCarry(nWays)
53  // data in last beat
54  val last_beat       = Bool()
55  // replay cause
56  val cause           = Vec(LoadReplayCauses.allCauses, Bool())
57  // performance debug information
58  val debug           = new PerfDebugInfo
59  // tlb hint
60  val tlb_id          = UInt(log2Up(loadfiltersize).W)
61  val tlb_full        = Bool()
62
63  // alias
64  def mem_amb       = cause(LoadReplayCauses.C_MA)
65  def tlb_miss      = cause(LoadReplayCauses.C_TM)
66  def fwd_fail      = cause(LoadReplayCauses.C_FF)
67  def dcache_rep    = cause(LoadReplayCauses.C_DR)
68  def dcache_miss   = cause(LoadReplayCauses.C_DM)
69  def wpu_fail      = cause(LoadReplayCauses.C_WF)
70  def bank_conflict = cause(LoadReplayCauses.C_BC)
71  def rar_nack      = cause(LoadReplayCauses.C_RAR)
72  def raw_nack      = cause(LoadReplayCauses.C_RAW)
73  def nuke          = cause(LoadReplayCauses.C_NK)
74  def need_rep      = cause.asUInt.orR
75}
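// LoadToLsqReplayIO carries everything the load queue needs to re-schedule this load
// later: which MSHR to wait on, which store (by SqPtr) to wait for, and a per-cause
// bit vector. Several cause bits may be set at once; need_rep is simply their OR.
// Illustrative sketch (hypothetical consumer code, not part of this file): a replay
// queue entry could pick the first blocking cause with a priority encoder, assuming
// lower cause indices are the more critical ones:
//   val blockingCause = PriorityEncoder(rep.cause)  // rep: LoadToLsqReplayIO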
76
77
78class LoadToLsqIO(implicit p: Parameters) extends XSBundle {
79  val ldin            = DecoupledIO(new LqWriteBundle)
80  val uncache         = Flipped(DecoupledIO(new MemExuOutput))
81  val ld_raw_data     = Input(new LoadDataFromLQBundle)
82  val forward         = new PipeLoadForwardQueryIO
83  val stld_nuke_query = new LoadNukeQueryIO
84  val ldld_nuke_query = new LoadNukeQueryIO
85  val trigger         = Flipped(new LqTriggerIO)
86}
87
88class LoadToLoadIO(implicit p: Parameters) extends XSBundle {
89  val valid      = Bool()
90  val data       = UInt(XLEN.W) // load to load fast path is limited to ld (64 bit) used as vaddr src1 only
91  val dly_ld_err = Bool()
92}
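// LoadToLoadIO is the load-to-load (pointer chasing) fast path: the 64-bit data of a
// just-finished ld is used directly as the base address of a dependent load in the
// next cycle, before writeback. Stage 0 below only adds the low 6 bits of the
// immediate (see s0_ptr_chasing_vaddr) and keeps data(XLEN-1, 6) as the upper address
// bits; stage 1 cancels the chase on a carry into bit 6, a misaligned address, a
// non-ld fuOpType, or a fast-match failure.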
93
94class LoadUnitTriggerIO(implicit p: Parameters) extends XSBundle {
95  val tdata2      = Input(UInt(64.W))
96  val matchType   = Input(UInt(2.W))
97  val tEnable     = Input(Bool()) // timing is calculated before this
98  val addrHit     = Output(Bool())
99}
100
101class LoadUnit(implicit p: Parameters) extends XSModule
102  with HasLoadHelper
103  with HasPerfEvents
104  with HasDCacheParameters
105  with HasCircularQueuePtrHelper
106  with HasVLSUParameters
107  with SdtrigExt
108{
109  val io = IO(new Bundle() {
110    // control
111    val redirect      = Flipped(ValidIO(new Redirect))
112    val csrCtrl       = Flipped(new CustomCSRCtrlIO)
113
114    // int issue path
115    val ldin          = Flipped(Decoupled(new MemExuInput))
116    val ldout         = Decoupled(new MemExuOutput)
117
118    // vec issue path
119    val vecldin = Flipped(Decoupled(new VecPipeBundle))
120    val vecldout = Decoupled(new VecPipelineFeedbackIO(isVStore = false))
121
122    // misalignBuffer issue path
123    val misalign_ldin = Flipped(Decoupled(new LsPipelineBundle))
124    val misalign_ldout = Valid(new LqWriteBundle)
125
126    // data path
127    val tlb           = new TlbRequestIO(2)
128    val pmp           = Flipped(new PMPRespBundle()) // arrives in the same cycle as the tlb response now
129    val dcache        = new DCacheLoadIO
130    val sbuffer       = new LoadForwardQueryIO
131    val lsq           = new LoadToLsqIO
132    val tl_d_channel  = Input(new DcacheToLduForwardIO)
133    val forward_mshr  = Flipped(new LduToMissqueueForwardIO)
134   // val refill        = Flipped(ValidIO(new Refill))
135    val l2_hint       = Input(Valid(new L2ToL1Hint))
136    val tlb_hint      = Flipped(new TlbHintReq)
137    // fast wakeup
138    // TODO: implement vector fast wakeup
139    val fast_uop = ValidIO(new DynInst) // early wakeup signal generated in load_s1, sent to RS in load_s2
140
141    // trigger
142    val trigger = Vec(TriggerNum, new LoadUnitTriggerIO)
143
144    // prefetch
145    val prefetch_train            = ValidIO(new LdPrefetchTrainBundle()) // provide prefetch info to sms
146    val prefetch_train_l1         = ValidIO(new LdPrefetchTrainBundle()) // provide prefetch info to stream & stride
147    // speculative for gated control
148    val s1_prefetch_spec = Output(Bool())
149    val s2_prefetch_spec = Output(Bool())
150
151    val prefetch_req              = Flipped(ValidIO(new L1PrefetchReq)) // hardware prefetch to l1 cache req
152    val canAcceptLowConfPrefetch  = Output(Bool())
153    val canAcceptHighConfPrefetch = Output(Bool())
154
155    // ifetchPrefetch
156    val ifetchPrefetch = ValidIO(new SoftIfetchPrefetchBundle)
157
158    // load to load fast path
159    val l2l_fwd_in    = Input(new LoadToLoadIO)
160    val l2l_fwd_out   = Output(new LoadToLoadIO)
161
162    val ld_fast_match    = Input(Bool())
163    val ld_fast_fuOpType = Input(UInt())
164    val ld_fast_imm      = Input(UInt(12.W))
165
166    // rs feedback
167    val wakeup = ValidIO(new DynInst)
168    val feedback_fast = ValidIO(new RSFeedback) // stage 2
169    val feedback_slow = ValidIO(new RSFeedback) // stage 3
170    val ldCancel = Output(new LoadCancelIO()) // used to cancel the uops woken up by this load, and to cancel the load itself
171
172    // load ecc error
173    val s3_dly_ld_err = Output(Bool()) // Note that io.s3_dly_ld_err and io.lsq.s3_dly_ld_err are different
174
175    // schedule error query
176    val stld_nuke_query = Flipped(Vec(StorePipelineWidth, Valid(new StoreNukeQueryIO)))
177
178    // queue-based replay
179    val replay       = Flipped(Decoupled(new LsPipelineBundle))
180    val lq_rep_full  = Input(Bool())
181
182    // misc
183    val s2_ptr_chasing = Output(Bool()) // provide right pc for hw prefetch
184
185    // Load fast replay path
186    val fast_rep_in  = Flipped(Decoupled(new LqWriteBundle))
187    val fast_rep_out = Decoupled(new LqWriteBundle)
188
189    // to misalign buffer
190    val misalign_buf = Valid(new LqWriteBundle)
191
192    // Load RAR rollback
193    val rollback = Valid(new Redirect)
194
195    // perf
196    val debug_ls         = Output(new DebugLsInfoBundle)
197    val lsTopdownInfo    = Output(new LsTopdownInfo)
198    val correctMissTrain = Input(Bool())
199  })
200
201  val s1_ready, s2_ready, s3_ready = WireInit(false.B)
202
203  // Pipeline
204  // --------------------------------------------------------------------------------
205  // stage 0
206  // --------------------------------------------------------------------------------
207  // generate addr, use addr to query DCache and DTLB
208  val s0_valid         = Wire(Bool())
209  val s0_mmio_select   = Wire(Bool())
210  val s0_kill          = Wire(Bool())
211  val s0_can_go        = s1_ready
212  val s0_fire          = s0_valid && s0_can_go
213  val s0_mmio_fire     = s0_mmio_select && s0_can_go
214  val s0_out           = Wire(new LqWriteBundle)
215  val s0_tlb_vaddr     = Wire(UInt(VAddrBits.W))
216  val s0_dcache_vaddr  = Wire(UInt(VAddrBits.W))
217
218  // flow source bundle
219  class FlowSource extends Bundle {
220    val vaddr         = UInt(VAddrBits.W)
221    val mask          = UInt((VLEN/8).W)
222    val uop           = new DynInst
223    val try_l2l       = Bool()
224    val has_rob_entry = Bool()
225    val rep_carry     = new ReplayCarry(nWays)
226    val mshrid        = UInt(log2Up(cfg.nMissEntries).W)
227    val isFirstIssue  = Bool()
228    val fast_rep      = Bool()
229    val ld_rep        = Bool()
230    val l2l_fwd       = Bool()
231    val prf           = Bool()
232    val prf_rd        = Bool()
233    val prf_wr        = Bool()
234    val prf_i         = Bool()
235    val sched_idx     = UInt(log2Up(LoadQueueReplaySize+1).W)
236    val hlv           = Bool()
237    val hlvx          = Bool()
238    // Record the issue port idx of load issue queue. This signal is used by load cancel.
239    val deqPortIdx    = UInt(log2Ceil(LoadPipelineWidth).W)
240    val frm_mabuf     = Bool()
241    // vec only
242    val isvec         = Bool()
243    val is128bit      = Bool()
244    val uop_unit_stride_fof = Bool()
245    val reg_offset    = UInt(vOffsetBits.W)
246    val vecActive     = Bool() // 1: active vector element or scalar mem operation, 0: inactive vector element
247    val is_first_ele  = Bool()
248    // val flowPtr       = new VlflowPtr
249    val usSecondInv   = Bool()
250    val mbIndex       = UInt(vlmBindexBits.W)
251    val elemIdx       = UInt(elemIdxBits.W)
252    val elemIdxInsideVd = UInt(elemIdxBits.W)
253    val alignedType   = UInt(alignTypeBits.W)
254  }
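  // FlowSource is the normalized view of one load flow: every issue source below
  // (misalign buffer, replay queue, fast replay, mmio, prefetch, vector, int, l2l)
  // is converted into this bundle by a from*Source() helper, and s0_sel_src picks
  // one of them by priority.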
255  val s0_sel_src = Wire(new FlowSource)
256
257  // load flow select/gen
258  // src0: misalignBuffer load (io.misalign_ldin)
259  // src1: super load replayed by LSQ (cache miss replay) (io.replay)
260  // src2: fast load replay (io.fast_rep_in)
261  // src3: mmio (io.lsq.uncache)
262  // src4: load replayed by LSQ (io.replay)
263  // src5: hardware prefetch from prefetcher (high confidence) (io.prefetch_req)
264  // NOTE: Now vec/int loads are sent from the same RS
265  //       A vec load will be split into multiple uops,
266  //       so as long as one uop is issued,
267  //       the other uops should have higher priority
268  // src6: vec read from RS (io.vecldin)
269  // src7: int read / software prefetch first issue from RS (io.ldin)
270  // src8: load tries pointer chasing when there is no issued or replayed load (io.l2l_fwd_in)
271  // src9: hardware prefetch from prefetcher (low confidence) (io.prefetch_req)
272  // priority: high to low
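  // The arbitration below is a fixed-priority one-hot select: each source's *_ready
  // is the NOR of all higher-priority sources' valids, and a source is selected when
  // it is both valid and ready. Equivalent sketch (illustration only; the real code
  // spells the terms out for clarity and for the dontTouch hooks):
  //   val valids  = Seq(s0_misalign_ld_valid, /* ... */ s0_low_conf_prf_valid)
  //   val readys  = valids.scanLeft(true.B)((noHigher, v) => noHigher && !v)
  //   val selects = valids.zip(readys).map { case (v, r) => v && r }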
273  val s0_rep_stall           = io.ldin.valid && isAfter(io.replay.bits.uop.robIdx, io.ldin.bits.uop.robIdx)
274  val s0_misalign_ld_valid   = io.misalign_ldin.valid
275  val s0_super_ld_rep_valid  = io.replay.valid && io.replay.bits.forward_tlDchannel
276  val s0_ld_fast_rep_valid   = io.fast_rep_in.valid
277  val s0_ld_mmio_valid       = io.lsq.uncache.valid
278  val s0_ld_rep_valid        = io.replay.valid && !io.replay.bits.forward_tlDchannel && !s0_rep_stall
279  val s0_high_conf_prf_valid = io.prefetch_req.valid && io.prefetch_req.bits.confidence > 0.U
280  val s0_vec_iss_valid       = io.vecldin.valid
281  val s0_int_iss_valid       = io.ldin.valid // int flow first issue or software prefetch
282  val s0_l2l_fwd_valid       = io.l2l_fwd_in.valid
283  val s0_low_conf_prf_valid  = io.prefetch_req.valid && io.prefetch_req.bits.confidence === 0.U
284  dontTouch(s0_misalign_ld_valid)
285  dontTouch(s0_super_ld_rep_valid)
286  dontTouch(s0_ld_fast_rep_valid)
287  dontTouch(s0_ld_mmio_valid)
288  dontTouch(s0_ld_rep_valid)
289  dontTouch(s0_high_conf_prf_valid)
290  dontTouch(s0_vec_iss_valid)
291  dontTouch(s0_int_iss_valid)
292  dontTouch(s0_l2l_fwd_valid)
293  dontTouch(s0_low_conf_prf_valid)
294
295  // load flow source ready
296  val s0_misalign_ld_ready   = WireInit(true.B)
297  val s0_super_ld_rep_ready  = !s0_misalign_ld_valid
298  val s0_ld_fast_rep_ready   = !s0_misalign_ld_valid &&
299                               !s0_super_ld_rep_valid
300  val s0_ld_mmio_ready       = !s0_misalign_ld_valid &&
301                               !s0_super_ld_rep_valid &&
302                               !s0_ld_fast_rep_valid
303  val s0_ld_rep_ready        = !s0_misalign_ld_valid &&
304                               !s0_super_ld_rep_valid &&
305                               !s0_ld_fast_rep_valid &&
306                               !s0_ld_mmio_valid
307  val s0_high_conf_prf_ready = !s0_misalign_ld_valid &&
308                               !s0_super_ld_rep_valid &&
309                               !s0_ld_fast_rep_valid &&
310                               !s0_ld_mmio_valid &&
311                               !s0_ld_rep_valid
312
313  val s0_vec_iss_ready       = !s0_misalign_ld_valid &&
314                               !s0_super_ld_rep_valid &&
315                               !s0_ld_fast_rep_valid &&
316                               !s0_ld_mmio_valid &&
317                               !s0_ld_rep_valid &&
318                               !s0_high_conf_prf_valid
319
320  val s0_int_iss_ready       = !s0_misalign_ld_valid &&
321                               !s0_super_ld_rep_valid &&
322                               !s0_ld_fast_rep_valid &&
323                               !s0_ld_mmio_valid &&
324                               !s0_ld_rep_valid &&
325                               !s0_high_conf_prf_valid &&
326                               !s0_vec_iss_valid
327
328  val s0_l2l_fwd_ready       = !s0_misalign_ld_valid &&
329                               !s0_super_ld_rep_valid &&
330                               !s0_ld_fast_rep_valid &&
331                               !s0_ld_mmio_valid &&
332                               !s0_ld_rep_valid &&
333                               !s0_high_conf_prf_valid &&
334                               !s0_int_iss_valid &&
335                               !s0_vec_iss_valid
336
337  val s0_low_conf_prf_ready  = !s0_misalign_ld_valid &&
338                               !s0_super_ld_rep_valid &&
339                               !s0_ld_fast_rep_valid &&
340                               !s0_ld_mmio_valid &&
341                               !s0_ld_rep_valid &&
342                               !s0_high_conf_prf_valid &&
343                               !s0_int_iss_valid &&
344                               !s0_vec_iss_valid &&
345                               !s0_l2l_fwd_valid
346  dontTouch(s0_misalign_ld_ready)
347  dontTouch(s0_super_ld_rep_ready)
348  dontTouch(s0_ld_fast_rep_ready)
349  dontTouch(s0_ld_mmio_ready)
350  dontTouch(s0_ld_rep_ready)
351  dontTouch(s0_high_conf_prf_ready)
352  dontTouch(s0_vec_iss_ready)
353  dontTouch(s0_int_iss_ready)
354  dontTouch(s0_l2l_fwd_ready)
355  dontTouch(s0_low_conf_prf_ready)
356
357  // load flow source select (OH)
358  val s0_misalign_ld_select  = s0_misalign_ld_valid && s0_misalign_ld_ready
359  val s0_super_ld_rep_select = s0_super_ld_rep_valid && s0_super_ld_rep_ready
360  val s0_ld_fast_rep_select  = s0_ld_fast_rep_valid && s0_ld_fast_rep_ready
361  val s0_ld_mmio_select      = s0_ld_mmio_valid && s0_ld_mmio_ready
362  val s0_ld_rep_select       = s0_ld_rep_valid && s0_ld_rep_ready
363  val s0_hw_prf_select       = s0_high_conf_prf_ready && s0_high_conf_prf_valid ||
364                               s0_low_conf_prf_ready && s0_low_conf_prf_valid
365  val s0_vec_iss_select      = s0_vec_iss_ready && s0_vec_iss_valid
366  val s0_int_iss_select      = s0_int_iss_ready && s0_int_iss_valid
367  val s0_l2l_fwd_select      = s0_l2l_fwd_ready && s0_l2l_fwd_valid
368  dontTouch(s0_misalign_ld_select)
369  dontTouch(s0_super_ld_rep_select)
370  dontTouch(s0_ld_fast_rep_select)
371  dontTouch(s0_ld_mmio_select)
372  dontTouch(s0_ld_rep_select)
373  dontTouch(s0_hw_prf_select)
374  dontTouch(s0_vec_iss_select)
375  dontTouch(s0_int_iss_select)
376  dontTouch(s0_l2l_fwd_select)
377
378  s0_valid := (s0_misalign_ld_valid ||
379               s0_super_ld_rep_valid ||
380               s0_ld_fast_rep_valid ||
381               s0_ld_rep_valid ||
382               s0_high_conf_prf_valid ||
383               s0_vec_iss_valid ||
384               s0_int_iss_valid ||
385               s0_l2l_fwd_valid ||
386               s0_low_conf_prf_valid) && !s0_ld_mmio_select && io.dcache.req.ready && !s0_kill
387
388  s0_mmio_select := s0_ld_mmio_select && !s0_kill
389
390  // pointer chasing is actually tried only when S0's output can go and dcache is ready
391  val s0_try_ptr_chasing      = s0_l2l_fwd_select
392  val s0_do_try_ptr_chasing   = s0_try_ptr_chasing && s0_can_go && io.dcache.req.ready
393  val s0_ptr_chasing_vaddr    = io.l2l_fwd_in.data(5, 0) +& io.ld_fast_imm(5, 0)
394  val s0_ptr_chasing_canceled = WireInit(false.B)
395  s0_kill := s0_ptr_chasing_canceled
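  // Note on s0_ptr_chasing_vaddr: only the low 6 bits of the forwarded data and the
  // low 6 bits of the immediate are added here (+& keeps the carry). The full
  // fast-path vaddr is formed later as Cat(data(XLEN-1, 6), sum(5, 0)), and stage 1
  // cancels the chase when the carry bit (bit 6) is set, i.e. when base and
  // base + offset fall into different 64-byte cache sets.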
396
397  // prefetch related ctrl signal
398  io.canAcceptLowConfPrefetch  := s0_low_conf_prf_ready && io.dcache.req.ready
399  io.canAcceptHighConfPrefetch := s0_high_conf_prf_ready && io.dcache.req.ready
400
401  // query DTLB
402  io.tlb.req.valid                   := s0_valid && !s0_hw_prf_select && !s0_sel_src.prf_i  // if is hardware prefetch, don't send valid to tlb, but need no_translate
403  io.tlb.req.bits.cmd                := Mux(s0_sel_src.prf,
404                                         Mux(s0_sel_src.prf_wr, TlbCmd.write, TlbCmd.read),
405                                         TlbCmd.read
406                                       )
407  io.tlb.req.bits.vaddr              := s0_tlb_vaddr
408  io.tlb.req.bits.hyperinst          := s0_sel_src.hlv
409  io.tlb.req.bits.hlvx               := s0_sel_src.hlvx
410  io.tlb.req.bits.size               := Mux(s0_sel_src.isvec, s0_sel_src.alignedType(2,0), LSUOpType.size(s0_sel_src.uop.fuOpType))
411  io.tlb.req.bits.kill               := s0_kill
412  io.tlb.req.bits.memidx.is_ld       := true.B
413  io.tlb.req.bits.memidx.is_st       := false.B
414  io.tlb.req.bits.memidx.idx         := s0_sel_src.uop.lqIdx.value
415  io.tlb.req.bits.debug.robIdx       := s0_sel_src.uop.robIdx
416  io.tlb.req.bits.no_translate       := s0_hw_prf_select  // hw prefetch addr does not need to be translated, need this signal for pmp check
417  io.tlb.req.bits.debug.pc           := s0_sel_src.uop.pc
418  io.tlb.req.bits.debug.isFirstIssue := s0_sel_src.isFirstIssue
419
420  // query DCache
421  io.dcache.req.valid             := s0_valid && !s0_sel_src.prf_i
422  io.dcache.req.bits.cmd          := Mux(s0_sel_src.prf_rd,
423                                      MemoryOpConstants.M_PFR,
424                                      Mux(s0_sel_src.prf_wr, MemoryOpConstants.M_PFW, MemoryOpConstants.M_XRD)
425                                    )
426  io.dcache.req.bits.vaddr        := s0_dcache_vaddr
427  io.dcache.req.bits.mask         := s0_sel_src.mask
428  io.dcache.req.bits.data         := DontCare
429  io.dcache.req.bits.isFirstIssue := s0_sel_src.isFirstIssue
430  io.dcache.req.bits.instrtype    := Mux(s0_sel_src.prf, DCACHE_PREFETCH_SOURCE.U, LOAD_SOURCE.U)
431  io.dcache.req.bits.debug_robIdx := s0_sel_src.uop.robIdx.value
432  io.dcache.req.bits.replayCarry  := s0_sel_src.rep_carry
433  io.dcache.req.bits.id           := DontCare // TODO: update cache meta
434  io.dcache.req.bits.lqIdx        := s0_sel_src.uop.lqIdx
435  io.dcache.pf_source             := Mux(s0_hw_prf_select, io.prefetch_req.bits.pf_source.value, L1_HW_PREFETCH_NULL)
436  io.dcache.is128Req              := s0_sel_src.is128bit
437
438  // load flow priority mux
439  def fromNullSource(): FlowSource = {
440    val out = WireInit(0.U.asTypeOf(new FlowSource))
441    out
442  }
443
444  def fromMisAlignBufferSource(src: LsPipelineBundle): FlowSource = {
445    val out = WireInit(0.U.asTypeOf(new FlowSource))
446    out.vaddr         := src.vaddr
447    out.mask          := src.mask
448    out.uop           := src.uop
449    out.try_l2l       := false.B
450    out.has_rob_entry := false.B
451    out.rep_carry     := src.replayCarry
452    out.mshrid        := src.mshrid
453    out.frm_mabuf     := true.B
454    out.isFirstIssue  := false.B
455    out.fast_rep      := false.B
456    out.ld_rep        := false.B
457    out.l2l_fwd       := false.B
458    out.prf           := false.B
459    out.prf_rd        := false.B
460    out.prf_wr        := false.B
461    out.sched_idx     := src.schedIndex
462    out.isvec         := false.B
463    out.is128bit      := src.is128bit
464    out.vecActive     := true.B
465    out.hlv           := LSUOpType.isHlv(src.uop.fuOpType)
466    out.hlvx          := LSUOpType.isHlvx(src.uop.fuOpType)
467    out
468  }
469
470  def fromFastReplaySource(src: LqWriteBundle): FlowSource = {
471    val out = WireInit(0.U.asTypeOf(new FlowSource))
472    out.mask          := src.mask
473    out.uop           := src.uop
474    out.try_l2l       := false.B
475    out.has_rob_entry := src.hasROBEntry
476    out.rep_carry     := src.rep_info.rep_carry
477    out.mshrid        := src.rep_info.mshr_id
478    out.frm_mabuf     := src.isFrmMisAlignBuf
479    out.isFirstIssue  := false.B
480    out.fast_rep      := true.B
481    out.ld_rep        := src.isLoadReplay
482    out.l2l_fwd       := false.B
483    out.prf           := LSUOpType.isPrefetch(src.uop.fuOpType) && !src.isvec
484    out.prf_rd        := src.uop.fuOpType === LSUOpType.prefetch_r
485    out.prf_wr        := src.uop.fuOpType === LSUOpType.prefetch_w
486    out.prf_i         := false.B
487    out.sched_idx     := src.schedIndex
488    out.isvec         := src.isvec
489    out.is128bit      := src.is128bit
490    out.uop_unit_stride_fof := src.uop_unit_stride_fof
491    out.reg_offset    := src.reg_offset
492    out.vecActive     := src.vecActive
493    out.is_first_ele  := src.is_first_ele
494    out.usSecondInv   := src.usSecondInv
495    out.mbIndex       := src.mbIndex
496    out.elemIdx       := src.elemIdx
497    out.elemIdxInsideVd := src.elemIdxInsideVd
498    out.alignedType   := src.alignedType
499    out.hlv           := LSUOpType.isHlv(src.uop.fuOpType)
500    out.hlvx          := LSUOpType.isHlvx(src.uop.fuOpType)
501    out
502  }
503
504  // TODO: implement vector mmio
505  def fromMmioSource(src: MemExuOutput) = {
506    val out = WireInit(0.U.asTypeOf(new FlowSource))
507    out.mask          := 0.U
508    out.uop           := src.uop
509    out.try_l2l       := false.B
510    out.has_rob_entry := false.B
511    out.rep_carry     := 0.U.asTypeOf(out.rep_carry)
512    out.mshrid        := 0.U
513    out.frm_mabuf     := false.B
514    out.isFirstIssue  := false.B
515    out.fast_rep      := false.B
516    out.ld_rep        := false.B
517    out.l2l_fwd       := false.B
518    out.prf           := false.B
519    out.prf_rd        := false.B
520    out.prf_wr        := false.B
521    out.prf_i         := false.B
522    out.sched_idx     := 0.U
523    out.hlv           := LSUOpType.isHlv(src.uop.fuOpType)
524    out.hlvx          := LSUOpType.isHlvx(src.uop.fuOpType)
525    out.vecActive     := true.B
526    out
527  }
528
529  def fromNormalReplaySource(src: LsPipelineBundle): FlowSource = {
530    val out = WireInit(0.U.asTypeOf(new FlowSource))
531    out.mask          := Mux(src.isvec, src.mask, genVWmask(src.vaddr, src.uop.fuOpType(1, 0)))
532    out.uop           := src.uop
533    out.try_l2l       := false.B
534    out.has_rob_entry := true.B
535    out.rep_carry     := src.replayCarry
536    out.mshrid        := src.mshrid
537    out.frm_mabuf     := false.B
538    out.isFirstIssue  := false.B
539    out.fast_rep      := false.B
540    out.ld_rep        := true.B
541    out.l2l_fwd       := false.B
542    out.prf           := LSUOpType.isPrefetch(src.uop.fuOpType) && !src.isvec
543    out.prf_rd        := src.uop.fuOpType === LSUOpType.prefetch_r
544    out.prf_wr        := src.uop.fuOpType === LSUOpType.prefetch_w
545    out.prf_i         := false.B
546    out.sched_idx     := src.schedIndex
547    out.isvec         := src.isvec
548    out.is128bit      := src.is128bit
549    out.uop_unit_stride_fof := src.uop_unit_stride_fof
550    out.reg_offset    := src.reg_offset
551    out.vecActive     := src.vecActive
552    out.is_first_ele  := src.is_first_ele
553    out.usSecondInv   := src.usSecondInv
554    out.mbIndex       := src.mbIndex
555    out.elemIdx       := src.elemIdx
556    out.elemIdxInsideVd := src.elemIdxInsideVd
557    out.alignedType   := src.alignedType
558    out.hlv           := LSUOpType.isHlv(src.uop.fuOpType)
559    out.hlvx          := LSUOpType.isHlvx(src.uop.fuOpType)
560    out
561  }
562
563  // TODO: implement vector prefetch
564  def fromPrefetchSource(src: L1PrefetchReq): FlowSource = {
565    val out = WireInit(0.U.asTypeOf(new FlowSource))
566    out.mask          := 0.U
567    out.uop           := DontCare
568    out.try_l2l       := false.B
569    out.has_rob_entry := false.B
570    out.rep_carry     := 0.U.asTypeOf(out.rep_carry)
571    out.mshrid        := 0.U
572    out.frm_mabuf     := false.B
573    out.isFirstIssue  := false.B
574    out.fast_rep      := false.B
575    out.ld_rep        := false.B
576    out.l2l_fwd       := false.B
577    out.prf           := true.B
578    out.prf_rd        := !src.is_store
579    out.prf_wr        := src.is_store
580    out.prf_i         := false.B
581    out.sched_idx     := 0.U
582    out
583  }
584
585  def fromVecIssueSource(src: VecPipeBundle): FlowSource = {
586    val out = WireInit(0.U.asTypeOf(new FlowSource))
587    out.mask          := src.mask
588    out.uop           := src.uop
589    out.try_l2l       := false.B
590    out.has_rob_entry := true.B
591    // TODO: VLSU, implement replay carry
592    out.rep_carry     := 0.U.asTypeOf(out.rep_carry)
593    out.mshrid        := 0.U
594    out.frm_mabuf     := false.B
595    // TODO: VLSU, implement first issue
596//    out.isFirstIssue  := src.isFirstIssue
597    out.fast_rep      := false.B
598    out.ld_rep        := false.B
599    out.l2l_fwd       := false.B
600    out.prf           := false.B
601    out.prf_rd        := false.B
602    out.prf_wr        := false.B
603    out.prf_i         := false.B
604    out.sched_idx     := 0.U
605    // Vector load interface
606    out.isvec               := true.B
607    // the 128-bit access path is used when alignedType indicates a 128-bit access
608    out.is128bit            := is128Bit(src.alignedType)
609    out.uop_unit_stride_fof := src.uop_unit_stride_fof
610    // out.rob_idx_valid       := src.rob_idx_valid
611    // out.inner_idx           := src.inner_idx
612    // out.rob_idx             := src.rob_idx
613    out.reg_offset          := src.reg_offset
614    // out.offset              := src.offset
615    out.vecActive           := src.vecActive
616    out.is_first_ele        := src.is_first_ele
617    // out.flowPtr             := src.flowPtr
618    out.usSecondInv         := src.usSecondInv
619    out.mbIndex             := src.mBIndex
620    out.elemIdx             := src.elemIdx
621    out.elemIdxInsideVd     := src.elemIdxInsideVd
622    out.alignedType         := src.alignedType
623    out.hlv                 := false.B
624    out.hlvx                := false.B
625    out
626  }
627
628  def fromIntIssueSource(src: MemExuInput): FlowSource = {
629    val out = WireInit(0.U.asTypeOf(new FlowSource))
630    val addr           = io.ldin.bits.src(0) + SignExt(io.ldin.bits.uop.imm(11, 0), VAddrBits)
631    out.mask          := genVWmask(addr, src.uop.fuOpType(1,0))
632    out.uop           := src.uop
633    out.try_l2l       := false.B
634    out.has_rob_entry := true.B
635    out.rep_carry     := 0.U.asTypeOf(out.rep_carry)
636    out.mshrid        := 0.U
637    out.frm_mabuf     := false.B
638    out.isFirstIssue  := true.B
639    out.fast_rep      := false.B
640    out.ld_rep        := false.B
641    out.l2l_fwd       := false.B
642    out.prf           := LSUOpType.isPrefetch(src.uop.fuOpType)
643    out.prf_rd        := src.uop.fuOpType === LSUOpType.prefetch_r
644    out.prf_wr        := src.uop.fuOpType === LSUOpType.prefetch_w
645    out.prf_i         := src.uop.fuOpType === LSUOpType.prefetch_i
646    out.sched_idx     := 0.U
647    out.hlv           := LSUOpType.isHlv(src.uop.fuOpType)
648    out.hlvx          := LSUOpType.isHlvx(src.uop.fuOpType)
649    out.vecActive     := true.B // true for scalar load
650    out
651  }
652
653  // TODO: implement vector l2l
654  def fromLoadToLoadSource(src: LoadToLoadIO): FlowSource = {
655    val out = WireInit(0.U.asTypeOf(new FlowSource))
656    out.mask               := genVWmask(0.U, LSUOpType.ld)
657    // When there's no valid instruction from RS and LSQ, we try the load-to-load forwarding.
658    // Assume the pointer chasing is always ld.
659    out.uop.fuOpType       := LSUOpType.ld
660    out.try_l2l            := true.B
661    // we don't care about out.isFirstIssue, out.rsIdx and s0_sqIdx in S0 when trying pointer chasing
662    // because these signals will be updated in S1
663    out.has_rob_entry      := false.B
664    out.mshrid             := 0.U
665    out.frm_mabuf          := false.B
666    out.rep_carry          := 0.U.asTypeOf(out.rep_carry)
667    out.isFirstIssue       := true.B
668    out.fast_rep           := false.B
669    out.ld_rep             := false.B
670    out.l2l_fwd            := true.B
671    out.prf                := false.B
672    out.prf_rd             := false.B
673    out.prf_wr             := false.B
674    out.prf_i              := false.B
675    out.sched_idx          := 0.U
676    out.hlv                := LSUOpType.isHlv(out.uop.fuOpType)
677    out.hlvx               := LSUOpType.isHlvx(out.uop.fuOpType)
678    out
679  }
680
681  // gather per-source valid signals and their formatted FlowSource bundles for the priority mux
682  val s0_src_selector = Seq(
683    s0_misalign_ld_valid,
684    s0_super_ld_rep_valid,
685    s0_ld_fast_rep_valid,
686    s0_ld_mmio_valid,
687    s0_ld_rep_valid,
688    s0_high_conf_prf_valid,
689    s0_vec_iss_valid,
690    s0_int_iss_valid,
691    (if (EnableLoadToLoadForward) s0_l2l_fwd_valid else false.B),
692    s0_low_conf_prf_valid
693  )
694  val s0_src_format = Seq(
695    fromMisAlignBufferSource(io.misalign_ldin.bits),
696    fromNormalReplaySource(io.replay.bits),
697    fromFastReplaySource(io.fast_rep_in.bits),
698    fromMmioSource(io.lsq.uncache.bits),
699    fromNormalReplaySource(io.replay.bits),
700    fromPrefetchSource(io.prefetch_req.bits),
701    fromVecIssueSource(io.vecldin.bits),
702    fromIntIssueSource(io.ldin.bits),
703    (if (EnableLoadToLoadForward) fromLoadToLoadSource(io.l2l_fwd_in) else fromNullSource()),
704    fromPrefetchSource(io.prefetch_req.bits)
705  )
706  s0_sel_src := ParallelPriorityMux(s0_src_selector, s0_src_format)
707
708  val s0_addr_selector = Seq(
709    s0_misalign_ld_valid,
710    s0_super_ld_rep_valid,
711    s0_ld_fast_rep_valid,
712    s0_ld_rep_valid,
713    s0_vec_iss_valid,
714    s0_int_iss_valid,
715    (if (EnableLoadToLoadForward) s0_l2l_fwd_valid else false.B),
716  )
717  val s0_addr_format = Seq(
718    io.misalign_ldin.bits.vaddr,
719    io.replay.bits.vaddr,
720    io.fast_rep_in.bits.vaddr,
721    io.replay.bits.vaddr,
722    io.vecldin.bits.vaddr,
723    io.ldin.bits.src(0) + SignExt(io.ldin.bits.uop.imm(11, 0), VAddrBits),
724    (if (EnableLoadToLoadForward) Cat(io.l2l_fwd_in.data(XLEN-1, 6), s0_ptr_chasing_vaddr(5,0)) else 0.U(VAddrBits.W)),
725  )
726  s0_tlb_vaddr := ParallelPriorityMux(s0_addr_selector, s0_addr_format)
727  s0_dcache_vaddr := Mux(s0_hw_prf_select, io.prefetch_req.bits.getVaddr(), s0_tlb_vaddr)
728
729  // address align check
730  val s0_addr_aligned = LookupTree(Mux(s0_sel_src.isvec, s0_sel_src.alignedType(1,0), s0_sel_src.uop.fuOpType(1, 0)), List(
731    "b00".U   -> true.B,                   //b
732    "b01".U   -> (s0_dcache_vaddr(0)    === 0.U), //h
733    "b10".U   -> (s0_dcache_vaddr(1, 0) === 0.U), //w
734    "b11".U   -> (s0_dcache_vaddr(2, 0) === 0.U)  //d
735  ))
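  // Alignment is checked against the access size encoded in fuOpType(1, 0) (or
  // alignedType(1, 0) for vector flows): byte loads are always aligned, halfword
  // loads need vaddr(0) == 0, word loads vaddr(1, 0) == 0, doubleword loads
  // vaddr(2, 0) == 0. For example, an lw at vaddr 0x...82 fails the "b10" case and
  // sets loadAddrMisaligned in s0_out below.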
736  XSError(s0_sel_src.isvec && s0_dcache_vaddr(3, 0) =/= 0.U && s0_sel_src.alignedType(2), "unit-stride 128 bit element is not aligned!")
737
738  // accept load flow if dcache ready (tlb is always ready)
739  // TODO: prefetch need writeback to loadQueueFlag
740  s0_out               := DontCare
741  s0_out.vaddr         := s0_dcache_vaddr
742  s0_out.mask          := s0_sel_src.mask
743  s0_out.uop           := s0_sel_src.uop
744  s0_out.isFirstIssue  := s0_sel_src.isFirstIssue
745  s0_out.hasROBEntry   := s0_sel_src.has_rob_entry
746  s0_out.isPrefetch    := s0_sel_src.prf
747  s0_out.isHWPrefetch  := s0_hw_prf_select
748  s0_out.isFastReplay  := s0_sel_src.fast_rep
749  s0_out.isLoadReplay  := s0_sel_src.ld_rep
750  s0_out.isFastPath    := s0_sel_src.l2l_fwd
751  s0_out.mshrid        := s0_sel_src.mshrid
752  s0_out.isvec           := s0_sel_src.isvec
753  s0_out.is128bit        := s0_sel_src.is128bit
754  s0_out.isFrmMisAlignBuf    := s0_sel_src.frm_mabuf
755  s0_out.uop_unit_stride_fof := s0_sel_src.uop_unit_stride_fof
756  s0_out.paddr         := io.prefetch_req.bits.paddr // only for prefetch
757  // s0_out.rob_idx_valid   := s0_rob_idx_valid
758  // s0_out.inner_idx       := s0_inner_idx
759  // s0_out.rob_idx         := s0_rob_idx
760  s0_out.reg_offset      := s0_sel_src.reg_offset
761  // s0_out.offset          := s0_offset
762  s0_out.vecActive             := s0_sel_src.vecActive
763  s0_out.usSecondInv    := s0_sel_src.usSecondInv
764  s0_out.is_first_ele   := s0_sel_src.is_first_ele
765  s0_out.elemIdx        := s0_sel_src.elemIdx
766  s0_out.elemIdxInsideVd := s0_sel_src.elemIdxInsideVd
767  s0_out.alignedType    := s0_sel_src.alignedType
768  s0_out.mbIndex        := s0_sel_src.mbIndex
769  // s0_out.flowPtr         := s0_sel_src.flowPtr
770  s0_out.uop.exceptionVec(loadAddrMisaligned) := !s0_addr_aligned && s0_sel_src.vecActive
771  s0_out.forward_tlDchannel := s0_super_ld_rep_select
772  when(io.tlb.req.valid && s0_sel_src.isFirstIssue) {
773    s0_out.uop.debugInfo.tlbFirstReqTime := GTimer()
774  }.otherwise{
775    s0_out.uop.debugInfo.tlbFirstReqTime := s0_sel_src.uop.debugInfo.tlbFirstReqTime
776  }
777  s0_out.schedIndex     := s0_sel_src.sched_idx
778
779  // load fast replay
780  io.fast_rep_in.ready := (s0_can_go && io.dcache.req.ready && s0_ld_fast_rep_ready)
781
782  // mmio
783  io.lsq.uncache.ready := s0_mmio_fire
784
785  // load flow source ready
786  // a cache missed load (super replay) has the highest priority among replay-queue flows
787  // always accept cache missed load flows from the load replay queue
788  io.replay.ready := (s0_can_go && io.dcache.req.ready && (s0_ld_rep_ready && !s0_rep_stall || s0_super_ld_rep_select))
789
790  // accept load flow from rs when:
791  // 1) there is no lsq-replayed load
792  // 2) there is no fast replayed load
793  // 3) there is no high confidence prefetch request
794  io.vecldin.ready := s0_can_go && io.dcache.req.ready && s0_vec_iss_ready
795  io.ldin.ready := s0_can_go && io.dcache.req.ready && s0_int_iss_ready
796  io.misalign_ldin.ready := s0_can_go && io.dcache.req.ready && s0_misalign_ld_ready
797
798  // for hw prefetch load flow feedback, to be added later
799  // io.prefetch_in.ready := s0_hw_prf_select
800
801  // dcache replacement extra info
802  // TODO: should prefetch load update replacement?
803  io.dcache.replacementUpdated := Mux(s0_ld_rep_select || s0_super_ld_rep_select, io.replay.bits.replacementUpdated, false.B)
804
805  // load wakeup
806  // TODO: vector load wakeup?
807  val s0_wakeup_selector = Seq(
808    s0_super_ld_rep_valid,
809    s0_ld_fast_rep_valid,
810    s0_mmio_fire,
811    s0_ld_rep_valid,
812    s0_int_iss_valid
813  )
814  val s0_wakeup_format = Seq(
815    io.replay.bits.uop,
816    io.fast_rep_in.bits.uop,
817    io.lsq.uncache.bits.uop,
818    io.replay.bits.uop,
819    io.ldin.bits.uop,
820  )
821  val s0_wakeup_uop = ParallelPriorityMux(s0_wakeup_selector, s0_wakeup_format)
822  io.wakeup.valid := s0_fire && !s0_sel_src.isvec && !s0_sel_src.frm_mabuf &&
823                    (s0_super_ld_rep_valid || s0_ld_fast_rep_valid || s0_ld_rep_valid || ((s0_int_iss_valid && !s0_sel_src.prf) && !s0_vec_iss_valid && !s0_high_conf_prf_valid)) || s0_mmio_fire
824  io.wakeup.bits := s0_wakeup_uop
825
826  // prefetch.i(Zicbop)
827  io.ifetchPrefetch.valid := RegNext(s0_int_iss_select && s0_sel_src.prf_i)
828  io.ifetchPrefetch.bits.vaddr := RegEnable(s0_out.vaddr, 0.U, s0_int_iss_select && s0_sel_src.prf_i)
829
830  XSDebug(io.dcache.req.fire,
831    p"[DCACHE LOAD REQ] pc ${Hexadecimal(s0_sel_src.uop.pc)}, vaddr ${Hexadecimal(s0_dcache_vaddr)}\n"
832  )
833  XSDebug(s0_valid,
834    p"S0: pc ${Hexadecimal(s0_out.uop.pc)}, lId ${Hexadecimal(s0_out.uop.lqIdx.asUInt)}, " +
835    p"vaddr ${Hexadecimal(s0_out.vaddr)}, mask ${Hexadecimal(s0_out.mask)}\n")
836
837  // Pipeline
838  // --------------------------------------------------------------------------------
839  // stage 1
840  // --------------------------------------------------------------------------------
841  // TLB resp (send paddr to dcache)
842  val s1_valid      = RegInit(false.B)
843  val s1_in         = Wire(new LqWriteBundle)
844  val s1_out        = Wire(new LqWriteBundle)
845  val s1_kill       = Wire(Bool())
846  val s1_can_go     = s2_ready
847  val s1_fire       = s1_valid && !s1_kill && s1_can_go
848  val s1_vecActive        = RegEnable(s0_out.vecActive, true.B, s0_fire)
849
850  s1_ready := !s1_valid || s1_kill || s2_ready
851  when (s0_fire) { s1_valid := true.B }
852  .elsewhen (s1_fire) { s1_valid := false.B }
853  .elsewhen (s1_kill) { s1_valid := false.B }
854  s1_in   := RegEnable(s0_out, s0_fire)
855
856  val s1_fast_rep_dly_kill = RegEnable(io.fast_rep_in.bits.lateKill, io.fast_rep_in.valid) && s1_in.isFastReplay
857  val s1_fast_rep_dly_err =  RegEnable(io.fast_rep_in.bits.delayedLoadError, io.fast_rep_in.valid) && s1_in.isFastReplay
858  val s1_l2l_fwd_dly_err  = RegEnable(io.l2l_fwd_in.dly_ld_err, io.l2l_fwd_in.valid) && s1_in.isFastPath
859  val s1_dly_err          = s1_fast_rep_dly_err || s1_l2l_fwd_dly_err
860  val s1_vaddr_hi         = Wire(UInt())
861  val s1_vaddr_lo         = Wire(UInt())
862  val s1_vaddr            = Wire(UInt())
863  val s1_paddr_dup_lsu    = Wire(UInt())
864  val s1_gpaddr_dup_lsu   = Wire(UInt())
865  val s1_paddr_dup_dcache = Wire(UInt())
866  val s1_exception        = ExceptionNO.selectByFu(s1_out.uop.exceptionVec, LduCfg).asUInt.orR   // af & pf exception were modified below.
867  val s1_tlb_miss         = io.tlb.resp.bits.miss && io.tlb.resp.valid && s1_valid
868  val s1_pbmt             = Mux(io.tlb.resp.valid, io.tlb.resp.bits.pbmt(0), 0.U(2.W))
869  val s1_prf              = s1_in.isPrefetch
870  val s1_hw_prf           = s1_in.isHWPrefetch
871  val s1_sw_prf           = s1_prf && !s1_hw_prf
872  val s1_tlb_memidx       = io.tlb.resp.bits.memidx
873
874  s1_vaddr_hi         := s1_in.vaddr(VAddrBits - 1, 6)
875  s1_vaddr_lo         := s1_in.vaddr(5, 0)
876  s1_vaddr            := Cat(s1_vaddr_hi, s1_vaddr_lo)
877  s1_paddr_dup_lsu    := Mux(s1_hw_prf, s1_in.paddr, io.tlb.resp.bits.paddr(0))
878  s1_paddr_dup_dcache := Mux(s1_hw_prf, s1_in.paddr, io.tlb.resp.bits.paddr(1))
879  s1_gpaddr_dup_lsu   := Mux(s1_hw_prf, s1_in.paddr, io.tlb.resp.bits.gpaddr(0))
880
881  when (s1_tlb_memidx.is_ld && io.tlb.resp.valid && !s1_tlb_miss && s1_tlb_memidx.idx === s1_in.uop.lqIdx.value) {
882    // printf("load idx = %d\n", s1_tlb_memidx.idx)
883    s1_out.uop.debugInfo.tlbRespTime := GTimer()
884  }
885
886  io.tlb.req_kill   := s1_kill || s1_dly_err
887  io.tlb.req.bits.pmp_addr := s1_in.paddr
888  io.tlb.resp.ready := true.B
889
890  io.dcache.s1_paddr_dup_lsu    <> s1_paddr_dup_lsu
891  io.dcache.s1_paddr_dup_dcache <> s1_paddr_dup_dcache
892  io.dcache.s1_kill             := s1_kill || s1_dly_err || s1_tlb_miss || s1_exception
893
894  // store to load forwarding
895  io.sbuffer.valid := s1_valid && !(s1_exception || s1_tlb_miss || s1_kill || s1_dly_err || s1_prf)
896  io.sbuffer.vaddr := s1_vaddr
897  io.sbuffer.paddr := s1_paddr_dup_lsu
898  io.sbuffer.uop   := s1_in.uop
899  io.sbuffer.sqIdx := s1_in.uop.sqIdx
900  io.sbuffer.mask  := s1_in.mask
901  io.sbuffer.pc    := s1_in.uop.pc // FIXME: remove it
902
903  io.lsq.forward.valid     := s1_valid && !(s1_exception || s1_tlb_miss || s1_kill || s1_dly_err || s1_prf)
904  io.lsq.forward.vaddr     := s1_vaddr
905  io.lsq.forward.paddr     := s1_paddr_dup_lsu
906  io.lsq.forward.uop       := s1_in.uop
907  io.lsq.forward.sqIdx     := s1_in.uop.sqIdx
908  io.lsq.forward.sqIdxMask := 0.U
909  io.lsq.forward.mask      := s1_in.mask
910  io.lsq.forward.pc        := s1_in.uop.pc // FIXME: remove it
911
912  // st-ld violation query
913  // if the store access is 128 bits wide (or this is a 128-bit vector load), match the paddr at 128-bit granularity
914  private val s1_isMatch128 = io.stld_nuke_query.map(x => (x.bits.matchLine || (s1_in.isvec && s1_in.is128bit)))
915  val s1_nuke_paddr_match = VecInit((0 until StorePipelineWidth).zip(s1_isMatch128).map{case (w, s) => {Mux(s,
916    s1_paddr_dup_lsu(PAddrBits-1, 4) === io.stld_nuke_query(w).bits.paddr(PAddrBits-1, 4),
917    s1_paddr_dup_lsu(PAddrBits-1, 3) === io.stld_nuke_query(w).bits.paddr(PAddrBits-1, 3))}})
918  val s1_nuke = VecInit((0 until StorePipelineWidth).map(w => {
919                       io.stld_nuke_query(w).valid && // query valid
920                       isAfter(s1_in.uop.robIdx, io.stld_nuke_query(w).bits.robIdx) && // older store
921                       s1_nuke_paddr_match(w) && // paddr match
922                       (s1_in.mask & io.stld_nuke_query(w).bits.mask).orR // data mask contain
923                      })).asUInt.orR && !s1_tlb_miss
924
925  s1_out                   := s1_in
926  s1_out.vaddr             := s1_vaddr
927  s1_out.paddr             := s1_paddr_dup_lsu
928  s1_out.gpaddr            := s1_gpaddr_dup_lsu
929  s1_out.tlbMiss           := s1_tlb_miss
930  s1_out.ptwBack           := io.tlb.resp.bits.ptwBack
931  s1_out.rep_info.debug    := s1_in.uop.debugInfo
932  s1_out.rep_info.nuke     := s1_nuke && !s1_sw_prf
933  s1_out.delayedLoadError  := s1_dly_err
934
935  when (!s1_dly_err) {
936    // current ori test will cause the case of ldest == 0, below will be modified in the future.
937    // af & pf exception were modified
938    s1_out.uop.exceptionVec(loadPageFault)   := io.tlb.resp.bits.excp(0).pf.ld && s1_vecActive && !s1_tlb_miss
939    s1_out.uop.exceptionVec(loadGuestPageFault)   := io.tlb.resp.bits.excp(0).gpf.ld && !s1_tlb_miss
940    s1_out.uop.exceptionVec(loadAccessFault) := io.tlb.resp.bits.excp(0).af.ld && s1_vecActive && !s1_tlb_miss
941  } .otherwise {
942    s1_out.uop.exceptionVec(loadPageFault)      := false.B
943    s1_out.uop.exceptionVec(loadGuestPageFault) := false.B
944    s1_out.uop.exceptionVec(loadAddrMisaligned) := false.B
945    s1_out.uop.exceptionVec(loadAccessFault)    := s1_dly_err && s1_vecActive
946  }
947
948  // pointer chasing
949  val s1_try_ptr_chasing       = GatedValidRegNext(s0_do_try_ptr_chasing, false.B)
950  val s1_ptr_chasing_vaddr     = RegEnable(s0_ptr_chasing_vaddr, s0_do_try_ptr_chasing)
951  val s1_fu_op_type_not_ld     = WireInit(false.B)
952  val s1_not_fast_match        = WireInit(false.B)
953  val s1_addr_mismatch         = WireInit(false.B)
954  val s1_addr_misaligned       = WireInit(false.B)
955  val s1_fast_mismatch         = WireInit(false.B)
956  val s1_ptr_chasing_canceled  = WireInit(false.B)
957  val s1_cancel_ptr_chasing    = WireInit(false.B)
958
959  val s1_redirect_reg = Wire(Valid(new Redirect))
960  s1_redirect_reg.bits := RegEnable(io.redirect.bits, io.redirect.valid)
961  s1_redirect_reg.valid := GatedValidRegNext(io.redirect.valid)
962
963  s1_kill := s1_fast_rep_dly_kill ||
964             s1_cancel_ptr_chasing ||
965             s1_in.uop.robIdx.needFlush(io.redirect) ||
966            (s1_in.uop.robIdx.needFlush(s1_redirect_reg) && !GatedValidRegNext(s0_try_ptr_chasing)) ||
967             RegEnable(s0_kill, false.B, io.ldin.valid || io.vecldin.valid || io.replay.valid || io.l2l_fwd_in.valid || io.fast_rep_in.valid || io.misalign_ldin.valid)
968
969  if (EnableLoadToLoadForward) {
970    // Sometimes, we need to cancel the load-load forwarding.
971    // These can be put at S0 if timing is bad at S1.
972    // Case 0: CACHE_SET(base + offset) != CACHE_SET(base) (lowest 6-bit addition has an overflow)
973    s1_addr_mismatch     := s1_ptr_chasing_vaddr(6) ||
974                             RegEnable(io.ld_fast_imm(11, 6).orR, s0_do_try_ptr_chasing)
975    // Case 1: the address is not 64-bit aligned or the fuOpType is not LD
976    s1_addr_misaligned := s1_ptr_chasing_vaddr(2, 0).orR
977    s1_fu_op_type_not_ld := io.ldin.bits.uop.fuOpType =/= LSUOpType.ld
978    // Case 2: this load-load uop is cancelled
979    s1_ptr_chasing_canceled := !io.ldin.valid
980    // Case 3: fast mismatch
981    s1_fast_mismatch := RegEnable(!io.ld_fast_match, s0_do_try_ptr_chasing)
982
983    when (s1_try_ptr_chasing) {
984      s1_cancel_ptr_chasing := s1_addr_mismatch ||
985                               s1_addr_misaligned ||
986                               s1_fu_op_type_not_ld ||
987                               s1_ptr_chasing_canceled ||
988                               s1_fast_mismatch
989
990      s1_in.uop           := io.ldin.bits.uop
991      s1_in.isFirstIssue  := io.ldin.bits.isFirstIssue
992      s1_vaddr_lo         := s1_ptr_chasing_vaddr(5, 0)
993      s1_paddr_dup_lsu    := Cat(io.tlb.resp.bits.paddr(0)(PAddrBits - 1, 6), s1_vaddr_lo)
994      s1_paddr_dup_dcache := Cat(io.tlb.resp.bits.paddr(0)(PAddrBits - 1, 6), s1_vaddr_lo)
995
996      // record tlb time when getting the data to ensure the correctness of the latency calculation (although it should not be recorded here, because pointer chasing does not use the tlb)
997      s1_in.uop.debugInfo.tlbFirstReqTime := GTimer()
998      s1_in.uop.debugInfo.tlbRespTime     := GTimer()
999    }
1000    when (!s1_cancel_ptr_chasing) {
1001      s0_ptr_chasing_canceled := s1_try_ptr_chasing && !io.replay.fire && !io.fast_rep_in.fire && !(s0_high_conf_prf_valid && io.canAcceptHighConfPrefetch) && !io.misalign_ldin.fire
1002      when (s1_try_ptr_chasing) {
1003        io.ldin.ready := true.B
1004      }
1005    }
1006  }
1007
1008  // pre-calculate sqIdx mask in s0, then send it to lsq in s1 for forwarding
1009  val s1_sqIdx_mask = RegEnable(UIntToMask(s0_out.uop.sqIdx.value, StoreQueueSize), s0_fire)
1010  // to enable load-load, sqIdxMask must be calculated based on ldin.uop
1011  // If the timing here is not OK, load-load forwarding has to be disabled.
1012  // Or we calculate sqIdxMask at RS??
1013  io.lsq.forward.sqIdxMask := s1_sqIdx_mask
1014  if (EnableLoadToLoadForward) {
1015    when (s1_try_ptr_chasing) {
1016      io.lsq.forward.sqIdxMask := UIntToMask(io.ldin.bits.uop.sqIdx.value, StoreQueueSize)
1017    }
1018  }
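  // The sqIdxMask sent to the LSQ marks which store queue entries are older than this
  // load, so store-to-load forwarding only considers those stores. It is precomputed
  // in s0 (registered above) for timing. Hypothetical example, assuming
  // UIntToMask(ptr, size) sets the bits below ptr:
  //   UIntToMask(3.U, 8) == "b00000111".U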
1019
1020  io.forward_mshr.valid  := s1_valid && s1_out.forward_tlDchannel
1021  io.forward_mshr.mshrid := s1_out.mshrid
1022  io.forward_mshr.paddr  := s1_out.paddr
1023
1024  XSDebug(s1_valid,
1025    p"S1: pc ${Hexadecimal(s1_out.uop.pc)}, lId ${Hexadecimal(s1_out.uop.lqIdx.asUInt)}, tlb_miss ${io.tlb.resp.bits.miss}, " +
1026    p"paddr ${Hexadecimal(s1_out.paddr)}, mmio ${s1_out.mmio}\n")
1027
1028  // Pipeline
1029  // --------------------------------------------------------------------------------
1030  // stage 2
1031  // --------------------------------------------------------------------------------
1032  // s2: DCache resp
1033  val s2_valid  = RegInit(false.B)
1034  val s2_in     = Wire(new LqWriteBundle)
1035  val s2_out    = Wire(new LqWriteBundle)
1036  val s2_kill   = Wire(Bool())
1037  val s2_can_go = s3_ready
1038  val s2_fire   = s2_valid && !s2_kill && s2_can_go
1039  val s2_vecActive = RegEnable(s1_out.vecActive, true.B, s1_fire)
1040  val s2_isvec  = RegEnable(s1_out.isvec, false.B, s1_fire)
1041  val s2_data_select  = genRdataOH(s2_out.uop)
1042  val s2_data_select_by_offset = genDataSelectByOffset(s2_out.paddr(3, 0))
1043  val s2_frm_mabuf = s2_in.isFrmMisAlignBuf
1044  val s2_pbmt = RegEnable(s1_pbmt, s1_fire)
1045
1046  s2_kill := s2_in.uop.robIdx.needFlush(io.redirect)
1047  s2_ready := !s2_valid || s2_kill || s3_ready
1048  when (s1_fire) { s2_valid := true.B }
1049  .elsewhen (s2_fire) { s2_valid := false.B }
1050  .elsewhen (s2_kill) { s2_valid := false.B }
1051  s2_in := RegEnable(s1_out, s1_fire)
1052
1053  val s2_pmp = WireInit(io.pmp)
1054
1055  val s2_prf    = s2_in.isPrefetch
1056  val s2_hw_prf = s2_in.isHWPrefetch
1057
1058  // exceptions that may cause the load addr to be invalid / illegal
1059  // if such an exception happens, that inst and its exception info
1060  // will be force written back to the rob
1061  val s2_exception_vec = WireInit(s2_in.uop.exceptionVec)
1062  when (!s2_in.delayedLoadError) {
1063    s2_exception_vec(loadAccessFault) := (s2_in.uop.exceptionVec(loadAccessFault) ||
1064                                         s2_pmp.ld ||
1065                                         s2_isvec && s2_pmp.mmio && !s2_prf && !s2_in.tlbMiss ||
1066                                         (io.dcache.resp.bits.tag_error && GatedValidRegNext(io.csrCtrl.cache_error_enable))
1067                                         ) && s2_vecActive
1068  }
1069
1070  // soft prefetch will not trigger any exception (but ecc error interrupt may
1071  // be triggered)
1072  when (!s2_in.delayedLoadError && (s2_prf || s2_in.tlbMiss)) {
1073    s2_exception_vec := 0.U.asTypeOf(s2_exception_vec.cloneType)
1074  }
1075  val s2_exception = ExceptionNO.selectByFu(s2_exception_vec, LduCfg).asUInt.orR && s2_vecActive
1076
1077  val (s2_fwd_frm_d_chan, s2_fwd_data_frm_d_chan) = io.tl_d_channel.forward(s1_valid && s1_out.forward_tlDchannel, s1_out.mshrid, s1_out.paddr)
1078  val (s2_fwd_data_valid, s2_fwd_frm_mshr, s2_fwd_data_frm_mshr) = io.forward_mshr.forward()
1079  val s2_fwd_frm_d_chan_or_mshr = s2_fwd_data_valid && (s2_fwd_frm_d_chan || s2_fwd_frm_mshr)
1080
1081  // writeback access fault caused by ecc error / bus error
1082  // * ecc data error is slow to generate, so we will not use it until load stage 3
1083  // * in load stage 3, an extra signal io.s3_dly_ld_err will be used to report it
1084  val s2_actually_mmio = s2_pmp.mmio || Pbmt.isUncache(s2_pbmt)
1085  val s2_mmio          = !s2_prf &&
1086                          s2_actually_mmio &&
1087                         !s2_exception &&
1088                         !s2_in.tlbMiss
1089
1090  val s2_full_fwd      = Wire(Bool())
1091  val s2_mem_amb       = s2_in.uop.storeSetHit &&
1092                         io.lsq.forward.addrInvalid
1093
1094  val s2_tlb_miss      = s2_in.tlbMiss
1095  val s2_fwd_fail      = io.lsq.forward.dataInvalid
1096  val s2_dcache_miss   = io.dcache.resp.bits.miss &&
1097                         !s2_fwd_frm_d_chan_or_mshr &&
1098                         !s2_full_fwd
1099
1100  val s2_mq_nack       = io.dcache.s2_mq_nack &&
1101                         !s2_fwd_frm_d_chan_or_mshr &&
1102                         !s2_full_fwd
1103
1104  val s2_bank_conflict = io.dcache.s2_bank_conflict &&
1105                         !s2_fwd_frm_d_chan_or_mshr &&
1106                         !s2_full_fwd
1107
1108  val s2_wpu_pred_fail = io.dcache.s2_wpu_pred_fail &&
1109                        !s2_fwd_frm_d_chan_or_mshr &&
1110                        !s2_full_fwd
1111
1112  val s2_rar_nack      = io.lsq.ldld_nuke_query.req.valid &&
1113                         !io.lsq.ldld_nuke_query.req.ready
1114
1115  val s2_raw_nack      = io.lsq.stld_nuke_query.req.valid &&
1116                         !io.lsq.stld_nuke_query.req.ready
1117  // st-ld violation query
1118  //  nuke (need fast recovery) is valid when
1119  //  1. the fast recovery query request is valid.
1120  //  2. the load instruction is younger than the requestor (store instruction).
1121  //  3. the physical addresses match.
1122  //  4. the data masks overlap.
1123  private val s2_isMatch128 = io.stld_nuke_query.map(x => (x.bits.matchLine || (s2_in.isvec && s2_in.is128bit)))
1124  val s2_nuke_paddr_match = VecInit((0 until StorePipelineWidth).zip(s2_isMatch128).map{case (w, s) => {Mux(s,
1125    s2_in.paddr(PAddrBits-1, 4) === io.stld_nuke_query(w).bits.paddr(PAddrBits-1, 4),
1126    s2_in.paddr(PAddrBits-1, 3) === io.stld_nuke_query(w).bits.paddr(PAddrBits-1, 3))}})
1127  val s2_nuke          = VecInit((0 until StorePipelineWidth).map(w => {
1128                          io.stld_nuke_query(w).valid && // query valid
1129                          isAfter(s2_in.uop.robIdx, io.stld_nuke_query(w).bits.robIdx) && // older store
1130                          s2_nuke_paddr_match(w) && // paddr match
1131                          (s2_in.mask & io.stld_nuke_query(w).bits.mask).orR // data mask contain
1132                        })).asUInt.orR && !s2_tlb_miss || s2_in.rep_info.nuke
1133
1134  val s2_cache_handled   = io.dcache.resp.bits.handled
1135  val s2_cache_tag_error = GatedValidRegNext(io.csrCtrl.cache_error_enable) &&
1136                           io.dcache.resp.bits.tag_error
1137
1138  val s2_troublem        = !s2_exception &&
1139                           !s2_mmio &&
1140                           !s2_prf &&
1141                           !s2_in.delayedLoadError
1142
1143  io.dcache.resp.ready  := true.B
1144  val s2_dcache_should_resp = !(s2_in.tlbMiss || s2_exception || s2_in.delayedLoadError || s2_mmio || s2_prf)
1145  assert(!(s2_valid && (s2_dcache_should_resp && !io.dcache.resp.valid)), "DCache response got lost")
1146
1147  // fast replay: conditions under which this load can take the fast replay path
1148  val s2_dcache_fast_rep = (s2_mq_nack || !s2_dcache_miss && (s2_bank_conflict || s2_wpu_pred_fail))
1149  val s2_nuke_fast_rep   = !s2_mq_nack &&
1150                           !s2_dcache_miss &&
1151                           !s2_bank_conflict &&
1152                           !s2_wpu_pred_fail &&
1153                           !s2_rar_nack &&
1154                           !s2_raw_nack &&
1155                           s2_nuke
1156
1157  val s2_fast_rep = !s2_mem_amb &&
1158                    !s2_tlb_miss &&
1159                    !s2_fwd_fail &&
1160                    (s2_dcache_fast_rep || s2_nuke_fast_rep) &&
1161                    s2_troublem
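  // Fast replay bypasses the load replay queue: when the only blocking conditions are
  // ones that clear quickly (miss-queue nack, bank conflict, way-predictor mispredict,
  // or an st-ld nuke with nothing else pending), the load is sent out again through
  // io.fast_rep_out and re-enters a load pipeline via io.fast_rep_in instead of
  // waiting in the LSQ replay queue.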
1162
1163  // need to allocate a new entry (for the ld-ld / st-ld violation queries below)
1164  val s2_can_query = !s2_mem_amb &&
1165                     !s2_tlb_miss &&
1166                     !s2_fwd_fail &&
1167                     !s2_frm_mabuf &&
1168                     s2_troublem
1169
1170  val s2_data_fwded = s2_dcache_miss && (s2_full_fwd || s2_cache_tag_error)
1171
1172  // ld-ld violation query request
1173  io.lsq.ldld_nuke_query.req.valid           := s2_valid && s2_can_query
1174  io.lsq.ldld_nuke_query.req.bits.uop        := s2_in.uop
1175  io.lsq.ldld_nuke_query.req.bits.mask       := s2_in.mask
1176  io.lsq.ldld_nuke_query.req.bits.paddr      := s2_in.paddr
1177  io.lsq.ldld_nuke_query.req.bits.data_valid := Mux(s2_full_fwd || s2_fwd_data_valid, true.B, !s2_dcache_miss)
1178
1179  // st-ld violation query request
1180  io.lsq.stld_nuke_query.req.valid           := s2_valid && s2_can_query
1181  io.lsq.stld_nuke_query.req.bits.uop        := s2_in.uop
1182  io.lsq.stld_nuke_query.req.bits.mask       := s2_in.mask
1183  io.lsq.stld_nuke_query.req.bits.paddr      := s2_in.paddr
1184  io.lsq.stld_nuke_query.req.bits.data_valid := Mux(s2_full_fwd || s2_fwd_data_valid, true.B, !s2_dcache_miss)
1185
1186  // merge forward result
1187  // lsq has higher priority than sbuffer
1188  val s2_fwd_mask = Wire(Vec((VLEN/8), Bool()))
1189  val s2_fwd_data = Wire(Vec((VLEN/8), UInt(8.W)))
1190  s2_full_fwd := ((~s2_fwd_mask.asUInt).asUInt & s2_in.mask) === 0.U && !io.lsq.forward.dataInvalid
1191  // generate VLEN/8 Muxes
1192  for (i <- 0 until VLEN / 8) {
1193    s2_fwd_mask(i) := io.lsq.forward.forwardMask(i) || io.sbuffer.forwardMask(i)
1194    s2_fwd_data(i) := Mux(io.lsq.forward.forwardMask(i), io.lsq.forward.forwardData(i), io.sbuffer.forwardData(i))
1195  }
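  // Per-byte forwarding merge: for each of the VLEN/8 bytes, data forwarded from the
  // LSQ (older in-flight stores) takes priority over data from the sbuffer (committed
  // stores waiting to be written back). s2_full_fwd is true only when every byte
  // requested by s2_in.mask is covered by some forwarded byte, e.g. mask "b00001111"
  // needs forwardMask(3, 0) all set and no dataInvalid from the LSQ.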
1196
1197  XSDebug(s2_fire, "[FWD LOAD RESP] pc %x fwd %x(%b) + %x(%b)\n",
1198    s2_in.uop.pc,
1199    io.lsq.forward.forwardData.asUInt, io.lsq.forward.forwardMask.asUInt,
1200    s2_in.forwardData.asUInt, s2_in.forwardMask.asUInt
1201  )
1202
1203  //
1204  s2_out                     := s2_in
1205  s2_out.data                := 0.U // data will be generated in load s3
1206  s2_out.uop.fpWen           := s2_in.uop.fpWen && !s2_exception
1207  s2_out.mmio                := s2_mmio
1208  s2_out.uop.flushPipe       := false.B
1209  s2_out.uop.exceptionVec    := s2_exception_vec
1210  s2_out.forwardMask         := s2_fwd_mask
1211  s2_out.forwardData         := s2_fwd_data
1212  s2_out.handledByMSHR       := s2_cache_handled
1213  s2_out.miss                := s2_dcache_miss && s2_troublem
1214  s2_out.feedbacked          := io.feedback_fast.valid
1215
1216  // Generate replay signal caused by:
1217  // * st-ld violation check
1218  // * tlb miss
1219  // * dcache replay
1220  // * forward data invalid
1221  // * dcache miss
1222  s2_out.rep_info.mem_amb         := s2_mem_amb && s2_troublem
1223  s2_out.rep_info.tlb_miss        := s2_tlb_miss && s2_troublem
1224  s2_out.rep_info.fwd_fail        := s2_fwd_fail && s2_troublem
1225  s2_out.rep_info.dcache_rep      := s2_mq_nack && s2_troublem
1226  s2_out.rep_info.dcache_miss     := s2_dcache_miss && s2_troublem
1227  s2_out.rep_info.bank_conflict   := s2_bank_conflict && s2_troublem
1228  s2_out.rep_info.wpu_fail        := s2_wpu_pred_fail && s2_troublem
1229  s2_out.rep_info.rar_nack        := s2_rar_nack && s2_troublem
1230  s2_out.rep_info.raw_nack        := s2_raw_nack && s2_troublem
1231  s2_out.rep_info.nuke            := s2_nuke && s2_troublem
1232  s2_out.rep_info.full_fwd        := s2_data_fwded
1233  s2_out.rep_info.data_inv_sq_idx := io.lsq.forward.dataInvalidSqIdx
1234  s2_out.rep_info.addr_inv_sq_idx := io.lsq.forward.addrInvalidSqIdx
1235  s2_out.rep_info.rep_carry       := io.dcache.resp.bits.replayCarry
1236  s2_out.rep_info.mshr_id         := io.dcache.resp.bits.mshr_id
1237  s2_out.rep_info.last_beat       := s2_in.paddr(log2Up(refillBytes))
1238  s2_out.rep_info.debug           := s2_in.uop.debugInfo
1239  s2_out.rep_info.tlb_id          := io.tlb_hint.id
1240  s2_out.rep_info.tlb_full        := io.tlb_hint.full
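  // NOTE: every possible replay cause is recorded here (each gated by s2_troublem); the replay
  // queue only ever sees a single cause, selected in s3 by PriorityEncoderOH over this vector
  // (the lowest-indexed cause in LoadReplayCauses wins).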
1241
1242  // if forwarding fails, replay this inst from fetch
1243  val debug_fwd_fail_rep = s2_fwd_fail && !s2_troublem && !s2_in.tlbMiss
1244  // if an ld-ld violation is detected, replay this inst from fetch
1245  val debug_ldld_nuke_rep = false.B // s2_ldld_violation && !s2_mmio && !s2_is_prefetch && !s2_in.tlbMiss
1246
1247  // to be removed
1248  io.feedback_fast.valid                 := false.B
1249  io.feedback_fast.bits.hit              := false.B
1250  io.feedback_fast.bits.flushState       := s2_in.ptwBack
1251  io.feedback_fast.bits.robIdx           := s2_in.uop.robIdx
1252  io.feedback_fast.bits.sqIdx            := s2_in.uop.sqIdx
1253  io.feedback_fast.bits.lqIdx            := s2_in.uop.lqIdx
1254  io.feedback_fast.bits.sourceType       := RSFeedbackType.lrqFull
1255  io.feedback_fast.bits.dataInvalidSqIdx := DontCare
1256
1257  io.ldCancel.ld1Cancel := false.B
1258
1259  // fast wakeup
1260  val s1_fast_uop_valid = WireInit(false.B)
1261  s1_fast_uop_valid :=
1262    !io.dcache.s1_disable_fast_wakeup &&
1263    s1_valid &&
1264    !s1_kill &&
1265    !io.tlb.resp.bits.miss &&
1266    !io.lsq.forward.dataInvalidFast
1267  io.fast_uop.valid := GatedValidRegNext(s1_fast_uop_valid) && (s2_valid && !s2_out.rep_info.need_rep && !s2_mmio && !(s2_prf && !s2_hw_prf)) && !s2_isvec && !s2_frm_mabuf
1268  io.fast_uop.bits := RegEnable(s1_out.uop, s1_fast_uop_valid)
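  // NOTE: the fast wakeup speculatively wakes consumers of a load that looked clean in s1
  // (not killed, no TLB miss, no fast data-invalid, not disabled by the dcache); it is only
  // asserted if s2 then confirms no replay, no MMIO and no software prefetch, and the load is
  // a scalar one that did not come from the misalign buffer.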
1269
1270  // report at s2 whether this load was issued by pointer chasing
1271  io.s2_ptr_chasing                    := RegEnable(s1_try_ptr_chasing && !s1_cancel_ptr_chasing, false.B, s1_fire)
1272
1273  // prefetch train info is registered (RegNext) for better timing
1274  // ** as a result, prefetch train is valid at load s3 **
1275  val s2_prefetch_train_valid = WireInit(false.B)
1276  s2_prefetch_train_valid              := s2_valid && !s2_actually_mmio && (!s2_in.tlbMiss || s2_hw_prf)
1277  io.prefetch_train.valid              := GatedValidRegNext(s2_prefetch_train_valid)
1278  io.prefetch_train.bits.fromLsPipelineBundle(s2_in, latch = true, enable = s2_prefetch_train_valid)
1279  io.prefetch_train.bits.miss          := RegEnable(io.dcache.resp.bits.miss, s2_prefetch_train_valid) // TODO: use trace with bank conflict?
1280  io.prefetch_train.bits.meta_prefetch := RegEnable(io.dcache.resp.bits.meta_prefetch, s2_prefetch_train_valid)
1281  io.prefetch_train.bits.meta_access   := RegEnable(io.dcache.resp.bits.meta_access, s2_prefetch_train_valid)
1282  io.s1_prefetch_spec := s1_fire
1283  io.s2_prefetch_spec := s2_prefetch_train_valid
1284
1285  val s2_prefetch_train_l1_valid = WireInit(false.B)
1286  s2_prefetch_train_l1_valid              := s2_valid && !s2_actually_mmio
1287  io.prefetch_train_l1.valid              := GatedValidRegNext(s2_prefetch_train_l1_valid)
1288  io.prefetch_train_l1.bits.fromLsPipelineBundle(s2_in, latch = true, enable = s2_prefetch_train_l1_valid)
1289  io.prefetch_train_l1.bits.miss          := RegEnable(io.dcache.resp.bits.miss, s2_prefetch_train_l1_valid)
1290  io.prefetch_train_l1.bits.meta_prefetch := RegEnable(io.dcache.resp.bits.meta_prefetch, s2_prefetch_train_l1_valid)
1291  io.prefetch_train_l1.bits.meta_access   := RegEnable(io.dcache.resp.bits.meta_access, s2_prefetch_train_l1_valid)
1292  if (env.FPGAPlatform){
1293    io.dcache.s0_pc := DontCare
1294    io.dcache.s1_pc := DontCare
1295    io.dcache.s2_pc := DontCare
1296  }else{
1297    io.dcache.s0_pc := s0_out.uop.pc
1298    io.dcache.s1_pc := s1_out.uop.pc
1299    io.dcache.s2_pc := s2_out.uop.pc
1300  }
1301  io.dcache.s2_kill := s2_pmp.ld || s2_actually_mmio || s2_kill
1302
1303  val s1_ld_left_fire = s1_valid && !s1_kill && s2_ready
1304  val s2_ld_valid_dup = RegInit(0.U(6.W))
1305  s2_ld_valid_dup := 0x0.U(6.W)
1306  when (s1_ld_left_fire && !s1_out.isHWPrefetch) { s2_ld_valid_dup := 0x3f.U(6.W) }
1307  when (s1_kill || s1_out.isHWPrefetch) { s2_ld_valid_dup := 0x0.U(6.W) }
1308  assert(RegNext((s2_valid === s2_ld_valid_dup(0)) || RegNext(s1_out.isHWPrefetch)))
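  // NOTE: s2_ld_valid_dup duplicates the s2 valid bit six times; the copies are registered
  // into s3_ld_valid_dup and drive ldin.bits.data_wen_dup, presumably to cut the fan-out of
  // the LSQ data write enables. The assertion checks the copies track s2_valid except for
  // hardware prefetches.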
1309
1310  // Pipeline
1311  // --------------------------------------------------------------------------------
1312  // stage 3
1313  // --------------------------------------------------------------------------------
1314  // writeback and update load queue
1315  val s3_valid        = GatedValidRegNext(s2_valid && !s2_out.isHWPrefetch && !s2_out.uop.robIdx.needFlush(io.redirect))
1316  val s3_in           = RegEnable(s2_out, s2_fire)
1317  val s3_out          = Wire(Valid(new MemExuOutput))
1318  val s3_dcache_rep   = RegEnable(s2_dcache_fast_rep && s2_troublem, false.B, s2_fire)
1319  val s3_ld_valid_dup = RegEnable(s2_ld_valid_dup, s2_fire)
1320  val s3_fast_rep     = Wire(Bool())
1321  val s3_troublem     = GatedValidRegNext(s2_troublem)
1322  val s3_kill         = s3_in.uop.robIdx.needFlush(io.redirect)
1323  val s3_vecout       = Wire(new OnlyVecExuOutput)
1324  val s3_vecActive    = RegEnable(s2_out.vecActive, true.B, s2_fire)
1325  val s3_isvec        = RegEnable(s2_out.isvec, false.B, s2_fire)
1326  val s3_vec_alignedType = RegEnable(s2_out.alignedType, s2_fire)
1327  val s3_vec_mBIndex     = RegEnable(s2_out.mbIndex, s2_fire)
1328  val s3_frm_mabuf       = s3_in.isFrmMisAlignBuf
1329  val s3_mmio         = Wire(Valid(new MemExuOutput))
1330  val s3_data_select  = RegEnable(s2_data_select, 0.U(s2_data_select.getWidth.W), s2_fire)
1331  val s3_data_select_by_offset = RegEnable(s2_data_select_by_offset, 0.U.asTypeOf(s2_data_select_by_offset), s2_fire)
1332  // TODO: Fix vector load merge buffer nack
1333  val s3_vec_mb_nack  = Wire(Bool())
1334  s3_vec_mb_nack     := false.B
1335  XSError(s3_valid && s3_vec_mb_nack, "Merge buffer should always accept vector loads!")
1336
1337  s3_ready := !s3_valid || s3_kill || io.ldout.ready
1338  s3_mmio.valid := RegNextN(io.lsq.uncache.fire, 3, Some(false.B))
1339  s3_mmio.bits  := RegNextN(io.lsq.uncache.bits, 3)
1340
1341  // forward last beat
1342  val (s3_fwd_frm_d_chan, s3_fwd_data_frm_d_chan) = io.tl_d_channel.forward(s2_valid && s2_out.forward_tlDchannel, s2_out.mshrid, s2_out.paddr)
1343  val s3_fwd_data_valid = RegEnable(s2_fwd_data_valid, false.B, s2_valid)
1344  val s3_fwd_frm_d_chan_valid = (s3_fwd_frm_d_chan && s3_fwd_data_valid && s3_in.handledByMSHR)
1345  val s3_fast_rep_canceled = io.replay.valid && io.replay.bits.forward_tlDchannel || io.misalign_ldin.valid || !io.dcache.req.ready
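  // NOTE: a pending fast replay is abandoned (s3_fast_rep_canceled) when the s0 slot is taken
  // by a replay-queue load forwarding from the TL D channel, a misaligned load is issuing, or
  // the dcache cannot accept a request; the load then falls back to the normal replay path
  // through io.lsq.ldin (see its valid below).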
1346
1347  // s3 load fast replay
1348  io.fast_rep_out.valid := s3_valid && s3_fast_rep && !s3_in.uop.robIdx.needFlush(io.redirect)
1349  io.fast_rep_out.bits := s3_in
1350
1351  io.lsq.ldin.valid := s3_valid && (!s3_fast_rep || s3_fast_rep_canceled) && !s3_in.feedbacked && !s3_frm_mabuf
1352  // TODO: check this --by hx
1353  // io.lsq.ldin.valid := s3_valid && (!s3_fast_rep || !io.fast_rep_out.ready) && !s3_in.feedbacked && !s3_in.lateKill
1354  io.lsq.ldin.bits := s3_in
1355  io.lsq.ldin.bits.miss := s3_in.miss && !s3_fwd_frm_d_chan_valid
1356
1357  // connect to misalignBuffer
1358  io.misalign_buf.valid := io.lsq.ldin.valid && io.csrCtrl.hd_misalign_ld_enable
1359  io.misalign_buf.bits  := s3_in
1360
1361  /* <------- DANGEROUS: Don't change sequence here ! -------> */
1362  io.lsq.ldin.bits.data_wen_dup := s3_ld_valid_dup.asBools
1363  io.lsq.ldin.bits.replacementUpdated := io.dcache.resp.bits.replacementUpdated
1364  io.lsq.ldin.bits.missDbUpdated := GatedValidRegNext(s2_fire && s2_in.hasROBEntry && !s2_in.tlbMiss && !s2_in.missDbUpdated)
1365
1366  val s3_dly_ld_err =
1367    if (EnableAccurateLoadError) {
1368      io.dcache.resp.bits.error_delayed && GatedValidRegNext(io.csrCtrl.cache_error_enable) && s3_troublem
1369    } else {
1370      WireInit(false.B)
1371    }
1372  io.s3_dly_ld_err := false.B // s3_dly_ld_err && s3_valid
1373  io.lsq.ldin.bits.dcacheRequireReplay  := s3_dcache_rep
1374  io.fast_rep_out.bits.delayedLoadError := s3_dly_ld_err
1375
1376  val s3_vp_match_fail = GatedValidRegNext(io.lsq.forward.matchInvalid || io.sbuffer.matchInvalid) && s3_troublem
1377  val s3_rep_frm_fetch = s3_vp_match_fail
1378  val s3_ldld_rep_inst =
1379      io.lsq.ldld_nuke_query.resp.valid &&
1380      io.lsq.ldld_nuke_query.resp.bits.rep_frm_fetch &&
1381      GatedValidRegNext(io.csrCtrl.ldld_vio_check_enable)
1382  val s3_flushPipe = s3_ldld_rep_inst
1383
1384  val s3_rep_info = WireInit(s3_in.rep_info)
1385  s3_rep_info.dcache_miss   := s3_in.rep_info.dcache_miss && !s3_fwd_frm_d_chan_valid
1386  val s3_sel_rep_cause = PriorityEncoderOH(s3_rep_info.cause.asUInt)
1387
1388  val s3_exception = ExceptionNO.selectByFu(s3_in.uop.exceptionVec, LduCfg).asUInt.orR && s3_vecActive
1389  val s3_mis_align = s3_valid && s3_in.uop.exceptionVec(loadAddrMisaligned) && io.csrCtrl.hd_misalign_ld_enable
1390  when (s3_exception || s3_dly_ld_err || s3_rep_frm_fetch) {
1391    io.lsq.ldin.bits.rep_info.cause := 0.U.asTypeOf(s3_rep_info.cause.cloneType)
1392  } .otherwise {
1393    io.lsq.ldin.bits.rep_info.cause := VecInit(s3_sel_rep_cause.asBools)
1394  }
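  // NOTE: on an exception, a delayed cache error, or a virtual/physical match failure
  // (replay from fetch), the replay cause is cleared so the LSQ will not schedule a replay;
  // otherwise only the single highest-priority cause is kept.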
1395
1396  // Int load, if hit, will be written back at s3
1397  s3_out.valid                := s3_valid && !io.lsq.ldin.bits.rep_info.need_rep && !s3_in.mmio
1398  s3_out.bits.uop             := s3_in.uop
1399  s3_out.bits.uop.exceptionVec(loadAccessFault) := (s3_dly_ld_err || s3_in.uop.exceptionVec(loadAccessFault)) && s3_vecActive
1400  s3_out.bits.uop.flushPipe   := false.B
1401  s3_out.bits.uop.replayInst  := s3_rep_frm_fetch || s3_flushPipe
1402  s3_out.bits.data            := s3_in.data
1403  s3_out.bits.debug.isMMIO    := s3_in.mmio
1404  s3_out.bits.debug.isPerfCnt := false.B
1405  s3_out.bits.debug.paddr     := s3_in.paddr
1406  s3_out.bits.debug.vaddr     := s3_in.vaddr
1407
1408  // Vector load, writeback to merge buffer
1409  // TODO: Add assertion in merge buffer, merge buffer must accept vec load writeback
1410  s3_vecout.isvec             := s3_isvec
1411  s3_vecout.vecdata           := 0.U // Data will be assigned later
1412  s3_vecout.mask              := s3_in.mask
1413  // s3_vecout.rob_idx_valid     := s3_in.rob_idx_valid
1414  // s3_vecout.inner_idx         := s3_in.inner_idx
1415  // s3_vecout.rob_idx           := s3_in.rob_idx
1416  // s3_vecout.offset            := s3_in.offset
1417  s3_vecout.reg_offset        := s3_in.reg_offset
1418  s3_vecout.vecActive         := s3_vecActive
1419  s3_vecout.is_first_ele      := s3_in.is_first_ele
1420  // s3_vecout.uopQueuePtr       := DontCare // uopQueuePtr is already saved in flow queue
1421  // s3_vecout.flowPtr           := s3_in.flowPtr
1422  s3_vecout.elemIdx           := s3_in.elemIdx // elemIdx is already saved in flow queue // TODO:
1423  s3_vecout.elemIdxInsideVd   := s3_in.elemIdxInsideVd
1424  val s3_usSecondInv          = s3_in.usSecondInv
1425
1426  io.rollback.valid := s3_valid && (s3_rep_frm_fetch || s3_flushPipe) && !s3_exception
1427  io.rollback.bits             := DontCare
1428  io.rollback.bits.isRVC       := s3_out.bits.uop.preDecodeInfo.isRVC
1429  io.rollback.bits.robIdx      := s3_out.bits.uop.robIdx
1430  io.rollback.bits.ftqIdx      := s3_out.bits.uop.ftqPtr
1431  io.rollback.bits.ftqOffset   := s3_out.bits.uop.ftqOffset
1432  io.rollback.bits.level       := Mux(s3_rep_frm_fetch, RedirectLevel.flush, RedirectLevel.flushAfter)
1433  io.rollback.bits.cfiUpdate.target := s3_out.bits.uop.pc
1434  io.rollback.bits.debug_runahead_checkpoint_id := s3_out.bits.uop.debugInfo.runahead_checkpoint_id
1435  /* <------- DANGEROUS: Don't change sequence here ! -------> */
1436
1437  io.lsq.ldin.bits.uop := s3_out.bits.uop
1438
1439  val s3_revoke = s3_exception || io.lsq.ldin.bits.rep_info.need_rep
1440  io.lsq.ldld_nuke_query.revoke := s3_revoke
1441  io.lsq.stld_nuke_query.revoke := s3_revoke
1442
1443  // feedback slow
1444  s3_fast_rep := GatedValidRegNext(s2_fast_rep)
1445
1446  val s3_fb_no_waiting = !s3_in.isLoadReplay &&
1447                        (!(s3_fast_rep && !s3_fast_rep_canceled)) &&
1448                        !s3_in.feedbacked
1449
1450  // feedback: a scalar load sends feedback to the RS;
1451  //           a vector load signals the VL Merge Buffer, which then sends feedback at uop granularity
1452  io.feedback_slow.valid                 := s3_valid && s3_fb_no_waiting && !s3_isvec && !s3_frm_mabuf
1453  io.feedback_slow.bits.hit              := !s3_rep_info.need_rep || io.lsq.ldin.ready
1454  io.feedback_slow.bits.flushState       := s3_in.ptwBack
1455  io.feedback_slow.bits.robIdx           := s3_in.uop.robIdx
1456  io.feedback_slow.bits.sqIdx            := s3_in.uop.sqIdx
1457  io.feedback_slow.bits.lqIdx            := s3_in.uop.lqIdx
1458  io.feedback_slow.bits.sourceType       := RSFeedbackType.lrqFull
1459  io.feedback_slow.bits.dataInvalidSqIdx := DontCare
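  // NOTE: "hit" here means the RS does not need to reissue the load: either no replay is
  // needed at all, or the replay queue accepted it (io.lsq.ldin.ready) and will reschedule
  // the replay itself.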
1460
1461  io.ldCancel.ld2Cancel := s3_valid && (
1462    io.lsq.ldin.bits.rep_info.need_rep ||                       // exe fail or
1463    s3_in.mmio                         ||                       // is mmio
1464    s3_mis_align                                                // misalign
1465  ) && !s3_isvec && !s3_frm_mabuf
1466
1467  val s3_ld_wb_meta = Mux(s3_valid, s3_out.bits, s3_mmio.bits)
1468
1469  // data from load queue (uncache access)
1470  val s3_ld_raw_data_frm_uncache = RegNextN(io.lsq.ld_raw_data, 3)
1471  val s3_merged_data_frm_uncache = s3_ld_raw_data_frm_uncache.mergedData()
1472  val s3_picked_data_frm_uncache = LookupTree(s3_ld_raw_data_frm_uncache.addrOffset, List(
1473    "b000".U -> s3_merged_data_frm_uncache(63,  0),
1474    "b001".U -> s3_merged_data_frm_uncache(63,  8),
1475    "b010".U -> s3_merged_data_frm_uncache(63, 16),
1476    "b011".U -> s3_merged_data_frm_uncache(63, 24),
1477    "b100".U -> s3_merged_data_frm_uncache(63, 32),
1478    "b101".U -> s3_merged_data_frm_uncache(63, 40),
1479    "b110".U -> s3_merged_data_frm_uncache(63, 48),
1480    "b111".U -> s3_merged_data_frm_uncache(63, 56)
1481  ))
1482  val s3_ld_data_frm_uncache = rdataHelper(s3_ld_raw_data_frm_uncache.uop, s3_picked_data_frm_uncache)
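  // NOTE: addrOffset selects how many bytes the merged 64-bit uncache data is shifted right
  // so the requested bytes start at bit 0; rdataHelper then performs the per-uop width and
  // sign selection.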
1483
1484  // data from dcache hit
1485  val s3_ld_raw_data_frm_cache = Wire(new LoadDataFromDcacheBundle)
1486  s3_ld_raw_data_frm_cache.respDcacheData       := io.dcache.resp.bits.data_delayed
1487  s3_ld_raw_data_frm_cache.forwardMask          := RegEnable(s2_fwd_mask, s2_valid)
1488  s3_ld_raw_data_frm_cache.forwardData          := RegEnable(s2_fwd_data, s2_valid)
1489  s3_ld_raw_data_frm_cache.uop                  := RegEnable(s2_out.uop, s2_valid)
1490  s3_ld_raw_data_frm_cache.addrOffset           := RegEnable(s2_out.paddr(3, 0), s2_valid)
1491  s3_ld_raw_data_frm_cache.forward_D            := RegEnable(s2_fwd_frm_d_chan, false.B, s2_valid) || s3_fwd_frm_d_chan_valid
1492  s3_ld_raw_data_frm_cache.forwardData_D        := Mux(s3_fwd_frm_d_chan_valid, s3_fwd_data_frm_d_chan, RegEnable(s2_fwd_data_frm_d_chan, s2_valid))
1493  s3_ld_raw_data_frm_cache.forward_mshr         := RegEnable(s2_fwd_frm_mshr, false.B, s2_valid)
1494  s3_ld_raw_data_frm_cache.forwardData_mshr     := RegEnable(s2_fwd_data_frm_mshr, s2_valid)
1495  s3_ld_raw_data_frm_cache.forward_result_valid := RegEnable(s2_fwd_data_valid, false.B, s2_valid)
1496
1497  val s3_merged_data_frm_cache = s3_ld_raw_data_frm_cache.mergedData()
1498  val s3_data_frm_cache = Seq(
1499    s3_merged_data_frm_cache(63,    0),
1500    s3_merged_data_frm_cache(63,    8),
1501    s3_merged_data_frm_cache(63,   16),
1502    s3_merged_data_frm_cache(63,   24),
1503    s3_merged_data_frm_cache(63,   32),
1504    s3_merged_data_frm_cache(63,   40),
1505    s3_merged_data_frm_cache(63,   48),
1506    s3_merged_data_frm_cache(63,   56),
1507    s3_merged_data_frm_cache(127,  64),
1508    s3_merged_data_frm_cache(127,  72),
1509    s3_merged_data_frm_cache(127,  80),
1510    s3_merged_data_frm_cache(127,  88),
1511    s3_merged_data_frm_cache(127,  96),
1512    s3_merged_data_frm_cache(127, 104),
1513    s3_merged_data_frm_cache(127, 112),
1514    s3_merged_data_frm_cache(127, 120)
1515  )
1516  val s3_picked_data_frm_cache = Mux1H(s3_data_select_by_offset, s3_data_frm_cache)
1517  val s3_ld_data_frm_cache = newRdataHelper(s3_data_select, s3_picked_data_frm_cache)
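  // NOTE: the sixteen slices above correspond to byte offsets 0..15 inside the 128-bit merged
  // data; s3_data_select_by_offset is expected to be one-hot on the byte offset, so Mux1H
  // picks the byte-aligned slice and newRdataHelper applies the final per-uop data selection.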
1518
1519  // FIXME: add 1 cycle delay ?
1520  // io.lsq.uncache.ready := !s3_valid
1521  val s3_outexception = ExceptionNO.selectByFu(s3_out.bits.uop.exceptionVec, LduCfg).asUInt.orR && s3_vecActive
1522  io.ldout.bits        := s3_ld_wb_meta
1523  io.ldout.bits.data   := Mux(s3_valid, s3_ld_data_frm_cache, s3_ld_data_frm_uncache)
1524  io.ldout.valid       := ((s3_out.valid && !s3_vecout.isvec && !s3_mis_align && !s3_frm_mabuf) ||
1525                           (s3_mmio.valid && !s3_valid))
1526  io.ldout.bits.uop.exceptionVec := ExceptionNO.selectByFu(s3_ld_wb_meta.uop.exceptionVec, LduCfg)
1527
1528  // TODO: check this --hx
1529  // io.ldout.valid       := s3_out.valid && !s3_out.bits.uop.robIdx.needFlush(io.redirect) && !s3_vecout.isvec ||
1530  //   io.lsq.uncache.valid && !io.lsq.uncache.bits.uop.robIdx.needFlush(io.redirect) && !s3_out.valid && !io.lsq.uncache.bits.isVls
1531  //  io.ldout.bits.data   := Mux(s3_out.valid, s3_ld_data_frm_cache, s3_ld_data_frm_uncache)
1532  //  io.ldout.valid       := s3_out.valid && !s3_out.bits.uop.robIdx.needFlush(io.redirect) ||
1533  //                         s3_mmio.valid && !s3_mmio.bits.uop.robIdx.needFlush(io.redirect) && !s3_out.valid
1534
1535  // s3 load fast replay
1536  io.fast_rep_out.valid := s3_valid && s3_fast_rep
1537  io.fast_rep_out.bits := s3_in
1538  io.fast_rep_out.bits.lateKill := s3_rep_frm_fetch
1539
1540  val vecFeedback = s3_valid && s3_fb_no_waiting && s3_rep_info.need_rep && !io.lsq.ldin.ready && s3_isvec
1541
1542  // vector output
1543  io.vecldout.bits.alignedType := s3_vec_alignedType
1544  // vec feedback
1545  io.vecldout.bits.vecFeedback := vecFeedback
1546  // TODO: VLSU, uncache data logic
1547  val vecdata = rdataVecHelper(s3_vec_alignedType(1,0), s3_picked_data_frm_cache)
1548  io.vecldout.bits.vecdata.get := Mux(s3_in.is128bit, s3_merged_data_frm_cache, vecdata)
1549  io.vecldout.bits.isvec := s3_vecout.isvec
1550  io.vecldout.bits.elemIdx := s3_vecout.elemIdx
1551  io.vecldout.bits.elemIdxInsideVd.get := s3_vecout.elemIdxInsideVd
1552  io.vecldout.bits.mask := s3_vecout.mask
1553  io.vecldout.bits.reg_offset.get := s3_vecout.reg_offset
1554  io.vecldout.bits.usSecondInv := s3_usSecondInv
1555  io.vecldout.bits.mBIndex := s3_vec_mBIndex
1556  io.vecldout.bits.hit := !s3_rep_info.need_rep || io.lsq.ldin.ready
1557  io.vecldout.bits.sourceType := RSFeedbackType.lrqFull
1558  io.vecldout.bits.flushState := DontCare
1559  io.vecldout.bits.exceptionVec := ExceptionNO.selectByFu(s3_out.bits.uop.exceptionVec, VlduCfg)
1560  io.vecldout.bits.vaddr := s3_in.vaddr
1561  io.vecldout.bits.mmio := DontCare
1562
1563  io.vecldout.valid := s3_out.valid && !s3_out.bits.uop.robIdx.needFlush(io.redirect) && s3_vecout.isvec ||
1564  // TODO: check this, why !io.lsq.uncache.bits.isVls before?
1565    io.lsq.uncache.valid && !io.lsq.uncache.bits.uop.robIdx.needFlush(io.redirect) && !s3_out.valid && io.lsq.uncache.bits.isVls
1566    //io.lsq.uncache.valid && !io.lsq.uncache.bits.uop.robIdx.needFlush(io.redirect) && !s3_out.valid && !io.lsq.uncache.bits.isVls
1567
1568  io.misalign_ldout.valid     := s3_valid && (!s3_fast_rep || s3_fast_rep_canceled) && s3_frm_mabuf
1569  io.misalign_ldout.bits      := io.lsq.ldin.bits
1570  io.misalign_ldout.bits.data := Mux(s3_in.is128bit, s3_merged_data_frm_cache, s3_picked_data_frm_cache)
1571
1572  // fast load to load forward
1573  if (EnableLoadToLoadForward) {
1574    io.l2l_fwd_out.valid      := s3_valid && !s3_in.mmio && !s3_rep_info.need_rep
1575    io.l2l_fwd_out.data       := Mux(s3_in.vaddr(3), s3_merged_data_frm_cache(127, 64), s3_merged_data_frm_cache(63, 0))
1576    io.l2l_fwd_out.dly_ld_err := s3_dly_ld_err || // ecc delayed error
1577                                 s3_ldld_rep_inst ||
1578                                 s3_rep_frm_fetch
1579  } else {
1580    io.l2l_fwd_out.valid := false.B
1581    io.l2l_fwd_out.data := DontCare
1582    io.l2l_fwd_out.dly_ld_err := DontCare
1583  }
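  // NOTE: load-to-load (pointer chasing) forwarding sends half of the merged s3 data back to
  // load s0 so a dependent load can issue early; vaddr(3) picks the 64-bit half, and
  // dly_ld_err tells the consumer to drop the forwarded data when a delayed error or a
  // replay-from-fetch condition is detected afterwards.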
1584
1585  // trigger
1586  val last_valid_data = RegNext(RegEnable(io.ldout.bits.data, io.ldout.fire))
1587  val hit_ld_addr_trig_hit_vec = Wire(Vec(TriggerNum, Bool()))
1588  val lq_ld_addr_trig_hit_vec = io.lsq.trigger.lqLoadAddrTriggerHitVec
1589  (0 until TriggerNum).map{i => {
1590    val tdata2    = GatedRegNext(io.trigger(i).tdata2)
1591    val matchType = RegNext(io.trigger(i).matchType)
1592    val tEnable   = RegNext(io.trigger(i).tEnable)
1593
1594    hit_ld_addr_trig_hit_vec(i) := TriggerCmp(RegEnable(s2_out.vaddr, 0.U, s2_valid), tdata2, matchType, tEnable)
1595    io.trigger(i).addrHit       := Mux(s3_out.valid, hit_ld_addr_trig_hit_vec(i), lq_ld_addr_trig_hit_vec(i))
1596  }}
1597  io.lsq.trigger.hitLoadAddrTriggerHitVec := hit_ld_addr_trig_hit_vec
1598
1599  // s1
1600  io.debug_ls.s1_robIdx := s1_in.uop.robIdx.value
1601  io.debug_ls.s1_isLoadToLoadForward := s1_fire && s1_try_ptr_chasing && !s1_ptr_chasing_canceled
1602  io.debug_ls.s1_isTlbFirstMiss := s1_fire && s1_tlb_miss && s1_in.isFirstIssue
1603  // s2
1604  io.debug_ls.s2_robIdx := s2_in.uop.robIdx.value
1605  io.debug_ls.s2_isBankConflict := s2_fire && (!s2_kill && s2_bank_conflict)
1606  io.debug_ls.s2_isDcacheFirstMiss := s2_fire && io.dcache.resp.bits.miss && s2_in.isFirstIssue
1607  io.debug_ls.s2_isForwardFail := s2_fire && s2_fwd_fail
1608  // s3
1609  io.debug_ls.s3_robIdx := s3_in.uop.robIdx.value
1610  io.debug_ls.s3_isReplayFast := s3_valid && s3_fast_rep && !s3_fast_rep_canceled
1611  io.debug_ls.s3_isReplayRS :=  RegNext(io.feedback_fast.valid && !io.feedback_fast.bits.hit) || (io.feedback_slow.valid && !io.feedback_slow.bits.hit)
1612  io.debug_ls.s3_isReplaySlow := io.lsq.ldin.valid && io.lsq.ldin.bits.rep_info.need_rep
1613  io.debug_ls.s3_isReplay := s3_valid && s3_rep_info.need_rep // include fast+slow+rs replay
1614  io.debug_ls.replayCause := s3_rep_info.cause
1615  io.debug_ls.replayCnt := 1.U
1616
1617  // Topdown
1618  io.lsTopdownInfo.s1.robIdx          := s1_in.uop.robIdx.value
1619  io.lsTopdownInfo.s1.vaddr_valid     := s1_valid && s1_in.hasROBEntry
1620  io.lsTopdownInfo.s1.vaddr_bits      := s1_vaddr
1621  io.lsTopdownInfo.s2.robIdx          := s2_in.uop.robIdx.value
1622  io.lsTopdownInfo.s2.paddr_valid     := s2_fire && s2_in.hasROBEntry && !s2_in.tlbMiss
1623  io.lsTopdownInfo.s2.paddr_bits      := s2_in.paddr
1624  io.lsTopdownInfo.s2.first_real_miss := io.dcache.resp.bits.real_miss
1625  io.lsTopdownInfo.s2.cache_miss_en   := s2_fire && s2_in.hasROBEntry && !s2_in.tlbMiss && !s2_in.missDbUpdated
1626
1627  // perf cnt
1628  XSPerfAccumulate("s0_in_valid",                  io.ldin.valid)
1629  XSPerfAccumulate("s0_in_block",                  io.ldin.valid && !io.ldin.fire)
1630  XSPerfAccumulate("s0_vecin_valid",               io.vecldin.valid)
1631  XSPerfAccumulate("s0_vecin_block",               io.vecldin.valid && !io.vecldin.fire)
1632  XSPerfAccumulate("s0_in_fire_first_issue",       s0_valid && s0_sel_src.isFirstIssue)
1633  XSPerfAccumulate("s0_lsq_replay_issue",          io.replay.fire)
1634  XSPerfAccumulate("s0_lsq_replay_vecissue",       io.replay.fire && io.replay.bits.isvec)
1635  XSPerfAccumulate("s0_ldu_fire_first_issue",      io.ldin.fire && s0_sel_src.isFirstIssue)
1636  XSPerfAccumulate("s0_fast_replay_issue",         io.fast_rep_in.fire)
1637  XSPerfAccumulate("s0_fast_replay_vecissue",      io.fast_rep_in.fire && io.fast_rep_in.bits.isvec)
1638  XSPerfAccumulate("s0_stall_out",                 s0_valid && !s0_can_go)
1639  XSPerfAccumulate("s0_stall_dcache",              s0_valid && !io.dcache.req.ready)
1640  XSPerfAccumulate("s0_addr_spec_success",         s0_fire && s0_dcache_vaddr(VAddrBits-1, 12) === io.ldin.bits.src(0)(VAddrBits-1, 12))
1641  XSPerfAccumulate("s0_addr_spec_failed",          s0_fire && s0_dcache_vaddr(VAddrBits-1, 12) =/= io.ldin.bits.src(0)(VAddrBits-1, 12))
1642  XSPerfAccumulate("s0_addr_spec_success_once",    s0_fire && s0_dcache_vaddr(VAddrBits-1, 12) === io.ldin.bits.src(0)(VAddrBits-1, 12) && s0_sel_src.isFirstIssue)
1643  XSPerfAccumulate("s0_addr_spec_failed_once",     s0_fire && s0_dcache_vaddr(VAddrBits-1, 12) =/= io.ldin.bits.src(0)(VAddrBits-1, 12) && s0_sel_src.isFirstIssue)
1644  XSPerfAccumulate("s0_vec_addr_vlen_aligned",     s0_fire && s0_sel_src.isvec && s0_dcache_vaddr(3, 0) === 0.U)
1645  XSPerfAccumulate("s0_vec_addr_vlen_unaligned",   s0_fire && s0_sel_src.isvec && s0_dcache_vaddr(3, 0) =/= 0.U)
1646  XSPerfAccumulate("s0_forward_tl_d_channel",      s0_out.forward_tlDchannel)
1647  XSPerfAccumulate("s0_hardware_prefetch_fire",    s0_fire && s0_hw_prf_select)
1648  XSPerfAccumulate("s0_software_prefetch_fire",    s0_fire && s0_sel_src.prf && s0_int_iss_select)
1649  XSPerfAccumulate("s0_hardware_prefetch_blocked", io.prefetch_req.valid && !s0_hw_prf_select)
1650  XSPerfAccumulate("s0_hardware_prefetch_total",   io.prefetch_req.valid)
1651
1652  XSPerfAccumulate("s1_in_valid",                  s1_valid)
1653  XSPerfAccumulate("s1_in_fire",                   s1_fire)
1654  XSPerfAccumulate("s1_in_fire_first_issue",       s1_fire && s1_in.isFirstIssue)
1655  XSPerfAccumulate("s1_tlb_miss",                  s1_fire && s1_tlb_miss)
1656  XSPerfAccumulate("s1_tlb_miss_first_issue",      s1_fire && s1_tlb_miss && s1_in.isFirstIssue)
1657  XSPerfAccumulate("s1_stall_out",                 s1_valid && !s1_can_go)
1658  XSPerfAccumulate("s1_dly_err",                   s1_valid && s1_fast_rep_dly_err)
1659
1660  XSPerfAccumulate("s2_in_valid",                  s2_valid)
1661  XSPerfAccumulate("s2_in_fire",                   s2_fire)
1662  XSPerfAccumulate("s2_in_fire_first_issue",       s2_fire && s2_in.isFirstIssue)
1663  XSPerfAccumulate("s2_dcache_miss",               s2_fire && io.dcache.resp.bits.miss)
1664  XSPerfAccumulate("s2_dcache_miss_first_issue",   s2_fire && io.dcache.resp.bits.miss && s2_in.isFirstIssue)
1665  XSPerfAccumulate("s2_dcache_real_miss_first_issue",   s2_fire && io.dcache.resp.bits.miss && s2_in.isFirstIssue)
1666  XSPerfAccumulate("s2_full_forward",              s2_fire && s2_full_fwd)
1667  XSPerfAccumulate("s2_dcache_miss_full_forward",  s2_fire && s2_dcache_miss)
1668  XSPerfAccumulate("s2_fwd_frm_d_can",             s2_valid && s2_fwd_frm_d_chan)
1669  XSPerfAccumulate("s2_fwd_frm_d_chan_or_mshr",    s2_valid && s2_fwd_frm_d_chan_or_mshr)
1670  XSPerfAccumulate("s2_stall_out",                 s2_fire && !s2_can_go)
1671  XSPerfAccumulate("s2_prefetch",                  s2_fire && s2_prf)
1672  XSPerfAccumulate("s2_prefetch_ignored",          s2_fire && s2_prf && io.dcache.s2_mq_nack) // ignore prefetch for mshr full / miss req port conflict
1673  XSPerfAccumulate("s2_prefetch_miss",             s2_fire && s2_prf && io.dcache.resp.bits.miss) // prefetch req miss in l1
1674  XSPerfAccumulate("s2_prefetch_hit",              s2_fire && s2_prf && !io.dcache.resp.bits.miss) // prefetch req hit in l1
1675  XSPerfAccumulate("s2_prefetch_accept",           s2_fire && s2_prf && io.dcache.resp.bits.miss && !io.dcache.s2_mq_nack) // prefetch a missed line in l1, and l1 accepted it
1676  XSPerfAccumulate("s2_forward_req",               s2_fire && s2_in.forward_tlDchannel)
1677  XSPerfAccumulate("s2_successfully_forward_channel_D", s2_fire && s2_fwd_frm_d_chan && s2_fwd_data_valid)
1678  XSPerfAccumulate("s2_successfully_forward_mshr",      s2_fire && s2_fwd_frm_mshr && s2_fwd_data_valid)
1679
1680  XSPerfAccumulate("s3_fwd_frm_d_chan",            s3_valid && s3_fwd_frm_d_chan_valid)
1681  XSPerfAccumulate("s3_frm_mabuf",                 s3_valid && s3_frm_mabuf)
1682
1683  XSPerfAccumulate("load_to_load_forward",                      s1_try_ptr_chasing && !s1_ptr_chasing_canceled)
1684  XSPerfAccumulate("load_to_load_forward_try",                  s1_try_ptr_chasing)
1685  XSPerfAccumulate("load_to_load_forward_fail",                 s1_cancel_ptr_chasing)
1686  XSPerfAccumulate("load_to_load_forward_fail_cancelled",       s1_cancel_ptr_chasing && s1_ptr_chasing_canceled)
1687  XSPerfAccumulate("load_to_load_forward_fail_wakeup_mismatch", s1_cancel_ptr_chasing && !s1_ptr_chasing_canceled && s1_not_fast_match)
1688  XSPerfAccumulate("load_to_load_forward_fail_op_not_ld",       s1_cancel_ptr_chasing && !s1_ptr_chasing_canceled && !s1_not_fast_match && s1_fu_op_type_not_ld)
1689  XSPerfAccumulate("load_to_load_forward_fail_addr_align",      s1_cancel_ptr_chasing && !s1_ptr_chasing_canceled && !s1_not_fast_match && !s1_fu_op_type_not_ld && s1_addr_misaligned)
1690  XSPerfAccumulate("load_to_load_forward_fail_set_mismatch",    s1_cancel_ptr_chasing && !s1_ptr_chasing_canceled && !s1_not_fast_match && !s1_fu_op_type_not_ld && !s1_addr_misaligned && s1_addr_mismatch)
1691
1692  // bug lyq: some signals in perfEvents are no longer suitable for the current MemBlock design
1693  // hardware performance counter
1694  val perfEvents = Seq(
1695    ("load_s0_in_fire         ", s0_fire                                                        ),
1696    ("load_to_load_forward    ", s1_fire && s1_try_ptr_chasing && !s1_ptr_chasing_canceled      ),
1697    ("stall_dcache            ", s0_valid && s0_can_go && !io.dcache.req.ready                  ),
1698    ("load_s1_in_fire         ", s0_fire                                                        ),
1699    ("load_s1_tlb_miss        ", s1_fire && io.tlb.resp.bits.miss                               ),
1700    ("load_s2_in_fire         ", s1_fire                                                        ),
1701    ("load_s2_dcache_miss     ", s2_fire && io.dcache.resp.bits.miss                            ),
1702  )
1703  generatePerfEvent()
1704
1705  when(io.ldout.fire){
1706    XSDebug("ldout %x\n", io.ldout.bits.uop.pc)
1707  }
1708  // end
1709}