xref: /XiangShan/src/main/scala/xiangshan/frontend/icache/ICacheMainPipe.scala (revision 64129915cef747be1a9c0343a655fec16a4fdece)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.frontend.icache
18
19import org.chipsalliance.cde.config.Parameters
20import chisel3._
21import chisel3.util._
22import difftest._
23import freechips.rocketchip.tilelink.ClientStates
24import xiangshan._
25import xiangshan.cache.mmu._
26import utils._
27import utility._
28import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
29import xiangshan.frontend.{FtqICacheInfo, FtqToICacheRequestBundle}
30
31class ICacheMainPipeReq(implicit p: Parameters) extends ICacheBundle
32{
33  val vaddr  = UInt(VAddrBits.W)
34  def vsetIdx = get_idx(vaddr)
35}
36
37class ICacheMainPipeResp(implicit p: Parameters) extends ICacheBundle
38{
39  val vaddr    = UInt(VAddrBits.W)
40  val registerData = UInt(blockBits.W)
41  val sramData = UInt(blockBits.W)
42  val select   = Bool()
43  val paddr    = UInt(PAddrBits.W)
44  val tlbExcp  = new Bundle{
45    val pageFault = Bool()
46    val accessFault = Bool()
47    val mmio = Bool()
48  }
49}
50
51class ICacheMainPipeBundle(implicit p: Parameters) extends ICacheBundle
52{
53  val req  = Flipped(Decoupled(new FtqToICacheRequestBundle))
54  val resp = Vec(PortNumber, ValidIO(new ICacheMainPipeResp))
55  val topdownIcacheMiss = Output(Bool())
56  val topdownItlbMiss = Output(Bool())
57}
58
59class ICacheMetaReqBundle(implicit p: Parameters) extends ICacheBundle{
60  val toIMeta       = DecoupledIO(new ICacheReadBundle)
61  val fromIMeta     = Input(new ICacheMetaRespBundle)
62}
63
64class ICacheDataReqBundle(implicit p: Parameters) extends ICacheBundle{
65  val toIData       = DecoupledIO(Vec(partWayNum, new ICacheReadBundle))
66  val fromIData     = Input(new ICacheDataRespBundle)
67}
68
69class ICacheMSHRBundle(implicit p: Parameters) extends ICacheBundle{
70  val toMSHR        = Decoupled(new ICacheMissReq)
71  val fromMSHR      = Flipped(ValidIO(new ICacheMissResp))
72}
73
74class ICachePMPBundle(implicit p: Parameters) extends ICacheBundle{
75  val req  = Valid(new PMPReqBundle())
76  val resp = Input(new PMPRespBundle())
77}
78
79class ICachePerfInfo(implicit p: Parameters) extends ICacheBundle{
80  val only_0_hit     = Bool()
81  val only_0_miss    = Bool()
82  val hit_0_hit_1    = Bool()
83  val hit_0_miss_1   = Bool()
84  val miss_0_hit_1   = Bool()
85  val miss_0_miss_1  = Bool()
86  val hit_0_except_1 = Bool()
87  val miss_0_except_1 = Bool()
88  val except_0       = Bool()
89  val bank_hit       = Vec(2,Bool())
90  val hit            = Bool()
91}
92
93class ICacheMainPipeInterface(implicit p: Parameters) extends ICacheBundle {
94  val hartId = Input(UInt(8.W))
95  /*** internal interface ***/
96  val metaArray   = new ICacheMetaReqBundle
97  val dataArray   = new ICacheDataReqBundle
98  /** prefetch io */
99  val IPFBufferRead = Flipped(new IPFBufferRead)
100  val PIQRead       = Flipped(new PIQRead)
101
102  val IPFReplacer         = Flipped(new IPFReplacer)
103  val ICacheMainPipeInfo  = new ICacheMainPipeInfo
104
105  val mshr        = Vec(PortNumber, new ICacheMSHRBundle)
106  val errors      = Output(Vec(PortNumber, new L1CacheErrorInfo))
107  /*** outside interface ***/
108  //val fetch       = Vec(PortNumber, new ICacheMainPipeBundle)
109  /* if ftq.valid is high in cycle T + 1,
110   * the ftq request payload must already be valid in cycle T
111   */
112  val fetch       = new ICacheMainPipeBundle
113  val pmp         = Vec(PortNumber, new ICachePMPBundle)
114  val itlb        = Vec(PortNumber, new TlbRequestIO)
115  val respStall   = Input(Bool())
116  val perfInfo = Output(new ICachePerfInfo)
117
118  val csr_parity_enable = Input(Bool())
119}
120
121class ICacheDB(implicit p: Parameters) extends ICacheBundle {
122  val blk_vaddr   = UInt((VAddrBits - blockOffBits).W)
123  val blk_paddr   = UInt((PAddrBits - blockOffBits).W)
124  val hit         = Bool()
125}
126
127class ICacheMainPipe(implicit p: Parameters) extends ICacheModule
128{
129  val io = IO(new ICacheMainPipeInterface)
130
131  /** Input/Output port */
132  val (fromFtq, toIFU)    = (io.fetch.req,          io.fetch.resp)
133  val (toMeta, metaResp)  = (io.metaArray.toIMeta,  io.metaArray.fromIMeta)
134  val (toData, dataResp)  = (io.dataArray.toIData,  io.dataArray.fromIData)
135  val (toIPF,  fromIPF)   = (io.IPFBufferRead.req,  io.IPFBufferRead.resp)
136  val (toPIQ,  fromPIQ)   = (io.PIQRead.req,        io.PIQRead.resp)
137  val (toMSHR, fromMSHR)  = (io.mshr.map(_.toMSHR), io.mshr.map(_.fromMSHR))
138  val (toITLB, fromITLB)  = (io.itlb.map(_.req),    io.itlb.map(_.resp))
139  val (toPMP,  fromPMP)   = (io.pmp.map(_.req),     io.pmp.map(_.resp))
140
141  val IPFReplacer         = io.IPFReplacer
142  val toIPrefetch         = io.ICacheMainPipeInfo
143
144
145  // Statistics on the frequency distribution of FTQ fire interval
146  val cntFtqFireInterval = RegInit(0.U(32.W))
147  cntFtqFireInterval := Mux(fromFtq.fire, 1.U, cntFtqFireInterval + 1.U)
148  XSPerfHistogram("ftq2icache_fire_" + p(XSCoreParamsKey).HartId.toString,
149                  cntFtqFireInterval, fromFtq.fire,
150                  1, 300, 1, right_strict = true)
151
152  // Ftq RegNext Register
153  val fromFtqReq = fromFtq.bits.pcMemRead
154
155  /** pipeline control signal */
156  val s1_ready, s2_ready = Wire(Bool())
157  val s0_fire,  s1_fire , s2_fire  = Wire(Bool())
158
159  val missSwitchBit = RegInit(false.B)
160
161  /** replacement status register */
162  val touch_sets = Seq.fill(2)(Wire(Vec(2, UInt(log2Ceil(nSets/2).W))))
163  val touch_ways = Seq.fill(2)(Wire(Vec(2, Valid(UInt(log2Ceil(nWays).W)))) )
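  // touch_sets / touch_ways are driven in stage 2 (index 0 records the hit way, index 1 the
  // victim way) and are consumed by the replacers' access() calls below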
164
165  /**
166    ******************************************************************************
167    * ICache Stage 0
168    * - send req to ITLB and wait for tlb miss fixing
169    * - send req to Meta/Data SRAM
170    ******************************************************************************
171    */
172
173  /** s0 control */
174  val s0_valid       = fromFtq.valid
175  val s0_req_vaddr   = (0 until partWayNum + 1).map(i => VecInit(Seq(fromFtqReq(i).startAddr, fromFtqReq(i).nextlineStart)))
176  val s0_req_vsetIdx = (0 until partWayNum + 1).map(i => VecInit(s0_req_vaddr(i).map(get_idx(_))))
177  val s0_only_first  = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) && !fromFtqReq(i).crossCacheline)
178  val s0_double_line = (0 until partWayNum + 1).map(i => fromFtq.bits.readValid(i) && fromFtqReq(i).crossCacheline)
179
180  val s0_final_valid        = s0_valid
181  val s0_final_vaddr        = s0_req_vaddr.head
182  val s0_final_vsetIdx      = s0_req_vsetIdx.head
183  val s0_final_only_first   = s0_only_first.head
184  val s0_final_double_line  = s0_double_line.head
185
186  /** SRAM request */
187  // 0 -> meta read; 1,2,3 -> data; 3 -> code; 4 -> itlb
188  // TODO: it seems like 0,1,2,3 -> dataArray(data); 3 -> dataArray(code); 0 -> metaArray; 4 -> itlb
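  // as used below: the FTQ provides partWayNum + 1 copies of the request (likely to ease fanout);
  // the first partWayNum copies (.init) feed the banked data array, the first copy (.head) feeds
  // the meta array, and the last copy (.last) feeds the ITLB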
189  val ftq_req_to_data_doubleline  = s0_double_line.init
190  val ftq_req_to_data_vset_idx    = s0_req_vsetIdx.init
191  val ftq_req_to_data_valid       = fromFtq.bits.readValid.init
192
193  val ftq_req_to_meta_doubleline  = s0_double_line.head
194  val ftq_req_to_meta_vset_idx    = s0_req_vsetIdx.head
195
196  val ftq_req_to_itlb_only_first  = s0_only_first.last
197  val ftq_req_to_itlb_doubleline  = s0_double_line.last
198  val ftq_req_to_itlb_vaddr       = s0_req_vaddr.last
199  val ftq_req_to_itlb_vset_idx    = s0_req_vsetIdx.last
200
201  /** Data request */
202  for(i <- 0 until partWayNum) {
203    toData.valid                  := ftq_req_to_data_valid(i) && !missSwitchBit
204    toData.bits(i).isDoubleLine   := ftq_req_to_data_doubleline(i)
205    toData.bits(i).vSetIdx        := ftq_req_to_data_vset_idx(i)
206  }
207
208  /** Meta request */
209  toMeta.valid               := s0_valid && !missSwitchBit
210  toMeta.bits.isDoubleLine   := ftq_req_to_meta_doubleline
211  toMeta.bits.vSetIdx        := ftq_req_to_meta_vset_idx
212
213  val toITLB_s0_valid    = VecInit(Seq(s0_valid, s0_valid && ftq_req_to_itlb_doubleline))
214  val toITLB_s0_size     = VecInit(Seq(3.U, 3.U)) // TODO: fix the size
215  val toITLB_s0_vaddr    = ftq_req_to_itlb_vaddr
216  val toITLB_s0_debug_pc = ftq_req_to_itlb_vaddr
217
218  val itlb_can_go    = toITLB(0).ready && toITLB(1).ready
219  val icache_can_go  = toData.ready && toMeta.ready
220  val pipe_can_go    = !missSwitchBit && s1_ready
221  val s0_can_go      = itlb_can_go && icache_can_go && pipe_can_go
222  s0_fire  := s0_valid && s0_can_go
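  // s0 issues only when the ITLB, meta array and data array can all accept the request in the
  // same cycle, no miss is being handled (missSwitchBit is low) and s1 can take the result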
223
224  //TODO: fix GTimer() condition
225  fromFtq.ready := s0_can_go
226
227  /**
228    ******************************************************************************
229    * ICache Stage 1
230    * - get tlb resp data (exception info and physical addresses)
231    * - get Meta/Data SRAM read responses (latched for pipeline stop)
232    * - tag compare/hit check
233    * - check ipf and piq
234    ******************************************************************************
235    */
236
237  /** s1 control */
238  val s1_valid = generatePipeControl(lastFire = s0_fire, thisFire = s1_fire, thisFlush = false.B, lastFlush = false.B)
239
240  val s1_req_vaddr   = RegEnable(s0_final_vaddr, s0_fire)
241  val s1_req_vsetIdx = RegEnable(s0_final_vsetIdx, s0_fire)
242  val s1_only_first  = RegEnable(s0_final_only_first, s0_fire)
243  val s1_double_line = RegEnable(s0_final_double_line, s0_fire)
244
245  /** tlb request and response */
246  fromITLB.foreach(_.ready := true.B)
247  val s1_wait_itlb  = RegInit(VecInit(Seq.fill(PortNumber)(false.B)))
248
249  (0 until PortNumber).foreach { i =>
250    when(RegNext(s0_fire) && fromITLB(i).bits.miss) {
251      s1_wait_itlb(i) := true.B
252    }.elsewhen(s1_wait_itlb(i) && !fromITLB(i).bits.miss) {
253      s1_wait_itlb(i) := false.B
254    }
255  }
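  // s1_wait_itlb(i) is set while port i's translation is still missing in the ITLB; the request
  // is replayed to the ITLB (s1_need_itlb below) until the miss is resolved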
256
257  val s1_need_itlb = Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && fromITLB(0).bits.miss,
258                             (RegNext(s0_fire) || s1_wait_itlb(1)) && fromITLB(1).bits.miss && s1_double_line)
259  val toITLB_s1_valid    = s1_need_itlb
260  val toITLB_s1_size     = VecInit(Seq(3.U, 3.U)) // TODO: fix the size
261  val toITLB_s1_vaddr    = s1_req_vaddr
262  val toITLB_s1_debug_pc = s1_req_vaddr
263
264  // choose tlb req between s0 and s1
265  for (i <- 0 until PortNumber) {
266    toITLB(i).valid         := Mux(s1_need_itlb(i), toITLB_s1_valid(i), toITLB_s0_valid(i))
267    toITLB(i).bits.size     := Mux(s1_need_itlb(i), toITLB_s1_size(i), toITLB_s0_size(i))
268    toITLB(i).bits.vaddr    := Mux(s1_need_itlb(i), toITLB_s1_vaddr(i), toITLB_s0_vaddr(i))
269    toITLB(i).bits.debug.pc := Mux(s1_need_itlb(i), toITLB_s1_debug_pc(i), toITLB_s0_debug_pc(i))
270  }
271  toITLB.map{port =>
272    port.bits.cmd                 := TlbCmd.exec
273    port.bits.memidx              := DontCare
274    port.bits.debug.robIdx        := DontCare
275    port.bits.no_translate        := false.B
276    port.bits.debug.isFirstIssue  := DontCare
277    port.bits.kill                := DontCare
278  }
279  io.itlb.foreach(_.req_kill := false.B)
280
281  /** latch the tlb response in case the pipeline stalls */
282  // val tlb_valid_tmp = VecInit((0 until PortNumber).map(i =>
283  //                       (RegNext(s0_fire) || s1_wait_itlb(i)) && !fromITLB(i).bits.miss))
284  val tlb_valid_tmp = VecInit(Seq((RegNext(s0_fire) || s1_wait_itlb(0)) && !fromITLB(0).bits.miss,
285                                  (RegNext(s0_fire) || s1_wait_itlb(1)) && !fromITLB(1).bits.miss && s1_double_line))
286  val tlbRespPAddr  = VecInit((0 until PortNumber).map(i =>
287                        ResultHoldBypass(valid = tlb_valid_tmp(i), data = fromITLB(i).bits.paddr(0))))
288  val tlbExcpPF     = VecInit((0 until PortNumber).map(i =>
289                        ResultHoldBypass(valid = tlb_valid_tmp(i), data = fromITLB(i).bits.excp(0).pf.instr)))
290  val tlbExcpAF     = VecInit((0 until PortNumber).map(i =>
291                        ResultHoldBypass(valid = tlb_valid_tmp(i), data = fromITLB(i).bits.excp(0).af.instr)))
292  val tlbExcp       = VecInit((0 until PortNumber).map(i => tlbExcpAF(i) || tlbExcpPF(i)))
293
294  val s1_tlb_valid = VecInit((0 until PortNumber).map(i => ValidHoldBypass(tlb_valid_tmp(i), s1_fire)))
295  val tlbRespAllValid = s1_tlb_valid(0) && (!s1_double_line || s1_double_line && s1_tlb_valid(1))
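  // the translated paddr and exception flags are held (ResultHoldBypass) so they stay stable
  // while s1 is stalled; tlbRespAllValid is high once every port that needs a translation has one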
296
297
298  def numOfStage = 3
299  val itlbMissStage = RegInit(VecInit(Seq.fill(numOfStage - 1)(0.B)))
300  itlbMissStage(0) := !tlbRespAllValid
301  for (i <- 1 until numOfStage - 1) {
302    itlbMissStage(i) := itlbMissStage(i - 1)
303  }
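  // register the ITLB-miss status; itlbMissStage(0) drives io.fetch.topdownItlbMiss for
  // top-down performance analysis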
304
305  /** s1 hit check/tag compare */
306  val s1_req_paddr              = tlbRespPAddr
307  val s1_req_ptags              = VecInit(s1_req_paddr.map(get_phy_tag(_)))
308
309  val s1_meta_ptags              = ResultHoldBypass(data = metaResp.tags, valid = RegNext(s0_fire))
310  val s1_meta_valids             = ResultHoldBypass(data = metaResp.entryValid, valid = RegNext(s0_fire))
311  val s1_meta_errors             = ResultHoldBypass(data = metaResp.errors, valid = RegNext(s0_fire))
312
313  val s1_data_cacheline          = ResultHoldBypass(data = dataResp.datas, valid = RegNext(s0_fire))
314  val s1_data_errorBits          = ResultHoldBypass(data = dataResp.codes, valid = RegNext(s0_fire))
315
316  val s1_tag_eq_vec        = VecInit((0 until PortNumber).map( p => VecInit((0 until nWays).map( w =>  s1_meta_ptags(p)(w) ===  s1_req_ptags(p) ))))
317  val s1_tag_match_vec     = VecInit((0 until PortNumber).map( k => VecInit(s1_tag_eq_vec(k).zipWithIndex.map{ case(way_tag_eq, w) => way_tag_eq && s1_meta_valids(k)(w) /*s1_meta_cohs(k)(w).isValid()*/})))
318  val s1_tag_match         = VecInit(s1_tag_match_vec.map(vector => ParallelOR(vector)))
319
320  val s1_port_hit          = VecInit(Seq(s1_tag_match(0) && s1_valid  && !tlbExcp(0),  s1_tag_match(1) && s1_valid && s1_double_line && !tlbExcp(1) ))
321  val s1_bank_miss         = VecInit(Seq(!s1_tag_match(0) && s1_valid && !tlbExcp(0), !s1_tag_match(1) && s1_valid && s1_double_line && !tlbExcp(1) ))
322  val s1_hit               = (s1_port_hit(0) && s1_port_hit(1)) || (!s1_double_line && s1_port_hit(0))
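  // the fetch hits only when port 0 hits and, for a cross-cacheline (double line) fetch,
  // port 1 hits as well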
323
324  /** choose victim cacheline */
325  val replacers       = Seq.fill(PortNumber)(ReplacementPolicy.fromString(cacheParams.replacer,nWays,nSets/PortNumber))
326  val s1_victim_oh    = ResultHoldBypass(data = VecInit(replacers.zipWithIndex.map{case (replacer, i) => UIntToOH(replacer.way(s1_req_vsetIdx(i)(highestIdxBit, 1)))}), valid = RegNext(s0_fire))
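  // each port has its own replacer covering nSets / PortNumber sets, so the replacer is indexed
  // with the upper bits of vsetIdx (bit 0 presumably selects the bank)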
327
328
329  // when(s1_fire){
330  //   // when (!(PopCount(s1_tag_match_vec(0)) <= 1.U && (PopCount(s1_tag_match_vec(1)) <= 1.U || !s1_double_line))) {
331  //   //   printf("Multiple hit in main pipe\n")
332  //   // }
333  //   assert(PopCount(s1_tag_match_vec(0)) <= 1.U && (PopCount(s1_tag_match_vec(1)) <= 1.U || !s1_double_line),
334  //     "Multiple hit in main pipe, port0:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x port1:is=%d,ptag=0x%x,vidx=0x%x,vaddr=0x%x ",
335  //     PopCount(s1_tag_match_vec(0)) > 1.U,s1_req_ptags(0), get_idx(s1_req_vaddr(0)), s1_req_vaddr(0),
336  //     PopCount(s1_tag_match_vec(1)) > 1.U && s1_double_line, s1_req_ptags(1), get_idx(s1_req_vaddr(1)), s1_req_vaddr(1))
337  // }
338
339  ((replacers zip touch_sets) zip touch_ways).map{case ((r, s),w) => r.access(s,w)}
340  IPFReplacer.waymask := UIntToOH(replacers(0).way(IPFReplacer.vsetIdx))
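  // expose a victim waymask from replacer 0 for the IPF buffer's own set index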
341
342  /** check IPF buffer; the result is returned in the same cycle */
343  (0 until PortNumber).foreach { i =>
344    toIPF(i).valid      := tlb_valid_tmp(i)
345    toIPF(i).bits.paddr := s1_req_paddr(i)
346  }
347  val s1_ipf_hit        = VecInit((0 until PortNumber).map(i => toIPF(i).valid && fromIPF(i).ipf_hit))
348  val s1_ipf_hit_latch  = VecInit((0 until PortNumber).map(i => holdReleaseLatch(valid = s1_ipf_hit(i), release = s1_fire, flush = false.B)))
349  val s1_ipf_data       = VecInit((0 until PortNumber).map(i => ResultHoldBypass(data = fromIPF(i).cacheline, valid = s1_ipf_hit(i))))
350
351  /** check PIQ; if it hits, wait until the prefetched data is valid */
352  (0 until PortNumber).foreach { i =>
353    toPIQ(i).valid      := tlb_valid_tmp(i)
354    toPIQ(i).bits.paddr := s1_req_paddr(i)
355  }
356  val s1_piq_hit        = VecInit((0 until PortNumber).map(i => toIPF(i).valid && fromPIQ(i).piq_hit))
357  val s1_piq_hit_latch  = VecInit((0 until PortNumber).map(i => holdReleaseLatch(valid = s1_piq_hit(i), release = s1_fire, flush = false.B)))
358  val wait_piq          = VecInit((0 until PortNumber).map(i => toIPF(i).valid && fromPIQ(i).piq_hit && !fromPIQ(i).data_valid))
359  val wait_piq_latch    = VecInit((0 until PortNumber).map(i => holdReleaseLatch(valid = wait_piq(i), release = s1_fire || fromPIQ(i).data_valid, flush = false.B)))
360  val s1_piq_data       = VecInit((0 until PortNumber).map(i => ResultHoldBypass(data = fromPIQ(i).cacheline, valid = (s1_piq_hit(i) || wait_piq_latch(i)) && fromPIQ(i).data_valid)))
361
362  val s1_wait           = (0 until PortNumber).map(i => wait_piq_latch(i) && !fromPIQ(i).data_valid).reduce(_||_)
363
364  val s1_prefetch_hit = VecInit((0 until PortNumber).map(i => s1_ipf_hit_latch(i) || s1_piq_hit_latch(i)))
365  val s1_prefetch_hit_data = VecInit((0 until PortNumber).map(i => Mux(s1_ipf_hit_latch(i), s1_ipf_data(i), s1_piq_data(i))))
366
367  s1_ready := s2_ready && tlbRespAllValid && !s1_wait || !s1_valid
368  s1_fire  := s1_valid && tlbRespAllValid && s2_ready && !s1_wait
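  // s1 fires only when the translation is complete, s2 can accept the request, and no port is
  // still waiting for PIQ data (s1_wait)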
369
370  if (env.EnableDifftest) {
371    (0 until PortNumber).foreach { i =>
372      val diffPIQ = DifftestModule(new DiffRefillEvent, dontCare = true)
373      diffPIQ.coreid := io.hartId
374      diffPIQ.index := (i + 7).U
375      if (i == 0) diffPIQ.valid := s1_fire && !s1_port_hit(i) && !s1_ipf_hit_latch(i) && s1_piq_hit_latch(i) && !tlbExcp(0)
376      else diffPIQ.valid := s1_fire && !s1_port_hit(i) && !s1_ipf_hit_latch(i) && s1_piq_hit_latch(i) && s1_double_line && !tlbExcp(0) && !tlbExcp(1)
377      diffPIQ.addr := s1_req_paddr(i)
378      diffPIQ.data := s1_piq_data(i).asTypeOf(diffPIQ.data)
379      diffPIQ.idtfr := DontCare
380    }
381  }
382
383  // record cacheline log
384  val isWriteICacheTable = WireInit(Constantin.createRecord("isWriteICacheTable" + p(XSCoreParamsKey).HartId.toString))
385  val ICacheTable = ChiselDB.createTable("ICacheTable" + p(XSCoreParamsKey).HartId.toString, new ICacheDB)
386
387  val ICacheDumpData_req0 = Wire(new ICacheDB)
388  ICacheDumpData_req0.blk_paddr := getBlkAddr(s1_req_paddr(0))
389  ICacheDumpData_req0.blk_vaddr := getBlkAddr(s1_req_vaddr(0))
390  ICacheDumpData_req0.hit       := s1_port_hit(0) || s1_prefetch_hit(0)
391  ICacheTable.log(
392    data = ICacheDumpData_req0,
393    en = isWriteICacheTable.orR && s1_fire,
394    clock = clock,
395    reset = reset
396  )
397
398  val ICacheDumpData_req1 = Wire(new ICacheDB)
399  ICacheDumpData_req1.blk_paddr := getBlkAddr(s1_req_paddr(1))
400  ICacheDumpData_req1.blk_vaddr := getBlkAddr(s1_req_vaddr(1))
401  ICacheDumpData_req1.hit       := s1_port_hit(1) || s1_prefetch_hit(1)
402  ICacheTable.log(
403    data = ICacheDumpData_req1,
404    en = isWriteICacheTable.orR && s1_fire && s1_double_line,
405    clock = clock,
406    reset = reset
407  )
408
409  /** <PERF> replace victim way number */
410
411  (0 until nWays).map{ w =>
412    XSPerfAccumulate("line_0_hit_way_" + Integer.toString(w, 10),  s1_fire && s1_port_hit(0) && OHToUInt(s1_tag_match_vec(0))  === w.U)
413  }
414
415  (0 until nWays).map{ w =>
416    XSPerfAccumulate("line_0_victim_way_" + Integer.toString(w, 10),  s1_fire && !s1_port_hit(0) && OHToUInt(s1_victim_oh(0))  === w.U)
417  }
418
419  (0 until nWays).map{ w =>
420    XSPerfAccumulate("line_1_hit_way_" + Integer.toString(w, 10),  s1_fire && s1_double_line && s1_port_hit(1) && OHToUInt(s1_tag_match_vec(1))  === w.U)
421  }
422
423  (0 until nWays).map{ w =>
424    XSPerfAccumulate("line_1_victim_way_" + Integer.toString(w, 10),  s1_fire && s1_double_line && !s1_port_hit(1) && OHToUInt(s1_victim_oh(1))  === w.U)
425  }
426
427  XSPerfAccumulate("mainPipe_stage1_block_by_piq_cycles", s1_valid && s1_wait)
428
429  /**
430    ******************************************************************************
431    * ICache Stage 2
432    * - send request to MSHR if ICache miss
433    * - generate secondary miss status/data registers
434    * - response to IFU
435    ******************************************************************************
436    */
437
438  /** s2 control */
439  val s2_fetch_finish = Wire(Bool())
440
441  val s2_valid          = generatePipeControl(lastFire = s1_fire, thisFire = s2_fire, thisFlush = false.B, lastFlush = false.B)
442  val s2_miss_available = Wire(Bool())
443
444  s2_ready      := (s2_valid && s2_fetch_finish && !io.respStall) || (!s2_valid && s2_miss_available)
445  s2_fire       := s2_valid && s2_fetch_finish && !io.respStall
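  // s2 is ready either when the current fetch has finished and the IFU is not stalling the
  // response, or when s2 is empty and both miss slots are reusable (s2_miss_available)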
446
447  /** s2 data */
448  // val mmio = fromPMP.map(port => port.mmio) // TODO: handle it
449  val (s2_req_paddr , s2_req_vaddr) = (RegEnable(s1_req_paddr, s1_fire), RegEnable(s1_req_vaddr, s1_fire))
450  val s2_req_vsetIdx          = RegEnable(s1_req_vsetIdx,       s1_fire)
451  val s2_req_ptags            = RegEnable(s1_req_ptags,         s1_fire)
452  val s2_only_first           = RegEnable(s1_only_first,        s1_fire)
453  val s2_double_line          = RegEnable(s1_double_line,       s1_fire)
454  val s2_hit                  = RegEnable(s1_hit   ,            s1_fire)
455  val s2_port_hit             = RegEnable(s1_port_hit,          s1_fire)
456  val s2_bank_miss            = RegEnable(s1_bank_miss,         s1_fire)
457  val s2_waymask              = RegEnable(s1_victim_oh,         s1_fire)
458  val s2_tag_match_vec        = RegEnable(s1_tag_match_vec,     s1_fire)
459  val s2_prefetch_hit         = RegEnable(s1_prefetch_hit,      s1_fire)
460  val s2_prefetch_hit_data    = RegEnable(s1_prefetch_hit_data, s1_fire)
461  val s2_prefetch_hit_in_ipf  = RegEnable(s1_ipf_hit_latch,     s1_fire)
462  val s2_prefetch_hit_in_piq  = RegEnable(s1_piq_hit_latch,     s1_fire)
463
464  val icacheMissStage = RegInit(VecInit(Seq.fill(numOfStage - 2)(0.B)))
465  icacheMissStage(0) := !s2_hit
466
467  /** send s1 and s2 req info to IPrefetchPipe to filter prefetch requests */
468  toIPrefetch.s1Info(0).paddr  := s1_req_paddr(0)
469  toIPrefetch.s1Info(0).valid  := s1_valid
470  toIPrefetch.s1Info(1).paddr  := s1_req_paddr(1)
471  toIPrefetch.s1Info(1).valid  := s1_valid && s1_double_line
472  toIPrefetch.s2Info(0).paddr  := s2_req_paddr(0)
473  toIPrefetch.s2Info(0).valid  := s2_valid
474  toIPrefetch.s2Info(1).paddr  := s2_req_paddr(1)
475  toIPrefetch.s2Info(1).valid  := s2_valid && s2_double_line
476
477  assert(RegNext(!s2_valid || s2_req_paddr(0)(11,0) === s2_req_vaddr(0)(11,0), true.B))
478
479  /** status implies that s2 is a secondary miss (no need to resend the miss request) */
480  val sec_meet_vec = Wire(Vec(2, Bool()))
481  val s2_fixed_hit_vec = VecInit((0 until 2).map(i => s2_port_hit(i) || s2_prefetch_hit(i) || sec_meet_vec(i)))
482  val s2_fixed_hit = (s2_valid && s2_fixed_hit_vec(0) && s2_fixed_hit_vec(1) && s2_double_line) || (s2_valid && s2_fixed_hit_vec(0) && !s2_double_line)
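  // a port counts as a "fixed" hit if it hit in the SRAM, hit in a prefetch source (IPF/PIQ),
  // or matches an address already held by a miss slot (secondary miss, see sec_meet_vec below)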
483
484  val s2_meta_errors    = RegEnable(s1_meta_errors,    s1_fire)
485  val s2_data_errorBits = RegEnable(s1_data_errorBits, s1_fire)
486  val s2_data_cacheline = RegEnable(s1_data_cacheline, s1_fire)
487
488  val s2_data_errors    = Wire(Vec(PortNumber,Vec(nWays, Bool())))
489
490  (0 until PortNumber).map{ i =>
491    val read_datas = s2_data_cacheline(i).asTypeOf(Vec(nWays,Vec(dataCodeUnitNum, UInt(dataCodeUnit.W))))
492    val read_codes = s2_data_errorBits(i).asTypeOf(Vec(nWays,Vec(dataCodeUnitNum, UInt(dataCodeBits.W))))
493    val data_full_wayBits = VecInit((0 until nWays).map( w =>
494                                  VecInit((0 until dataCodeUnitNum).map(u =>
495                                        Cat(read_codes(w)(u), read_datas(w)(u))))))
496    val data_error_wayBits = VecInit((0 until nWays).map( w =>
497                                  VecInit((0 until dataCodeUnitNum).map(u =>
498                                       cacheParams.dataCode.decode(data_full_wayBits(w)(u)).error ))))
499    if(i == 0){
500      (0 until nWays).map{ w =>
501        s2_data_errors(i)(w) := RegNext(RegNext(s1_fire)) && RegNext(data_error_wayBits(w)).reduce(_||_)
502      }
503    } else {
504      (0 until nWays).map{ w =>
505        s2_data_errors(i)(w) := RegNext(RegNext(s1_fire)) && RegNext(RegNext(s1_double_line)) && RegNext(data_error_wayBits(w)).reduce(_||_)
506      }
507    }
508  }
509
510  val s2_parity_meta_error  = VecInit((0 until PortNumber).map(i => s2_meta_errors(i).reduce(_||_) && io.csr_parity_enable))
511  val s2_parity_data_error  = VecInit((0 until PortNumber).map(i => s2_data_errors(i).reduce(_||_) && io.csr_parity_enable))
512  val s2_parity_error       = VecInit((0 until PortNumber).map(i => RegNext(s2_parity_meta_error(i)) || s2_parity_data_error(i)))
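  // the data check result is registered once and therefore arrives one cycle after the meta
  // check, so the meta error is delayed by RegNext here to align the two before reporting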
513
514  for(i <- 0 until PortNumber){
515    io.errors(i).valid            := RegNext(s2_parity_error(i) && RegNext(RegNext(s1_fire)))
516    io.errors(i).report_to_beu    := RegNext(s2_parity_error(i) && RegNext(RegNext(s1_fire)))
517    io.errors(i).paddr            := RegNext(RegNext(s2_req_paddr(i)))
518    io.errors(i).source           := DontCare
519    io.errors(i).source.tag       := RegNext(RegNext(s2_parity_meta_error(i)))
520    io.errors(i).source.data      := RegNext(s2_parity_data_error(i))
521    io.errors(i).source.l2        := false.B
522    io.errors(i).opType           := DontCare
523    io.errors(i).opType.fetch     := true.B
524  }
525  if (!ICacheECCForceError) {
526    XSError(s2_parity_error.reduce(_||_) && RegNext(RegNext(s1_fire)), "ICache has parity error in MainPipe!")
527  }
528
529  /** exception and pmp logic **/
530  val s2_tlb_valid = VecInit((0 until PortNumber).map(i => ValidHold(s1_tlb_valid(i) && s1_fire, s2_fire, false.B)))
531  val pmpExcpAF = VecInit(Seq(fromPMP(0).instr && s2_tlb_valid(0), fromPMP(1).instr && s2_double_line && s2_tlb_valid(1)))
532  // exception information and mmio
533  // short delay exception signal
534  val s2_except_tlb_pf  = RegEnable(tlbExcpPF, s1_fire)
535  val s2_except_tlb_af  = RegEnable(tlbExcpAF, s1_fire)
536  // long delay exception signal
537  val s2_except_pmp_af    =  DataHoldBypass(pmpExcpAF, RegNext(s1_fire))
538
539  val s2_except     = VecInit(Seq(s2_except_tlb_pf(0) || s2_except_tlb_af(0), s2_double_line && (s2_except_tlb_pf(1) || s2_except_tlb_af(1))))
540  val s2_has_except = s2_valid && s2_except.reduce(_||_)
541  val s2_mmio       = s2_valid && DataHoldBypass(io.pmp(0).resp.mmio && !s2_except(0) && !s2_except_pmp_af(0), RegNext(s1_fire)).asBool
542  // pmp port
543  io.pmp.zipWithIndex.map { case (p, i) =>
544    p.req.valid := s2_valid && !missSwitchBit
545    p.req.bits.addr := s2_req_paddr(i)
546    p.req.bits.size := 3.U // TODO
547    p.req.bits.cmd := TlbCmd.exec
548  }
549
550  /*** cacheline miss logic ***/
551  val wait_idle :: wait_queue_ready :: wait_send_req :: wait_two_resp :: wait_0_resp :: wait_1_resp :: wait_one_resp :: wait_finish :: wait_pmp_except :: Nil = Enum(9)
552  val wait_state = RegInit(wait_idle)
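  // miss FSM: wait_idle -> wait_queue_ready (MSHR can accept) -> wait_send_req ->
  // wait_one_resp / wait_two_resp (-> wait_0_resp / wait_1_resp when only one refill is still
  // outstanding) -> wait_finish, released back to wait_idle when s2 fires; mmio and pmp-af
  // requests go straight to wait_finish, and wait_pmp_except is declared but never entered here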
553
554//  val port_miss_fix  = VecInit(Seq(fromMSHR(0).fire && !s2_port_hit(0),   fromMSHR(1).fire && s2_double_line && !s2_port_hit(1) ))
555
556  // secondary miss record registers
557  class MissSlot(implicit p: Parameters) extends  ICacheBundle {
558    val m_vSetIdx   = UInt(idxBits.W)
559    val m_pTag      = UInt(tagBits.W)
560    val m_data      = UInt(blockBits.W)
561    val m_corrupt   = Bool()
562  }
563
564  val missSlot    = Seq.fill(2)(RegInit(0.U.asTypeOf(new MissSlot)))
565  val m_invalid :: m_valid :: m_refilled :: m_flushed :: m_wait_sec_miss :: m_check_final ::Nil = Enum(6)
566  val missStateQueue = RegInit(VecInit(Seq.fill(2)(m_invalid)) )
567  val reservedRefillData = Wire(Vec(2, UInt(blockBits.W)))
568
569  s2_miss_available :=  VecInit(missStateQueue.map(entry => entry === m_invalid  || entry === m_wait_sec_miss)).reduce(_&&_)
570
571  // check miss slot
572  val fix_sec_miss    = Wire(Vec(4, Bool()))
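  // fix_sec_miss index mapping: 0 -> slot 0 fixes port 0, 1 -> slot 0 fixes port 1,
  // 2 -> slot 1 fixes port 0, 3 -> slot 1 fixes port 1 (driven by the *_latch signals below)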
573  val sec_meet_0_miss = fix_sec_miss(0) || fix_sec_miss(2)
574  val sec_meet_1_miss = fix_sec_miss(1) || fix_sec_miss(3)
575  sec_meet_vec := VecInit(Seq(sec_meet_0_miss, sec_meet_1_miss))
576
577  /*** miss/hit pattern: <Control Signal> only raised during the first cycle of s2_valid ***/
578  val cacheline_0_hit  = (s2_port_hit(0) || s2_prefetch_hit(0) || sec_meet_0_miss)
579  val cacheline_0_miss = !s2_port_hit(0) && !s2_prefetch_hit(0) && !sec_meet_0_miss
580
581  val cacheline_1_hit  = (s2_port_hit(1) || s2_prefetch_hit(1) || sec_meet_1_miss)
582  val cacheline_1_miss = !s2_port_hit(1) && !s2_prefetch_hit(1) && !sec_meet_1_miss
583
584  val only_0_miss      = RegNext(s1_fire) && cacheline_0_miss && !s2_double_line && !s2_has_except && !s2_mmio
585  val only_0_hit       = RegNext(s1_fire) && cacheline_0_hit  && !s2_double_line && !s2_mmio
586  val hit_0_hit_1      = RegNext(s1_fire) && cacheline_0_hit  && cacheline_1_hit  && s2_double_line && !s2_mmio
587  val hit_0_miss_1     = RegNext(s1_fire) && cacheline_0_hit  && cacheline_1_miss && s2_double_line  && !s2_has_except && !s2_mmio
588  val miss_0_hit_1     = RegNext(s1_fire) && cacheline_0_miss && cacheline_1_hit && s2_double_line  && !s2_has_except && !s2_mmio
589  val miss_0_miss_1    = RegNext(s1_fire) && cacheline_0_miss && cacheline_1_miss && s2_double_line  && !s2_has_except && !s2_mmio
590
591  val hit_0_except_1   = RegNext(s1_fire) && s2_double_line &&  !s2_except(0) && s2_except(1)  &&  cacheline_0_hit
592  val miss_0_except_1  = RegNext(s1_fire) && s2_double_line &&  !s2_except(0) && s2_except(1)  &&  cacheline_0_miss
593  val except_0         = RegNext(s1_fire) && s2_except(0)
594
595  /*** miss/hit pattern latch: <Control Signal> latch the miss/hit pattern if the pipeline stops ***/
596  val only_0_miss_latch      = holdReleaseLatch(valid = only_0_miss,     release = s2_fire,  flush = false.B)
597  val only_0_hit_latch       = holdReleaseLatch(valid = only_0_hit,      release = s2_fire,  flush = false.B)
598  val hit_0_hit_1_latch      = holdReleaseLatch(valid = hit_0_hit_1,     release = s2_fire,  flush = false.B)
599  val hit_0_miss_1_latch     = holdReleaseLatch(valid = hit_0_miss_1,    release = s2_fire,  flush = false.B)
600  val miss_0_hit_1_latch     = holdReleaseLatch(valid = miss_0_hit_1,    release = s2_fire,  flush = false.B)
601  val miss_0_miss_1_latch    = holdReleaseLatch(valid = miss_0_miss_1,   release = s2_fire,  flush = false.B)
602
603  val hit_0_except_1_latch   = holdReleaseLatch(valid = hit_0_except_1,  release = s2_fire,  flush = false.B)
604  val miss_0_except_1_latch  = holdReleaseLatch(valid = miss_0_except_1, release = s2_fire,  flush = false.B)
605  val except_0_latch         = holdReleaseLatch(valid = except_0,        release = s2_fire,  flush = false.B)
606
607  /*** secondary miss judgment ***/
608  def waitSecondComeIn(missState: UInt): Bool = (missState === m_wait_sec_miss)
609
610  def getMissSituat(slotNum : Int, missNum : Int ) :Bool =  {
611    RegNext(s1_fire) &&
612    RegNext(missSlot(slotNum).m_vSetIdx === s1_req_vsetIdx(missNum)) &&
613    RegNext(missSlot(slotNum).m_pTag  === s1_req_ptags(missNum)) &&
614    !s2_port_hit(missNum) && !s2_prefetch_hit(missNum) &&
615    waitSecondComeIn(missStateQueue(slotNum))
616  }
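  // getMissSituat(slot, port): true in the first s2 cycle when the request on `port` misses in
  // both the SRAM and the prefetch sources but its vSetIdx/pTag match what miss slot `slot`
  // saved, and that slot is waiting for a possible secondary miss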
617
618  /*** compare the new req with the last req saved in the miss slots ***/
619  val miss_0_s2_0 = getMissSituat(slotNum = 0, missNum = 0)
620  val miss_0_s2_1 = getMissSituat(slotNum = 0, missNum = 1)
621  val miss_1_s2_0 = getMissSituat(slotNum = 1, missNum = 0)
622  val miss_1_s2_1 = getMissSituat(slotNum = 1, missNum = 1)
623
624  val miss_0_s2_0_latch = holdReleaseLatch(valid = miss_0_s2_0,  release = s2_fire,  flush = false.B)
625  val miss_0_s2_1_latch = holdReleaseLatch(valid = miss_0_s2_1,  release = s2_fire,  flush = false.B)
626  val miss_1_s2_0_latch = holdReleaseLatch(valid = miss_1_s2_0,  release = s2_fire,  flush = false.B)
627  val miss_1_s2_1_latch = holdReleaseLatch(valid = miss_1_s2_1,  release = s2_fire,  flush = false.B)
628
629  val slot_0_solve = fix_sec_miss(0) || fix_sec_miss(1)
630  val slot_1_solve = fix_sec_miss(2) || fix_sec_miss(3)
631  val slot_slove   = VecInit(Seq(slot_0_solve, slot_1_solve))
632  fix_sec_miss   := VecInit(Seq(miss_0_s2_0_latch, miss_0_s2_1_latch, miss_1_s2_0_latch, miss_1_s2_1_latch))
633
634  /*** reserved data for secondary miss ***/
635  reservedRefillData(0) := DataHoldBypass(data = missSlot(0).m_data, valid = miss_0_s2_0 || miss_0_s2_1)
636  reservedRefillData(1) := DataHoldBypass(data = missSlot(1).m_data, valid = miss_1_s2_0 || miss_1_s2_1)
637
638  /*** miss state machine ***/
639
640  // deal with PMP access faults on lines that also miss in the cache
641  val only_pmp_af = Wire(Vec(2, Bool()))
642  only_pmp_af(0) := s2_except_pmp_af(0) && cacheline_0_miss && !s2_except(0) && s2_valid
643  only_pmp_af(1) := s2_except_pmp_af(1) && cacheline_1_miss && !s2_except(1) && s2_valid && s2_double_line
644
645  switch(wait_state){
646    is(wait_idle){
647      when(only_pmp_af(0) || only_pmp_af(1) || s2_mmio){
648        // should not send a req to MissUnit when there is a PMP access exception
649        // but to avoid using the pmp exception in control signals (like s2_fire), delay it by 1 cycle
650        // NOTE: a cacheline with a pmp exception could also hit in the ICache, but the result is meaningless; just raise the exception signals
651        wait_state := wait_finish
652      }.elsewhen(miss_0_except_1_latch){
653        wait_state :=  Mux(toMSHR(0).ready, wait_queue_ready ,wait_idle )
654      }.elsewhen(only_0_miss_latch  || miss_0_hit_1_latch){
655        wait_state :=  Mux(toMSHR(0).ready, wait_queue_ready ,wait_idle )
656      }.elsewhen(hit_0_miss_1_latch){
657        wait_state :=  Mux(toMSHR(1).ready, wait_queue_ready ,wait_idle )
658      }.elsewhen(miss_0_miss_1_latch ){
659        wait_state := Mux(toMSHR(0).ready && toMSHR(1).ready, wait_queue_ready ,wait_idle)
660      }
661    }
662
663    is(wait_queue_ready){
664      wait_state := wait_send_req
665    }
666
667    is(wait_send_req) {
668      when(miss_0_except_1_latch || only_0_miss_latch || hit_0_miss_1_latch || miss_0_hit_1_latch){
669        wait_state :=  wait_one_resp
670      }.elsewhen( miss_0_miss_1_latch ){
671        wait_state := wait_two_resp
672      }
673    }
674
675    is(wait_one_resp) {
676      when( (miss_0_except_1_latch ||only_0_miss_latch || miss_0_hit_1_latch) && fromMSHR(0).fire){
677        wait_state := wait_finish
678      }.elsewhen( hit_0_miss_1_latch && fromMSHR(1).fire){
679        wait_state := wait_finish
680      }
681    }
682
683    is(wait_two_resp) {
684      when(fromMSHR(0).fire && fromMSHR(1).fire){
685        wait_state := wait_finish
686      }.elsewhen( !fromMSHR(0).fire && fromMSHR(1).fire ){
687        wait_state := wait_0_resp
688      }.elsewhen(fromMSHR(0).fire && !fromMSHR(1).fire){
689        wait_state := wait_1_resp
690      }
691    }
692
693    is(wait_0_resp) {
694      when(fromMSHR(0).fire){
695        wait_state := wait_finish
696      }
697    }
698
699    is(wait_1_resp) {
700      when(fromMSHR(1).fire){
701        wait_state := wait_finish
702      }
703    }
704
705    is(wait_finish) {
      when(s2_fire) { wait_state := wait_idle }
706    }
707  }
708
709
710  /*** send request to MissUnit ***/
711
712  (0 until 2).map { i =>
713    if(i == 1) toMSHR(i).valid := (hit_0_miss_1_latch || miss_0_miss_1_latch) && wait_state === wait_queue_ready && !s2_mmio
714    else       toMSHR(i).valid := (only_0_miss_latch || miss_0_hit_1_latch || miss_0_miss_1_latch || miss_0_except_1_latch) && wait_state === wait_queue_ready && !s2_mmio
715    toMSHR(i).bits.paddr    := s2_req_paddr(i)
716    toMSHR(i).bits.vaddr    := s2_req_vaddr(i)
717    toMSHR(i).bits.waymask  := s2_waymask(i)
718
719
720    when(toMSHR(i).fire && missStateQueue(i) === m_invalid){
721      missStateQueue(i)     := m_valid
722      missSlot(i).m_vSetIdx := s2_req_vsetIdx(i)
723      missSlot(i).m_pTag    := get_phy_tag(s2_req_paddr(i))
724    }
725
726    when(fromMSHR(i).fire && missStateQueue(i) === m_valid ){
727      missStateQueue(i)         := m_refilled
728      missSlot(i).m_data        := fromMSHR(i).bits.data
729      missSlot(i).m_corrupt     := fromMSHR(i).bits.corrupt
730    }
731
732
733    when(s2_fire && missStateQueue(i) === m_refilled){
734      missStateQueue(i)     := m_wait_sec_miss
735    }
736
737    /*** Only check whether a secondary miss is met in the first cycle ***/
738    when(missStateQueue(i) === m_wait_sec_miss){
739      /*** The secondary req has been fixed by this slot and the other port also hit || the secondary req is for another cacheline and hit ***/
740      when((slot_slove(i) && s2_fire) || (!slot_slove(i) && s2_fire) ) {
741        missStateQueue(i)     := m_invalid
742      }
743      /*** The secondary req has been fixed by this slot but the other port misses / f3 is not ready || the secondary req is for another cacheline and misses ***/
744      .elsewhen((slot_slove(i) && !s2_fire && s2_valid) ||  (s2_valid && !slot_slove(i) && !s2_fire) ){
745        missStateQueue(i)     := m_check_final
746      }
747    }
748
749    when(missStateQueue(i) === m_check_final && toMSHR(i).fire){
750      missStateQueue(i)     :=  m_valid
751      missSlot(i).m_vSetIdx := s2_req_vsetIdx(i)
752      missSlot(i).m_pTag    := get_phy_tag(s2_req_paddr(i))
753    }.elsewhen(missStateQueue(i) === m_check_final) {
754      missStateQueue(i)     :=  m_invalid
755    }
756  }
757
758  when(toMSHR.map(_.valid).reduce(_||_)){
759    missSwitchBit := true.B
760  }.elsewhen(missSwitchBit && s2_fetch_finish){
761    missSwitchBit := false.B
762  }
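  // missSwitchBit is set as soon as a miss request is sent to the MSHR and cleared once the
  // fetch finishes; while it is set, stage 0 is blocked (meta/data reads and s0_can_go are
  // gated), so only one miss-handling fetch occupies the pipeline at a time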
763
764  (0 until PortNumber).foreach{
765    i =>
766      toIPrefetch.missSlot(i).valid   := missStateQueue(i) =/= m_invalid
767      toIPrefetch.missSlot(i).vSetIdx := missSlot(i).m_vSetIdx
768      toIPrefetch.missSlot(i).ptag    := missSlot(i).m_pTag
769  }
770
771  val miss_all_fix       =  wait_state === wait_finish
772
773  s2_fetch_finish        := ((s2_valid && s2_fixed_hit) || miss_all_fix || hit_0_except_1_latch || except_0_latch)
774
775  /** update replacement status registers: index 0 is a hit access, index 1 is a miss access */
776  (touch_ways zip touch_sets).zipWithIndex.map{ case((t_w,t_s), i) =>
777    t_s(0)         := s2_req_vsetIdx(i)(highestIdxBit, 1)
778    t_w(0).valid   := s2_valid && s2_port_hit(i)
779    t_w(0).bits    := OHToUInt(s2_tag_match_vec(i))
780
781    t_s(1)         := s2_req_vsetIdx(i)(highestIdxBit, 1)
782    t_w(1).valid   := s2_valid && !s2_port_hit(i)
783    t_w(1).bits    := OHToUInt(s2_waymask(i))
784  }
785
786  /** use the hit one-hot vector to select data */
787  val s2_hit_datas    = VecInit(s2_data_cacheline.zipWithIndex.map { case(bank, i) =>
788    val port_hit_data = Mux1H(s2_tag_match_vec(i).asUInt, bank)
789    port_hit_data
790  })
791
792  val s2_register_datas       = Wire(Vec(2, UInt(blockBits.W)))
793
794  s2_register_datas.zipWithIndex.map{case(bank,i) =>
795    // if(i == 0) bank := Mux(s2_port_hit(i), s2_hit_datas(i), Mux(miss_0_s2_0_latch,reservedRefillData(0), Mux(miss_1_s2_0_latch,reservedRefillData(1), missSlot(0).m_data)))
796    // else    bank    := Mux(s2_port_hit(i), s2_hit_datas(i), Mux(miss_0_s2_1_latch,reservedRefillData(0), Mux(miss_1_s2_1_latch,reservedRefillData(1), missSlot(1).m_data)))
797    if(i == 0) bank := Mux(miss_0_s2_0_latch,reservedRefillData(0), Mux(miss_1_s2_0_latch,reservedRefillData(1), missSlot(0).m_data))
798    else    bank    := Mux(miss_0_s2_1_latch,reservedRefillData(0), Mux(miss_1_s2_1_latch,reservedRefillData(1), missSlot(1).m_data))
799  }
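  // registerData carries refill data taken from the miss slots (including reserved secondary-miss
  // data), while sramData below carries SRAM or prefetch data; the select bit tells the IFU
  // which of the two to consume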
800
801  /** response to IFU */
802
803  (0 until PortNumber).map{ i =>
804    if(i ==0) toIFU(i).valid          := s2_fire
805       else   toIFU(i).valid          := s2_fire && s2_double_line
806    // when select is high, use sramData; otherwise, use registerData
807    toIFU(i).bits.registerData  := s2_register_datas(i)
808    toIFU(i).bits.sramData  := Mux(s2_port_hit(i), s2_hit_datas(i), s2_prefetch_hit_data(i))
809    toIFU(i).bits.select    := s2_port_hit(i) || s2_prefetch_hit(i)
810    toIFU(i).bits.paddr     := s2_req_paddr(i)
811    toIFU(i).bits.vaddr     := s2_req_vaddr(i)
812    toIFU(i).bits.tlbExcp.pageFault     := s2_except_tlb_pf(i)
813    toIFU(i).bits.tlbExcp.accessFault   := s2_except_tlb_af(i) || missSlot(i).m_corrupt || s2_except_pmp_af(i)
814    toIFU(i).bits.tlbExcp.mmio          := s2_mmio
815
816    when(RegNext(s2_fire && missSlot(i).m_corrupt)){
817      io.errors(i).valid            := true.B
818      io.errors(i).report_to_beu    := false.B // l2 should have reported it to the bus error unit; no need to do it again
819      io.errors(i).paddr            := RegNext(s2_req_paddr(i))
820      io.errors(i).source.tag       := false.B
821      io.errors(i).source.data      := false.B
822      io.errors(i).source.l2        := true.B
823    }
824  }
825  io.fetch.topdownIcacheMiss := !s2_hit
826  io.fetch.topdownItlbMiss := itlbMissStage(0)
827
828  (0 until 2).map {i =>
829    XSPerfAccumulate("port_" + i + "_only_hit_in_ipf", !s2_port_hit(i) && s2_prefetch_hit(i) && s2_fire)
830  }
831
832  io.perfInfo.only_0_hit      := only_0_hit_latch
833  io.perfInfo.only_0_miss     := only_0_miss_latch
834  io.perfInfo.hit_0_hit_1     := hit_0_hit_1_latch
835  io.perfInfo.hit_0_miss_1    := hit_0_miss_1_latch
836  io.perfInfo.miss_0_hit_1    := miss_0_hit_1_latch
837  io.perfInfo.miss_0_miss_1   := miss_0_miss_1_latch
838  io.perfInfo.hit_0_except_1  := hit_0_except_1_latch
839  io.perfInfo.miss_0_except_1 := miss_0_except_1_latch
840  io.perfInfo.except_0        := except_0_latch
841  io.perfInfo.bank_hit(0)     := only_0_miss_latch  || hit_0_hit_1_latch || hit_0_miss_1_latch || hit_0_except_1_latch
842  io.perfInfo.bank_hit(1)     := miss_0_hit_1_latch || hit_0_hit_1_latch
843  io.perfInfo.hit             := hit_0_hit_1_latch || only_0_hit_latch || hit_0_except_1_latch || except_0_latch
844
845  /** <PERF> fetch bubble generated by icache miss */
846
847  XSPerfAccumulate("icache_bubble_s2_miss",    s2_valid && !s2_fetch_finish )
848
849  // TODO: this perf is wrong!
850  val tlb_miss_vec = VecInit((0 until PortNumber).map(i => toITLB(i).valid && s0_can_go && fromITLB(i).bits.miss))
851  val tlb_has_miss = tlb_miss_vec.reduce(_ || _)
852  XSPerfAccumulate("icache_bubble_s0_tlb_miss",    s0_valid && tlb_has_miss )
853
854  if (env.EnableDifftest) {
855    val discards = (0 until PortNumber).map { i =>
856      val discard = toIFU(i).bits.tlbExcp.pageFault || toIFU(i).bits.tlbExcp.accessFault || toIFU(i).bits.tlbExcp.mmio
857      discard
858    }
859    (0 until PortNumber).map { i =>
860      val diffMainPipeOut = DifftestModule(new DiffRefillEvent, dontCare = true)
861      diffMainPipeOut.coreid := io.hartId
862      diffMainPipeOut.index := (4 + i).U
863      if (i == 0) diffMainPipeOut.valid := s2_fire && !discards(0)
864      else        diffMainPipeOut.valid := s2_fire && s2_double_line && !discards(0) && !discards(1)
865      diffMainPipeOut.addr := s2_req_paddr(i)
866      when (toIFU(i).bits.select.asBool) {
867        diffMainPipeOut.data := toIFU(i).bits.sramData.asTypeOf(diffMainPipeOut.data)
868      } .otherwise {
869        diffMainPipeOut.data := toIFU(i).bits.registerData.asTypeOf(diffMainPipeOut.data)
870      }
871      // idtfr: 1 -> data from icache, 2 -> data from ipf, 3 -> data from piq, 4 -> data from missUnit
872      when (s2_port_hit(i)) { diffMainPipeOut.idtfr := 1.U }
873        .elsewhen(s2_prefetch_hit(i)) {
874          when (s2_prefetch_hit_in_ipf(i)) { diffMainPipeOut.idtfr := 2.U  }
875            .elsewhen(s2_prefetch_hit_in_piq(i)) { diffMainPipeOut.idtfr := 3.U }
876            .otherwise { diffMainPipeOut.idtfr := DontCare; XSWarn(true.B, "should not be in this situation\n") }
877        }
878        .otherwise { diffMainPipeOut.idtfr := 4.U }
879      diffMainPipeOut
880    }
881  }
882}
883