// xref: /XiangShan/src/main/scala/xiangshan/frontend/IFU.scala (revision 4ab7f02c251981009225c54bc740213e3937eeab)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.rocket.RVCDecoder
import xiangshan._
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._
import utils._
import utility._
import xiangshan.backend.fu.{PMPReqBundle, PMPRespBundle}
import utility.ChiselDB
trait HasInstrMMIOConst extends HasXSParameter with HasIFUConst {
  def mmioBusWidth = 64
  def mmioBusBytes = mmioBusWidth / 8
  def maxInstrLen = 32
}

trait HasIFUConst extends HasXSParameter {
  def addrAlign(addr: UInt, bytes: Int, highest: Int): UInt = Cat(addr(highest-1, log2Ceil(bytes)), 0.U(log2Ceil(bytes).W))
  def fetchQueueSize = 2

  def getBasicBlockIdx(pc: UInt, start: UInt): UInt = {
    val byteOffset = pc - start
    (byteOffset - instBytes.U)(log2Ceil(PredictWidth), instOffsetBits)
  }
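
  // Worked example for getBasicBlockIdx (assuming PredictWidth = 16, instBytes = 2,
  // instOffsetBits = 1): with start = 0x1000 and pc = 0x1008, byteOffset = 8 and
  // (8 - 2) >> 1 = 3, i.e. the last 2-byte slot *before* pc. This is what the
  // fall-through range needs: getBasicBlockIdx(nextStartAddr, startAddr) gives the
  // index of the last instruction slot inside the predicted fall-through range.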
}

class IfuToFtqIO(implicit p: Parameters) extends XSBundle {
  val pdWb = Valid(new PredecodeWritebackBundle)
}

class IfuToBackendIO(implicit p: Parameters) extends XSBundle {
  // write to backend gpaddr mem
  val gpaddrMem_wen = Output(Bool())
  val gpaddrMem_waddr = Output(UInt(log2Ceil(FtqSize).W)) // Ftq Ptr
  // 2 gpaddrs, correspond to startAddr & nextLineAddr in bundle FtqICacheInfo
  // TODO: avoid cross page entry in Ftq
  val gpaddrMem_wdata = Output(UInt(GPAddrBits.W))
}

class FtqInterface(implicit p: Parameters) extends XSBundle {
  val fromFtq = Flipped(new FtqToIfuIO)
  val toFtq   = new IfuToFtqIO
}

class UncacheInterface(implicit p: Parameters) extends XSBundle {
  val fromUncache = Flipped(DecoupledIO(new InsUncacheResp))
  val toUncache   = DecoupledIO(new InsUncacheReq)
}

class NewIFUIO(implicit p: Parameters) extends XSBundle {
  val ftqInter         = new FtqInterface
  val icacheInter      = Flipped(new IFUICacheIO)
  val icacheStop       = Output(Bool())
  val icachePerfInfo   = Input(new ICachePerfInfo)
  val toIbuffer        = Decoupled(new FetchToIBuffer)
  val toBackend        = new IfuToBackendIO
  val uncacheInter     = new UncacheInterface
  val frontendTrigger  = Flipped(new FrontendTdataDistributeIO)
  val rob_commits      = Flipped(Vec(CommitWidth, Valid(new RobCommitInfo)))
  val iTLBInter        = new TlbRequestIO
  val pmp              = new ICachePMPBundle
  val mmioCommitRead   = new mmioCommitRead
}

// record the situation in which fallThruAddr falls into
// the middle of an RVI inst
class LastHalfInfo(implicit p: Parameters) extends XSBundle {
  val valid = Bool()
  val middlePC = UInt(VAddrBits.W)
  def matchThisBlock(startAddr: UInt) = valid && middlePC === startAddr
}

class IfuToPreDecode(implicit p: Parameters) extends XSBundle {
  val data                = if (HasCExtension) Vec(PredictWidth + 1, UInt(16.W)) else Vec(PredictWidth, UInt(32.W))
  val frontendTrigger     = new FrontendTdataDistributeIO
  val pc                  = Vec(PredictWidth, UInt(VAddrBits.W))
}


class IfuToPredChecker(implicit p: Parameters) extends XSBundle {
  val ftqOffset     = Valid(UInt(log2Ceil(PredictWidth).W))
  val jumpOffset    = Vec(PredictWidth, UInt(XLEN.W))
  val target        = UInt(VAddrBits.W)
  val instrRange    = Vec(PredictWidth, Bool())
  val instrValid    = Vec(PredictWidth, Bool())
  val pds           = Vec(PredictWidth, new PreDecodeInfo)
  val pc            = Vec(PredictWidth, UInt(VAddrBits.W))
  val fire_in       = Bool()
}

class FetchToIBufferDB extends Bundle {
  val start_addr = UInt(39.W)
  val instr_count = UInt(32.W)
  val exception = Bool()
  val is_cache_hit = Bool()
}

class IfuWbToFtqDB extends Bundle {
  val start_addr = UInt(39.W)
  val is_miss_pred = Bool()
  val miss_pred_offset = UInt(32.W)
  val checkJalFault = Bool()
  val checkRetFault = Bool()
  val checkTargetFault = Bool()
  val checkNotCFIFault = Bool()
  val checkInvalidTaken = Bool()
}

class NewIFU(implicit p: Parameters) extends XSModule
  with HasICacheParameters
  with HasIFUConst
  with HasPdConst
  with HasCircularQueuePtrHelper
  with HasPerfEvents
  with HasTlbConst
{
  val io = IO(new NewIFUIO)
  val (toFtq, fromFtq)    = (io.ftqInter.toFtq, io.ftqInter.fromFtq)
  val fromICache = io.icacheInter.resp
  val (toUncache, fromUncache) = (io.uncacheInter.toUncache, io.uncacheInter.fromUncache)

  def isCrossLineReq(start: UInt, end: UInt): Bool = start(blockOffBits) ^ end(blockOffBits)

  def numOfStage = 3
  // PcCutPoint equals the bit position of lower_result's carry-out (overflow) bit
  def PcCutPoint = (VAddrBits/4) - 1
  def CatPC(low: UInt, high: UInt, high1: UInt): UInt = {
    Mux(
      low(PcCutPoint),
      Cat(high1, low(PcCutPoint-1, 0)),
      Cat(high, low(PcCutPoint-1, 0))
    )
  }
  def CatPC(lowVec: Vec[UInt], high: UInt, high1: UInt): Vec[UInt] = VecInit(lowVec.map(CatPC(_, high, high1)))
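
  // Sketch of the split-add equivalence behind CatPC (assuming VAddrBits = 39,
  // so PcCutPoint = 39/4 - 1 = 8): instead of PredictWidth full-width adders,
  // each slot adds only the low PcCutPoint bits and keeps one carry-out bit:
  //   pc(i) = startAddr + 2*i
  //         = Cat(startAddr(38, 8) + carry, (startAddr(7, 0) + 2*i)(7, 0))
  // where carry = low(PcCutPoint), so CatPC simply selects between the shared
  // precomputed high part and high part + 1.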
  require(numOfStage > 1, "IFU numOfStage must be greater than 1")
  val topdown_stages = RegInit(VecInit(Seq.fill(numOfStage)(0.U.asTypeOf(new FrontendTopDownBundle))))
  // bubble events in IFU, only happen in stage 1
  val icacheMissBubble = Wire(Bool())
  val itlbMissBubble   = Wire(Bool())

  // only driven by clock, not valid-ready
  topdown_stages(0) := fromFtq.req.bits.topdown_info
  for (i <- 1 until numOfStage) {
    topdown_stages(i) := topdown_stages(i - 1)
  }
  when (icacheMissBubble) {
    topdown_stages(1).reasons(TopDownCounters.ICacheMissBubble.id) := true.B
  }
  when (itlbMissBubble) {
    topdown_stages(1).reasons(TopDownCounters.ITLBMissBubble.id) := true.B
  }
  io.toIbuffer.bits.topdown_info := topdown_stages(numOfStage - 1)
  when (fromFtq.topdown_redirect.valid) {
    // only redirect from backend, IFU redirect itself is handled elsewhere
    when (fromFtq.topdown_redirect.bits.debugIsCtrl) {
      /*
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ControlRedirectBubble.id) := true.B
      */
      when (fromFtq.topdown_redirect.bits.ControlBTBMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.BTBMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.TAGEMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.TAGEMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.SCMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.SCMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.SCMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.ITTAGEMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      } .elsewhen (fromFtq.topdown_redirect.bits.RASMissBubble) {
        for (i <- 0 until numOfStage) {
          topdown_stages(i).reasons(TopDownCounters.RASMissBubble.id) := true.B
        }
        io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.RASMissBubble.id) := true.B
      }
    } .elsewhen (fromFtq.topdown_redirect.bits.debugIsMemVio) {
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    } .otherwise {
      for (i <- 0 until numOfStage) {
        topdown_stages(i).reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
      }
      io.toIbuffer.bits.topdown_info.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    }
  }

  class TlbExcept(implicit p: Parameters) extends XSBundle {
    val pageFault = Bool()
    val accessFault = Bool()
    val mmio = Bool()
  }

  val preDecoder       = Module(new PreDecode)

  val predChecker     = Module(new PredChecker)
  val frontendTrigger = Module(new FrontendTrigger)
  val (checkerIn, checkerOutStage1, checkerOutStage2) = (predChecker.io.in, predChecker.io.out.stage1Out, predChecker.io.out.stage2Out)

  io.iTLBInter.req_kill := false.B
  io.iTLBInter.resp.ready := true.B

  /**
    ******************************************************************************
    * IFU Stage 0
    * - send cacheline fetch request to ICacheMainPipe
    ******************************************************************************
    */

  val f0_valid                             = fromFtq.req.valid
  val f0_ftq_req                           = fromFtq.req.bits
  val f0_doubleLine                        = fromFtq.req.bits.crossCacheline
  val f0_vSetIdx                           = VecInit(get_idx(f0_ftq_req.startAddr), get_idx(f0_ftq_req.nextlineStart))
  val f0_fire                              = fromFtq.req.fire

  val f0_flush, f1_flush, f2_flush, f3_flush = WireInit(false.B)
  val from_bpu_f0_flush, from_bpu_f1_flush, from_bpu_f2_flush, from_bpu_f3_flush = WireInit(false.B)

  from_bpu_f0_flush := fromFtq.flushFromBpu.shouldFlushByStage2(f0_ftq_req.ftqIdx) ||
                       fromFtq.flushFromBpu.shouldFlushByStage3(f0_ftq_req.ftqIdx)

  val wb_redirect, mmio_redirect, backend_redirect = WireInit(false.B)
  val f3_wb_not_flush = WireInit(false.B)

  backend_redirect := fromFtq.redirect.valid
  f3_flush := backend_redirect || (wb_redirect && !f3_wb_not_flush)
  f2_flush := backend_redirect || mmio_redirect || wb_redirect
  f1_flush := f2_flush || from_bpu_f1_flush
  f0_flush := f1_flush || from_bpu_f0_flush

  val f1_ready, f2_ready, f3_ready         = WireInit(false.B)

  fromFtq.req.ready := f1_ready && io.icacheInter.icacheReady


  when (wb_redirect) {
    when (f3_wb_not_flush) {
      topdown_stages(2).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }
    for (i <- 0 until numOfStage - 1) {
      topdown_stages(i).reasons(TopDownCounters.BTBMissBubble.id) := true.B
    }
  }

  /** <PERF> f0 fetch bubble */

  XSPerfAccumulate("fetch_bubble_ftq_not_valid",   !fromFtq.req.valid && fromFtq.req.ready  )
  // XSPerfAccumulate("fetch_bubble_pipe_stall",    f0_valid && toICache(0).ready && toICache(1).ready && !f1_ready )
  // XSPerfAccumulate("fetch_bubble_icache_0_busy",   f0_valid && !toICache(0).ready  )
  // XSPerfAccumulate("fetch_bubble_icache_1_busy",   f0_valid && !toICache(1).ready  )
  XSPerfAccumulate("fetch_flush_backend_redirect",   backend_redirect  )
  XSPerfAccumulate("fetch_flush_wb_redirect",    wb_redirect  )
  XSPerfAccumulate("fetch_flush_bpu_f1_flush",   from_bpu_f1_flush  )
  XSPerfAccumulate("fetch_flush_bpu_f0_flush",   from_bpu_f0_flush  )


  /**
    ******************************************************************************
    * IFU Stage 1
    * - calculate pc/half_pc/cut_ptr for every instruction
    ******************************************************************************
    */

  val f1_valid      = RegInit(false.B)
  val f1_ftq_req    = RegEnable(f0_ftq_req,    f0_fire)
  // val f1_situation  = RegEnable(f0_situation,  f0_fire)
  val f1_doubleLine = RegEnable(f0_doubleLine, f0_fire)
  val f1_vSetIdx    = RegEnable(f0_vSetIdx,    f0_fire)
  val f1_fire       = f1_valid && f2_ready

  f1_ready := f1_fire || !f1_valid

  from_bpu_f1_flush := fromFtq.flushFromBpu.shouldFlushByStage3(f1_ftq_req.ftqIdx) && f1_valid
  // from_bpu_f1_flush := false.B

  when(f1_flush)                  {f1_valid  := false.B}
  .elsewhen(f0_fire && !f0_flush) {f1_valid  := true.B}
  .elsewhen(f1_fire)              {f1_valid  := false.B}

  val f1_pc_high            = f1_ftq_req.startAddr(VAddrBits-1, PcCutPoint)
  val f1_pc_high_plus1      = f1_pc_high + 1.U

  /**
   * To reduce power consumption, avoid calculating the full PC value in stage 1.
   * Original logic, kept for reference (deprecated):
   * val f1_pc                 = VecInit(f1_pc_lower_result.map{ i =>
   *  Mux(i(f1_pc_adder_cut_point), Cat(f1_pc_high_plus1,i(f1_pc_adder_cut_point-1,0)), Cat(f1_pc_high,i(f1_pc_adder_cut_point-1,0)))})
   *
   */
  val f1_pc_lower_result    = VecInit((0 until PredictWidth).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(PcCutPoint-1, 0)) + (i * 2).U)) // cat with overflow bit

  val f1_pc                 = CatPC(f1_pc_lower_result, f1_pc_high, f1_pc_high_plus1)

  val f1_half_snpc_lower_result = VecInit((0 until PredictWidth).map(i => Cat(0.U(1.W), f1_ftq_req.startAddr(PcCutPoint-1, 0)) + ((i+2) * 2).U)) // cat with overflow bit
  val f1_half_snpc            = CatPC(f1_half_snpc_lower_result, f1_pc_high, f1_pc_high_plus1)

  if (env.FPGAPlatform){
    val f1_pc_diff          = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + (i * 2).U))
    val f1_half_snpc_diff   = VecInit((0 until PredictWidth).map(i => f1_ftq_req.startAddr + ((i+2) * 2).U))

    XSError(f1_pc.zip(f1_pc_diff).map{ case (a,b) => a.asUInt =/= b.asUInt }.reduce(_||_), "f1_pc adder cut fail")
    XSError(f1_half_snpc.zip(f1_half_snpc_diff).map{ case (a,b) => a.asUInt =/= b.asUInt }.reduce(_||_),  "f1_half_snpc adder cut fail")
  }

  val f1_cut_ptr            = if(HasCExtension)  VecInit((0 until PredictWidth + 1).map(i =>  Cat(0.U(2.W), f1_ftq_req.startAddr(blockOffBits-1, 1)) + i.U ))
                                  else           VecInit((0 until PredictWidth).map(i =>     Cat(0.U(2.W), f1_ftq_req.startAddr(blockOffBits-1, 2)) + i.U ))
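
  // f1_cut_ptr(i) is a halfword index into the doubled cacheline built in f2
  // (see cut() below): startAddr(blockOffBits-1, 1) is the starting halfword
  // within the first line, and +i walks PredictWidth+1 consecutive halfwords,
  // possibly crossing into the second line. Example (assuming blockOffBits = 6):
  // a fetch starting at byte offset 48 cuts halfwords 24, 25, ..., 24+PredictWidth.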

  /**
    ******************************************************************************
    * IFU Stage 2
    * - icache response data (latched for pipeline stop)
    * - generate exception bits for every instruction (page fault/access fault/mmio)
    * - generate predicted instruction range (1 means this instruction is in this fetch packet)
    * - cut data from cachelines into the fetch packet's instruction code
    * - instruction predecode and RVC expand
    ******************************************************************************
    */

  val icacheRespAllValid = WireInit(false.B)

  val f2_valid      = RegInit(false.B)
  val f2_ftq_req    = RegEnable(f1_ftq_req,    f1_fire)
  // val f2_situation  = RegEnable(f1_situation,  f1_fire)
  val f2_doubleLine = RegEnable(f1_doubleLine, f1_fire)
  val f2_vSetIdx    = RegEnable(f1_vSetIdx,    f1_fire)
  val f2_fire       = f2_valid && f3_ready && icacheRespAllValid

  f2_ready := f2_fire || !f2_valid
  // TODO: addr compare may be timing critical
  val f2_icache_all_resp_wire = fromICache(0).valid && (fromICache(0).bits.vaddr === f2_ftq_req.startAddr) &&
    ((fromICache(1).valid && (fromICache(1).bits.vaddr === f2_ftq_req.nextlineStart)) || !f2_doubleLine)
  val f2_icache_all_resp_reg        = RegInit(false.B)

  icacheRespAllValid := f2_icache_all_resp_reg || f2_icache_all_resp_wire

  icacheMissBubble := io.icacheInter.topdownIcacheMiss
  itlbMissBubble   := io.icacheInter.topdownItlbMiss

  io.icacheStop := !f3_ready

  when(f2_flush)                                              {f2_icache_all_resp_reg := false.B}
  .elsewhen(f2_valid && f2_icache_all_resp_wire && !f3_ready) {f2_icache_all_resp_reg := true.B}
  .elsewhen(f2_fire && f2_icache_all_resp_reg)                {f2_icache_all_resp_reg := false.B}

  when(f2_flush)                  {f2_valid := false.B}
  .elsewhen(f1_fire && !f1_flush) {f2_valid := true.B }
  .elsewhen(f2_fire)              {f2_valid := false.B}

  val f2_except_pf    = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.pageFault))
  val f2_except_gpf   = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.guestPageFault))
  val f2_except_af    = VecInit((0 until PortNumber).map(i => fromICache(i).bits.tlbExcp.accessFault))
  // paddr and gpaddr of [startAddr, nextLineAddr]
  val f2_paddrs       = VecInit((0 until PortNumber).map(i => fromICache(i).bits.paddr))
  val f2_gpaddr       = fromICache(0).bits.gpaddr
  val f2_mmio         = fromICache(0).bits.tlbExcp.mmio &&
    !fromICache(0).bits.tlbExcp.accessFault &&
    !fromICache(0).bits.tlbExcp.pageFault   &&
    !fromICache(0).bits.tlbExcp.guestPageFault

  /**
    * To reduce the number of registers, f2 latches only the split PC parts. Original code:
    * f2_pc = RegEnable(f1_pc, f1_fire)
    */
  val f2_pc_lower_result        = RegEnable(f1_pc_lower_result, f1_fire)
  val f2_pc_high                = RegEnable(f1_pc_high, f1_fire)
  val f2_pc_high_plus1          = RegEnable(f1_pc_high_plus1, f1_fire)
  val f2_pc                     = CatPC(f2_pc_lower_result, f2_pc_high, f2_pc_high_plus1)

  val f2_cut_ptr                = RegEnable(f1_cut_ptr, f1_fire)
  val f2_resend_vaddr           = RegEnable(f1_ftq_req.startAddr + 2.U, f1_fire)

  def isNextLine(pc: UInt, startAddr: UInt) = {
    startAddr(blockOffBits) ^ pc(blockOffBits)
  }

  def isLastInLine(pc: UInt) = {
    pc(blockOffBits - 1, 0) === "b111110".U
  }

  val f2_foldpc = VecInit(f2_pc.map(i => XORFold(i(VAddrBits-1,1), MemPredPCWidth)))
  val f2_jump_range = Fill(PredictWidth, !f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~f2_ftq_req.ftqOffset.bits
  val f2_ftr_range  = Fill(PredictWidth,  f2_ftq_req.ftqOffset.valid) | Fill(PredictWidth, 1.U(1.W)) >> ~getBasicBlockIdx(f2_ftq_req.nextStartAddr, f2_ftq_req.startAddr)
  val f2_instr_range = f2_jump_range & f2_ftr_range
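
  // Mask sketch behind the two ranges (assuming PredictWidth = 16):
  // Fill(PredictWidth, 1.U(1.W)) is 16'hFFFF, and for a 4-bit offset
  // ~offset = 15 - offset, so 16'hFFFF >> ~offset keeps exactly the low
  // (offset + 1) bits set. E.g. a taken branch at ftqOffset = 3 gives
  // f2_jump_range = 16'h000F: only slots 0..3 are inside this fetch packet.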
  val f2_pf_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_pf(0)   ||  isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine &&  f2_except_pf(1))))
  val f2_af_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_af(0)   ||  isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_af(1))))
  val f2_gpf_vec = VecInit((0 until PredictWidth).map(i => (!isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_except_gpf(0) || isNextLine(f2_pc(i), f2_ftq_req.startAddr) && f2_doubleLine && f2_except_gpf(1))))
  val f2_perf_info    = io.icachePerfInfo

  def cut(cacheline: UInt, cutPtr: Vec[UInt]): Vec[UInt] = {
    require(HasCExtension)
    // if(HasCExtension){
      val result   = Wire(Vec(PredictWidth + 1, UInt(16.W)))
      val dataVec  = cacheline.asTypeOf(Vec(blockBytes, UInt(16.W))) // blockBytes 16-bit elements: two cachelines
      (0 until PredictWidth + 1).foreach( i =>
        result(i) := dataVec(cutPtr(i)) // the max ptr is 3*blockBytes/4-1
      )
      result
    // } else {
    //   val result   = Wire(Vec(PredictWidth, UInt(32.W)) )
    //   val dataVec  = cacheline.asTypeOf(Vec(blockBytes * 2/ 4, UInt(32.W)))
    //   (0 until PredictWidth).foreach( i =>
    //     result(i) := dataVec(cutPtr(i))
    //   )
    //   result
    // }
  }
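
  // cut() sketch (assuming a 64-byte cacheline, so blockBytes = 64 and the
  // doubled line is 64 halfwords): a fetch starting at halfword 24 yields
  // result(i) = dataVec(24 + i) for i = 0..PredictWidth, crossing from the
  // first line into the second at index 32; hence "max ptr = 3*blockBytes/4-1".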

  val f2_cache_response_data = fromICache.map(_.bits.data)
  val f2_data_2_cacheline = Cat(f2_cache_response_data(0), f2_cache_response_data(0))
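  // Note: only port 0's response is used here; it appears to already carry the
  // merged double-line fetch data at this revision, so duplicating it merely
  // pads f2_data_2_cacheline to the 2-cacheline width that cut() expects.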

  val f2_cut_data   = cut(f2_data_2_cacheline, f2_cut_ptr)

  /** predecode (includes RVC expander) */
  // preDecoderRegIn.data := f2_reg_cut_data
  // preDecoderRegInIn.frontendTrigger := io.frontendTrigger
  // preDecoderRegInIn.csrTriggerEnable := io.csrTriggerEnable
  // preDecoderRegIn.pc  := f2_pc

  val preDecoderIn  = preDecoder.io.in
  preDecoderIn.valid := f2_valid
  preDecoderIn.bits.data := f2_cut_data
  preDecoderIn.bits.frontendTrigger := io.frontendTrigger
  preDecoderIn.bits.pc  := f2_pc
  val preDecoderOut = preDecoder.io.out

  //val f2_expd_instr     = preDecoderOut.expInstr
  val f2_instr          = preDecoderOut.instr
  val f2_pd             = preDecoderOut.pd
  val f2_jump_offset    = preDecoderOut.jumpOffset
  val f2_hasHalfValid   = preDecoderOut.hasHalfValid
  val f2_crossPageFault = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_pf(0) && f2_doubleLine &&  f2_except_pf(1) && !f2_pd(i).isRVC ))
  val f2_crossGuestPageFault = VecInit((0 until PredictWidth).map(i => isLastInLine(f2_pc(i)) && !f2_except_gpf(0) && f2_doubleLine && f2_except_gpf(1) && !f2_pd(i).isRVC ))
  XSPerfAccumulate("fetch_bubble_icache_not_resp",   f2_valid && !icacheRespAllValid )


  /**
    ******************************************************************************
    * IFU Stage 3
    * - handle MMIO instruction
    *  - send request to Uncache fetch Unit
    *  - every packet includes exactly 1 MMIO instruction
    *  - MMIO instructions stop the fetch pipeline until committing from RoB
    *  - flush to snpc (send ifu_redirect to Ftq)
    * - Ibuffer enqueue
    * - check predict result in Frontend (jalFault/retFault/notCFIFault/invalidTakenFault/targetFault)
    * - handle last half RVI instruction
    ******************************************************************************
    */

  val f3_valid          = RegInit(false.B)
  val f3_ftq_req        = RegEnable(f2_ftq_req,    f2_fire)
  // val f3_situation      = RegEnable(f2_situation,  f2_fire)
  val f3_doubleLine     = RegEnable(f2_doubleLine, f2_fire)
  val f3_fire           = io.toIbuffer.fire

  f3_ready := f3_fire || !f3_valid

  val f3_cut_data       = RegEnable(f2_cut_data, f2_fire)

  val f3_except_pf      = RegEnable(f2_except_pf,  f2_fire)
  val f3_except_af      = RegEnable(f2_except_af,  f2_fire)
  val f3_except_gpf     = RegEnable(f2_except_gpf, f2_fire)
  val f3_mmio           = RegEnable(f2_mmio,       f2_fire)

  //val f3_expd_instr     = RegEnable(f2_expd_instr,  f2_fire)
  val f3_instr          = RegEnable(f2_instr, f2_fire)
  val f3_expd_instr     = VecInit((0 until PredictWidth).map{ i =>
    val expander       = Module(new RVCExpander)
    expander.io.in := f3_instr(i)
    expander.io.out.bits
  })

  val f3_pd_wire        = RegEnable(f2_pd,          f2_fire)
  val f3_pd             = WireInit(f3_pd_wire)
  val f3_jump_offset    = RegEnable(f2_jump_offset, f2_fire)
  val f3_af_vec         = RegEnable(f2_af_vec,      f2_fire)
  val f3_pf_vec         = RegEnable(f2_pf_vec,      f2_fire)
  val f3_gpf_vec        = RegEnable(f2_gpf_vec,     f2_fire)

  val f3_pc_lower_result        = RegEnable(f2_pc_lower_result, f2_fire)
  val f3_pc_high                = RegEnable(f2_pc_high, f2_fire)
  val f3_pc_high_plus1          = RegEnable(f2_pc_high_plus1, f2_fire)
  val f3_pc             = CatPC(f3_pc_lower_result, f3_pc_high, f3_pc_high_plus1)

  val f3_pc_last_lower_result_plus2 = RegEnable(f2_pc_lower_result(PredictWidth - 1) + 2.U, f2_fire)
  val f3_pc_last_lower_result_plus4 = RegEnable(f2_pc_lower_result(PredictWidth - 1) + 4.U, f2_fire)
  //val f3_half_snpc      = RegEnable(f2_half_snpc,   f2_fire)

  /**
    ***********************************************************************
    * half_snpc(i) equals pc(i) + 4, so most entries can reuse pc(i+2) instead of a dedicated adder.
    ***********************************************************************
    */
  val f3_half_snpc      = Wire(Vec(PredictWidth, UInt(VAddrBits.W)))
  for(i <- 0 until PredictWidth){
    if(i == (PredictWidth - 2)){
      f3_half_snpc(i)   := CatPC(f3_pc_last_lower_result_plus2, f3_pc_high, f3_pc_high_plus1)
    } else if (i == (PredictWidth - 1)){
      f3_half_snpc(i)   := CatPC(f3_pc_last_lower_result_plus4, f3_pc_high, f3_pc_high_plus1)
    } else {
      f3_half_snpc(i)   := f3_pc(i+2)
    }
  }
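
  // Example (assuming PredictWidth = 16): f3_half_snpc(i) = f3_pc(i) + 4, so for
  // i <= PredictWidth - 3 it is exactly f3_pc(i + 2); only the last two slots
  // need the dedicated +2/+4 lower results registered from f2 above.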

  val f3_instr_range    = RegEnable(f2_instr_range, f2_fire)
  val f3_foldpc         = RegEnable(f2_foldpc,      f2_fire)
  val f3_crossPageFault = RegEnable(f2_crossPageFault,           f2_fire)
  val f3_crossGuestPageFault = RegEnable(f2_crossGuestPageFault, f2_fire)
  val f3_hasHalfValid   = RegEnable(f2_hasHalfValid,             f2_fire)
  val f3_except         = VecInit((0 until 2).map{i => f3_except_pf(i) || f3_except_af(i) || f3_except_gpf(i)})
  val f3_has_except     = f3_valid && (f3_except_af.reduce(_||_) || f3_except_pf.reduce(_||_) || f3_except_gpf.reduce(_||_))
  val f3_paddrs         = RegEnable(f2_paddrs,  f2_fire)
  val f3_gpaddr         = RegEnable(f2_gpaddr,  f2_fire)
  val f3_resend_vaddr   = RegEnable(f2_resend_vaddr,             f2_fire)

  // Expand by 1 bit to prevent overflow in the assertion below
  val f3_ftq_req_startAddr      = Cat(0.U(1.W), f3_ftq_req.startAddr)
  val f3_ftq_req_nextStartAddr  = Cat(0.U(1.W), f3_ftq_req.nextStartAddr)
  // brType, isCall and isRet generation is delayed to the f3 stage
  val f3Predecoder = Module(new F3Predecoder)

  f3Predecoder.io.in.instr := f3_instr

  f3_pd.zipWithIndex.map{ case (pd,i) =>
    pd.brType := f3Predecoder.io.out.pd(i).brType
    pd.isCall := f3Predecoder.io.out.pd(i).isCall
    pd.isRet  := f3Predecoder.io.out.pd(i).isRet
  }

  val f3PdDiff = f3_pd_wire.zip(f3_pd).map{ case (a,b) => a.asUInt =/= b.asUInt }.reduce(_||_)
  XSError(f3_valid && f3PdDiff, "f3 pd diff")

  when(f3_valid && !f3_ftq_req.ftqOffset.valid){
    assert(f3_ftq_req_startAddr + (2*PredictWidth).U >= f3_ftq_req_nextStartAddr, s"More than ${2*PredictWidth} Bytes fetch is not allowed!")
  }

  /*** MMIO State Machine ***/
  val f3_mmio_data    = Reg(Vec(2, UInt(16.W)))
  val mmio_is_RVC     = RegInit(false.B)
  val mmio_resend_addr = RegInit(0.U(PAddrBits.W))
  val mmio_resend_af  = RegInit(false.B)
  val mmio_resend_pf  = RegInit(false.B)
  val mmio_resend_gpf = RegInit(false.B)

  // last instruction finished
  val is_first_instr = RegInit(true.B)
  /*** Determine whether the MMIO instruction is executable based on the previous prediction block ***/
  io.mmioCommitRead.mmioFtqPtr := RegNext(f3_ftq_req.ftqIdx - 1.U)

  val m_idle :: m_waitLastCmt :: m_sendReq :: m_waitResp :: m_sendTLB :: m_tlbResp :: m_sendPMP :: m_resendReq :: m_waitResendResp :: m_waitCommit :: m_commited :: Nil = Enum(11)
  val mmio_state = RegInit(m_idle)
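  // State flow sketch: m_idle -> m_waitLastCmt (wait until the previous
  // prediction block has fully committed) -> m_sendReq -> m_waitResp (uncache
  // fetch); if the response is a non-RVC instruction straddling the 64-bit bus
  // word, detour through m_sendTLB -> m_tlbResp -> m_sendPMP -> m_resendReq ->
  // m_waitResendResp to fetch the upper halfword; then m_waitCommit ->
  // m_commited once the RoB commits the instruction, and back to m_idle.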

  val f3_req_is_mmio     = f3_mmio && f3_valid
  val mmio_commit = VecInit(io.rob_commits.map{commit => commit.valid && commit.bits.ftqIdx === f3_ftq_req.ftqIdx && commit.bits.ftqOffset === 0.U}).asUInt.orR
  val f3_mmio_req_commit = f3_req_is_mmio && mmio_state === m_commited

  val f3_mmio_to_commit =  f3_req_is_mmio && mmio_state === m_waitCommit
  val f3_mmio_to_commit_next = RegNext(f3_mmio_to_commit)
  val f3_mmio_can_go      = f3_mmio_to_commit && !f3_mmio_to_commit_next

  val fromFtqRedirectReg = Wire(fromFtq.redirect.cloneType)
  fromFtqRedirectReg.bits := RegEnable(fromFtq.redirect.bits, 0.U.asTypeOf(fromFtq.redirect.bits), fromFtq.redirect.valid)
  fromFtqRedirectReg.valid := RegNext(fromFtq.redirect.valid, init = false.B)
  val mmioF3Flush           = RegNext(f3_flush, init = false.B)
  val f3_ftq_flush_self     = fromFtqRedirectReg.valid && RedirectLevel.flushItself(fromFtqRedirectReg.bits.level)
  val f3_ftq_flush_by_older = fromFtqRedirectReg.valid && isBefore(fromFtqRedirectReg.bits.ftqIdx, f3_ftq_req.ftqIdx)

  val f3_need_not_flush = f3_req_is_mmio && fromFtqRedirectReg.valid && !f3_ftq_flush_self && !f3_ftq_flush_by_older

  /**
    **********************************************************************************
    * We defer instruction fetching when encountering an MMIO instruction to ensure that the MMIO region is not negatively impacted.
    * An exception is made when the very first instruction after reset is an MMIO instruction.
    **********************************************************************************
    */
  when(is_first_instr && f3_fire){
    is_first_instr  := false.B
  }

  when(f3_flush && !f3_req_is_mmio)                                                 {f3_valid := false.B}
  .elsewhen(mmioF3Flush && f3_req_is_mmio && !f3_need_not_flush)                    {f3_valid := false.B}
  .elsewhen(f2_fire && !f2_flush )                                                  {f3_valid := true.B }
  .elsewhen(io.toIbuffer.fire && !f3_req_is_mmio)                                   {f3_valid := false.B}
  .elsewhen(f3_req_is_mmio && f3_mmio_req_commit)                                   {f3_valid := false.B}

  val f3_mmio_use_seq_pc = RegInit(false.B)

  val (redirect_ftqIdx, redirect_ftqOffset)  = (fromFtqRedirectReg.bits.ftqIdx, fromFtqRedirectReg.bits.ftqOffset)
  val redirect_mmio_req = fromFtqRedirectReg.valid && redirect_ftqIdx === f3_ftq_req.ftqIdx && redirect_ftqOffset === 0.U

  when(RegNext(f2_fire && !f2_flush) && f3_req_is_mmio)        { f3_mmio_use_seq_pc := true.B  }
  .elsewhen(redirect_mmio_req)                                 { f3_mmio_use_seq_pc := false.B }

  f3_ready := Mux(f3_req_is_mmio, io.toIbuffer.ready && f3_mmio_req_commit || !f3_valid, io.toIbuffer.ready || !f3_valid)

  // mmio state machine
  switch(mmio_state){
    is(m_idle){
      when(f3_req_is_mmio){
        mmio_state := m_waitLastCmt
      }
    }

    is(m_waitLastCmt){
      when(is_first_instr){
        mmio_state := m_sendReq
      }.otherwise{
        mmio_state := Mux(io.mmioCommitRead.mmioLastCommit, m_sendReq, m_waitLastCmt)
      }
    }

    is(m_sendReq){
      mmio_state := Mux(toUncache.fire, m_waitResp, m_sendReq)
    }

    is(m_waitResp){
      when(fromUncache.fire){
        val isRVC = fromUncache.bits.data(1,0) =/= 3.U
        val needResend = !isRVC && f3_paddrs(0)(2,1) === 3.U
        mmio_state := Mux(needResend, m_sendTLB, m_waitCommit)

        mmio_is_RVC := isRVC
        f3_mmio_data(0)   := fromUncache.bits.data(15,0)
        f3_mmio_data(1)   := fromUncache.bits.data(31,16)
      }
    }
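
    // needResend above: a non-RVC (4-byte) instruction whose low halfword sits
    // at byte offset 6 of the 64-bit bus word (f3_paddrs(0)(2,1) === 3.U) has
    // its upper halfword in the next word, so a second uncache request is
    // needed; that path is retranslated and rechecked via ITLB and PMP first.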

    is(m_sendTLB){
      when( io.iTLBInter.req.valid && !io.iTLBInter.resp.bits.miss ){
        mmio_state := m_tlbResp
      }
    }

    is(m_tlbResp){
      val tlbExcept = io.iTLBInter.resp.bits.excp(0).pf.instr ||
                      io.iTLBInter.resp.bits.excp(0).af.instr ||
                      io.iTLBInter.resp.bits.excp(0).gpf.instr
      mmio_state := Mux(tlbExcept, m_waitCommit, m_sendPMP)
      mmio_resend_addr := io.iTLBInter.resp.bits.paddr(0)
      mmio_resend_af := mmio_resend_af || io.iTLBInter.resp.bits.excp(0).af.instr
      mmio_resend_pf := mmio_resend_pf || io.iTLBInter.resp.bits.excp(0).pf.instr
      mmio_resend_gpf := mmio_resend_gpf || io.iTLBInter.resp.bits.excp(0).gpf.instr
    }

    is(m_sendPMP){
      val pmpExcpAF = io.pmp.resp.instr || !io.pmp.resp.mmio
      mmio_state := Mux(pmpExcpAF, m_waitCommit, m_resendReq)
      mmio_resend_af := pmpExcpAF
    }

    is(m_resendReq){
      mmio_state := Mux(toUncache.fire, m_waitResendResp, m_resendReq)
    }

    is(m_waitResendResp){
      when(fromUncache.fire){
        mmio_state := m_waitCommit
        f3_mmio_data(1)   := fromUncache.bits.data(15,0)
      }
    }

    is(m_waitCommit){
      when(mmio_commit){
        mmio_state := m_commited
      }
    }

    // normal MMIO instruction committed: return to idle
    is(m_commited){
      mmio_state := m_idle
      mmio_is_RVC := false.B
      mmio_resend_addr := 0.U
    }
  }

  // Exception or flush by an older branch prediction block
  // Condition is from RegNext(fromFtq.redirect), 1 cycle after a backend redirect
  when(f3_ftq_flush_self || f3_ftq_flush_by_older)  {
    mmio_state := m_idle
    mmio_is_RVC := false.B
    mmio_resend_addr := 0.U
    mmio_resend_af := false.B
    f3_mmio_data.map(_ := 0.U)
  }

  toUncache.valid     := ((mmio_state === m_sendReq) || (mmio_state === m_resendReq)) && f3_req_is_mmio
  toUncache.bits.addr := Mux((mmio_state === m_resendReq), mmio_resend_addr, f3_paddrs(0))
  fromUncache.ready   := true.B

  io.iTLBInter.req.valid         := (mmio_state === m_sendTLB) && f3_req_is_mmio
  io.iTLBInter.req.bits.size     := 3.U
  io.iTLBInter.req.bits.vaddr    := f3_resend_vaddr
  io.iTLBInter.req.bits.debug.pc := f3_resend_vaddr
  io.iTLBInter.req.bits.hyperinst:= DontCare
  io.iTLBInter.req.bits.hlvx     := DontCare

  io.iTLBInter.req.bits.kill                := false.B // IFU uses itlb only for MMIO here; no sync needed, so keep it false
  io.iTLBInter.req.bits.cmd                 := TlbCmd.exec
  io.iTLBInter.req.bits.memidx              := DontCare
  io.iTLBInter.req.bits.debug.robIdx        := DontCare
  io.iTLBInter.req.bits.no_translate        := false.B
  io.iTLBInter.req.bits.debug.isFirstIssue  := DontCare

  io.pmp.req.valid := (mmio_state === m_sendPMP) && f3_req_is_mmio
  io.pmp.req.bits.addr  := mmio_resend_addr
  io.pmp.req.bits.size  := 3.U
  io.pmp.req.bits.cmd   := TlbCmd.exec

  val f3_lastHalf       = RegInit(0.U.asTypeOf(new LastHalfInfo))

  val f3_predecode_range = VecInit(preDecoderOut.pd.map(inst => inst.valid)).asUInt
  val f3_mmio_range      = VecInit((0 until PredictWidth).map(i => if(i == 0) true.B else false.B))
  val f3_instr_valid     = Wire(Vec(PredictWidth, Bool()))

  /*** prediction result check ***/
  checkerIn.ftqOffset   := f3_ftq_req.ftqOffset
  checkerIn.jumpOffset  := f3_jump_offset
  checkerIn.target      := f3_ftq_req.nextStartAddr
  checkerIn.instrRange  := f3_instr_range.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.instrValid  := f3_instr_valid.asTypeOf(Vec(PredictWidth, Bool()))
  checkerIn.pds         := f3_pd
  checkerIn.pc          := f3_pc
  checkerIn.fire_in     := RegNext(f2_fire, init = false.B)

  /*** handle half RVI in the last 2 Bytes ***/

  def hasLastHalf(idx: UInt) = {
    //!f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !checkerOutStage2.fixedMissPred(idx) && ! f3_req_is_mmio
    !f3_pd(idx).isRVC && checkerOutStage1.fixedRange(idx) && f3_instr_valid(idx) && !checkerOutStage1.fixedTaken(idx) && !f3_req_is_mmio
  }
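
  // Example: when slot PredictWidth-1 holds the first half of a fall-through
  // RVI instruction (in range, not taken, not MMIO), f3_lastHalf records
  // nextStartAddr as middlePC; on the next fetch block starting exactly there,
  // predecode's f3_hasHalfValid vector treats slot 0 as the second half of
  // that instruction rather than as a new instruction start.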

  val f3_last_validIdx       = ParallelPosteriorityEncoder(checkerOutStage1.fixedRange)

  val f3_hasLastHalf         = hasLastHalf((PredictWidth - 1).U)
  val f3_false_lastHalf      = hasLastHalf(f3_last_validIdx)
  val f3_false_snpc          = f3_half_snpc(f3_last_validIdx)

  val f3_lastHalf_mask    = VecInit((0 until PredictWidth).map( i => if(i == 0) false.B else true.B )).asUInt
  val f3_lastHalf_disable = RegInit(false.B)

  when(f3_flush || (f3_fire && f3_lastHalf_disable)){
    f3_lastHalf_disable := false.B
  }

  when (f3_flush) {
    f3_lastHalf.valid := false.B
  }.elsewhen (f3_fire) {
    f3_lastHalf.valid := f3_hasLastHalf && !f3_lastHalf_disable
    f3_lastHalf.middlePC := f3_ftq_req.nextStartAddr
  }

  f3_instr_valid := Mux(f3_lastHalf.valid, f3_hasHalfValid, VecInit(f3_pd.map(inst => inst.valid)))

  /*** frontend Trigger ***/
  frontendTrigger.io.pds  := f3_pd
  frontendTrigger.io.pc   := f3_pc
  frontendTrigger.io.data := f3_cut_data

  frontendTrigger.io.frontendTrigger  := io.frontendTrigger

  val f3_triggered = frontendTrigger.io.triggered
  val f3_toIbuffer_valid = f3_valid && (!f3_req_is_mmio || f3_mmio_can_go) && !f3_flush

  /*** send to Ibuffer ***/
  io.toIbuffer.valid            := f3_toIbuffer_valid
  io.toIbuffer.bits.instrs      := f3_expd_instr
  io.toIbuffer.bits.valid       := f3_instr_valid.asUInt
  io.toIbuffer.bits.enqEnable   := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt
  io.toIbuffer.bits.pd          := f3_pd
  io.toIbuffer.bits.ftqPtr      := f3_ftq_req.ftqIdx
  io.toIbuffer.bits.pc          := f3_pc
  io.toIbuffer.bits.ftqOffset.zipWithIndex.map{case(a, i) => a.bits := i.U; a.valid := checkerOutStage1.fixedTaken(i) && !f3_req_is_mmio}
  io.toIbuffer.bits.foldpc      := f3_foldpc
  io.toIbuffer.bits.exceptionType := (0 until PredictWidth).map(i => MuxCase(ExceptionType.none, Array(
    (f3_pf_vec(i) || f3_crossPageFault(i)) -> ExceptionType.ipf,
    (f3_gpf_vec(i) || f3_crossGuestPageFault(i)) -> ExceptionType.igpf,
    f3_af_vec(i) -> ExceptionType.acf
  )))
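  // MuxCase priority above: page fault first, then guest-page fault, then
  // access fault; a fault on the second cacheline of a straddling RVI
  // instruction (cross-page case) is charged to the straddling slot itself.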
  io.toIbuffer.bits.crossPageIPFFix := (0 until PredictWidth).map(i => f3_crossPageFault(i) || f3_crossGuestPageFault(i))
  io.toIbuffer.bits.triggered   := f3_triggered

  when(f3_lastHalf.valid){
    io.toIbuffer.bits.enqEnable := checkerOutStage1.fixedRange.asUInt & f3_instr_valid.asUInt & f3_lastHalf_mask
    io.toIbuffer.bits.valid     := f3_lastHalf_mask & f3_instr_valid.asUInt
  }

  /** to backend */
  // f3_gpaddr is valid iff gpf is detected
  io.toBackend.gpaddrMem_wen   := f3_toIbuffer_valid && (f3_gpf_vec.asUInt.orR || f3_crossGuestPageFault.asUInt.orR)
  io.toBackend.gpaddrMem_waddr := f3_ftq_req.ftqIdx.value
  io.toBackend.gpaddrMem_wdata := f3_gpaddr


  // Write back to Ftq
  val f3_cache_fetch = f3_valid && !(f2_fire && !f2_flush)
  val finishFetchMaskReg = RegNext(f3_cache_fetch)

  val mmioFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  val f3_mmio_missOffset = Wire(ValidUndirectioned(UInt(log2Ceil(PredictWidth).W)))
  f3_mmio_missOffset.valid := f3_req_is_mmio
  f3_mmio_missOffset.bits  := 0.U

  // Send mmioFlushWb back to FTQ 1 cycle after the uncache fetch returns.
  // On a backend redirect, mmio_state resets after 1 cycle;
  // in that case, mask .valid to avoid overriding the backend redirect.
  mmioFlushWb.valid           := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) &&
    f3_mmio_use_seq_pc && !f3_ftq_flush_self && !f3_ftq_flush_by_older)
  mmioFlushWb.bits.pc         := f3_pc
  mmioFlushWb.bits.pd         := f3_pd
  mmioFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := f3_mmio_range(i)}
  mmioFlushWb.bits.ftqIdx     := f3_ftq_req.ftqIdx
  mmioFlushWb.bits.ftqOffset  := f3_ftq_req.ftqOffset.bits
  mmioFlushWb.bits.misOffset  := f3_mmio_missOffset
  mmioFlushWb.bits.cfiOffset  := DontCare
  mmioFlushWb.bits.target     := Mux(mmio_is_RVC, f3_ftq_req.startAddr + 2.U, f3_ftq_req.startAddr + 4.U)
  mmioFlushWb.bits.jalTarget  := DontCare
  mmioFlushWb.bits.instrRange := f3_mmio_range

  /** external predecode for MMIO instruction */
  when(f3_req_is_mmio){
    val inst  = Cat(f3_mmio_data(1), f3_mmio_data(0))
    val currentIsRVC   = isRVC(inst)

    val brType::isCall::isRet::Nil = brInfo(inst)
    val jalOffset = jal_offset(inst, currentIsRVC)
    val brOffset  = br_offset(inst, currentIsRVC)

    io.toIbuffer.bits.instrs(0) := new RVCDecoder(inst, XLEN, fLen, useAddiForMv = true).decode.bits


    io.toIbuffer.bits.pd(0).valid   := true.B
    io.toIbuffer.bits.pd(0).isRVC   := currentIsRVC
    io.toIbuffer.bits.pd(0).brType  := brType
    io.toIbuffer.bits.pd(0).isCall  := isCall
    io.toIbuffer.bits.pd(0).isRet   := isRet

    when (mmio_resend_af) {
      io.toIbuffer.bits.exceptionType(0) := ExceptionType.acf
    } .elsewhen (mmio_resend_pf) {
      io.toIbuffer.bits.exceptionType(0) := ExceptionType.ipf
    }
    io.toIbuffer.bits.crossPageIPFFix(0) := mmio_resend_pf

    io.toIbuffer.bits.enqEnable   := f3_mmio_range.asUInt

    mmioFlushWb.bits.pd(0).valid   := true.B
    mmioFlushWb.bits.pd(0).isRVC   := currentIsRVC
    mmioFlushWb.bits.pd(0).brType  := brType
    mmioFlushWb.bits.pd(0).isCall  := isCall
    mmioFlushWb.bits.pd(0).isRet   := isRet
  }

  mmio_redirect := (f3_req_is_mmio && mmio_state === m_waitCommit && RegNext(fromUncache.fire) && f3_mmio_use_seq_pc)

  XSPerfAccumulate("fetch_bubble_ibuffer_not_ready",   io.toIbuffer.valid && !io.toIbuffer.ready )


  /**
    ******************************************************************************
    * IFU Write Back Stage
    * - write back predecode information to Ftq for update
    * - redirect if a fault prediction is found
    * - redirect on a false-hit last half (the last PC is not start + 32 Bytes, but in the middle of a non-CFI RVI instruction)
    ******************************************************************************
    */
  val wb_enable         = RegNext(f2_fire && !f2_flush) && !f3_req_is_mmio && !f3_flush
  val wb_valid          = RegNext(wb_enable, init = false.B)
  val wb_ftq_req        = RegEnable(f3_ftq_req, wb_enable)

  val wb_check_result_stage1   = RegEnable(checkerOutStage1, wb_enable)
  val wb_check_result_stage2   = checkerOutStage2
  val wb_instr_range    = RegEnable(io.toIbuffer.bits.enqEnable, wb_enable)

  val wb_pc_lower_result        = RegEnable(f3_pc_lower_result, wb_enable)
  val wb_pc_high                = RegEnable(f3_pc_high, wb_enable)
  val wb_pc_high_plus1          = RegEnable(f3_pc_high_plus1, wb_enable)
  val wb_pc                     = CatPC(wb_pc_lower_result, wb_pc_high, wb_pc_high_plus1)

  //val wb_pc             = RegEnable(f3_pc, wb_enable)
  val wb_pd             = RegEnable(f3_pd, wb_enable)
  val wb_instr_valid    = RegEnable(f3_instr_valid, wb_enable)

  /* false hit lastHalf */
  val wb_lastIdx        = RegEnable(f3_last_validIdx, wb_enable)
  val wb_false_lastHalf = RegEnable(f3_false_lastHalf, wb_enable) && wb_lastIdx =/= (PredictWidth - 1).U
  val wb_false_target   = RegEnable(f3_false_snpc, wb_enable)

  val wb_half_flush = wb_false_lastHalf
  val wb_half_target = wb_false_target

  /* false oversize */
  val lastIsRVC = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool())).last && wb_pd.last.isRVC
  val lastIsRVI = wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))(PredictWidth - 2) && !wb_pd(PredictWidth - 2).isRVC
  val lastTaken = wb_check_result_stage1.fixedTaken.last

  f3_wb_not_flush := wb_ftq_req.ftqIdx === f3_ftq_req.ftqIdx && f3_valid && wb_valid

  /** if a mispredicted request with a last half enters the wb stage, and f3 stalls this cycle,
    * we set a flag to notify f3 that the last-half flag need not be set.
    */
  // f3_fire is after wb_valid
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && !f3_fire && !RegNext(f3_fire, init = false.B) && !f3_flush
      ){
    f3_lastHalf_disable := true.B
  }

  // wb_valid and f3_fire are in the same cycle
  when(wb_valid && RegNext(f3_hasLastHalf, init = false.B)
        && wb_check_result_stage2.fixedMissPred(PredictWidth - 1) && f3_fire
      ){
    f3_lastHalf.valid := false.B
  }

  val checkFlushWb = Wire(Valid(new PredecodeWritebackBundle))
  val checkFlushWbjalTargetIdx = ParallelPriorityEncoder(VecInit(wb_pd.zip(wb_instr_valid).map{case (pd, v) => v && pd.isJal }))
  val checkFlushWbTargetIdx = ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred)
  checkFlushWb.valid                  := wb_valid
  checkFlushWb.bits.pc                := wb_pc
  checkFlushWb.bits.pd                := wb_pd
  checkFlushWb.bits.pd.zipWithIndex.map{case(instr,i) => instr.valid := wb_instr_valid(i)}
  checkFlushWb.bits.ftqIdx            := wb_ftq_req.ftqIdx
  checkFlushWb.bits.ftqOffset         := wb_ftq_req.ftqOffset.bits
  checkFlushWb.bits.misOffset.valid   := ParallelOR(wb_check_result_stage2.fixedMissPred) || wb_half_flush
  checkFlushWb.bits.misOffset.bits    := Mux(wb_half_flush, wb_lastIdx, ParallelPriorityEncoder(wb_check_result_stage2.fixedMissPred))
  checkFlushWb.bits.cfiOffset.valid   := ParallelOR(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.cfiOffset.bits    := ParallelPriorityEncoder(wb_check_result_stage1.fixedTaken)
  checkFlushWb.bits.target            := Mux(wb_half_flush, wb_half_target, wb_check_result_stage2.fixedTarget(checkFlushWbTargetIdx))
  checkFlushWb.bits.jalTarget         := wb_check_result_stage2.jalTarget(checkFlushWbjalTargetIdx)
  checkFlushWb.bits.instrRange        := wb_instr_range.asTypeOf(Vec(PredictWidth, Bool()))

  toFtq.pdWb := Mux(wb_valid, checkFlushWb, mmioFlushWb)

  wb_redirect := checkFlushWb.bits.misOffset.valid && wb_valid

  /* write back flush type */
  val checkFaultType = wb_check_result_stage2.faultType
  val checkJalFault     = wb_valid && checkFaultType.map(_.isjalFault).reduce(_||_)
  val checkRetFault     = wb_valid && checkFaultType.map(_.isRetFault).reduce(_||_)
  val checkTargetFault  = wb_valid && checkFaultType.map(_.istargetFault).reduce(_||_)
  val checkNotCFIFault  = wb_valid && checkFaultType.map(_.notCFIFault).reduce(_||_)
  val checkInvalidTaken = wb_valid && checkFaultType.map(_.invalidTakenFault).reduce(_||_)


  XSPerfAccumulate("predecode_flush_jalFault",   checkJalFault )
  XSPerfAccumulate("predecode_flush_retFault",   checkRetFault )
  XSPerfAccumulate("predecode_flush_targetFault",   checkTargetFault )
  XSPerfAccumulate("predecode_flush_notCFIFault",   checkNotCFIFault )
  XSPerfAccumulate("predecode_flush_invalidTakenFault",   checkInvalidTaken )

  when(checkRetFault){
    XSDebug("startAddr:%x  nextstartAddr:%x  taken:%d    takenIdx:%d\n",
        wb_ftq_req.startAddr, wb_ftq_req.nextStartAddr, wb_ftq_req.ftqOffset.valid, wb_ftq_req.ftqOffset.bits)
  }


  /** performance counter */
  val f3_perf_info     = RegEnable(f2_perf_info,  f2_fire)
  val f3_req_0    = io.toIbuffer.fire
  val f3_req_1    = io.toIbuffer.fire && f3_doubleLine
  val f3_hit_0    = io.toIbuffer.fire && f3_perf_info.bank_hit(0)
  val f3_hit_1    = io.toIbuffer.fire && f3_doubleLine && f3_perf_info.bank_hit(1)
  val f3_hit      = f3_perf_info.hit
  val perfEvents = Seq(
    ("frontendFlush                ", wb_redirect                                ),
    ("ifu_req                      ", io.toIbuffer.fire                          ),
    ("ifu_miss                     ", io.toIbuffer.fire && !f3_perf_info.hit     ),
    ("ifu_req_cacheline_0          ", f3_req_0                                   ),
    ("ifu_req_cacheline_1          ", f3_req_1                                   ),
    ("ifu_req_cacheline_0_hit      ", f3_hit_0                                   ),
    ("ifu_req_cacheline_1_hit      ", f3_hit_1                                   ),
    ("only_0_hit                   ", f3_perf_info.only_0_hit       && io.toIbuffer.fire ),
    ("only_0_miss                  ", f3_perf_info.only_0_miss      && io.toIbuffer.fire ),
    ("hit_0_hit_1                  ", f3_perf_info.hit_0_hit_1      && io.toIbuffer.fire ),
    ("hit_0_miss_1                 ", f3_perf_info.hit_0_miss_1     && io.toIbuffer.fire ),
    ("miss_0_hit_1                 ", f3_perf_info.miss_0_hit_1     && io.toIbuffer.fire ),
    ("miss_0_miss_1                ", f3_perf_info.miss_0_miss_1    && io.toIbuffer.fire ),
  )
  generatePerfEvent()

  XSPerfAccumulate("ifu_req",   io.toIbuffer.fire )
  XSPerfAccumulate("ifu_miss",  io.toIbuffer.fire && !f3_hit )
  XSPerfAccumulate("ifu_req_cacheline_0", f3_req_0  )
  XSPerfAccumulate("ifu_req_cacheline_1", f3_req_1  )
  XSPerfAccumulate("ifu_req_cacheline_0_hit",   f3_hit_0 )
  XSPerfAccumulate("ifu_req_cacheline_1_hit",   f3_hit_1 )
  XSPerfAccumulate("frontendFlush",  wb_redirect )
  XSPerfAccumulate("only_0_hit",      f3_perf_info.only_0_hit   && io.toIbuffer.fire  )
  XSPerfAccumulate("only_0_miss",     f3_perf_info.only_0_miss  && io.toIbuffer.fire  )
  XSPerfAccumulate("hit_0_hit_1",     f3_perf_info.hit_0_hit_1  && io.toIbuffer.fire  )
  XSPerfAccumulate("hit_0_miss_1",    f3_perf_info.hit_0_miss_1  && io.toIbuffer.fire  )
  XSPerfAccumulate("miss_0_hit_1",    f3_perf_info.miss_0_hit_1   && io.toIbuffer.fire )
  XSPerfAccumulate("miss_0_miss_1",   f3_perf_info.miss_0_miss_1 && io.toIbuffer.fire )
  XSPerfAccumulate("hit_0_except_1",   f3_perf_info.hit_0_except_1 && io.toIbuffer.fire )
  XSPerfAccumulate("miss_0_except_1",   f3_perf_info.miss_0_except_1 && io.toIbuffer.fire )
  XSPerfAccumulate("except_0",   f3_perf_info.except_0 && io.toIbuffer.fire )
  XSPerfHistogram("ifu2ibuffer_validCnt", PopCount(io.toIbuffer.bits.valid & io.toIbuffer.bits.enqEnable), io.toIbuffer.fire, 0, PredictWidth + 1, 1)

  val hartId = p(XSCoreParamsKey).HartId
  val isWriteFetchToIBufferTable = Constantin.createRecord(s"isWriteFetchToIBufferTable$hartId")
  val isWriteIfuWbToFtqTable = Constantin.createRecord(s"isWriteIfuWbToFtqTable$hartId")
  val fetchToIBufferTable = ChiselDB.createTable(s"FetchToIBuffer$hartId", new FetchToIBufferDB)
  val ifuWbToFtqTable = ChiselDB.createTable(s"IfuWbToFtq$hartId", new IfuWbToFtqDB)

  val fetchIBufferDumpData = Wire(new FetchToIBufferDB)
  fetchIBufferDumpData.start_addr := f3_ftq_req.startAddr
  fetchIBufferDumpData.instr_count := PopCount(io.toIbuffer.bits.enqEnable)
  fetchIBufferDumpData.exception := (f3_perf_info.except_0 && io.toIbuffer.fire) || (f3_perf_info.hit_0_except_1 && io.toIbuffer.fire) || (f3_perf_info.miss_0_except_1 && io.toIbuffer.fire)
  fetchIBufferDumpData.is_cache_hit := f3_hit

  val ifuWbToFtqDumpData = Wire(new IfuWbToFtqDB)
  ifuWbToFtqDumpData.start_addr := wb_ftq_req.startAddr
  ifuWbToFtqDumpData.is_miss_pred := checkFlushWb.bits.misOffset.valid
  ifuWbToFtqDumpData.miss_pred_offset := checkFlushWb.bits.misOffset.bits
  ifuWbToFtqDumpData.checkJalFault := checkJalFault
  ifuWbToFtqDumpData.checkRetFault := checkRetFault
  ifuWbToFtqDumpData.checkTargetFault := checkTargetFault
  ifuWbToFtqDumpData.checkNotCFIFault := checkNotCFIFault
  ifuWbToFtqDumpData.checkInvalidTaken := checkInvalidTaken

  fetchToIBufferTable.log(
    data = fetchIBufferDumpData,
    en = isWriteFetchToIBufferTable.orR && io.toIbuffer.fire,
    site = "IFU" + p(XSCoreParamsKey).HartId.toString,
    clock = clock,
    reset = reset
  )
  ifuWbToFtqTable.log(
    data = ifuWbToFtqDumpData,
    en = isWriteIfuWbToFtqTable.orR && checkFlushWb.valid,
    site = "IFU" + p(XSCoreParamsKey).HartId.toString,
    clock = clock,
    reset = reset
  )

}