// xref: /XiangShan/src/main/scala/xiangshan/frontend/Frontend.scala (revision 800ac0f1d01fac5d118955113cd5a0cc7844aff4)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
*
*
* Acknowledgement
*
* This implementation is inspired by several key papers:
* [1] Alex Ramirez, Oliverio J. Santana, Josep L. Larriba-Pey, and Mateo Valero. "[Fetching instruction streams.]
* (https://doi.org/10.1109/MICRO.2002.1176264)" 35th Annual IEEE/ACM International Symposium on Microarchitecture
* (MICRO). 2002.
* [2] Yasuo Ishii, Jaekyu Lee, Krishnendra Nathella, and Dam Sunwoo. "[Rebasing instruction prefetching: An industry
* perspective.](https://doi.org/10.1109/LCA.2020.3035068)" IEEE Computer Architecture Letters 19.2: 147-150. 2020.
* [3] Yasuo Ishii, Jaekyu Lee, Krishnendra Nathella, and Dam Sunwoo. "[Re-establishing fetch-directed instruction
* prefetching: An industry perspective.](https://doi.org/10.1109/ISPASS51385.2021.00034)" 2021 IEEE International
* Symposium on Performance Analysis of Systems and Software (ISPASS). 2021.
***************************************************************************************/

package xiangshan.frontend

import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.LazyModule
import freechips.rocketchip.diplomacy.LazyModuleImp
import org.chipsalliance.cde.config.Parameters
import utility._
import xiangshan._
import xiangshan.backend.fu.PFEvent
import xiangshan.backend.fu.PMP
import xiangshan.backend.fu.PMPChecker
import xiangshan.backend.fu.PMPReqBundle
import xiangshan.cache.mmu._
import xiangshan.frontend.icache._

/**
 * Top-level frontend wrapper (LazyModule).
 *
 * Holds the real frontend as an inner [[FrontendInlined]] LazyModule; this outer
 * wrapper is kept as a separate module boundary (`shouldBeInlined = false`).
 */
class Frontend()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false
  val inner       = LazyModule(new FrontendInlined)
  lazy val module = new FrontendImp(this)
}

/**
 * Module implementation of [[Frontend]].
 *
 * Re-exposes the inner module's `io` and `io_perf` bundles one level up via
 * `cloneType` + bulk connect, and — when `DebugOptionsKey.ResetGen` is set —
 * inserts a generated reset tree driving the inner module from this level's reset.
 */
class FrontendImp(wrapper: Frontend)(implicit p: Parameters) extends LazyModuleImp(wrapper) {
  val io      = IO(wrapper.inner.module.io.cloneType)
  val io_perf = IO(wrapper.inner.module.io_perf.cloneType)
  io <> wrapper.inner.module.io
  io_perf <> wrapper.inner.module.io_perf
  // Optional generated reset tree for the inner frontend (synthesis flow, sim = false).
  if (p(DebugOptionsKey).ResetGen) {
    ResetGen(ResetGenNode(Seq(ModuleNode(wrapper.inner.module))), reset, sim = false)
  }
}

/**
 * The actual frontend (LazyModule), inlined into its parent (`shouldBeInlined = true`).
 *
 * Owns the two diplomatic sub-modules of the frontend: the instruction uncache
 * (for MMIO instruction fetch) and the L1 instruction cache.
 */
class FrontendInlined()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = true

  val instrUncache = LazyModule(new InstrUncache())
  val icache       = LazyModule(new ICache())

  lazy val module = new FrontendInlinedImp(this)
}

/**
 * Module implementation of [[FrontendInlined]]: wires together the decoupled
 * frontend pipeline — BPU -> FTQ -> IFU -> IBuffer -> backend — plus the ICache,
 * instruction TLB (with PTW repeaters), PMP checkers, instruction uncache, and
 * performance-event plumbing. Also contains several XSError-based runtime checks
 * that validate ftqPtr/pc consistency of instructions leaving the IBuffer.
 */
class FrontendInlinedImp(outer: FrontendInlined) extends LazyModuleImp(outer)
    with HasXSParameter
    with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId       = Input(UInt(hartIdLen.W))
    val reset_vector = Input(UInt(PAddrBits.W))
    val fencei       = Input(Bool())
    val ptw          = new TlbPtwIO()
    val backend      = new FrontendToCtrlIO
    val softPrefetch = Vec(backendParams.LduCnt, Flipped(Valid(new SoftIfetchPrefetchBundle)))
    val sfence       = Input(new SfenceBundle)
    val tlbCsr       = Input(new TlbCsrBundle)
    val csrCtrl      = Input(new CustomCSRCtrlIO)
    val error        = ValidIO(new L1CacheErrorInfo)
    val frontendInfo = new Bundle {
      val ibufFull = Output(Bool())
      val bpuInfo = new Bundle {
        val bpRight = Output(UInt(XLEN.W))
        val bpWrong = Output(UInt(XLEN.W))
      }
    }
    val resetInFrontend = Output(Bool())
    val debugTopDown = new Bundle {
      val robHeadVaddr = Flipped(Valid(UInt(VAddrBits.W)))
    }
  })

  // decoupled-frontend modules
  val instrUncache = outer.instrUncache.module
  val icache       = outer.icache.module
  val bpu          = Module(new Predictor)
  val ifu          = Module(new NewIFU)
  val ibuffer      = Module(new IBuffer)
  val ftq          = Module(new Ftq)

  // One-cycle-delayed redirect info from the backend, used to flush the IBuffer
  // and to classify the flush cause for top-down bubble accounting.
  val needFlush            = RegNext(io.backend.toFtq.redirect.valid)
  val FlushControlRedirect = RegNext(io.backend.toFtq.redirect.bits.debugIsCtrl)
  val FlushMemVioRedirect  = RegNext(io.backend.toFtq.redirect.bits.debugIsMemVio)
  val FlushControlBTBMiss  = Wire(Bool())
  val FlushTAGEMiss        = Wire(Bool())
  val FlushSCMiss          = Wire(Bool())
  val FlushITTAGEMiss      = Wire(Bool())
  val FlushRASMiss         = Wire(Bool())

  // CSR/sfence inputs are delayed two cycles before distribution into the frontend.
  val tlbCsr  = DelayN(io.tlbCsr, 2)
  val csrCtrl = DelayN(io.csrCtrl, 2)
  val sfence  = RegNext(RegNext(io.sfence))

  // trigger
  ifu.io.frontendTrigger := csrCtrl.frontend_trigger

  // RVCDecoder fsIsOff
  ifu.io.csr_fsIsOff := csrCtrl.fsIsOff

  // bpu ctrl
  bpu.io.ctrl         := csrCtrl.bp_ctrl
  bpu.io.reset_vector := io.reset_vector

  // pmp: one checker per ICache port (2 * PortNumber) plus one for the IFU (last index)
  val PortNumber = ICacheParameters().PortNumber
  val pmp        = Module(new PMP())
  val pmp_check  = VecInit(Seq.fill(coreParams.ipmpPortNum)(Module(new PMPChecker(3, sameCycle = true)).io))
  pmp.io.distribute_csr := csrCtrl.distribute_csr
  val pmp_req_vec = Wire(Vec(coreParams.ipmpPortNum, Valid(new PMPReqBundle())))
  (0 until 2 * PortNumber).foreach(i => pmp_req_vec(i) <> icache.io.pmp(i).req)
  pmp_req_vec.last <> ifu.io.pmp.req

  for (i <- pmp_check.indices) {
    if (HasBitmapCheck) {
      pmp_check(i).apply(tlbCsr.mbmc.CMODE.asBool, tlbCsr.priv.imode, pmp.io.pmp, pmp.io.pma, pmp_req_vec(i))
    } else {
      pmp_check(i).apply(tlbCsr.priv.imode, pmp.io.pmp, pmp.io.pma, pmp_req_vec(i))
    }
  }
  (0 until 2 * PortNumber).foreach(i => icache.io.pmp(i).resp <> pmp_check(i).resp)
  ifu.io.pmp.resp <> pmp_check.last.resp

  // itlb: ICache ports are non-blocking (false), the last (IFU) port is blocking (true)
  val itlb =
    Module(new TLB(coreParams.itlbPortNum, nRespDups = 1, Seq.fill(PortNumber)(false) ++ Seq(true), itlbParams))
  itlb.io.requestor.take(PortNumber) zip icache.io.itlb foreach { case (a, b) => a <> b }
  itlb.io.requestor.last <> ifu.io.iTLBInter // mmio may need re-tlb, blocked
  itlb.io.hartId := io.hartId
  itlb.io.base_connect(sfence, tlbCsr)
  itlb.io.flushPipe.foreach(_ := icache.io.itlbFlushPipe)
  itlb.io.redirect := DontCare // itlb has flushpipe, don't need redirect signal

  // itlb -> PTW path, buffered by a filter and a repeater
  val itlb_ptw = Wire(new VectorTlbPtwIO(coreParams.itlbPortNum))
  itlb_ptw.connect(itlb.io.ptw)
  val itlbRepeater1 = PTWFilter(itlbParams.fenceDelay, itlb_ptw, sfence, tlbCsr, l2tlbParams.ifilterSize)
  val itlbRepeater2 =
    PTWRepeaterNB(passReady = false, itlbParams.fenceDelay, itlbRepeater1.io.ptw, io.ptw, sfence, tlbCsr)

  // ICache prefetch sources: FTQ (fetch-directed) and backend software prefetch
  icache.io.ftqPrefetch <> ftq.io.toPrefetch
  icache.io.softPrefetch <> io.softPrefetch

  // IFU-Ftq
  ifu.io.ftqInter.fromFtq <> ftq.io.toIfu
  // FTQ request fires only when both IFU and ICache can accept it
  ftq.io.toIfu.req.ready := ifu.io.ftqInter.fromFtq.req.ready && icache.io.fetch.req.ready

  ftq.io.fromIfu <> ifu.io.ftqInter.toFtq
  bpu.io.ftq_to_bpu <> ftq.io.toBpu
  ftq.io.fromBpu <> bpu.io.bpu_to_ftq

  ftq.io.mmioCommitRead <> ifu.io.mmioCommitRead

  // IFU-ICache
  icache.io.fetch.req <> ftq.io.toICache.req
  ftq.io.toICache.req.ready := ifu.io.ftqInter.fromFtq.req.ready && icache.io.fetch.req.ready

  ifu.io.icacheInter.resp <> icache.io.fetch.resp
  ifu.io.icacheInter.icacheReady       := icache.io.toIFU
  ifu.io.icacheInter.topdownIcacheMiss := icache.io.fetch.topdownIcacheMiss
  ifu.io.icacheInter.topdownItlbMiss   := icache.io.fetch.topdownItlbMiss
  icache.io.stop                       := ifu.io.icacheStop
  icache.io.flush                      := ftq.io.icacheFlush

  ifu.io.icachePerfInfo := icache.io.perfInfo

  icache.io.csr_pf_enable := RegNext(csrCtrl.pf_ctrl.l1I_pf_enable)

  icache.io.fencei := RegNext(io.fencei)

  // IFU-Ibuffer
  ifu.io.toIbuffer <> ibuffer.io.in

  ftq.io.fromBackend <> io.backend.toFtq
  io.backend.fromFtq := ftq.io.toBackend
  io.backend.fromIfu := ifu.io.toBackend
  io.frontendInfo.bpuInfo <> ftq.io.bpuInfo

  // Shadow copy of the FTQ pc memory, used only by the consistency checks below.
  val checkPcMem = Reg(Vec(FtqSize, new Ftq_RF_Components))
  when(ftq.io.toBackend.pc_mem_wen) {
    checkPcMem(ftq.io.toBackend.pc_mem_waddr) := ftq.io.toBackend.pc_mem_wdata
  }

  val checkTargetPtr = Wire(Vec(DecodeWidth, new FtqPtr))
  val checkTarget    = Wire(Vec(DecodeWidth, UInt(VAddrBits.W)))

  // For each IBuffer output slot, resolve the target of its fetch block: either the
  // newest entry's target (when the slot's ftqPtr is the newest entry) or the start
  // address of the next FTQ entry from the shadow pc memory.
  for (i <- 0 until DecodeWidth) {
    checkTargetPtr(i) := ibuffer.io.out(i).bits.ftqPtr
    checkTarget(i) := Mux(
      ftq.io.toBackend.newest_entry_ptr.value === checkTargetPtr(i).value,
      ftq.io.toBackend.newest_entry_target,
      checkPcMem((checkTargetPtr(i) + 1.U).value).startAddr
    )
  }

  // commented out for this br could be the last instruction in the fetch block
  // Check: a not-taken branch and its successor instruction share the same ftqPtr.
  def checkNotTakenConsecutive = {
    val prevNotTakenValid  = RegInit(0.B)
    val prevNotTakenFtqPtr = Reg(new FtqPtr)
    for (i <- 0 until DecodeWidth - 1) {
      // for instrs that is not the last, if a not-taken br, the next instr should have the same ftqPtr
      // for instrs that is the last, record and check next request
      when(ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr) {
        when(ibuffer.io.out(i + 1).fire) {
          // not last br, check now
        }.otherwise {
          // last br, record its info
          prevNotTakenValid  := true.B
          prevNotTakenFtqPtr := checkTargetPtr(i)
        }
      }
      XSError(
        ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr &&
          ibuffer.io.out(i + 1).fire &&
          checkTargetPtr(i).value =/= checkTargetPtr(i + 1).value,
        "not-taken br should have same ftqPtr\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && ibuffer.io.out(DecodeWidth - 1).bits.pd.isBr) {
      // last instr is a br, record its info
      prevNotTakenValid  := true.B
      prevNotTakenFtqPtr := checkTargetPtr(DecodeWidth - 1)
    }
    when(prevNotTakenValid && ibuffer.io.out(0).fire) {
      prevNotTakenValid := false.B
    }
    XSError(
      prevNotTakenValid && ibuffer.io.out(0).fire &&
        prevNotTakenFtqPtr.value =/= checkTargetPtr(0).value,
      "not-taken br should have same ftqPtr\n"
    )

    when(needFlush) {
      prevNotTakenValid := false.B
    }
  }

  // Check: a taken branch's successor instruction comes from the next FTQ entry
  // (ftqPtr + 1), i.e. taken branches break ftqPtr consecutiveness by exactly one.
  def checkTakenNotConsecutive = {
    val prevTakenValid  = RegInit(0.B)
    val prevTakenFtqPtr = Reg(new FtqPtr)
    for (i <- 0 until DecodeWidth - 1) {
      // for instrs that is not the last, if a taken br, the next instr should not have the same ftqPtr
      // for instrs that is the last, record and check next request
      when(ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && ibuffer.io.out(i).bits.pred_taken) {
        when(ibuffer.io.out(i + 1).fire) {
          // not last br, check now
        }.otherwise {
          // last br, record its info
          prevTakenValid  := true.B
          prevTakenFtqPtr := checkTargetPtr(i)
        }
      }
      XSError(
        ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && ibuffer.io.out(i).bits.pred_taken &&
          ibuffer.io.out(i + 1).fire &&
          (checkTargetPtr(i) + 1.U).value =/= checkTargetPtr(i + 1).value,
        "taken br should have consecutive ftqPtr\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && ibuffer.io.out(DecodeWidth - 1).bits.pd.isBr && ibuffer.io.out(
      DecodeWidth - 1
    ).bits.pred_taken) {
      // last instr is a br, record its info
      prevTakenValid  := true.B
      prevTakenFtqPtr := checkTargetPtr(DecodeWidth - 1)
    }
    when(prevTakenValid && ibuffer.io.out(0).fire) {
      prevTakenValid := false.B
    }
    XSError(
      prevTakenValid && ibuffer.io.out(0).fire &&
        (prevTakenFtqPtr + 1.U).value =/= checkTargetPtr(0).value,
      "taken br should have consecutive ftqPtr\n"
    )
    when(needFlush) {
      prevTakenValid := false.B
    }
  }

  // Check: a not-taken branch is followed by the next sequential pc (+2 for RVC, +4 otherwise).
  def checkNotTakenPC = {
    val prevNotTakenPC    = Reg(UInt(VAddrBits.W))
    val prevIsRVC         = Reg(Bool())
    val prevNotTakenValid = RegInit(0.B)

    for (i <- 0 until DecodeWidth - 1) {
      when(ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && !ibuffer.io.out(i).bits.pred_taken) {
        when(ibuffer.io.out(i + 1).fire) {}.otherwise {
          prevNotTakenValid := true.B
          prevIsRVC         := ibuffer.io.out(i).bits.pd.isRVC
          prevNotTakenPC    := ibuffer.io.out(i).bits.pc
        }
      }
      XSError(
        ibuffer.io.out(i).fire && ibuffer.io.out(i).bits.pd.isBr && !ibuffer.io.out(i).bits.pred_taken &&
          ibuffer.io.out(i + 1).fire &&
          ibuffer.io.out(i).bits.pc + Mux(ibuffer.io.out(i).bits.pd.isRVC, 2.U, 4.U) =/= ibuffer.io.out(
            i + 1
          ).bits.pc,
        "not-taken br should have consecutive pc\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && ibuffer.io.out(DecodeWidth - 1).bits.pd.isBr && !ibuffer.io.out(
      DecodeWidth - 1
    ).bits.pred_taken) {
      prevNotTakenValid := true.B
      prevIsRVC         := ibuffer.io.out(DecodeWidth - 1).bits.pd.isRVC
      prevNotTakenPC    := ibuffer.io.out(DecodeWidth - 1).bits.pc
    }
    when(prevNotTakenValid && ibuffer.io.out(0).fire) {
      prevNotTakenValid := false.B
    }
    XSError(
      prevNotTakenValid && ibuffer.io.out(0).fire &&
        prevNotTakenPC + Mux(prevIsRVC, 2.U, 4.U) =/= ibuffer.io.out(0).bits.pc,
      "not-taken br should have same pc\n"
    )
    when(needFlush) {
      prevNotTakenValid := false.B
    }
  }

  // Check: a predicted-taken CFI is followed by its resolved target pc (from checkTarget / checkPcMem).
  def checkTakenPC = {
    val prevTakenFtqPtr = Reg(new FtqPtr)
    val prevTakenValid  = RegInit(0.B)
    val prevTakenTarget = Wire(UInt(VAddrBits.W))
    prevTakenTarget := checkPcMem((prevTakenFtqPtr + 1.U).value).startAddr

    for (i <- 0 until DecodeWidth - 1) {
      when(ibuffer.io.out(i).fire && !ibuffer.io.out(i).bits.pd.notCFI && ibuffer.io.out(i).bits.pred_taken) {
        when(ibuffer.io.out(i + 1).fire) {}.otherwise {
          prevTakenValid  := true.B
          prevTakenFtqPtr := checkTargetPtr(i)
        }
      }
      XSError(
        ibuffer.io.out(i).fire && !ibuffer.io.out(i).bits.pd.notCFI && ibuffer.io.out(i).bits.pred_taken &&
          ibuffer.io.out(i + 1).fire &&
          checkTarget(i) =/= ibuffer.io.out(i + 1).bits.pc,
        "taken instr should follow target pc\n"
      )
    }
    when(ibuffer.io.out(DecodeWidth - 1).fire && !ibuffer.io.out(DecodeWidth - 1).bits.pd.notCFI && ibuffer.io.out(
      DecodeWidth - 1
    ).bits.pred_taken) {
      prevTakenValid  := true.B
      prevTakenFtqPtr := checkTargetPtr(DecodeWidth - 1)
    }
    when(prevTakenValid && ibuffer.io.out(0).fire) {
      prevTakenValid := false.B
    }
    XSError(
      prevTakenValid && ibuffer.io.out(0).fire &&
        prevTakenTarget =/= ibuffer.io.out(0).bits.pc,
      "taken instr should follow target pc\n"
    )
    when(needFlush) {
      prevTakenValid := false.B
    }
  }

  // checkNotTakenConsecutive
  checkTakenNotConsecutive
  checkTakenPC
  checkNotTakenPC

  ifu.io.rob_commits <> io.backend.toFtq.rob_commits

  // IBuffer flush and top-down bubble classification
  ibuffer.io.flush                := needFlush
  ibuffer.io.ControlRedirect      := FlushControlRedirect
  ibuffer.io.MemVioRedirect       := FlushMemVioRedirect
  ibuffer.io.ControlBTBMissBubble := FlushControlBTBMiss
  ibuffer.io.TAGEMissBubble       := FlushTAGEMiss
  ibuffer.io.SCMissBubble         := FlushSCMiss
  ibuffer.io.ITTAGEMissBubble     := FlushITTAGEMiss
  ibuffer.io.RASMissBubble        := FlushRASMiss
  ibuffer.io.decodeCanAccept      := io.backend.canAccept

  FlushControlBTBMiss := ftq.io.ControlBTBMissBubble
  FlushTAGEMiss       := ftq.io.TAGEMissBubble
  FlushSCMiss         := ftq.io.SCMissBubble
  FlushITTAGEMiss     := ftq.io.ITTAGEMissBubble
  FlushRASMiss        := ftq.io.RASMissBubble

  io.backend.cfVec <> ibuffer.io.out
  io.backend.stallReason <> ibuffer.io.stallReason

  // MMIO instruction fetch path
  instrUncache.io.req <> ifu.io.uncacheInter.toUncache
  ifu.io.uncacheInter.fromUncache <> instrUncache.io.resp
  instrUncache.io.flush := false.B
  io.error <> RegNext(RegNext(icache.io.error))

  icache.io.hartId := io.hartId

  itlbRepeater1.io.debugTopDown.robHeadVaddr := io.debugTopDown.robHeadVaddr

  io.frontendInfo.ibufFull := RegNext(ibuffer.io.full)
  io.resetInFrontend       := reset.asBool

  // PFEvent
  val pfevent = Module(new PFEvent)
  pfevent.io.distribute_csr := io.csrCtrl.distribute_csr
  val csrevents = pfevent.io.hpmevent.take(8)

  val perfFromUnits = Seq(ifu, ibuffer, icache, ftq, bpu).flatMap(_.getPerfEvents)
  val perfFromIO    = Seq()
  val perfBlock     = Seq()
  // let index = 0 be no event
  val allPerfEvents = Seq(("noEvent", 0.U)) ++ perfFromUnits ++ perfFromIO ++ perfBlock

  if (printEventCoding) {
    for (((name, inc), i) <- allPerfEvents.zipWithIndex) {
      println("Frontend perfEvents Set", name, inc, i)
    }
  }

  val allPerfInc          = allPerfEvents.map(_._2.asTypeOf(new PerfEvent))
  override val perfEvents = HPerfMonitor(csrevents, allPerfInc).getPerfEvents
  generatePerfEvent()
}