xref: /XiangShan/src/main/scala/xiangshan/backend/Backend.scala (revision f7063a43ab34da917ba6c670d21871314340c550)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import utility.{Constantin, ZeroExt}
import xiangshan._
import xiangshan.backend.Bundles.{DynInst, IssueQueueIQWakeUpBundle, LoadShouldCancel, MemExuInput, MemExuOutput, VPUCtrlSignals}
import xiangshan.backend.ctrlblock.{DebugLSIO, LsTopdownInfo}
import xiangshan.backend.datapath.DataConfig.{IntData, VecData}
import xiangshan.backend.datapath.RdConfig.{IntRD, VfRD}
import xiangshan.backend.datapath.WbConfig._
import xiangshan.backend.datapath._
import xiangshan.backend.dispatch.CoreDispatchTopDownIO
import xiangshan.backend.exu.ExuBlock
import xiangshan.backend.fu.vector.Bundles.{VConfig, VType}
import xiangshan.backend.fu.{FenceIO, FenceToSbuffer, FuConfig, FuType, PerfCounterIO}
import xiangshan.backend.issue.EntryBundles._
import xiangshan.backend.issue.{CancelNetwork, Scheduler, SchedulerImpBase}
import xiangshan.backend.rob.{RobCoreTopDownIO, RobDebugRollingIO, RobLsqIO, RobPtr}
import xiangshan.frontend.{FtqPtr, FtqRead, PreDecodeInfo}
import xiangshan.mem.{LqPtr, LsqEnqIO, SqPtr}
import scala.collection.mutable
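/**
 * Backend is the LazyModule (diplomacy) wrapper of the out-of-order backend.
 * It owns the ctrl block, the int/vf/mem schedulers, the regfile datapath and
 * the int/vf execution blocks; all of the wiring lives in [[BackendImp]].
 */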
class Backend(val params: BackendParams)(implicit p: Parameters) extends LazyModule
  with HasXSParameter {

  override def shouldBeInlined: Boolean = false

  // check read & write port config
  params.configChecks

  /* Only update the idx in mem-scheduler here
   * Idx in other schedulers can be updated the same way if needed
   *
   * Also note that we filter out the 'stData issue-queues' when counting
   */
  for ((ibp, idx) <- params.memSchdParams.get.issueBlockParams.filter(iq => iq.StdCnt == 0).zipWithIndex) {
    ibp.updateIdx(idx)
  }

  println(params.iqWakeUpParams)

  for ((schdCfg, i) <- params.allSchdParams.zipWithIndex) {
    schdCfg.bindBackendParam(params)
  }

  for ((iqCfg, i) <- params.allIssueParams.zipWithIndex) {
    iqCfg.bindBackendParam(params)
  }

  for ((exuCfg, i) <- params.allExuParams.zipWithIndex) {
    exuCfg.bindBackendParam(params)
    exuCfg.updateIQWakeUpConfigs(params.iqWakeUpParams)
    exuCfg.updateExuIdx(i)
  }

  println("[Backend] ExuConfigs:")
  for (exuCfg <- params.allExuParams) {
    val fuConfigs = exuCfg.fuConfigs
    val wbPortConfigs = exuCfg.wbPortConfigs
    val immType = exuCfg.immType

    println("[Backend]   " +
      s"${exuCfg.name}: " +
      (if (exuCfg.fakeUnit) "fake, " else "") +
      (if (exuCfg.hasLoadFu || exuCfg.hasHyldaFu) s"LdExuIdx(${backendParams.getLdExuIdx(exuCfg)})" else "") +
      s"${fuConfigs.map(_.name).mkString("fu(s): {", ",", "}")}, " +
      s"${wbPortConfigs.mkString("wb: {", ",", "}")}, " +
      s"${immType.map(SelImm.mkString(_)).mkString("imm: {", ",", "}")}, " +
      s"latMax(${exuCfg.latencyValMax}), ${exuCfg.fuLatancySet.mkString("lat: {", ",", "}")}, " +
      s"srcReg(${exuCfg.numRegSrc})"
    )
    require(
      wbPortConfigs.collectFirst { case x: IntWB => x }.nonEmpty ==
        fuConfigs.map(_.writeIntRf).reduce(_ || _),
      s"${exuCfg.name} int wb port has no priority"
    )
    require(
      wbPortConfigs.collectFirst { case x: VfWB => x }.nonEmpty ==
        fuConfigs.map(x => x.writeFpRf || x.writeVecRf).reduce(_ || _),
      s"${exuCfg.name} vec wb port has no priority"
    )
  }

  println(s"[Backend] all fu configs")
  for (cfg <- FuConfig.allConfigs) {
    println(s"[Backend]   $cfg")
  }

  println(s"[Backend] Int RdConfigs: ExuName(Priority)")
  for ((port, seq) <- params.getRdPortParams(IntData())) {
    println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
  }

  println(s"[Backend] Int WbConfigs: ExuName(Priority)")
  for ((port, seq) <- params.getWbPortParams(IntData())) {
    println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
  }

  println(s"[Backend] Vf RdConfigs: ExuName(Priority)")
  for ((port, seq) <- params.getRdPortParams(VecData())) {
    println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
  }

  println(s"[Backend] Vf WbConfigs: ExuName(Priority)")
  for ((port, seq) <- params.getWbPortParams(VecData())) {
    println(s"[Backend]   port($port): ${seq.map(x => params.getExuName(x._1) + "(" + x._2.toString + ")").mkString(",")}")
  }

  println(s"[Backend] Dispatch Configs:")
  println(s"[Backend] Load IQ enq width(${params.numLoadDp}), Store IQ enq width(${params.numStoreDp})")
  println(s"[Backend] Load DP width(${LSQLdEnqWidth}), Store DP width(${LSQStEnqWidth})")

  params.updateCopyPdestInfo
  println(s"[Backend] copyPdestInfo ${params.copyPdestInfo}")
  params.allExuParams.map(_.copyNum)
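  // Submodule wrappers; as LazyModules they are only elaborated when `module`
  // (BackendImp) is materialized.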
  val ctrlBlock = LazyModule(new CtrlBlock(params))
  val pcTargetMem = LazyModule(new PcTargetMem(params))
  val intScheduler = params.intSchdParams.map(x => LazyModule(new Scheduler(x)))
  val vfScheduler = params.vfSchdParams.map(x => LazyModule(new Scheduler(x)))
  val memScheduler = params.memSchdParams.map(x => LazyModule(new Scheduler(x)))
  val dataPath = LazyModule(new DataPath(params))
  val intExuBlock = params.intSchdParams.map(x => LazyModule(new ExuBlock(x)))
  val vfExuBlock = params.vfSchdParams.map(x => LazyModule(new ExuBlock(x)))
  val wbFuBusyTable = LazyModule(new WbFuBusyTable(params))

  lazy val module = new BackendImp(this)
}

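/**
 * BackendImp does the actual wiring: ctrl block -> schedulers -> datapath ->
 * bypass network -> execution blocks -> writeback datapath, plus the
 * CSR/fence plumbing and the interface to the memory block.
 */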
class BackendImp(override val wrapper: Backend)(implicit p: Parameters) extends LazyModuleImp(wrapper)
  with HasXSParameter {
  implicit private val params = wrapper.params

  val io = IO(new BackendIO()(p, wrapper.params))

  private val ctrlBlock = wrapper.ctrlBlock.module
  private val pcTargetMem = wrapper.pcTargetMem.module
  private val intScheduler: SchedulerImpBase = wrapper.intScheduler.get.module
  private val vfScheduler = wrapper.vfScheduler.get.module
  private val memScheduler = wrapper.memScheduler.get.module
  private val dataPath = wrapper.dataPath.module
  private val intExuBlock = wrapper.intExuBlock.get.module
  private val vfExuBlock = wrapper.vfExuBlock.get.module
  private val og2ForVector = Module(new Og2ForVector(params))
  private val bypassNetwork = Module(new BypassNetwork)
  private val wbDataPath = Module(new WbDataPath(params))
  private val wbFuBusyTable = wrapper.wbFuBusyTable.module
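  // Collect the wakeup signals broadcast by all three schedulers and key them
  // by the producing exu's index, so each scheduler can subscribe to exactly
  // the wakeups it needs (see the fromSchedulers.wakeupVec connections below).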
  private val iqWakeUpMappedBundle: Map[Int, ValidIO[IssueQueueIQWakeUpBundle]] = (
    intScheduler.io.toSchedulers.wakeupVec ++
      vfScheduler.io.toSchedulers.wakeupVec ++
      memScheduler.io.toSchedulers.wakeupVec
    ).map(x => (x.bits.exuIdx, x)).toMap

  println(s"[Backend] iq wake up keys: ${iqWakeUpMappedBundle.keys}")
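  // Exchange writeback-port busy information: each scheduler's fu busy tables
  // feed the shared WbFuBusyTable, which hands back per-scheduler read
  // responses; the datapath additionally reads the writeback conflict table.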
  wbFuBusyTable.io.in.intSchdBusyTable := intScheduler.io.wbFuBusyTable
  wbFuBusyTable.io.in.vfSchdBusyTable := vfScheduler.io.wbFuBusyTable
  wbFuBusyTable.io.in.memSchdBusyTable := memScheduler.io.wbFuBusyTable
  intScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.intRespRead
  vfScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.vfRespRead
  memScheduler.io.fromWbFuBusyTable.fuBusyTableRead := wbFuBusyTable.io.out.memRespRead
  dataPath.io.wbConfictRead := wbFuBusyTable.io.out.wbConflictRead

  private val og1CancelOH: UInt = dataPath.io.og1CancelOH
  private val og0CancelOH: UInt = dataPath.io.og0CancelOH
  private val cancelToBusyTable = dataPath.io.cancelToBusyTable
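  // Ctrl block: fed by the frontend, the writeback results, memory-block
  // events, and the CSR unit inside intExuBlock (interrupts, trap target,
  // xRET, WFI).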
  ctrlBlock.io.IQValidNumVec := intScheduler.io.IQValidNumVec
  ctrlBlock.io.fromTop.hartId := io.fromTop.hartId
  ctrlBlock.io.frontend <> io.frontend
  ctrlBlock.io.fromWB.wbData <> wbDataPath.io.toCtrlBlock.writeback
  ctrlBlock.io.fromMem.stIn <> io.mem.stIn
  ctrlBlock.io.fromMem.violation <> io.mem.memoryViolation
  ctrlBlock.io.lqCanAccept := io.mem.lqCanAccept
  ctrlBlock.io.sqCanAccept := io.mem.sqCanAccept
  ctrlBlock.io.csrCtrl <> intExuBlock.io.csrio.get.customCtrl
  ctrlBlock.io.robio.csr.intrBitSet := intExuBlock.io.csrio.get.interrupt
  ctrlBlock.io.robio.csr.trapTarget := intExuBlock.io.csrio.get.trapTarget
  ctrlBlock.io.robio.csr.isXRet := intExuBlock.io.csrio.get.isXRet
  ctrlBlock.io.robio.csr.wfiEvent := intExuBlock.io.csrio.get.wfi_event
  ctrlBlock.io.robio.lsq <> io.mem.robLsqIO
  ctrlBlock.io.robio.lsTopdownInfo <> io.mem.lsTopdownInfo
  ctrlBlock.io.robio.debug_ls <> io.mem.debugLS
  ctrlBlock.perfinfo := DontCare // TODO: Implement backend hpm
  ctrlBlock.io.debugEnqLsq.canAccept := io.mem.lsqEnqIO.canAccept
  ctrlBlock.io.debugEnqLsq.resp := io.mem.lsqEnqIO.resp
  ctrlBlock.io.debugEnqLsq.req := memScheduler.io.memIO.get.lsqEnqIO.req
  ctrlBlock.io.debugEnqLsq.needAlloc := memScheduler.io.memIO.get.lsqEnqIO.needAlloc
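  // Int scheduler: only snoops int regfile writebacks; its vf writeback port
  // is tied off to zero.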
  intScheduler.io.fromTop.hartId := io.fromTop.hartId
  intScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  intScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  intScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.intUops
  intScheduler.io.intWriteBack := wbDataPath.io.toIntPreg
  intScheduler.io.vfWriteBack := 0.U.asTypeOf(intScheduler.io.vfWriteBack)
  intScheduler.io.fromDataPath.resp := dataPath.io.toIntIQ
  intScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  intScheduler.io.fromDataPath.og0Cancel := og0CancelOH
  intScheduler.io.fromDataPath.og1Cancel := og1CancelOH
  intScheduler.io.ldCancel := io.mem.ldCancel
  intScheduler.io.fromDataPath.cancelToBusyTable := cancelToBusyTable
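  // Mem scheduler: snoops both int and vf writebacks, and tracks load/store
  // queue state (commit counts, deq pointers, cancel counts, store issue
  // pointer) so it can schedule memory uops and replay on feedback.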
  memScheduler.io.fromTop.hartId := io.fromTop.hartId
  memScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  memScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  memScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.memUops
  memScheduler.io.intWriteBack := wbDataPath.io.toIntPreg
  memScheduler.io.vfWriteBack := wbDataPath.io.toVfPreg
  memScheduler.io.fromMem.get.scommit := io.mem.sqDeq
  memScheduler.io.fromMem.get.lcommit := io.mem.lqDeq
  memScheduler.io.fromMem.get.wakeup := io.mem.wakeup
  memScheduler.io.fromMem.get.sqDeqPtr := io.mem.sqDeqPtr
  memScheduler.io.fromMem.get.lqDeqPtr := io.mem.lqDeqPtr
  memScheduler.io.fromMem.get.sqCancelCnt := io.mem.sqCancelCnt
  memScheduler.io.fromMem.get.lqCancelCnt := io.mem.lqCancelCnt
  memScheduler.io.fromMem.get.stIssuePtr := io.mem.stIssuePtr
  require(memScheduler.io.fromMem.get.memWaitUpdateReq.robIdx.length == io.mem.stIn.length)
  memScheduler.io.fromMem.get.memWaitUpdateReq.robIdx.zip(io.mem.stIn).foreach { case (sink, source) =>
    sink.valid := source.valid
    sink.bits  := source.bits.robIdx
  }
  memScheduler.io.fromMem.get.memWaitUpdateReq.sqIdx := DontCare // TODO
  memScheduler.io.fromDataPath.resp := dataPath.io.toMemIQ
  memScheduler.io.fromMem.get.ldaFeedback := io.mem.ldaIqFeedback
  memScheduler.io.fromMem.get.staFeedback := io.mem.staIqFeedback
  memScheduler.io.fromMem.get.hyuFeedback := io.mem.hyuIqFeedback
  memScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  memScheduler.io.fromDataPath.og0Cancel := og0CancelOH
  memScheduler.io.fromDataPath.og1Cancel := og1CancelOH
  memScheduler.io.ldCancel := io.mem.ldCancel
  memScheduler.io.fromDataPath.cancelToBusyTable := cancelToBusyTable
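  // Vf scheduler: vector/fp counterpart of the int scheduler; it additionally
  // receives the og2-stage responses produced by og2ForVector below.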
  vfScheduler.io.fromTop.hartId := io.fromTop.hartId
  vfScheduler.io.fromCtrlBlock.flush := ctrlBlock.io.toIssueBlock.flush
  vfScheduler.io.fromDispatch.allocPregs <> ctrlBlock.io.toIssueBlock.allocPregs
  vfScheduler.io.fromDispatch.uops <> ctrlBlock.io.toIssueBlock.vfUops
  vfScheduler.io.intWriteBack := 0.U.asTypeOf(vfScheduler.io.intWriteBack)
  vfScheduler.io.vfWriteBack := wbDataPath.io.toVfPreg
  vfScheduler.io.fromDataPath.resp := dataPath.io.toVfIQ
  vfScheduler.io.fromSchedulers.wakeupVec.foreach { wakeup => wakeup := iqWakeUpMappedBundle(wakeup.bits.exuIdx) }
  vfScheduler.io.fromDataPath.og0Cancel := og0CancelOH
  vfScheduler.io.fromDataPath.og1Cancel := og1CancelOH
  vfScheduler.io.ldCancel := io.mem.ldCancel
  vfScheduler.io.fromDataPath.cancelToBusyTable := cancelToBusyTable
  vfScheduler.io.fromOg2.get := og2ForVector.io.toVfIQ
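  // Datapath: sits between the schedulers' (delayed) issue ports and the
  // execution blocks; its og0/og1-stage cancel vectors feed back into all
  // three schedulers above.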
  dataPath.io.hartId := io.fromTop.hartId
  dataPath.io.flush := ctrlBlock.io.toDataPath.flush

  dataPath.io.fromIntIQ <> intScheduler.io.toDataPathAfterDelay
  dataPath.io.fromVfIQ <> vfScheduler.io.toDataPathAfterDelay
  dataPath.io.fromMemIQ <> memScheduler.io.toDataPathAfterDelay

  dataPath.io.ldCancel := io.mem.ldCancel
  println(s"[Backend] wbDataPath.io.toIntPreg: ${wbDataPath.io.toIntPreg.size}, dataPath.io.fromIntWb: ${dataPath.io.fromIntWb.size}")
  println(s"[Backend] wbDataPath.io.toVfPreg: ${wbDataPath.io.toVfPreg.size}, dataPath.io.fromVfWb: ${dataPath.io.fromVfWb.size}")
  dataPath.io.fromIntWb := wbDataPath.io.toIntPreg
  dataPath.io.fromVfWb := wbDataPath.io.toVfPreg
  dataPath.io.debugIntRat    .foreach(_ := ctrlBlock.io.debug_int_rat.get)
  dataPath.io.debugFpRat     .foreach(_ := ctrlBlock.io.debug_fp_rat.get)
  dataPath.io.debugVecRat    .foreach(_ := ctrlBlock.io.debug_vec_rat.get)
  dataPath.io.debugVconfigRat.foreach(_ := ctrlBlock.io.debug_vconfig_rat.get)
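  // Og2ForVector adds one extra pipeline stage (og2) between the datapath's
  // og1 output (toFpExu) and the vf execution path; its responses flow back
  // to the vf scheduler via fromOg2 above.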
  og2ForVector.io.flush := ctrlBlock.io.toDataPath.flush
  og2ForVector.io.ldCancel := io.mem.ldCancel
  og2ForVector.io.fromOg1NoReg <> dataPath.io.toFpExu
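  // Bypass network: forwards just-produced results from the int/vf exu
  // outputs and the mem writebacks to dependent uops before they enter the
  // exus, together with og1-stage immediate info.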
  bypassNetwork.io.fromDataPath.int <> dataPath.io.toIntExu
  bypassNetwork.io.fromDataPath.vf <> og2ForVector.io.toVfExu
  bypassNetwork.io.fromDataPath.mem <> dataPath.io.toMemExu
  bypassNetwork.io.fromDataPath.immInfo := dataPath.io.og1ImmInfo
  bypassNetwork.io.fromExus.connectExuOutput(_.int)(intExuBlock.io.out)
  bypassNetwork.io.fromExus.connectExuOutput(_.vf)(vfExuBlock.io.out)
  require(bypassNetwork.io.fromExus.mem.flatten.size == io.mem.writeBack.size,
    s"bypassNetwork.io.fromExus.mem.flatten.size(${bypassNetwork.io.fromExus.mem.flatten.size}: ${bypassNetwork.io.fromExus.mem.map(_.size)}), " +
    s"io.mem.writeBack.size(${io.mem.writeBack.size})"
  )
  bypassNetwork.io.fromExus.mem.flatten.zip(io.mem.writeBack).foreach { case (sink, source) =>
    sink.valid := source.valid
    sink.bits.pdest := source.bits.uop.pdest
    sink.bits.data := source.bits.data
  }
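  // Pipeline registers between the bypass network and the int exu block. A
  // uop in flight is dropped either on the bypass side (redirect flush or
  // load cancel at fire time) or after being latched (redirect flush only).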
  intExuBlock.io.flush := ctrlBlock.io.toExuBlock.flush
  for (i <- 0 until intExuBlock.io.in.length) {
    for (j <- 0 until intExuBlock.io.in(i).length) {
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.int(i)(j).bits.loadDependency, io.mem.ldCancel)
      NewPipelineConnect(
        bypassNetwork.io.toExus.int(i)(j), intExuBlock.io.in(i)(j), intExuBlock.io.in(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.int(i)(j).fire,
          bypassNetwork.io.toExus.int(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          intExuBlock.io.in(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush)
        ),
        Option("bypassNetwork2intExuBlock")
      )
    }
  }

  pcTargetMem.io.fromFrontendFtq := io.frontend.fromFtq
  pcTargetMem.io.toDataPath <> dataPath.io.fromPcTargetMem
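  // CSR unit plumbing: fp/vector status, exceptions, interrupts and perf
  // counters all converge on intExuBlock's csrio bundle.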
  private val csrio = intExuBlock.io.csrio.get
  csrio.hartId := io.fromTop.hartId
  csrio.fpu.fflags := ctrlBlock.io.robio.csr.fflags
  csrio.fpu.isIllegal := false.B // Todo: remove it
  csrio.fpu.dirty_fs := ctrlBlock.io.robio.csr.dirty_fs
  csrio.vpu <> 0.U.asTypeOf(csrio.vpu) // Todo

  val vsetvlVType = intExuBlock.io.vtype.getOrElse(0.U.asTypeOf(new VType))
  ctrlBlock.io.robio.vsetvlVType := vsetvlVType
  val debugVconfig = dataPath.io.debugVconfig match {
    case Some(x) => x.asTypeOf(new VConfig)
    case None => 0.U.asTypeOf(new VConfig)
  }
  val commitVType = ctrlBlock.io.robio.commitVType.vtype
  val hasVsetvl = ctrlBlock.io.robio.commitVType.hasVsetvl
  val vtype = VType.toVtypeStruct(Mux(hasVsetvl, vsetvlVType, commitVType.bits)).asUInt
  val debugVl = debugVconfig.vl
  csrio.vpu.set_vxsat := ctrlBlock.io.robio.csr.vxsat
  csrio.vpu.set_vstart.valid := ctrlBlock.io.robio.csr.vstart.valid
  csrio.vpu.set_vstart.bits := ctrlBlock.io.robio.csr.vstart.bits
  csrio.vpu.set_vtype.valid := ctrlBlock.io.robio.csr.vcsrFlag
  // Todo: redesign this; the connection below overrides the one above (last connect wins)
  csrio.vpu.set_vtype.valid := commitVType.valid
  csrio.vpu.set_vtype.bits := ZeroExt(vtype, XLEN)
  csrio.vpu.set_vl.valid := ctrlBlock.io.robio.csr.vcsrFlag
  csrio.vpu.set_vl.bits := ZeroExt(debugVl, XLEN)
  csrio.vpu.dirty_vs := ctrlBlock.io.robio.csr.dirty_vs
  csrio.exception := ctrlBlock.io.robio.exception
  csrio.memExceptionVAddr := io.mem.exceptionAddr.vaddr
  csrio.memExceptionGPAddr := io.mem.exceptionAddr.gpaddr
  csrio.externalInterrupt := io.fromTop.externalInterrupt
  csrio.distributedUpdate(0) := io.mem.csrDistributedUpdate
  csrio.distributedUpdate(1) := io.frontendCsrDistributedUpdate
  csrio.perf <> io.perf
  csrio.perf.retiredInstr <> ctrlBlock.io.robio.csr.perfinfo.retiredInstr
  csrio.perf.ctrlInfo <> ctrlBlock.io.perfInfo.ctrlInfo
  csrio.perf.perfEventsCtrl <> ctrlBlock.getPerf
  private val fenceio = intExuBlock.io.fenceio.get
  io.fenceio <> fenceio
  fenceio.disableSfence := csrio.disableSfence
  fenceio.disableHfenceg := csrio.disableHfenceg
  fenceio.disableHfencev := csrio.disableHfencev
  fenceio.virtMode := csrio.customCtrl.virtMode
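  // Vf exu block: same pipeline-connect/flush scheme as the int block, plus
  // the live vstart value from the CSR unit.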
  vfExuBlock.io.flush := ctrlBlock.io.toExuBlock.flush
  for (i <- 0 until vfExuBlock.io.in.size) {
    for (j <- 0 until vfExuBlock.io.in(i).size) {
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.vf(i)(j).bits.loadDependency, io.mem.ldCancel)
      NewPipelineConnect(
        bypassNetwork.io.toExus.vf(i)(j), vfExuBlock.io.in(i)(j), vfExuBlock.io.in(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.vf(i)(j).fire,
          bypassNetwork.io.toExus.vf(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          vfExuBlock.io.in(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush)
        ),
        Option("bypassNetwork2vfExuBlock")
      )

      vfExuBlock.io.in(i)(j).bits.vpu.foreach(_.vstart := csrio.vpu.vstart)
    }
  }

  intExuBlock.io.frm.foreach(_ := csrio.fpu.frm)
  vfExuBlock.io.frm.foreach(_ := csrio.fpu.frm)
  vfExuBlock.io.vxrm.foreach(_ := csrio.vpu.vxrm)
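  // Writeback datapath: merges int/vf exu outputs with the memory block's
  // writebacks (repacked below from MemExuOutput into the backend's wb
  // bundle) and fans them out to the register files, schedulers and ROB.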
  wbDataPath.io.flush := ctrlBlock.io.redirect
  wbDataPath.io.fromTop.hartId := io.fromTop.hartId
  wbDataPath.io.fromIntExu <> intExuBlock.io.out
  wbDataPath.io.fromVfExu <> vfExuBlock.io.out
  wbDataPath.io.fromMemExu.flatten.zip(io.mem.writeBack).foreach { case (sink, source) =>
    sink.valid := source.valid
    source.ready := sink.ready
    sink.bits.data   := source.bits.data
    sink.bits.pdest  := source.bits.uop.pdest
    sink.bits.robIdx := source.bits.uop.robIdx
    sink.bits.intWen.foreach(_ := source.bits.uop.rfWen)
    sink.bits.fpWen.foreach(_ := source.bits.uop.fpWen)
    sink.bits.vecWen.foreach(_ := source.bits.uop.vecWen)
    sink.bits.exceptionVec.foreach(_ := source.bits.uop.exceptionVec)
    sink.bits.flushPipe.foreach(_ := source.bits.uop.flushPipe)
    sink.bits.replay.foreach(_ := source.bits.uop.replayInst)
    sink.bits.debug := source.bits.debug
    sink.bits.debugInfo := source.bits.uop.debugInfo
    sink.bits.lqIdx.foreach(_ := source.bits.uop.lqIdx)
    sink.bits.sqIdx.foreach(_ := source.bits.uop.sqIdx)
    sink.bits.predecodeInfo.foreach(_ := source.bits.uop.preDecodeInfo)
    sink.bits.vls.foreach(x => {
      x.vdIdx := source.bits.vdIdx.get
      x.vdIdxInField := source.bits.vdIdxInField.get
      x.vpu   := source.bits.uop.vpu
      x.oldVdPsrc := source.bits.uop.psrc(2)
      x.isIndexed := VlduType.isIndexed(source.bits.uop.fuOpType)
      x.isMasked := VlduType.isMasked(source.bits.uop.fuOpType)
    })
    sink.bits.trigger.foreach(_ := source.bits.uop.trigger)
  }

  // to mem
  private val memIssueParams = params.memSchdParams.get.issueBlockParams
  private val memExuBlocksHasLDU = memIssueParams.map(_.exuBlockParams.map(x => x.hasLoadFu || x.hasHyldaFu))
  println(s"[Backend] memExuBlocksHasLDU: $memExuBlocksHasLDU")
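  // Issue to the memory block. A load uop that sits un-accepted at the exu
  // boundary for 16 cycles raises issueTimeout: the mem scheduler is told the
  // final issue was blocked (RespType.block) and the stuck uop is flushed out
  // of the pipeline register below.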
  private val toMem = Wire(bypassNetwork.io.toExus.mem.cloneType)
  for (i <- toMem.indices) {
    for (j <- toMem(i).indices) {
      val shouldLdCancel = LoadShouldCancel(bypassNetwork.io.toExus.mem(i)(j).bits.loadDependency, io.mem.ldCancel)
      val issueTimeout =
        if (memExuBlocksHasLDU(i)(j))
          Counter(0 until 16, toMem(i)(j).valid && !toMem(i)(j).fire, bypassNetwork.io.toExus.mem(i)(j).fire)._2
        else
          false.B

      if (memScheduler.io.loadFinalIssueResp(i).nonEmpty && memExuBlocksHasLDU(i)(j)) {
        memScheduler.io.loadFinalIssueResp(i)(j).valid := issueTimeout
        memScheduler.io.loadFinalIssueResp(i)(j).bits.fuType := toMem(i)(j).bits.fuType
        memScheduler.io.loadFinalIssueResp(i)(j).bits.resp := RespType.block
        memScheduler.io.loadFinalIssueResp(i)(j).bits.robIdx := toMem(i)(j).bits.robIdx
        memScheduler.io.loadFinalIssueResp(i)(j).bits.uopIdx.foreach(_ := toMem(i)(j).bits.vpu.get.vuopIdx)
      }

      NewPipelineConnect(
        bypassNetwork.io.toExus.mem(i)(j), toMem(i)(j), toMem(i)(j).fire,
        Mux(
          bypassNetwork.io.toExus.mem(i)(j).fire,
          bypassNetwork.io.toExus.mem(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || shouldLdCancel,
          toMem(i)(j).bits.robIdx.needFlush(ctrlBlock.io.toExuBlock.flush) || issueTimeout
        ),
        Option("bypassNetwork2toMemExus")
      )

      if (memScheduler.io.memAddrIssueResp(i).nonEmpty && memExuBlocksHasLDU(i)(j)) {
        memScheduler.io.memAddrIssueResp(i)(j).valid := toMem(i)(j).fire && FuType.isLoad(toMem(i)(j).bits.fuType)
        memScheduler.io.memAddrIssueResp(i)(j).bits.fuType := toMem(i)(j).bits.fuType
        memScheduler.io.memAddrIssueResp(i)(j).bits.robIdx := toMem(i)(j).bits.robIdx
        memScheduler.io.memAddrIssueResp(i)(j).bits.resp := RespType.success // for load inst, firing at toMem means issuing successfully
      }
    }
  }

  io.mem.redirect := ctrlBlock.io.redirect
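  // Repack the internal issue bundle into MemExuInput for the memory block;
  // the memory-dependence-prediction fields are zeroed when MDP is disabled
  // via the "EnableMdp" Constantin knob.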
  io.mem.issueUops.zip(toMem.flatten).foreach { case (sink, source) =>
    val enableMdp = Constantin.createRecord("EnableMdp", true.B)(0)
    sink.valid := source.valid
    source.ready := sink.ready
    sink.bits.iqIdx              := source.bits.iqIdx
    sink.bits.isFirstIssue       := source.bits.isFirstIssue
    sink.bits.uop                := 0.U.asTypeOf(sink.bits.uop)
    sink.bits.src                := 0.U.asTypeOf(sink.bits.src)
    sink.bits.src.zip(source.bits.src).foreach { case (l, r) => l := r }
    sink.bits.uop.fuType         := source.bits.fuType
    sink.bits.uop.fuOpType       := source.bits.fuOpType
    sink.bits.uop.imm            := source.bits.imm
    sink.bits.uop.robIdx         := source.bits.robIdx
    sink.bits.uop.pdest          := source.bits.pdest
    sink.bits.uop.rfWen          := source.bits.rfWen.getOrElse(false.B)
    sink.bits.uop.fpWen          := source.bits.fpWen.getOrElse(false.B)
    sink.bits.uop.vecWen         := source.bits.vecWen.getOrElse(false.B)
    sink.bits.uop.flushPipe      := source.bits.flushPipe.getOrElse(false.B)
    sink.bits.uop.pc             := source.bits.pc.getOrElse(0.U)
    sink.bits.uop.loadWaitBit    := Mux(enableMdp, source.bits.loadWaitBit.getOrElse(false.B), false.B)
    sink.bits.uop.waitForRobIdx  := Mux(enableMdp, source.bits.waitForRobIdx.getOrElse(0.U.asTypeOf(new RobPtr)), 0.U.asTypeOf(new RobPtr))
    sink.bits.uop.storeSetHit    := Mux(enableMdp, source.bits.storeSetHit.getOrElse(false.B), false.B)
    sink.bits.uop.loadWaitStrict := Mux(enableMdp, source.bits.loadWaitStrict.getOrElse(false.B), false.B)
    sink.bits.uop.ssid           := Mux(enableMdp, source.bits.ssid.getOrElse(0.U(SSIDWidth.W)), 0.U(SSIDWidth.W))
    sink.bits.uop.lqIdx          := source.bits.lqIdx.getOrElse(0.U.asTypeOf(new LqPtr))
    sink.bits.uop.sqIdx          := source.bits.sqIdx.getOrElse(0.U.asTypeOf(new SqPtr))
    sink.bits.uop.ftqPtr         := source.bits.ftqIdx.getOrElse(0.U.asTypeOf(new FtqPtr))
    sink.bits.uop.ftqOffset      := source.bits.ftqOffset.getOrElse(0.U)
    sink.bits.uop.debugInfo      := source.bits.perfDebugInfo
    sink.bits.uop.vpu            := source.bits.vpu.getOrElse(0.U.asTypeOf(new VPUCtrlSignals))
    sink.bits.uop.preDecodeInfo  := source.bits.preDecode.getOrElse(0.U.asTypeOf(new PreDecodeInfo))
  }
  io.mem.loadFastMatch := memScheduler.io.toMem.get.loadFastMatch.map(_.fastMatch)
  io.mem.loadFastImm := memScheduler.io.toMem.get.loadFastMatch.map(_.fastImm)
  io.mem.tlbCsr := csrio.tlb
  io.mem.csrCtrl := csrio.customCtrl
  io.mem.sfence := fenceio.sfence
  io.mem.isStoreException := CommitType.lsInstIsStore(ctrlBlock.io.robio.exception.bits.commitType)
  io.mem.isVlsException := ctrlBlock.io.robio.exception.bits.vls
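  // Read the PC of every issued load/store/hybrid uop from the Ftq-backed PC
  // memory in the ctrl block, addressed by the uop's ftqPtr/ftqOffset.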
  require(io.mem.loadPcRead.size == params.LduCnt)
  io.mem.loadPcRead.zipWithIndex.foreach { case (loadPcRead, i) =>
    loadPcRead := ctrlBlock.io.memLdPcRead(i).data
    ctrlBlock.io.memLdPcRead(i).vld := io.mem.issueLda(i).valid
    ctrlBlock.io.memLdPcRead(i).ptr := io.mem.issueLda(i).bits.uop.ftqPtr
    ctrlBlock.io.memLdPcRead(i).offset := io.mem.issueLda(i).bits.uop.ftqOffset
  }

  io.mem.storePcRead.zipWithIndex.foreach { case (storePcRead, i) =>
    storePcRead := ctrlBlock.io.memStPcRead(i).data
    ctrlBlock.io.memStPcRead(i).vld := io.mem.issueSta(i).valid
    ctrlBlock.io.memStPcRead(i).ptr := io.mem.issueSta(i).bits.uop.ftqPtr
    ctrlBlock.io.memStPcRead(i).offset := io.mem.issueSta(i).bits.uop.ftqOffset
  }

  io.mem.hyuPcRead.zipWithIndex.foreach { case (hyuPcRead, i) =>
    hyuPcRead := ctrlBlock.io.memHyPcRead(i).data
    ctrlBlock.io.memHyPcRead(i).vld := io.mem.issueHylda(i).valid
    ctrlBlock.io.memHyPcRead(i).ptr := io.mem.issueHylda(i).bits.uop.ftqPtr
    ctrlBlock.io.memHyPcRead(i).offset := io.mem.issueHylda(i).bits.uop.ftqOffset
  }

  ctrlBlock.io.robio.robHeadLsIssue := io.mem.issueUops.map(deq => deq.fire && deq.bits.uop.robIdx === ctrlBlock.io.robio.robDeqPtr).reduce(_ || _)

  // mem io
  io.mem.lsqEnqIO <> memScheduler.io.memIO.get.lsqEnqIO
  io.mem.robLsqIO <> ctrlBlock.io.robio.lsq

  io.frontendSfence := fenceio.sfence
  io.frontendTlbCsr := csrio.tlb
  io.frontendCsrCtrl := csrio.customCtrl

  io.tlb <> csrio.tlb

  io.csrCustomCtrl := csrio.customCtrl

  io.toTop.cpuHalted := false.B // TODO: implement cpu halt

  io.debugTopDown.fromRob := ctrlBlock.io.debugTopDown.fromRob
  ctrlBlock.io.debugTopDown.fromCore := io.debugTopDown.fromCore

  io.debugRolling := ctrlBlock.io.debugRolling

  if (backendParams.debugEn) {
    dontTouch(memScheduler.io)
    dontTouch(dataPath.io.toMemExu)
    dontTouch(wbDataPath.io.fromMemExu)
  }
}

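/**
 * Interface between the backend and the memory block: issue and writeback
 * ports, LSQ status inputs, issue-queue feedback, and debug taps.
 */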
class BackendMemIO(implicit p: Parameters, params: BackendParams) extends XSBundle {
  // Since fast load replay always uses load unit 0, Backend flips two load ports to avoid conflicts
  val flippedLda = true
  // params alias
  private val LoadQueueSize = VirtualLoadQueueSize
  // In/Out // Todo: split this into one-direction bundles
  val lsqEnqIO = Flipped(new LsqEnqIO)
  val robLsqIO = new RobLsqIO
  val ldaIqFeedback = Vec(params.LduCnt, Flipped(new MemRSFeedbackIO))
  val staIqFeedback = Vec(params.StaCnt, Flipped(new MemRSFeedbackIO))
  val hyuIqFeedback = Vec(params.HyuCnt, Flipped(new MemRSFeedbackIO))
  val ldCancel = Vec(params.LdExuCnt, Flipped(new LoadCancelIO))
  val wakeup = Vec(params.LdExuCnt, Flipped(Valid(new DynInst)))
  val loadPcRead = Vec(params.LduCnt, Output(UInt(VAddrBits.W)))
  val storePcRead = Vec(params.StaCnt, Output(UInt(VAddrBits.W)))
  val hyuPcRead = Vec(params.HyuCnt, Output(UInt(VAddrBits.W)))
  // Input
  val writebackLda = Vec(params.LduCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackSta = Vec(params.StaCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackStd = Vec(params.StdCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackHyuLda = Vec(params.HyuCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackHyuSta = Vec(params.HyuCnt, Flipped(DecoupledIO(new MemExuOutput)))
  val writebackVldu = Vec(params.VlduCnt, Flipped(DecoupledIO(new MemExuOutput(true))))

  val s3_delayed_load_error = Input(Vec(LoadPipelineWidth, Bool()))
  val stIn = Input(Vec(params.StaExuCnt, ValidIO(new DynInst())))
  val memoryViolation = Flipped(ValidIO(new Redirect))
  val exceptionAddr = Input(new Bundle {
    val vaddr = UInt(VAddrBits.W)
    val gpaddr = UInt(GPAddrBits.W)
  })
  val sqDeq = Input(UInt(log2Ceil(EnsbufferWidth + 1).W))
  val lqDeq = Input(UInt(log2Up(CommitWidth + 1).W))
  val sqDeqPtr = Input(new SqPtr)
  val lqDeqPtr = Input(new LqPtr)

  val lqCancelCnt = Input(UInt(log2Up(VirtualLoadQueueSize + 1).W))
  val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))

  val lqCanAccept = Input(Bool())
  val sqCanAccept = Input(Bool())

  val otherFastWakeup = Flipped(Vec(params.LduCnt + params.HyuCnt, ValidIO(new DynInst)))
  val stIssuePtr = Input(new SqPtr())

  val csrDistributedUpdate = Flipped(new DistributedCSRUpdateReq)

  val debugLS = Flipped(Output(new DebugLSIO))

  val lsTopdownInfo = Vec(params.LduCnt + params.HyuCnt, Flipped(Output(new LsTopdownInfo)))
  // Output
  val redirect = ValidIO(new Redirect)   // rob flush MemBlock
  val issueLda = MixedVec(Seq.fill(params.LduCnt)(DecoupledIO(new MemExuInput())))
  val issueSta = MixedVec(Seq.fill(params.StaCnt)(DecoupledIO(new MemExuInput())))
  val issueStd = MixedVec(Seq.fill(params.StdCnt)(DecoupledIO(new MemExuInput())))
  val issueHylda = MixedVec(Seq.fill(params.HyuCnt)(DecoupledIO(new MemExuInput())))
  val issueHysta = MixedVec(Seq.fill(params.HyuCnt)(DecoupledIO(new MemExuInput())))
  val issueVldu = MixedVec(Seq.fill(params.VlduCnt)(DecoupledIO(new MemExuInput(true))))

  val loadFastMatch = Vec(params.LduCnt, Output(UInt(params.LduCnt.W)))
  val loadFastImm   = Vec(params.LduCnt, Output(UInt(12.W))) // Imm_I

  val tlbCsr = Output(new TlbCsrBundle)
  val csrCtrl = Output(new CustomCSRCtrlIO)
  val sfence = Output(new SfenceBundle)
  val isStoreException = Output(Bool())
  val isVlsException = Output(Bool())

  // ATTENTION: The issue ports' sequence order should be the same as IQs' deq config
  private [backend] def issueUops: Seq[DecoupledIO[MemExuInput]] = {
    issueSta ++
      issueHylda ++ issueHysta ++
      issueLda ++
      issueVldu ++
      issueStd
  }.toSeq

  // ATTENTION: The writeback ports' sequence order should be the same as IQs' deq config
  private [backend] def writeBack: Seq[DecoupledIO[MemExuOutput]] = {
    writebackSta ++
      writebackHyuLda ++ writebackHyuSta ++
      writebackLda ++
      writebackVldu ++
      writebackStd
  }
}

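/**
 * Top-level IO of the backend: frontend handshake, memory-block interface,
 * CSR/fence outputs and debug taps.
 */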
class BackendIO(implicit p: Parameters, params: BackendParams) extends XSBundle {
  val fromTop = new Bundle {
    val hartId = Input(UInt(hartIdLen.W))
    val externalInterrupt = new ExternalInterruptIO
  }

  val toTop = new Bundle {
    val cpuHalted = Output(Bool())
  }

  val fenceio = new FenceIO
  // Todo: merge these bundles into BackendFrontendIO
  val frontend = Flipped(new FrontendToCtrlIO)
  val frontendSfence = Output(new SfenceBundle)
  val frontendCsrCtrl = Output(new CustomCSRCtrlIO)
  val frontendTlbCsr = Output(new TlbCsrBundle)
  // distributed csr write
  val frontendCsrDistributedUpdate = Flipped(new DistributedCSRUpdateReq)

  val mem = new BackendMemIO

  val perf = Input(new PerfCounterIO)

  val tlb = Output(new TlbCsrBundle)

  val csrCustomCtrl = Output(new CustomCSRCtrlIO)

  val debugTopDown = new Bundle {
    val fromRob = new RobCoreTopDownIO
    val fromCore = new CoreDispatchTopDownIO
  }
  val debugRolling = new RobDebugRollingIO
}