// XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 1b5e3cda2e8bbc4254b900b0321cbc4d396ef041)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants}
import xiangshan.cache.mmu.TlbRequestIO
import xiangshan.mem._
import xiangshan.backend.rob.RobLsqIO
import xiangshan.backend.fu.HasExceptionNO
import xiangshan.frontend.FtqPtr
import xiangshan.backend.fu.fpu.FPU

class LqPtr(implicit p: Parameters) extends CircularQueuePtr[LqPtr](
  p => p(XSCoreParamsKey).LoadQueueSize
){
  override def cloneType = (new LqPtr).asInstanceOf[this.type]
}

object LqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
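
// Usage sketch (illustrative, not from the original source; values hypothetical,
// assuming a 64-entry queue):
//   val p = LqPtr(f = false.B, v = 10.U)   // entry 10, first lap around the queue
//   val q = p + 60.U                       // wraps around: q.flag flips, q.value = 6
// CircularQueuePtr orders entries by the (flag, value) pair, so helpers like
// isAfter/isBefore stay correct across wrap-around.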

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      /*
          riscv-spec-20191213: 12.2 NaN Boxing of Narrower Values
          Any operation that writes a narrower result to an f register must write
          all 1s to the uppermost FLEN−n bits to yield a legal NaN-boxed value.
      */
      LSUOpType.lw   -> Mux(fpWen, FPU.box(rdata, FPU.S), SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, FPU.box(rdata, FPU.D), SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN)
    ))
  }
}
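
// Worked example (illustrative, not from the original source): a single-precision
// load (lw with fpWen set) that reads 0x3f800000 (1.0f) must be NaN-boxed before
// it reaches an f register, so rdataHelper returns
//   0xffffffff3f800000   // upper FLEN-32 bits forced to all 1s by FPU.box
// whereas the same bit pattern for an integer lw is sign-extended instead:
//   0x000000003f800000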

class LqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(exuParameters.LsExuCnt, Input(Bool()))
  val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(exuParameters.LsExuCnt, Output(new LqPtr))
}

// Load Queue
class LoadQueue(implicit p: Parameters) extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasExceptionNO
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val loadDataForwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val needReplayFromRS = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO)) // TODO: to be renamed
    val loadViolationQuery = Vec(LoadPipelineWidth, Flipped(new LoadViolationQueryIO))
    val rob = Flipped(new RobLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill)) // TODO: to be renamed
    val release = Flipped(ValidIO(new Release))
    val uncache = new DCacheWordIO
    val exceptionAddr = new ExceptionAddrIO
    val lqFull = Output(Bool())
  })

  println("LoadQueue: size: " + LoadQueueSize)

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRobEntry))
  val dataModule = Module(new LoadQueueDataWrapper(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 3, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val released = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // load data has been released by dcache
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of the rob
  val refilling = WireInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst is being refilled by dcache refill data in this cycle

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
  val debug_paddr = Reg(Vec(LoadQueueSize, UInt(PAddrBits.W))) // paddr: physical address of the inst

  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  val commitCount = RegNext(io.rob.lcommit)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > EnqWidth
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until io.enq.req.length) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      released(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
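
  // Allocation sketch (illustrative, hypothetical values): with
  // needAlloc = b0101 and enqPtr = 20, req 0 gets offset 0 (entry 20) and
  // req 2 gets offset PopCount(needAlloc(1, 0)) = 1 (entry 21); requests
  // that need no entry consume no offset. All requests are accepted or
  // rejected together, since canAccept is a single signal for the group.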

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    *       The mmio instruction will be sent to lower level when it reaches ROB's head.
    *       After the uncache response, it writes back through the arbiter with loadUnit.
    *   (3) For cache misses, it is marked as miss and sent to dcache later.
    *       After the cache refill, it writes back through the arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) &&
        !io.loadIn(i).bits.mmio && // mmio data is not valid until the uncache access finishes
        !io.needReplayFromRS(i) // do not write back if that inst will be re-sent from rs
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio
      debug_paddr(loadWbIndex) := io.loadIn(i).bits.paddr

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i) && !io.needReplayFromRS(i)
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      uop(loadWbIndex).debugInfo := io.loadIn(i).bits.uop.debugInfo
      // update the replayInst (replay from fetch) bit,
      // as replayInst may be set to true in the load pipeline
      uop(loadWbIndex).ctrl.replayInst := io.loadIn(i).bits.uop.ctrl.replayInst
    }
    // vaddrModule write is delayed, as vaddrModule will not be read right after write
    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bits in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
      refilling(i) := true.B
    }
  })

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled) and write them back to cdb.
  // The 2 refilled loads are selected from even/odd entries separately.

  // Stage 0
  // Generate writeback indexes

  def getEvenBits(input: UInt): UInt = {
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }
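
  // Illustrative example (hypothetical 8-entry queue): for input = b1001_1010
  // (bit 7 down to bit 0),
  //   getEvenBits(input) = b0100  // bits 6, 4, 2, 0 of input
  //   getOddBits(input)  = b1011  // bits 7, 5, 3, 1 of input
  // so entries 0, 2, 4, ... compete for ldout(0) and entries 1, 3, 5, ...
  // compete for ldout(1).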

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && (datavalid(i) || refilling(i))
  })).asUInt() // use UInt instead of Vec to reduce Verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenFireMask = getEvenBits(UIntToOH(loadWbSel(0)))
  val oddFireMask = getOddBits(UIntToOH(loadWbSel(1)))
  // generate real select vec
  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }
  val loadEvenSelVecFire = getEvenBits(loadWbSelVec) & ~evenFireMask
  val loadOddSelVecFire = getOddBits(loadWbSelVec) & ~oddFireMask
  val loadEvenSelVecNotFire = getEvenBits(loadWbSelVec)
  val loadOddSelVecNotFire = getOddBits(loadWbSelVec)
  val loadEvenSel = Mux(
    io.ldout(0).fire(),
    getFirstOne(toVec(loadEvenSelVecFire), evenDeqMask),
    getFirstOne(toVec(loadEvenSelVecNotFire), evenDeqMask)
  )
  val loadOddSel = Mux(
    io.ldout(1).fire(),
    getFirstOne(toVec(loadOddSelVecFire), oddDeqMask),
    getFirstOne(toVec(loadOddSelVecNotFire), oddDeqMask)
  )

  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(loadEvenSel, 0.U(1.W))
  loadWbSelVGen(0) := Mux(io.ldout(0).fire(), loadEvenSelVecFire.asUInt.orR, loadEvenSelVecNotFire.asUInt.orR)
  loadWbSelGen(1) := Cat(loadOddSel, 1.U(1.W))
  loadWbSelVGen(1) := Mux(io.ldout(1).fire(), loadOddSelVecFire.asUInt.orR, loadOddSelVecNotFire.asUInt.orR)

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()) {
      // Mark them as written back, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })
  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.debug.paddr := debug_paddr(loadWbSel(i))
    io.ldout(i).bits.debug.vaddr := vaddrModule.io.rdata(i+1)
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb robidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.robIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }
  })

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated((deqPtrExt+i.U).value) := false.B
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
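
  // Worked example (illustrative, hypothetical values): with mask = b0110 and
  // startMask = b0011 (entries below the start position), highBits = b0100,
  // so getFirstOne returns 2: the first set bit at or above the start position
  // wins, and only if no such bit exists does the search wrap back to the
  // lowest set bit of mask (e.g. mask = b0010 with the same startMask gives 1).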

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).robIdx, uop(1).robIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).robIdx, uop(j).robIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
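
  // Sketch of the resulting matrix (illustrative): mask(i)(j) is true when
  // request i is younger (robIdx-after) than request j, or when request i is
  // invalid, so an invalid request loses every comparison and is never chosen
  // as the oldest. E.g. for valid = (1, 1) with uop(0) older, mask(1)(0) is
  // true and mask(0)(1) is false.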

  /**
    * Store-Load Memory violation detection
    *
    * When a store writes back, it searches LoadQueue for younger load instructions
    * with the same load physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations, up to 6 possible redirect requests.
    *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
    * Cycle 2: Redirect Fire
    *   Choose the oldest load (part 2). (3 -> 1)
    *   Prepare redirect request according to the detected violation.
    *   Fire redirect request (if valid)
    */

  // stage 0:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |  (paddr match)
  // stage 1:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |
  //                 |  |------------|  |
  //                 |        |         |
  // stage 2:        lq      l1wb       lq
  //                 |        |         |
  //                 --------------------
  //                          |
  //                      rollback req
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
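
    // Mask sketch (illustrative, hypothetical 8-entry queue): with
    // startIndex = 5 and enqPtr = 2 on the next lap (flags differ),
    // lqIdxMask = b0001_1111, enqMask = b0000_0011, xorMask = b0001_1100;
    // since sameFlag is false, toEnqPtrMask = ~xorMask = b1110_0011,
    // i.e. entries 5, 6, 7, 0, 1: exactly the loads allocated at or after
    // this store's lqIdx, which are the ones that must be checked.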

    // check if a load already in lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when load/store write back to rob together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load in l1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.robIdx, io.storeIn(i).bits.uop.robIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, l1ViolationUop.robIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, lqViolationUop.robIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x robidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.robIdx.asUInt, wbViolationUop.robIdx.asUInt
    )

    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
  }

  def rollbackSel(a: Valid[MicroOpRbExt], b: Valid[MicroOpRbExt]): ValidIO[MicroOpRbExt] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.uop.robIdx, b.bits.uop.robIdx), b, a), // a, b both valid, select the oldest
        a // sel a
      ),
      b // sel b
    )
  }
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastlastCycleRedirect = RegNext(lastCycleRedirect)

  // S2: select rollback (part1) and generate rollback request
  // rollback check
  // Wb/L1 rollback seq check is done in s2
  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOpRbExt)))
  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOpRbExt)))
  // store ftq index for store set update
  val stFtqIdxS2 = Wire(Vec(StorePipelineWidth, new FtqPtr))
  val stFtqOffsetS2 = Wire(Vec(StorePipelineWidth, UInt(log2Up(PredictWidth).W)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
    rollbackLq(i).bits.uop := detectedRollback._1._2
    rollbackLq(i).bits.flag := i.U
    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
    rollbackWb(i).bits.uop := detectedRollback._2._2
    rollbackWb(i).bits.flag := i.U
    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
    rollbackL1(i).bits.uop := detectedRollback._3._2
    rollbackL1(i).bits.flag := i.U
    rollbackL1Wb(2*i) := rollbackL1(i)
    rollbackL1Wb(2*i+1) := rollbackWb(i)
    stFtqIdxS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqPtr)
    stFtqOffsetS2(i) := RegNext(io.storeIn(i).bits.uop.cf.ftqOffset)
  }

  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
  val rollbackLq0VReg = RegNext(rollbackLq(0).valid)
  val rollbackLq0Reg = RegEnable(rollbackLq(0).bits, rollbackLq(0).valid)
  val rollbackLq1VReg = RegNext(rollbackLq(1).valid)
  val rollbackLq1Reg = RegEnable(rollbackLq(1).bits, rollbackLq(1).valid)

  // S3: select rollback (part2), generate rollback request, then fire rollback request
  // Note that we use robIdx - 1.U to flush the load instruction itself.
  // Thus, if last cycle's robIdx equals this cycle's robIdx, it still triggers the redirect.

  // FIXME: this is ugly
  val rollbackValidVec = Seq(rollbackL1WbVReg, rollbackLq0VReg, rollbackLq1VReg)
  val rollbackUopExtVec = Seq(rollbackL1WbReg, rollbackLq0Reg, rollbackLq1Reg)

  // select uop in parallel
  val mask = getAfterMask(rollbackValidVec, rollbackUopExtVec.map(i => i.uop))
  val oneAfterZero = mask(1)(0)
  val rollbackUopExt = Mux(oneAfterZero && mask(2)(0),
    rollbackUopExtVec(0),
    Mux(!oneAfterZero && mask(2)(1), rollbackUopExtVec(1), rollbackUopExtVec(2)))
  val stFtqIdxS3 = RegNext(stFtqIdxS2)
  val stFtqOffsetS3 = RegNext(stFtqOffsetS2)
  val rollbackUop = rollbackUopExt.uop
  val rollbackStFtqIdx = stFtqIdxS3(rollbackUopExt.flag)
  val rollbackStFtqOffset = stFtqOffsetS3(rollbackUopExt.flag)

  // check if rollback request is still valid in parallel
  val rollbackValidVecChecked = Wire(Vec(3, Bool()))
  for (((v, uop), idx) <- rollbackValidVec.zip(rollbackUopExtVec.map(i => i.uop)).zipWithIndex) {
    rollbackValidVecChecked(idx) := v &&
      (!lastCycleRedirect.valid || isBefore(uop.robIdx, lastCycleRedirect.bits.robIdx)) &&
      (!lastlastCycleRedirect.valid || isBefore(uop.robIdx, lastlastCycleRedirect.bits.robIdx))
  }

  io.rollback.bits.robIdx := rollbackUop.robIdx
  io.rollback.bits.ftqIdx := rollbackUop.cf.ftqPtr
  io.rollback.bits.stFtqIdx := rollbackStFtqIdx
  io.rollback.bits.ftqOffset := rollbackUop.cf.ftqOffset
  io.rollback.bits.stFtqOffset := rollbackStFtqOffset
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.cfiUpdate := DontCare
  io.rollback.bits.cfiUpdate.target := rollbackUop.cf.pc
  io.rollback.bits.debug_runahead_checkpoint_id := rollbackUop.debugInfo.runahead_checkpoint_id
  // io.rollback.bits.pc := DontCare

  io.rollback.valid := rollbackValidVecChecked.asUInt.orR

  when(io.rollback.valid) {
    // XSDebug("Mem rollback: pc %x robidx %d\n", io.rollback.bits.cfi, io.rollback.bits.robIdx.asUInt)
  }

  /**
    * Load-Load Memory violation detection
    *
    * When a load arrives at load_s1, it searches LoadQueue for younger load instructions
    * with the same load physical address. If a younger load has been released (or observed),
    * the younger load needs to be re-executed.
    *
    * For now, if re-execution is found to be needed in load_s1, we mark the older load as
    * replayInst; the two loads will be replayed when the older load becomes the head of rob.
    *
    * When dcache releases a line, mark all writebacked entries in the load queue with
    * the same line paddr as released.
    */
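
  // Timeline sketch (illustrative): older load A and younger load B read the
  // same address; B completes first, the dcache then releases that line
  // (released(B) := true), and another core may write it. When A reaches
  // load_s1 and queries the LQ, B matches (addr match, younger, released),
  // so have_violation is raised; the load unit marks A as replayInst and
  // both loads are re-executed once A reaches the head of the ROB.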

  // Load-Load Memory violation query
  val deqRightMask = UIntToMask.rightmask(deqPtr, LoadQueueSize)
  (0 until LoadPipelineWidth).map(i => {
    dataModule.io.release_violation(i).paddr := io.loadViolationQuery(i).req.bits.paddr
    io.loadViolationQuery(i).req.ready := true.B
    io.loadViolationQuery(i).resp.valid := RegNext(io.loadViolationQuery(i).req.fire())
    // Generate the real violation mask
    // Note that we use UIntToMask.rightmask here
    val startIndex = io.loadViolationQuery(i).req.bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask.rightmask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ deqRightMask
    val sameFlag = io.loadViolationQuery(i).req.bits.uop.lqIdx.flag === deqPtrExt.flag
    val toDeqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
    val ldld_violation_mask = WireInit(VecInit((0 until LoadQueueSize).map(j => {
      dataModule.io.release_violation(i).match_mask(j) && // addr match
      toDeqPtrMask(j) && // the load is younger than current load
      allocated(j) && // entry is valid
      released(j) && // cacheline is released
      (datavalid(j) || miss(j)) // paddr is valid
    })))
    dontTouch(ldld_violation_mask)
    ldld_violation_mask.suggestName("ldldViolationMask_" + i)
    io.loadViolationQuery(i).resp.bits.have_violation := RegNext(ldld_violation_mask.asUInt.orR)
  })

  // "released" flag update
  //
  // When io.release.valid, it uses the last ld-ld paddr cam port to
  // update the release flag in 1 cycle
  when(io.release.valid) {
    // Take over the ld-ld paddr cam port
    dataModule.io.release_violation.takeRight(1)(0).paddr := io.release.bits.paddr
    io.loadViolationQuery.takeRight(1)(0).req.ready := false.B
    // If a load needs that cam port, replay it from rs
    (0 until LoadQueueSize).map(i => {
      when(dataModule.io.release_violation.takeRight(1)(0).match_mask(i) && allocated(i) && writebacked(i)) {
        // Note: if a load has missed in dcache and is waiting for refill in the load queue,
        // its released flag still needs to be set as true if the addr matches.
        released(i) := true.B
      }
    })
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from load units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalid
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
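  // Walk-through (illustrative): an mmio load writes back from the load unit
  // and sets pending; once it sits at deqPtr and io.rob.pendingld is raised,
  // the FSM below goes s_idle -> s_req, fires one DCacheWordIO read, waits in
  // s_resp for the uncache response (which sets datavalid so the normal
  // writeback path picks it up), then parks in s_wait until the ROB commits
  // the instruction before accepting the next mmio load.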
  // (2) when they reach ROB's head, they can be sent to uncache channel
  val lqTailMmioPending = WireInit(pending(deqPtr))
  val lqTailAllocated = WireInit(allocated(deqPtr))
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(RegNext(io.rob.pendingld) && lqTailMmioPending && lqTailAllocated) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(RegNext(io.rob.commit)) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.id   := DontCare
  io.uncache.req.bits.instrtype := DontCare

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()) {
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  // no inst will be committed 1 cycle before tval update
  vaddrModule.io.raddr(0) := (deqPtrExt + commitCount).value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // Read vaddr for debug trigger
  (0 until LoadPipelineWidth).map(i => {
    vaddrModule.io.raddr(i+1) := loadWbSel(i)
  })

  // misprediction recovery / exception redirect
  // invalidate lq entries using robIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)

  allowEnqueue := validCount + enqNumber <= (LoadQueueSize - io.enq.req.length).U
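
  // Arithmetic sketch (illustrative; assumes LoadQueueSize = 64 and 2 enq ports):
  // allowEnqueue then holds when validCount + enqNumber <= 62, i.e. at least
  // io.enq.req.length entries remain free. Since allowEnqueue is a register,
  // this margin covers the requests that may already be in flight during the
  // cycle in which the decision takes effect.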

  /**
    * misc
    */
  io.rob.storeDataRobWb := DontCare // will be overwritten by store queue's result

  // perf counter
  QueuePerf(LoadQueueSize, validCount, !allowEnqueue)
  io.lqFull := !allowEnqueue
  XSPerfAccumulate("rollback", io.rollback.valid) // rollback redirect generated
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
  XSPerfAccumulate("refill", io.dcache.valid)
  XSPerfAccumulate("writeback_success", PopCount(VecInit(io.ldout.map(i => i.fire()))))
  XSPerfAccumulate("writeback_blocked", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready))))
  XSPerfAccumulate("utilization_miss", PopCount((0 until LoadQueueSize).map(i => allocated(i) && miss(i))))

  val perfinfo = IO(new Bundle() {
    val perfEvents = Output(new PerfEventsBundle(10))
  })
  val perfEvents = Seq(
    ("rollback          ", io.rollback.valid                                                               ),
    ("mmioCycle         ", uncacheState =/= s_idle                                                         ),
    ("mmio_Cnt          ", io.uncache.req.fire()                                                           ),
    ("refill            ", io.dcache.valid                                                                 ),
    ("writeback_success ", PopCount(VecInit(io.ldout.map(i => i.fire())))                                  ),
    ("writeback_blocked ", PopCount(VecInit(io.ldout.map(i => i.valid && !i.ready)))                       ),
    ("ltq_1/4_valid     ", (validCount < (LoadQueueSize.U/4.U))                                            ),
    ("ltq_2/4_valid     ", (validCount > (LoadQueueSize.U/4.U)) & (validCount <= (LoadQueueSize.U/2.U))    ),
    ("ltq_3/4_valid     ", (validCount > (LoadQueueSize.U/2.U)) & (validCount <= (LoadQueueSize.U*3.U/4.U))),
    ("ltq_4/4_valid     ", (validCount > (LoadQueueSize.U*3.U/4.U))                                        )
  )

  for (((perf_out, (perf_name, perf)), i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) {
    perf_out.incr_step := RegNext(perf)
  }
  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    XSDebug(i + " pc %x pa %x ", uop(i).cf.pc, debug_paddr(i))
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, "\n")
  }

}