// xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision ec4b629128e8d079c26c89cba29b20f2c77748a2)
package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr


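// LqPtr: circular pointer into the load queue. The extra `flag` bit toggles on each
// wrap-around of `value`, so that a full queue (same value, different flag) can be
// distinguished from an empty one (same value, same flag); see isEmpty/isFull below.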
class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

trait HasLoadHelper { this: XSModule =>
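  // rdataHelper: select and sign/zero-extend the accessed bytes according to the
  // load opcode. For FP loads (fpWen) of lw/ld the raw 32/64-bit data is passed
  // through unchanged; recoding into the internal FP format is handled separately
  // (see fpRdataHelper below).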
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, rdata, SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }

  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw   -> recode(rdata(31, 0), S),
      LSUOpType.ld   -> recode(rdata(63, 0), D)
    ))
  }
}

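// Enqueue interface between dispatch and the load queue. `canAccept` is the load
// queue's own ready signal, `sqCanAccept` is fed back from the store queue; an
// entry is actually allocated only when both queues can accept and there is no
// redirect in flight (see the enqueue logic below).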
class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new AsyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been writebacked to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.info(i).commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.info(i).lqIdx.value)

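  // UIntToMask(ptr, size) sets the bits below ptr, so deqMask/enqMask mark the
  // entries in front of the dequeue/enqueue pointers. They are used for age-ordered
  // selection (getFirstOne) and for the range masks in violation detection.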
  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when #emptyEntries > EnqWidth (= RenameWidth)
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
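    // offset: how many earlier requests in this dispatch group also need an LQ
    // entry; request i is therefore allocated at enqPtrExt(offset), keeping the
    // allocated entries densely packed in program order.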
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile as they leave the load pipeline.
    * However,
    *   (1) An mmio instruction with exceptions writes back to the ROB immediately.
    *   (2) An mmio instruction without exceptions does not write back here. It is sent
    *       to the lower memory level once it reaches the head of the ROB; after the
    *       uncache response it writes back through the arbiter shared with loadUnit.
    *   (3) A load that misses in the dcache is marked as miss and sent to the dcache later;
    *       after the cache refill it writes back through the arbiter shared with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    vaddrModule.io.wen(i) := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      vaddrModule.io.waddr(i) := loadWbIndex
      vaddrModule.io.wdata(i) := io.loadIn(i).bits.vaddr
      vaddrModule.io.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      // listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
    }
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

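  // For every allocated, still-missing entry, dataModule compares the refill
  // address against the entry's paddr (matchMask). On a match the refilled data
  // is merged inside dataModule and the entry becomes data-valid here, making it
  // eligible for writeback selection.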
  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })

  // Write back up to 2 missed load insts to the CDB
  //
  // Pick 2 missed loads whose data has been refilled and write them back to the CDB.
  // The 2 refilled loads are selected separately from the even and odd entries.

  // Stage 0
  // Generate writeback indexes
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && datavalid(i)
  })).asUInt() // use UInt instead of Vec to reduce Verilog lines
  val loadEvenSelVec = VecInit((0 until LoadQueueSize/2).map(i => {loadWbSelVec(2*i)}))
  val loadOddSelVec = VecInit((0 until LoadQueueSize/2).map(i => {loadWbSelVec(2*i+1)}))
  val evenDeqMask = VecInit((0 until LoadQueueSize/2).map(i => {deqMask(2*i)})).asUInt
  val oddDeqMask = VecInit((0 until LoadQueueSize/2).map(i => {deqMask(2*i+1)})).asUInt

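  // getFirstOne starts its search from the dequeue pointer (via the even/odd
  // slices of deqMask), so each channel picks its oldest eligible entry. The
  // even/odd split guarantees the two channels never select the same entry.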
  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(getFirstOne(loadEvenSelVec, evenDeqMask), 0.U(1.W))
  loadWbSelVGen(0) := loadEvenSelVec.asUInt.orR
  loadWbSelGen(1) := Cat(getFirstOne(loadOddSelVec, oddDeqMask), 1.U(1.W))
  loadWbSelVGen(1) := loadOddSelVec.asUInt.orR

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = RegInit(VecInit(List.fill(LoadPipelineWidth)(false.B)))
  (0 until LoadPipelineWidth).map(i => {
    val canGo = io.ldout(i).fire() || !loadWbSelV(i)
    val valid = loadWbSelVGen(i)
    // store the selected index in a pipeline register
    loadWbSel(i) := RegEnable(loadWbSelGen(i), valid && canGo)
    // mark the entry as writebacked so it will not be selected again next cycle
    when(valid && canGo){
      writebacked(loadWbSelGen(i)) := true.B
    }
    // update loadWbSelV (select-valid register)
    when(io.ldout(i).fire()){
      loadWbSelV(i) := false.B
    }
    when(valid && canGo){
      loadWbSelV(i) := true.B
    }
  })

  // Stage 1
  // Use the indexes generated in cycle 0 to read data
  // and write it back to the CDB
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSel(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
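    // shift the 64-bit doubleword right by the byte offset (paddr(2,0)) so the
    // accessed bytes start at bit 0; rdataHelper then extends them per the opcode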
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.wb.rdata(i).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.debug(loadWbSel(i)).paddr,
        dataModule.io.debug(loadWbSel(i)).data,
        debug_mmio(loadWbSel(i))
      )
    }

  })

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

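  // getFirstOne: index of the first set bit of `mask` at or after the position
  // encoded by `startMask` (startMask has the bits *below* the start position set),
  // wrapping around to the lowest set bit if no bit is set above the start.
  // e.g. mask = b0110, startMask = b0011 (start index 2): highBits = b0100 -> returns 2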
  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

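  // getAfterMask: pairwise "is-after" matrix. mask(i)(j) is true when uop(i) is
  // younger than uop(j) (by roqIdx); an invalid entry i compares as younger than
  // everything, so it can never win the oldest-violation selection below.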
  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger load instructions
    * to the same physical address. Those loads got the wrong data and need to be re-executed.
    *
    * Cycle 0: Store Writeback
    *   Generate the match vector for the store address against rangeMask(stPtr, enqPtr).
    *   Load instructions still in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations. Choose the oldest load.
    *   Prepare the redirect request according to the detected violation.
    * Cycle 2: Redirect Fire
    *   Fire the redirect request (if valid)
    */
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
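    // lqIdxMask marks entries below the store's lqIdx and enqMask marks entries
    // below enqPtr; their xor covers the entries between the two pointers. When
    // the pointer flags differ the region wraps around, so the complement is used.
    // toEnqPtrMask therefore selects loads that are younger than the store.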

    // check if a load already in the lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when a load and a store write back to the roq in the same cycle, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load still in LoadUnit stage 1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

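    // Pick the oldest violating load among {lq, wb, l1}. mask(i)(j) means
    // candidate i is younger than candidate j (invalid candidates count as
    // younger), so the Mux chain below selects the oldest valid candidate.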
    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }

  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }

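  // Reduce the per-store-pipeline rollback requests to a single request,
  // keeping the one whose target load is oldest in program order.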
  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

  // S2: select rollback and generate the rollback request
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, if last cycle's roqIdx equals this cycle's roqIdx, the redirect is still triggered.
  val rollbackGen = Wire(Valid(new Redirect))
  val rollbackReg = Reg(Valid(new Redirect))
  rollbackGen.valid := rollbackSelected.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional())

  rollbackGen.bits.roqIdx := rollbackSelected.bits.roqIdx
  rollbackGen.bits.level := RedirectLevel.flush
  rollbackGen.bits.interrupt := DontCare
  rollbackGen.bits.pc := DontCare
  rollbackGen.bits.target := rollbackSelected.bits.cf.pc
  rollbackGen.bits.brTag := rollbackSelected.bits.brTag

  rollbackReg := rollbackGen

  // S3: fire the rollback request
  io.rollback := rollbackReg
  io.rollback.valid := rollbackReg.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackReg.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional())

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    */
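  // An mmio load is only issued to the uncache channel once it is the next
  // instruction to commit: it sits at the LQ head, its roqIdx matches the roq
  // dequeue pointer, and the roq is committing a LOAD (and is not walking).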
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    io.commits.info(0).commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  dataModule.io.uncache.raddr := deqPtr

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.meta.id       := DontCare
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.uncache.rdata.mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  vaddrModule.io.raddr(0) := io.exceptionAddr.lsIdx.lqIdx.value
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
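  // enqPtrExt holds RenameWidth consecutive enqueue pointers (one per dispatch
  // slot). After a redirect, the entries cancelled in the previous cycle are
  // counted and subtracted so the pointers roll back over the flushed loads.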
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  val commitCount = PopCount(loadCommit)
  deqPtrExt := deqPtrExt + commitCount

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
  validCounter := Mux(lastLastCycleRedirect,
    trueValidCounter,
    validCounter + enqNumber - commitCount
  )

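  // Allow new dispatches only while at least RenameWidth entries remain free,
  // matching the "#emptyEntries > EnqWidth" rule above. Right after a redirect
  // the incrementally maintained validCounter may not yet reflect the flush, so
  // the recomputed trueValidCounter is used instead.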
  allowEnqueue := Mux(io.brqRedirect.valid,
    false.B,
    Mux(lastLastCycleRedirect,
      trueValidCounter <= (LoadQueueSize - RenameWidth).U,
      validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
    )
  )

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}