// xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LoadQueue.scala (revision 3802dba502b91d813c1e563035b876c4e6288166)
package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqLsqIO
import xiangshan.backend.fu.HasExceptionNO


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
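
// Illustrative sketch (comment only, not elaborated): LqPtr follows the
// CircularQueuePtr convention from utils, where `flag` flips on every wrap so
// age comparisons survive wraparound. Assuming LoadQueueSize = 64:
//
//   val a = LqPtr(false.B, 63.U)
//   val b = a + 1.U            // value wraps to 0, flag flips to true
//   isAfter(b, a)              // true: b is the younger pointer
//   distanceBetween(b, a)      // 1.U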

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb   -> SignExt(rdata(7, 0) , XLEN),
      LSUOpType.lh   -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw   -> Mux(fpWen, rdata, SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld   -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu  -> ZeroExt(rdata(7, 0) , XLEN),
      LSUOpType.lhu  -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu  -> ZeroExt(rdata(31, 0), XLEN),
    ))
  }

  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw   -> recode(rdata(31, 0), S),
      LSUOpType.ld   -> recode(rdata(63, 0), D)
    ))
  }
}
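
// Illustrative sketch (comment only): expected extension behavior of
// rdataHelper, assuming XLEN = 64 and hypothetical uops whose fuOpType is set
// as named. The addressed data is pre-shifted down to bit 0 by the caller
// (see rdataSel in the writeback stage below).
//
//   rdataHelper(lbUop,  0x80.U)         // 0xffff_ffff_ffff_ff80: sign-extend byte
//   rdataHelper(lbuUop, 0x80.U)         // 0x0000_0000_0000_0080: zero-extend byte
//   rdataHelper(lwUop,  "h8000_0000".U) // sign-extends bit 31 for int loads,
//                                       // passes the raw 64 bits when fpWen is set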

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
  with HasExceptionNO
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val loadDataForwarded = Vec(LoadPipelineWidth, Input(Bool()))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val roq = Flipped(new RoqLsqIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LoadQueueData(LoadQueueSize, wbNumRead = LoadPipelineWidth, wbNumWrite = LoadPipelineWidth))
  dataModule.io := DontCare
  val vaddrModule = Module(new SyncDataModuleTemplate(UInt(VAddrBits.W), LoadQueueSize, numRead = 1, numWrite = LoadPipelineWidth))
  vaddrModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to the CDB
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of the roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst

  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val deqPtrExtNext = Wire(new LqPtr)
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)

  val commitCount = RegNext(io.roq.lcommit)

  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when the number of empty entries
    * is greater than RenameWidth (i.e., EnqWidth).
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
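
  // Illustrative sketch: how enqueue offsets map requests to pointers. With
  // RenameWidth = 4 (an assumption for the example) and needAlloc = 1, 0, 1, 1,
  // the prefix PopCount gives:
  //
  //   offset(i) = PopCount(needAlloc.take(i))  ->  0, 1, 1, 2
  //
  // so the three allocating slots receive enqPtrExt(0), enqPtrExt(1) and
  // enqPtrExt(2), while the resp of the non-allocating slot is ignored by dispatch.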

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the regfile at the same time.
    * However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    * The mmio instruction will be sent to the lower memory level when it reaches ROB's head.
    * After the uncache response, it will write back through the arbiter with loadUnit.
    *   (3) For cache misses, it is marked miss and sent to dcache later.
    * After the cache refills, it will write back through the arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb.wen(i) := false.B
    val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio
        )
      }
      datavalid(loadWbIndex) := (!io.loadIn(i).bits.miss || io.loadDataForwarded(i)) && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LQDataEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.forwardData.asUInt // fwd data
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb.wen(i) := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadDataForwarded(i)
      pending(loadWbIndex) := io.loadIn(i).bits.mmio
      uop(loadWbIndex).debugInfo.issueTime := io.loadIn(i).bits.uop.debugInfo.issueTime
    }
    // vaddrModule write is delayed, as vaddrModule will not be read right after write
    vaddrModule.io.waddr(i) := RegNext(loadWbIndex)
    vaddrModule.io.wdata(i) := RegNext(io.loadIn(i).bits.vaddr)
    vaddrModule.io.wen(i) := RegNext(io.loadIn(i).fire())
  }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.valid := io.dcache.valid
  dataModule.io.refill.paddr := io.dcache.bits.addr
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    dataModule.io.refill.refillMask(i) := allocated(i) && miss(i)
    when(dataModule.io.refill.valid && dataModule.io.refill.refillMask(i) && dataModule.io.refill.matchMask(i)) {
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })
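
  // Illustrative sketch: a refill wakes an entry only if it is still waiting
  // on a miss (refillMask) and its recorded paddr hits the refilled block
  // (matchMask, assumed here to be a per-entry paddr comparison inside
  // LoadQueueData). For example:
  //
  //   entry i: allocated && miss, paddr = 0x8000_0048
  //   refill:  addr = 0x8000_0040, covering that block
  //   => refillMask(i) && matchMask(i) => datavalid(i) := true, miss(i) := false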

  // Writeback up to 2 missed load insts to CDB
  //
  // Pick 2 missed loads (data refilled), write them back to the CDB.
  // The 2 refilled loads are selected from even/odd entries, separately.

  // Stage 0
  // Generate writeback indexes

  def getEvenBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i)})).asUInt
  }
  def getOddBits(input: UInt): UInt = {
    require(input.getWidth == LoadQueueSize)
    VecInit((0 until LoadQueueSize/2).map(i => {input(2*i+1)})).asUInt
  }
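
  // Illustrative sketch: even/odd interleaving, assuming LoadQueueSize = 8 for
  // readability. Result bit i of getEvenBits is input bit 2*i, and of
  // getOddBits it is input bit 2*i+1:
  //
  //   getEvenBits("b1011_0100".U)  // bits 6,4,2,0 -> "b0110".U
  //   getOddBits ("b1011_0100".U)  // bits 7,5,3,1 -> "b1100".U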

  val loadWbSel = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W))) // index selected last cycle
  val loadWbSelV = Wire(Vec(LoadPipelineWidth, Bool())) // index selected in last cycle is valid

  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && !writebacked(i) && datavalid(i)
  })).asUInt() // use UInt instead of Vec to reduce verilog lines
  val evenDeqMask = getEvenBits(deqMask)
  val oddDeqMask = getOddBits(deqMask)
  // generate lastCycleSelect mask
  val evenSelectMask = Mux(io.ldout(0).fire(), getEvenBits(UIntToOH(loadWbSel(0))), 0.U)
  val oddSelectMask = Mux(io.ldout(1).fire(), getOddBits(UIntToOH(loadWbSel(1))), 0.U)
  // generate real select vec
  val loadEvenSelVec = getEvenBits(loadWbSelVec) & ~evenSelectMask
  val loadOddSelVec = getOddBits(loadWbSelVec) & ~oddSelectMask

  def toVec(a: UInt): Vec[Bool] = {
    VecInit(a.asBools)
  }

  val loadWbSelGen = Wire(Vec(LoadPipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelVGen = Wire(Vec(LoadPipelineWidth, Bool()))
  loadWbSelGen(0) := Cat(getFirstOne(toVec(loadEvenSelVec), evenDeqMask), 0.U(1.W))
  loadWbSelVGen(0) := loadEvenSelVec.asUInt.orR
  loadWbSelGen(1) := Cat(getFirstOne(toVec(loadOddSelVec), oddDeqMask), 1.U(1.W))
  loadWbSelVGen(1) := loadOddSelVec.asUInt.orR

  (0 until LoadPipelineWidth).map(i => {
    loadWbSel(i) := RegNext(loadWbSelGen(i))
    loadWbSelV(i) := RegNext(loadWbSelVGen(i), init = false.B)
    when(io.ldout(i).fire()){
      // Mark them as writebacked, so they will not be selected in the next cycle
      writebacked(loadWbSel(i)) := true.B
    }
  })

  // Stage 1
  // Use indexes generated in cycle 0 to read data
  // writeback data to cdb
  (0 until LoadPipelineWidth).map(i => {
    // data select
    dataModule.io.wb.raddr(i) := loadWbSelGen(i)
    val rdata = dataModule.io.wb.rdata(i).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.wb.rdata(i).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
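
    // Illustrative sketch: the low paddr bits shift the addressed byte down
    // to bit 0 before extension. With rdata = "h1122_3344_5566_7788".U and
    // raddr(2, 0) = "b011".U:
    //
    //   rdataSel = rdata(63, 24)       // = 0x11_2233_4455
    //   rdataHelper(lbUop, rdataSel)   // = SignExt(0x55, 64) = 0x55
    //                                  // (lbUop: hypothetical uop with fuOpType = LSUOpType.lb)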
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.debug.isPerfCnt := false.B
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := loadWbSelV(i)

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb roqidx %d lqidx %d pc 0x%x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        debug_mmio(loadWbSel(i))
      )
    }
  })

  /**
    * Load commits
    *
    * When a load is committed, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(commitCount > i.U){
      allocated(deqPtr+i.U) := false.B
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }
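
  // Illustrative sketch: getFirstOne is an age-ordered priority pick, assuming
  // startMask has ones strictly below the start index (as UIntToMask produces).
  // With 8 entries, deqPtr = 5, and startMask = "b0001_1111":
  //
  //   mask = "b0010_0110" -> highBits = "b0010_0000" -> returns 5 (at/after deqPtr)
  //   mask = "b0000_0110" -> highBits = 0            -> wraps around, returns 1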

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }
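
  // Illustrative sketch: getAfterMask builds an age matrix where mask(i)(j)
  // reads "candidate i is younger than candidate j"; an invalid candidate is
  // treated as younger than everyone so it can never win the oldest-first
  // selection below. With valid = (1, 1, 0) and roqIdx(0) older than roqIdx(1):
  //
  //   mask(1)(0) = true               // 1 is younger than 0
  //   mask(2)(0) = mask(2)(1) = true  // the invalid entry never wins
  //   => candidate 0 is chosen as the rollback target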

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger loads to
    * the same physical address. Those loads read stale data and must be re-executed.
    *
    * Cycle 0: Store Writeback
    *   Generate match vector for store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations, up to 6 possible redirect requests.
    *   Choose the oldest load (part 1). (4 + 2) -> (1 + 2)
    * Cycle 2: Redirect Fire
    *   Choose the oldest load (part 2). (3 -> 1)
    *   Prepare redirect request according to the detected violation.
    *   Fire redirect request (if valid)
    */

  // stage 0:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |  (paddr match)
  // stage 1:        lq l1 wb     l1 wb lq
  //                 |  |  |      |  |  |
  //                 |  |------------|  |
  //                 |        |         |
  // stage 2:        lq      l1wb       lq
  //                 |        |         |
  //                 --------------------
  //                          |
  //                      rollback req
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)
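
    // Illustrative sketch: toEnqPtrMask covers [storeLqIdx, enqPtr), i.e. the
    // loads allocated after this store. Assuming 8 entries, startIndex = 6,
    // enqPtr = 2, with differing flags (the enqueue pointer has wrapped):
    //
    //   lqIdxMask = "b0011_1111", enqMask  = "b0000_0011"
    //   xorMask   = "b0011_1100", ~xorMask = "b1100_0011"  // entries 6,7,0,1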

    // check if a load already in the lq needs to be rolled back
    dataModule.io.violation(i).paddr := io.storeIn(i).bits.paddr
    dataModule.io.violation(i).mask := io.storeIn(i).bits.mask
    val addrMaskMatch = RegNext(dataModule.io.violation(i).violationMask)
    val entryNeedCheck = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      allocated(j) && toEnqPtrMask(j) && (datavalid(j) || miss(j))
    })))
    val lqViolationVec = VecInit((0 until LoadQueueSize).map(j => {
      addrMaskMatch(j) && entryNeedCheck(j)
    }))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when load/store write back to the roq together, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load in L1 (LoadUnit S1)
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    ((lqViolation, lqViolationUop), (wbViolation, wbViolationUop), (l1Violation, l1ViolationUop))
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a, b both valid, select the older one
        a // select a
      ),
      b // select b
    )
  }
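
  // Illustrative sketch: the candidates are reduced pairwise with rollbackSel
  // via ParallelOperation (assumed here to be the balanced binary reduction
  // from utils), keeping the oldest valid request:
  //
  //   ParallelOperation(Seq(a, b, c, d), rollbackSel)
  //     == rollbackSel(rollbackSel(a, b), rollbackSel(c, d))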
  val lastCycleRedirect = RegNext(io.brqRedirect)
  val lastlastCycleRedirect = RegNext(lastCycleRedirect)

  // S2: select rollback (part1) and generate rollback request
  // rollback check
  // Wb/L1 rollback seq check is done in s2
  val rollbackWb = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  val rollbackL1 = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  val rollbackL1Wb = Wire(Vec(StorePipelineWidth*2, Valid(new MicroOp)))
  // Lq rollback seq check is done in s3 (next stage), as getting rollbackLq MicroOp is slow
  val rollbackLq = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollbackLq(i).valid := detectedRollback._1._1 && RegNext(io.storeIn(i).valid)
    rollbackLq(i).bits := detectedRollback._1._2
    rollbackWb(i).valid := detectedRollback._2._1 && RegNext(io.storeIn(i).valid)
    rollbackWb(i).bits := detectedRollback._2._2
    rollbackL1(i).valid := detectedRollback._3._1 && RegNext(io.storeIn(i).valid)
    rollbackL1(i).bits := detectedRollback._3._2
    rollbackL1Wb(2*i) := rollbackL1(i)
    rollbackL1Wb(2*i+1) := rollbackWb(i)
  }

  val rollbackL1WbSelected = ParallelOperation(rollbackL1Wb, rollbackSel)
  val rollbackL1WbVReg = RegNext(rollbackL1WbSelected.valid)
  val rollbackL1WbReg = RegEnable(rollbackL1WbSelected.bits, rollbackL1WbSelected.valid)
  val rollbackLq0VReg = RegNext(rollbackLq(0).valid)
  val rollbackLq0Reg = RegEnable(rollbackLq(0).bits, rollbackLq(0).valid)
  val rollbackLq1VReg = RegNext(rollbackLq(1).valid)
  val rollbackLq1Reg = RegEnable(rollbackLq(1).bits, rollbackLq(1).valid)

  // S3: select rollback (part2), generate rollback request, then fire rollback request
  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, here if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.

  // FIXME: this is ugly
  val rollbackValidVec = Seq(rollbackL1WbVReg, rollbackLq0VReg, rollbackLq1VReg)
  val rollbackUopVec = Seq(rollbackL1WbReg, rollbackLq0Reg, rollbackLq1Reg)

  // select uop in parallel
  val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
  val oneAfterZero = mask(1)(0)
  val rollbackUop = Mux(oneAfterZero && mask(2)(0),
    rollbackUopVec(0),
    Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

  // check if rollback request is still valid in parallel
  val rollbackValidVecChecked = Wire(Vec(3, Bool()))
  for(((v, uop), idx) <- rollbackValidVec.zip(rollbackUopVec).zipWithIndex) {
    rollbackValidVecChecked(idx) := v &&
      (!lastCycleRedirect.valid || !isAfter(uop.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
      !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional()) &&
      (!lastlastCycleRedirect.valid || !isAfter(uop.roqIdx, lastlastCycleRedirect.bits.roqIdx)) &&
      !(lastlastCycleRedirect.valid && lastlastCycleRedirect.bits.isUnconditional())
  }

  io.rollback.bits.roqIdx := rollbackUop.roqIdx
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.pc := DontCare
  io.rollback.bits.target := rollbackUop.cf.pc
  io.rollback.bits.brTag := rollbackUop.brTag

  io.rollback.valid := rollbackValidVecChecked.asUInt.orR

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from load units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalid
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  // (2) when they reach ROB's head, they can be sent to uncache channel
  val s_idle :: s_req :: s_resp :: s_wait :: Nil = Enum(4)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(io.roq.pendingld && pending(deqPtr) && allocated(deqPtr)) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(io.roq.commit) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
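
  // Illustrative timeline of one mmio load through the state machine above:
  //
  //   s_idle: load at deqPtr is pending and reaches the ROQ head -> s_req
  //   s_req:  io.uncache.req fires                               -> s_resp
  //   s_resp: io.uncache.resp fires, data is captured            -> s_wait
  //   s_wait: io.roq.commit retires the load                     -> s_idle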
  io.uncache.req.valid := uncacheState === s_req

  dataModule.io.uncache.raddr := deqPtrExtNext.value

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.data := dataModule.io.uncache.rdata.data
  io.uncache.req.bits.mask := dataModule.io.uncache.rdata.mask

  io.uncache.req.bits.meta.id       := DontCare
  io.uncache.req.bits.meta.vaddr    := DontCare
  io.uncache.req.bits.meta.paddr    := dataModule.io.uncache.rdata.paddr
  io.uncache.req.bits.meta.uop      := uop(deqPtr)
  io.uncache.req.bits.meta.mmio     := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask     := dataModule.io.uncache.rdata.mask
  io.uncache.req.bits.meta.replay   := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  // (3) response from uncache channel: mark as datavalid
  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }

  // Read vaddr for mem exception
  vaddrModule.io.raddr(0) := deqPtr + io.roq.lcommit
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(0)

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  deqPtrExtNext := deqPtrExt + commitCount
  deqPtrExt := deqPtrExtNext

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt)

  allowEnqueue := validCount + enqNumber <= (LoadQueueSize - RenameWidth).U
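
  // Illustrative sketch: allowEnqueue reserves a full rename group one cycle
  // in advance. Assuming LoadQueueSize = 64 and RenameWidth = 6, enqueue stays
  // enabled only while validCount + enqNumber <= 58, so dispatch can never
  // overrun the queue even if all RenameWidth slots allocate next cycle.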

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.debug(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}