xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/StoreQueue.scala (revision 939a787932102e17cb14773366a1dc3579827eb3)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.mem
18
19import chisel3._
20import chisel3.util._
21import difftest._
22import difftest.common.DifftestMem
23import org.chipsalliance.cde.config.Parameters
24import utility._
25import utils._
26import xiangshan._
27import xiangshan.cache._
28import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants}
29import xiangshan.backend._
30import xiangshan.backend.rob.{RobLsqIO, RobPtr}
31import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
32import xiangshan.backend.decode.isa.bitfield.{Riscv32BitInst, XSInstBitFields}
33import xiangshan.backend.fu.FuConfig._
34import xiangshan.backend.fu.FuType
35import xiangshan.ExceptionNO._
36
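// SqPtr is a circular queue pointer into the StoreQueue: `value` selects an entry and `flag`
// toggles each time the pointer wraps around, so age comparisons (isAfter / isBefore) stay
// correct across wrap-around. Illustrative example (hypothetical 4-entry queue): advancing a
// pointer with value = 3, flag = 0 by one yields value = 0, flag = 1, and isAfter still reports
// the advanced pointer as coming after the original one.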
37class SqPtr(implicit p: Parameters) extends CircularQueuePtr[SqPtr](
38  p => p(XSCoreParamsKey).StoreQueueSize
39){
40}
41
42object SqPtr {
43  def apply(f: Bool, v: UInt)(implicit p: Parameters): SqPtr = {
44    val ptr = Wire(new SqPtr)
45    ptr.flag := f
46    ptr.value := v
47    ptr
48  }
49}
50
51class SqEnqIO(implicit p: Parameters) extends MemBlockBundle {
52  val canAccept = Output(Bool())
53  val lqCanAccept = Input(Bool())
54  val needAlloc = Vec(LSQEnqWidth, Input(Bool()))
55  val req = Vec(LSQEnqWidth, Flipped(ValidIO(new DynInst)))
56  val resp = Vec(LSQEnqWidth, Output(new SqPtr))
57}
58
59class DataBufferEntry (implicit p: Parameters)  extends DCacheBundle {
60  val addr   = UInt(PAddrBits.W)
61  val vaddr  = UInt(VAddrBits.W)
62  val data   = UInt(VLEN.W)
63  val mask   = UInt((VLEN/8).W)
64  val wline = Bool()
65  val sqPtr  = new SqPtr
66  val prefetch = Bool()
67  val vecValid = Bool()
68}
69
70class StoreExceptionBuffer(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper {
71  val io = IO(new Bundle() {
72    val redirect = Flipped(ValidIO(new Redirect))
73    val storeAddrIn = Vec(StorePipelineWidth * 2 + VecStorePipelineWidth, Flipped(ValidIO(new LsPipelineBundle())))
74    val exceptionAddr = new ExceptionAddrIO
75  })
76
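  // Single-entry buffer: it tracks only the oldest store (by robIdx, then uopIdx) that has
  // raised an exception, reports its vaddr/gpaddr/vstart/vl via io.exceptionAddr, and is
  // flushed when the held uop is squashed by a redirect.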
77  val req_valid = RegInit(false.B)
78  val req = Reg(new LsPipelineBundle())
79
80  // enqueue
81  // S1:
82  val s1_req = VecInit(io.storeAddrIn.map(_.bits))
83  val s1_valid = VecInit(io.storeAddrIn.map(_.valid))
84
85  // S2: delay 1 cycle
86  val s2_req = RegNext(s1_req)
87  val s2_valid = (0 until StorePipelineWidth * 2 + VecStorePipelineWidth).map(i =>
88    RegNext(s1_valid(i)) &&
89      !s2_req(i).uop.robIdx.needFlush(RegNext(io.redirect)) &&
90      !s2_req(i).uop.robIdx.needFlush(io.redirect)
91  )
92  val s2_has_exception = s2_req.map(x => ExceptionNO.selectByFu(x.uop.exceptionVec, StaCfg).asUInt.orR)
93
94  val s2_enqueue = Wire(Vec(StorePipelineWidth * 2 + VecStorePipelineWidth, Bool()))
95  for (w <- 0 until StorePipelineWidth * 2 + VecStorePipelineWidth) {
96    s2_enqueue(w) := s2_valid(w) && s2_has_exception(w)
97  }
98
99  when (req_valid && req.uop.robIdx.needFlush(io.redirect)) {
100    req_valid := s2_enqueue.asUInt.orR
101  }.elsewhen (s2_enqueue.asUInt.orR) {
102    req_valid := req_valid || true.B
103  }
104
105  def selectOldest[T <: LsPipelineBundle](valid: Seq[Bool], bits: Seq[T]): (Seq[Bool], Seq[T]) = {
106    assert(valid.length == bits.length)
107    if (valid.length == 0 || valid.length == 1) {
108      (valid, bits)
109    } else if (valid.length == 2) {
110      val res = Seq.fill(2)(Wire(Valid(chiselTypeOf(bits(0)))))
111      for (i <- res.indices) {
112        res(i).valid := valid(i)
113        res(i).bits := bits(i)
114      }
115      val oldest = Mux(valid(0) && valid(1),
116        Mux(isAfter(bits(0).uop.robIdx, bits(1).uop.robIdx) ||
117          (isNotBefore(bits(0).uop.robIdx, bits(1).uop.robIdx) && bits(0).uop.uopIdx > bits(1).uop.uopIdx), res(1), res(0)),
118        Mux(valid(0) && !valid(1), res(0), res(1)))
119      (Seq(oldest.valid), Seq(oldest.bits))
120    } else {
121      val left = selectOldest(valid.take(valid.length / 2), bits.take(bits.length / 2))
122      val right = selectOldest(valid.takeRight(valid.length - (valid.length / 2)), bits.takeRight(bits.length - (bits.length / 2)))
123      selectOldest(left._1 ++ right._1, left._2 ++ right._2)
124    }
125  }
126
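  // selectOldest reduces the candidates pairwise (divide and conquer) until a single oldest
  // request remains; "oldest" is decided by robIdx first and, for uops of the same instruction,
  // by uopIdx.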
127  val reqSel = selectOldest(s2_enqueue, s2_req)
128
129  when (req_valid) {
130    req := Mux(
131      reqSel._1(0) && (isAfter(req.uop.robIdx, reqSel._2(0).uop.robIdx) || (isNotBefore(req.uop.robIdx, reqSel._2(0).uop.robIdx) && req.uop.uopIdx > reqSel._2(0).uop.uopIdx)),
132      reqSel._2(0),
133      req)
134  } .elsewhen (s2_enqueue.asUInt.orR) {
135    req := reqSel._2(0)
136  }
137
138  io.exceptionAddr.vaddr  := req.vaddr
139  io.exceptionAddr.gpaddr := req.gpaddr
140  io.exceptionAddr.vstart := req.uop.vpu.vstart
141  io.exceptionAddr.vl     := req.uop.vpu.vl
142}
143
144// Store Queue
145class StoreQueue(implicit p: Parameters) extends XSModule
146  with HasDCacheParameters
147  with HasCircularQueuePtrHelper
148  with HasPerfEvents
149  with HasVLSUParameters {
150  val io = IO(new Bundle() {
151    val hartId = Input(UInt(hartIdLen.W))
152    val enq = new SqEnqIO
153    val brqRedirect = Flipped(ValidIO(new Redirect))
154    val vecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
155    val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // store addr, data is not included
156    val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // store addr re-entry: mmio and exception info arrive one cycle later (from pmp/pma)
157    val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new MemExuOutput(isVector = true)))) // store data, send to sq from rs
158    val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // store mask, send to sq from rs
159    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddrAndPfFlag)) // write committed store to sbuffer
160    val sbufferVecDifftestInfo = Vec(EnsbufferWidth, Decoupled(new DynInst)) // uop info needed by the vector store difftest, sent alongside committed stores written to sbuffer
161    val uncacheOutstanding = Input(Bool())
162    val mmioStout = DecoupledIO(new MemExuOutput) // writeback uncached store
163    val vecmmioStout = DecoupledIO(new MemExuOutput(isVector = true))
164    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
165    // TODO: scommit is only for scalar store
166    val rob = Flipped(new RobLsqIO)
167    val uncache = new UncacheWordIO
168    // val refill = Flipped(Valid(new DCacheLineReq ))
169    val exceptionAddr = new ExceptionAddrIO
170    val sqEmpty = Output(Bool())
171    val stAddrReadySqPtr = Output(new SqPtr)
172    val stAddrReadyVec = Output(Vec(StoreQueueSize, Bool()))
173    val stDataReadySqPtr = Output(new SqPtr)
174    val stDataReadyVec = Output(Vec(StoreQueueSize, Bool()))
175    val stIssuePtr = Output(new SqPtr)
176    val sqDeqPtr = Output(new SqPtr)
177    val sqFull = Output(Bool())
178    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize + 1).W))
179    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
180    val force_write = Output(Bool())
181  })
182
183  println("StoreQueue: size:" + StoreQueueSize)
184
185  // data modules
186  val uop = Reg(Vec(StoreQueueSize, new DynInst))
187  // val data = Reg(Vec(StoreQueueSize, new LsqEntry))
188  val dataModule = Module(new SQDataModule(
189    numEntries = StoreQueueSize,
190    numRead = EnsbufferWidth,
191    numWrite = StorePipelineWidth,
192    numForward = LoadPipelineWidth
193  ))
194  dataModule.io := DontCare
195  val paddrModule = Module(new SQAddrModule(
196    dataWidth = PAddrBits,
197    numEntries = StoreQueueSize,
198    numRead = EnsbufferWidth,
199    numWrite = StorePipelineWidth,
200    numForward = LoadPipelineWidth
201  ))
202  paddrModule.io := DontCare
203  val vaddrModule = Module(new SQAddrModule(
204    dataWidth = VAddrBits,
205    numEntries = StoreQueueSize,
206    numRead = EnsbufferWidth, // sbuffer; badvaddr will be sent from exceptionBuffer
207    numWrite = StorePipelineWidth,
208    numForward = LoadPipelineWidth
209  ))
210  vaddrModule.io := DontCare
211  val dataBuffer = Module(new DatamoduleResultBuffer(new DataBufferEntry))
212  val difftestBuffer = if (env.EnableDifftest) Some(Module(new DatamoduleResultBuffer(new DynInst))) else None
213  val exceptionBuffer = Module(new StoreExceptionBuffer)
214  exceptionBuffer.io.redirect := io.brqRedirect
215  exceptionBuffer.io.exceptionAddr.isStore := DontCare
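  // storeAddrIn port layout of the exception buffer:
  //   [0, StorePipelineWidth)                                              : scalar store_s1 address writeback
  //   [StorePipelineWidth, StorePipelineWidth * 2)                         : store_s2 re-entry (access fault from pmp/pma)
  //   [StorePipelineWidth * 2, StorePipelineWidth * 2 + VecStorePipelineWidth) : vector store feedback carrying exceptions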
216  // vlsu exception!
217  for (i <- 0 until VecStorePipelineWidth) {
218    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).valid               := io.vecFeedback(i).valid && io.vecFeedback(i).bits.feedback(VecFeedbacks.FLUSH) // have exception
219    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits                := DontCare
220    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.vaddr          := io.vecFeedback(i).bits.vaddr
221    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.uopIdx     := io.vecFeedback(i).bits.uopidx
222    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.robIdx     := io.vecFeedback(i).bits.robidx
223    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.vpu.vstart := io.vecFeedback(i).bits.vstart
224    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.vpu.vl     := io.vecFeedback(i).bits.vl
225    exceptionBuffer.io.storeAddrIn(StorePipelineWidth * 2 + i).bits.uop.exceptionVec     := io.vecFeedback(i).bits.exceptionVec
226  }
227
228
229  val debug_paddr = Reg(Vec(StoreQueueSize, UInt((PAddrBits).W)))
230  val debug_vaddr = Reg(Vec(StoreQueueSize, UInt((VAddrBits).W)))
231  val debug_data = Reg(Vec(StoreQueueSize, UInt((XLEN).W)))
232
233  // state & misc
234  val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated
235  val addrvalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio addr is valid
236  val datavalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio data is valid
237  val allvalid  = VecInit((0 until StoreQueueSize).map(i => addrvalid(i) && datavalid(i))) // non-mmio data & addr is valid
238  val committed = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // inst has been committed by rob
239  val pending = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the head of the ROB
240  val mmio = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // mmio: inst is an mmio inst
241  val atomic = RegInit(VecInit(List.fill(StoreQueueSize)(false.B)))
242  val prefetch = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // need prefetch when committing this store to sbuffer?
243  val isVec = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store instruction
244  //val vec_lastuop = Reg(Vec(StoreQueueSize, Bool())) // last uop of vector store instruction
245  val vecMbCommit = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store committed from merge buffer to rob
246  val vecDataValid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // vector store need write to sbuffer
247  val hasException = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // store has exception, should deq but not write sbuffer
248  val waitStoreS2 = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // wait for mmio and exception result until store_s2
249  // val vec_robCommit = Reg(Vec(StoreQueueSize, Bool())) // vector store committed by rob
250  // val vec_secondInv = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // Vector unit-stride, second entry is invalid
251
252  // ptr
253  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new SqPtr))))
254  val rdataPtrExt = RegInit(VecInit((0 until EnsbufferWidth).map(_.U.asTypeOf(new SqPtr))))
255  val deqPtrExt = RegInit(VecInit((0 until EnsbufferWidth).map(_.U.asTypeOf(new SqPtr))))
256  val cmtPtrExt = RegInit(VecInit((0 until CommitWidth).map(_.U.asTypeOf(new SqPtr))))
257  val addrReadyPtrExt = RegInit(0.U.asTypeOf(new SqPtr))
258  val dataReadyPtrExt = RegInit(0.U.asTypeOf(new SqPtr))
259
260  val enqPtr = enqPtrExt(0).value
261  val deqPtr = deqPtrExt(0).value
262  val cmtPtr = cmtPtrExt(0).value
263
264  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt(0))
265  val allowEnqueue = validCount <= (StoreQueueSize - LSQStEnqWidth).U
266
267  val deqMask = UIntToMask(deqPtr, StoreQueueSize)
268  val enqMask = UIntToMask(enqPtr, StoreQueueSize)
269
270  val commitCount = WireInit(0.U(log2Ceil(CommitWidth + 1).W))
271  val scommit = RegNext(io.rob.scommit)
272
273  // store can be committed by ROB
274  io.rob.mmio := DontCare
275  io.rob.uop := DontCare
276
277  // Read dataModule
278  assert(EnsbufferWidth <= 2)
279  // rdataPtrExtNext and rdataPtrExtNext+1 entry will be read from dataModule
280  val rdataPtrExtNext = WireInit(Mux(dataBuffer.io.enq(1).fire,
281    VecInit(rdataPtrExt.map(_ + 2.U)),
282    Mux(dataBuffer.io.enq(0).fire || io.mmioStout.fire || io.vecmmioStout.fire,
283      VecInit(rdataPtrExt.map(_ + 1.U)),
284      rdataPtrExt
285    )
286  ))
287
288  // deqPtrExtNext tracks which inst is about to leave the store queue
289  //
290  // io.sbuffer(i).fire is RegNexted, as sbuffer data write takes 2 cycles.
291  // Before the data write finishes, the sbuffer is unable to provide store-to-load
292  // forward data. As a workaround, the deqPtrExt and allocated flag updates
293  // are delayed so that loads can get the right data from the store queue.
294  //
295  // Modify deqPtrExtNext and io.sqDeq with care!
296  val deqPtrExtNext = Mux(RegNext(io.sbuffer(1).fire),
297    VecInit(deqPtrExt.map(_ + 2.U)),
298    Mux((RegNext(io.sbuffer(0).fire)) || io.mmioStout.fire || io.vecmmioStout.fire,
299      VecInit(deqPtrExt.map(_ + 1.U)),
300      deqPtrExt
301    )
302  )
303  io.sqDeq := RegNext(Mux(RegNext(io.sbuffer(1).fire), 2.U,
304    Mux((RegNext(io.sbuffer(0).fire)) || io.mmioStout.fire || io.vecmmioStout.fire, 1.U, 0.U)
305  ))
306  assert(!RegNext(RegNext(io.sbuffer(0).fire) && (io.mmioStout.fire || io.vecmmioStout.fire)))
307
308  for (i <- 0 until EnsbufferWidth) {
309    dataModule.io.raddr(i) := rdataPtrExtNext(i).value
310    paddrModule.io.raddr(i) := rdataPtrExtNext(i).value
311    vaddrModule.io.raddr(i) := rdataPtrExtNext(i).value
312  }
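  // The read address is rdataPtrExtNext (the pointer's next value), so the read result for the
  // new pointer position is available one cycle later, giving the data/addr modules a full
  // cycle for the read before the dataBuffer enqueue below.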
313
314  /**
315    * Enqueue at dispatch
316    *
317    * Currently, StoreQueue only allows enqueue when #emptyEntries > EnqWidth
318    */
319  io.enq.canAccept := allowEnqueue
320  val canEnqueue = io.enq.req.map(_.valid)
321  val enqCancel = io.enq.req.map(_.bits.robIdx.needFlush(io.brqRedirect))
322  val vStoreFlow = io.enq.req.map(_.bits.numLsElem)
323  val validVStoreFlow = vStoreFlow.zipWithIndex.map{case (vLoadFlowNumItem, index) => Mux(!RegNext(io.brqRedirect.valid) && io.enq.canAccept && io.enq.lqCanAccept && canEnqueue(index), vLoadFlowNumItem, 0.U)}
324  val validVStoreOffset = vStoreFlow.zip(io.enq.needAlloc).map{case (flow, needAllocItem) => Mux(needAllocItem, flow, 0.U)}
325  val validVStoreOffsetRShift = 0.U +: validVStoreOffset.take(vStoreFlow.length - 1)
326
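  // A vector store may occupy several consecutive SQ entries (numLsElem flows per dispatch port).
  // The prefix sum of validVStoreOffsetRShift gives each port's starting offset from enqPtrExt(0),
  // so the ports initialize disjoint, consecutive slices of entries in the loop below.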
327  for (i <- 0 until io.enq.req.length) {
328    val sqIdx = enqPtrExt(0) + validVStoreOffsetRShift.take(i + 1).reduce(_ + _)
329    val index = io.enq.req(i).bits.sqIdx
330    val enqInstr = io.enq.req(i).bits.instr.asTypeOf(new XSInstBitFields)
331    when (canEnqueue(i) && !enqCancel(i)) {
332      for (j <- 0 until VecMemDispatchMaxNumber) {
333        when (j.U < validVStoreOffset(i)) {
334          uop((index + j.U).value) := io.enq.req(i).bits
335          // NOTE: the index will be used when replay
336          uop((index + j.U).value).sqIdx := sqIdx + j.U
337          allocated((index + j.U).value) := true.B
338          datavalid((index + j.U).value) := false.B
339          addrvalid((index + j.U).value) := false.B
340          committed((index + j.U).value) := false.B
341          pending((index + j.U).value) := false.B
342          prefetch((index + j.U).value) := false.B
343          mmio((index + j.U).value) := false.B
344          isVec((index + j.U).value) := enqInstr.isVecStore // check vector store by the encoding of inst
345          vecMbCommit((index + j.U).value) := false.B
346          vecDataValid((index + j.U).value) := false.B
347          hasException((index + j.U).value) := false.B
348          waitStoreS2((index + j.U).value) := true.B
349          XSError(!io.enq.canAccept || !io.enq.lqCanAccept, s"must accept $i\n")
350          XSError(index.value =/= sqIdx.value, s"must be the same entry $i\n")
351        }
352      }
353    }
354    io.enq.resp(i) := sqIdx
355  }
356  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
357
358  /**
359    * Update addr/dataReadyPtr when issue from rs
360    */
361  // update issuePtr
362  val IssuePtrMoveStride = 4
363  require(IssuePtrMoveStride >= 2)
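  // Each cycle the ready pointers may advance past at most IssuePtrMoveStride consecutive
  // entries: the lookup vectors below inspect the next IssuePtrMoveStride entries, and a
  // PriorityEncoder on the first not-ready entry determines how far the pointer moves.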
364
365  val addrReadyLookupVec = (0 until IssuePtrMoveStride).map(addrReadyPtrExt + _.U)
366  val addrReadyLookup = addrReadyLookupVec.map(ptr => allocated(ptr.value) &&
367   (mmio(ptr.value) || addrvalid(ptr.value) || vecMbCommit(ptr.value))
368    && ptr =/= enqPtrExt(0))
369  val nextAddrReadyPtr = addrReadyPtrExt + PriorityEncoder(VecInit(addrReadyLookup.map(!_) :+ true.B))
370  addrReadyPtrExt := nextAddrReadyPtr
371
372  (0 until StoreQueueSize).map(i => {
373    io.stAddrReadyVec(i) := RegNext(allocated(i) && (mmio(i) || addrvalid(i) || (isVec(i) && vecMbCommit(i))))
374  })
375
376  when (io.brqRedirect.valid) {
377    addrReadyPtrExt := Mux(
378      isAfter(cmtPtrExt(0), deqPtrExt(0)),
379      cmtPtrExt(0),
380      deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr
381    )
382  }
383
384  io.stAddrReadySqPtr := addrReadyPtrExt
385
386  // update
387  val dataReadyLookupVec = (0 until IssuePtrMoveStride).map(dataReadyPtrExt + _.U)
388  val dataReadyLookup = dataReadyLookupVec.map(ptr => allocated(ptr.value) &&
389   (mmio(ptr.value) || datavalid(ptr.value) || vecMbCommit(ptr.value))
390    && ptr =/= enqPtrExt(0))
391  val nextDataReadyPtr = dataReadyPtrExt + PriorityEncoder(VecInit(dataReadyLookup.map(!_) :+ true.B))
392  dataReadyPtrExt := nextDataReadyPtr
393
394  (0 until StoreQueueSize).map(i => {
395    io.stDataReadyVec(i) := RegNext(allocated(i) && (mmio(i) || datavalid(i) || (isVec(i) && vecMbCommit(i))))
396  })
397
398  when (io.brqRedirect.valid) {
399    dataReadyPtrExt := Mux(
400      isAfter(cmtPtrExt(0), deqPtrExt(0)),
401      cmtPtrExt(0),
402      deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr
403    )
404  }
405
406  io.stDataReadySqPtr := dataReadyPtrExt
407  io.stIssuePtr := enqPtrExt(0)
408  io.sqDeqPtr := deqPtrExt(0)
409
410  /**
411    * Writeback store from store units
412    *
413    * Most store instructions write back to the regfile in the previous cycle.
414    * However,
415    *   (1) For an mmio instruction with exceptions, we need to mark it as addrvalid
416    * (in this way it will trigger an exception when it reaches ROB's head)
417    * instead of pending, to avoid sending it to the lower level.
418    *   (2) For an mmio instruction without exceptions, we mark it as pending.
419    * When the instruction reaches ROB's head, StoreQueue sends it to uncache channel.
420    * Upon receiving the response, StoreQueue writes back the instruction
421    * through arbiter with store units. It will later commit as normal.
422    */
423
424  // Write addr to sq
425  for (i <- 0 until StorePipelineWidth) {
426    paddrModule.io.wen(i) := false.B
427    vaddrModule.io.wen(i) := false.B
428    dataModule.io.mask.wen(i) := false.B
429    val stWbIndex = io.storeAddrIn(i).bits.uop.sqIdx.value
430    exceptionBuffer.io.storeAddrIn(i).valid := io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss && !io.storeAddrIn(i).bits.isvec
431    exceptionBuffer.io.storeAddrIn(i).bits := io.storeAddrIn(i).bits
432    // will re-enter exceptionbuffer at store_s2
433    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).valid := false.B
434    exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits := 0.U.asTypeOf(new LsPipelineBundle)
435
436    when (io.storeAddrIn(i).fire) {
437      val addr_valid = !io.storeAddrIn(i).bits.miss
438      addrvalid(stWbIndex) := addr_valid //!io.storeAddrIn(i).bits.mmio
439      // pending(stWbIndex) := io.storeAddrIn(i).bits.mmio
440
441      paddrModule.io.waddr(i) := stWbIndex
442      paddrModule.io.wdata(i) := io.storeAddrIn(i).bits.paddr
443      paddrModule.io.wmask(i) := io.storeAddrIn(i).bits.mask
444      paddrModule.io.wlineflag(i) := io.storeAddrIn(i).bits.wlineflag
445      paddrModule.io.wen(i) := true.B
446
447      vaddrModule.io.waddr(i) := stWbIndex
448      vaddrModule.io.wdata(i) := io.storeAddrIn(i).bits.vaddr
449      vaddrModule.io.wmask(i) := io.storeAddrIn(i).bits.mask
450      vaddrModule.io.wlineflag(i) := io.storeAddrIn(i).bits.wlineflag
451      vaddrModule.io.wen(i) := true.B
452
453      debug_paddr(paddrModule.io.waddr(i)) := paddrModule.io.wdata(i)
454
455      // mmio(stWbIndex) := io.storeAddrIn(i).bits.mmio
456
457      uop(stWbIndex) := io.storeAddrIn(i).bits.uop
458      uop(stWbIndex).debugInfo := io.storeAddrIn(i).bits.uop.debugInfo
459
460      vecDataValid(stWbIndex) := io.storeAddrIn(i).bits.isvec
461
462      XSInfo("store addr write to sq idx %d pc 0x%x miss:%d vaddr %x paddr %x mmio %x isvec %x\n",
463        io.storeAddrIn(i).bits.uop.sqIdx.value,
464        io.storeAddrIn(i).bits.uop.pc,
465        io.storeAddrIn(i).bits.miss,
466        io.storeAddrIn(i).bits.vaddr,
467        io.storeAddrIn(i).bits.paddr,
468        io.storeAddrIn(i).bits.mmio,
469        io.storeAddrIn(i).bits.isvec
470      )
471    }
472
473    // re-replenish mmio info, since the pmp/pma check returns mmio one cycle later
474    val storeAddrInFireReg = RegNext(io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss)
475    val stWbIndexReg = RegNext(stWbIndex)
476    when (storeAddrInFireReg) {
477      pending(stWbIndexReg) := io.storeAddrInRe(i).mmio
478      mmio(stWbIndexReg) := io.storeAddrInRe(i).mmio
479      atomic(stWbIndexReg) := io.storeAddrInRe(i).atomic
480      hasException(stWbIndexReg) := ExceptionNO.selectByFu(uop(stWbIndexReg).exceptionVec, StaCfg).asUInt.orR || io.storeAddrInRe(i).af
481      waitStoreS2(stWbIndexReg) := false.B
482    }
483    // dcache miss info (one cycle later than storeIn)
484    // if dcache reports a miss in the sta pipeline, this store will trigger a prefetch when committing to sbuffer (if EnableAtCommitMissTrigger)
485    when (storeAddrInFireReg) {
486      prefetch(stWbIndexReg) := io.storeAddrInRe(i).miss
487    }
488    // enter exceptionbuffer again
489    when (storeAddrInFireReg) {
490      exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).valid := io.storeAddrInRe(i).af
491      exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits := RegEnable(io.storeAddrIn(i).bits, io.storeAddrIn(i).fire && !io.storeAddrIn(i).bits.miss)
492      exceptionBuffer.io.storeAddrIn(StorePipelineWidth + i).bits.uop.exceptionVec(storeAccessFault) := io.storeAddrInRe(i).af
493    }
494
495    when(vaddrModule.io.wen(i)){
496      debug_vaddr(vaddrModule.io.waddr(i)) := vaddrModule.io.wdata(i)
497    }
498  }
499
500  // Write data to sq
501  // Now store data pipeline is actually 2 stages
502  for (i <- 0 until StorePipelineWidth) {
503    dataModule.io.data.wen(i) := false.B
504    val stWbIndex = io.storeDataIn(i).bits.uop.sqIdx.value
505    val isVec     = FuType.isVStore(io.storeDataIn(i).bits.uop.fuType)
506    // sq data write takes 2 cycles:
507    // sq data write s0
508    when (io.storeDataIn(i).fire) {
509      // send data write req to data module
510      dataModule.io.data.waddr(i) := stWbIndex
511      dataModule.io.data.wdata(i) := Mux(io.storeDataIn(i).bits.uop.fuOpType === LSUOpType.cbo_zero,
512        0.U,
513        Mux(isVec,
514          io.storeDataIn(i).bits.data,
515          genVWdata(io.storeDataIn(i).bits.data, io.storeDataIn(i).bits.uop.fuOpType(2,0)))
516      )
517      dataModule.io.data.wen(i) := true.B
518
519      debug_data(dataModule.io.data.waddr(i)) := dataModule.io.data.wdata(i)
520
521      XSInfo("store data write to sq idx %d pc 0x%x data %x -> %x\n",
522        io.storeDataIn(i).bits.uop.sqIdx.value,
523        io.storeDataIn(i).bits.uop.pc,
524        io.storeDataIn(i).bits.data,
525        dataModule.io.data.wdata(i)
526      )
527    }
528    // sq data write s1
529    when (
530      RegNext(io.storeDataIn(i).fire)
531      // && !RegNext(io.storeDataIn(i).bits.uop).robIdx.needFlush(io.brqRedirect)
532    ) {
533      datavalid(RegNext(stWbIndex)) := true.B
534    }
535  }
536
537  // Write mask to sq
538  for (i <- 0 until StorePipelineWidth) {
539    // sq mask write s0
540    when (io.storeMaskIn(i).fire) {
541      // send data write req to data module
542      dataModule.io.mask.waddr(i) := io.storeMaskIn(i).bits.sqIdx.value
543      dataModule.io.mask.wdata(i) := io.storeMaskIn(i).bits.mask
544      dataModule.io.mask.wen(i) := true.B
545    }
546  }
547
548  /**
549    * load forward query
550    *
551    * Check store queue for instructions that are older than the load.
552    * The response will be valid at the next cycle after req.
553    */
554  // check over all sq entries and forward data from the first matched store
555  for (i <- 0 until LoadPipelineWidth) {
556    // Compare the queue tail (deqPtr) and forward.sqIdx; we have two cases:
557    // (1) if they have the same flag, we need to check range(tail, sqIdx)
558    // (2) if they have different flags, we need to check range(tail, StoreQueueSize) and range(0, sqIdx)
559    // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, StoreQueueSize))
560    // Forward2: Mux(same_flag, 0.U,                   range(0, sqIdx)    )
561    // i.e. forward1 is the target entries with the same flag bits and forward2 otherwise
562    val differentFlag = deqPtrExt(0).flag =/= io.forward(i).sqIdx.flag
563    val forwardMask = io.forward(i).sqIdxMask
564    // all addrvalid terms need to be checked
565    // Real Valid: all scalar stores, and vector stores with (!inactive && !secondInvalid)
566    val addrRealValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && allocated(j))))
567    // vector store will consider all inactive || secondInvalid flows as valid
568    val addrValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && allocated(j))))
569    val dataValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => datavalid(j))))
570    val allValidVec  = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && datavalid(j) && allocated(j))))
571
572    val lfstEnable = Constantin.createRecord("LFSTEnable", LFSTEnable)
573    val storeSetHitVec = Mux(lfstEnable,
574      WireInit(VecInit((0 until StoreQueueSize).map(j => io.forward(i).uop.loadWaitBit && uop(j).robIdx === io.forward(i).uop.waitForRobIdx))),
575      WireInit(VecInit((0 until StoreQueueSize).map(j => uop(j).storeSetHit && uop(j).ssid === io.forward(i).uop.ssid)))
576    )
577
578    val forwardMask1 = Mux(differentFlag, ~deqMask, deqMask ^ forwardMask)
579    val forwardMask2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W))
580    val canForward1 = forwardMask1 & allValidVec.asUInt
581    val canForward2 = forwardMask2 & allValidVec.asUInt
582    val needForward = Mux(differentFlag, ~deqMask | forwardMask, deqMask ^ forwardMask)
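    // Illustrative example (hypothetical 8-entry SQ): with deqPtr = 5 and sqIdx.value = 2 on
    // different flags, forwardMask1 = ~UIntToMask(5, 8) selects entries 5..7 and
    // forwardMask2 = UIntToMask(2, 8) selects entries 0..1, i.e. exactly the stores older than the load.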
583
584    XSDebug(p"$i f1 ${Binary(canForward1)} f2 ${Binary(canForward2)} " +
585      p"sqIdx ${io.forward(i).sqIdx} pa ${Hexadecimal(io.forward(i).paddr)}\n"
586    )
587
588    // do real fwd query (cam lookup in load_s1)
589    dataModule.io.needForward(i)(0) := canForward1 & vaddrModule.io.forwardMmask(i).asUInt
590    dataModule.io.needForward(i)(1) := canForward2 & vaddrModule.io.forwardMmask(i).asUInt
591
592    vaddrModule.io.forwardMdata(i) := io.forward(i).vaddr
593    vaddrModule.io.forwardDataMask(i) := io.forward(i).mask
594    paddrModule.io.forwardMdata(i) := io.forward(i).paddr
595    paddrModule.io.forwardDataMask(i) := io.forward(i).mask
596
597    // vaddr cam result does not equal to paddr cam result
598    // replay needed
599    // val vpmaskNotEqual = ((paddrModule.io.forwardMmask(i).asUInt ^ vaddrModule.io.forwardMmask(i).asUInt) & needForward) =/= 0.U
600    // val vaddrMatchFailed = vpmaskNotEqual && io.forward(i).valid
601    val vpmaskNotEqual = (
602      (RegNext(paddrModule.io.forwardMmask(i).asUInt) ^ RegNext(vaddrModule.io.forwardMmask(i).asUInt)) &
603      RegNext(needForward) &
604      RegNext(addrRealValidVec.asUInt)
605    ) =/= 0.U
606    val vaddrMatchFailed = vpmaskNotEqual && RegNext(io.forward(i).valid)
607    when (vaddrMatchFailed) {
608      XSInfo("vaddrMatchFailed: pc %x pmask %x vmask %x\n",
609        RegNext(io.forward(i).uop.pc),
610        RegNext(needForward & paddrModule.io.forwardMmask(i).asUInt),
611        RegNext(needForward & vaddrModule.io.forwardMmask(i).asUInt)
612      );
613    }
614    XSPerfAccumulate("vaddr_match_failed", vpmaskNotEqual)
615    XSPerfAccumulate("vaddr_match_really_failed", vaddrMatchFailed)
616
617    // Fast forward mask will be generated immediately (load_s1)
618    io.forward(i).forwardMaskFast := dataModule.io.forwardMaskFast(i)
619
620    // Forward result will be generated 1 cycle later (load_s2)
621    io.forward(i).forwardMask := dataModule.io.forwardMask(i)
622    io.forward(i).forwardData := dataModule.io.forwardData(i)
623    // If addr match, data not ready, mark it as dataInvalid
624    // load_s1: generate dataInvalid in load_s1 to set fastUop
625    val dataInvalidMask1 = (addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & forwardMask1.asUInt)
626    val dataInvalidMask2 = (addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & forwardMask2.asUInt)
627    val dataInvalidMask = dataInvalidMask1 | dataInvalidMask2
628    io.forward(i).dataInvalidFast := dataInvalidMask.orR
629
630    // make chisel happy
631    val dataInvalidMask1Reg = Wire(UInt(StoreQueueSize.W))
632    dataInvalidMask1Reg := RegNext(dataInvalidMask1)
633    // make chisel happy
634    val dataInvalidMask2Reg = Wire(UInt(StoreQueueSize.W))
635    dataInvalidMask2Reg := RegNext(dataInvalidMask2)
636    val dataInvalidMaskReg = dataInvalidMask1Reg | dataInvalidMask2Reg
637
638    // If SSID match, address not ready, mark it as addrInvalid
639    // load_s2: generate addrInvalid
640    val addrInvalidMask1 = (~addrValidVec.asUInt & storeSetHitVec.asUInt & forwardMask1.asUInt)
641    val addrInvalidMask2 = (~addrValidVec.asUInt & storeSetHitVec.asUInt & forwardMask2.asUInt)
642    // make chisel happy
643    val addrInvalidMask1Reg = Wire(UInt(StoreQueueSize.W))
644    addrInvalidMask1Reg := RegNext(addrInvalidMask1)
645    // make chisel happy
646    val addrInvalidMask2Reg = Wire(UInt(StoreQueueSize.W))
647    addrInvalidMask2Reg := RegNext(addrInvalidMask2)
648    val addrInvalidMaskReg = addrInvalidMask1Reg | addrInvalidMask2Reg
649
650    // load_s2
651    io.forward(i).dataInvalid := RegNext(io.forward(i).dataInvalidFast)
652    // check if vaddr forward mismatched
653    io.forward(i).matchInvalid := vaddrMatchFailed
654
655    // data invalid sq index
656    // check whether false fail
657    // check flag
658    val s2_differentFlag = RegNext(differentFlag)
659    val s2_enqPtrExt = RegNext(enqPtrExt(0))
660    val s2_deqPtrExt = RegNext(deqPtrExt(0))
661
662    // addr invalid sq index
663    // make chisel happy
664    val addrInvalidMaskRegWire = Wire(UInt(StoreQueueSize.W))
665    addrInvalidMaskRegWire := addrInvalidMaskReg
666    val addrInvalidFlag = addrInvalidMaskRegWire.orR
667    val hasInvalidAddr = (~addrValidVec.asUInt & needForward).orR
668
669    val addrInvalidSqIdx1 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(addrInvalidMask1Reg))))
670    val addrInvalidSqIdx2 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(addrInvalidMask2Reg))))
671    val addrInvalidSqIdx = Mux(addrInvalidMask2Reg.orR, addrInvalidSqIdx2, addrInvalidSqIdx1)
672
673    // store-set content management
674    //                +-----------------------+
675    //                | Search a SSID for the |
676    //                |    load operation     |
677    //                +-----------------------+
678    //                           |
679    //                           V
680    //                 +-------------------+
681    //                 | load wait strict? |
682    //                 +-------------------+
683    //                           |
684    //                           V
685    //               +----------------------+
686    //            Set|                      |Clean
687    //               V                      V
688    //  +------------------------+   +------------------------------+
689    //  | Waiting for all older  |   | Wait until the corresponding |
690    //  |   stores operations    |   | older store operations       |
691    //  +------------------------+   +------------------------------+
692
693
694
695    when (RegNext(io.forward(i).uop.loadWaitStrict)) {
696      io.forward(i).addrInvalidSqIdx := RegNext(io.forward(i).uop.sqIdx - 1.U)
697    } .elsewhen (addrInvalidFlag) {
698      io.forward(i).addrInvalidSqIdx.flag := Mux(!s2_differentFlag || addrInvalidSqIdx >= s2_deqPtrExt.value, s2_deqPtrExt.flag, s2_enqPtrExt.flag)
699      io.forward(i).addrInvalidSqIdx.value := addrInvalidSqIdx
700    } .otherwise {
701      // the store inst may have already been written to the sbuffer.
702      io.forward(i).addrInvalidSqIdx := RegNext(io.forward(i).uop.sqIdx)
703    }
704    io.forward(i).addrInvalid := Mux(RegNext(io.forward(i).uop.loadWaitStrict), RegNext(hasInvalidAddr), addrInvalidFlag)
705
706    // data invalid sq index
707    // make chisel happy
708    val dataInvalidMaskRegWire = Wire(UInt(StoreQueueSize.W))
709    dataInvalidMaskRegWire := dataInvalidMaskReg
710    val dataInvalidFlag = dataInvalidMaskRegWire.orR
711
712    val dataInvalidSqIdx1 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(dataInvalidMask1Reg))))
713    val dataInvalidSqIdx2 = OHToUInt(Reverse(PriorityEncoderOH(Reverse(dataInvalidMask2Reg))))
714    val dataInvalidSqIdx = Mux(dataInvalidMask2Reg.orR, dataInvalidSqIdx2, dataInvalidSqIdx1)
715
716    when (dataInvalidFlag) {
717      io.forward(i).dataInvalidSqIdx.flag := Mux(!s2_differentFlag || dataInvalidSqIdx >= s2_deqPtrExt.value, s2_deqPtrExt.flag, s2_enqPtrExt.flag)
718      io.forward(i).dataInvalidSqIdx.value := dataInvalidSqIdx
719    } .otherwise {
720      // the store inst may have already been written to the sbuffer.
721      io.forward(i).dataInvalidSqIdx := RegNext(io.forward(i).uop.sqIdx)
722    }
723  }
724
725  /**
726    * Memory mapped IO / other uncached operations
727    *
728    * States:
729    * (1) writeback from store units: mark as pending
730    * (2) when they reach ROB's head, they can be sent to uncache channel
731    * (3) response from uncache channel: mark as datavalid
732    * (4) writeback to ROB (and other units): mark as writebacked
733    * (5) ROB commits the instruction: same as normal instructions
734    */
735  //(2) when they reach ROB's head, they can be sent to uncache channel
736  // TODO: CAN NOT deal with vector mmio now!
737  val s_idle :: s_req :: s_resp :: s_wb :: s_wait :: Nil = Enum(5)
738  val uncacheState = RegInit(s_idle)
739  val uncacheUop = Reg(new DynInst)
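  // Uncache (MMIO) state machine:
  //   s_idle : wait until the store at deqPtr is a pending MMIO store at the ROB's pending pointer
  //   s_req  : send the write request on io.uncache.req
  //   s_resp : wait for io.uncache.resp (skipped when uncacheOutstanding is set)
  //   s_wb   : write the store back through io.mmioStout / io.vecmmioStout
  //   s_wait : wait for the ROB to commit the store (scommit), then return to s_idle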
740  switch(uncacheState) {
741    is(s_idle) {
742      when(RegNext(io.rob.pendingst && uop(deqPtr).robIdx === io.rob.pendingPtr && pending(deqPtr) && allocated(deqPtr) && datavalid(deqPtr) && addrvalid(deqPtr))) {
743        uncacheState := s_req
744        uncacheUop := uop(deqPtr)
745      }
746    }
747    is(s_req) {
748      when (io.uncache.req.fire) {
749        when (io.uncacheOutstanding) {
750          uncacheState := s_wb
751        } .otherwise {
752          uncacheState := s_resp
753        }
754      }
755    }
756    is(s_resp) {
757      when(io.uncache.resp.fire) {
758        uncacheState := s_wb
759
760        when (io.uncache.resp.bits.nderr) {
761          uop(deqPtr).exceptionVec(storeAccessFault) := true.B
762        }
763      }
764    }
765    is(s_wb) {
766      when (io.mmioStout.fire || io.vecmmioStout.fire) {
767        uncacheState := s_wait
768      }
769    }
770    is(s_wait) {
771      // An MMIO store can always move cmtPtrExt, as it must be at the ROB head
772      when(scommit > 0.U) {
773        uncacheState := s_idle // ready for next mmio
774      }
775    }
776  }
777  io.uncache.req.valid := uncacheState === s_req
778
779  io.uncache.req.bits := DontCare
780  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XWR
781  io.uncache.req.bits.addr := paddrModule.io.rdata(0) // data(deqPtr) -> rdata(0)
782  io.uncache.req.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data)
783  io.uncache.req.bits.mask := shiftMaskToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).mask)
784
785  // CBO op type check can be delayed for 1 cycle,
786  // as uncache op will not start in s_idle
787  val cbo_mmio_addr = paddrModule.io.rdata(0) >> 2 << 2 // clear lowest 2 bits for op
788  val cbo_mmio_op = 0.U //TODO
789  val cbo_mmio_data = cbo_mmio_addr | cbo_mmio_op
790  when(RegNext(LSUOpType.isCbo(uop(deqPtr).fuOpType))){
791    io.uncache.req.bits.addr := DontCare // TODO
792    io.uncache.req.bits.data := paddrModule.io.rdata(0)
793    io.uncache.req.bits.mask := DontCare // TODO
794  }
795
796  io.uncache.req.bits.atomic := atomic(RegNext(rdataPtrExtNext(0)).value)
797
798  when(io.uncache.req.fire){
799    // mmio store should not be committed until uncache req is sent
800    pending(deqPtr) := false.B
801
802    XSDebug(
803      p"uncache req: pc ${Hexadecimal(uop(deqPtr).pc)} " +
804      p"addr ${Hexadecimal(io.uncache.req.bits.addr)} " +
805      p"data ${Hexadecimal(io.uncache.req.bits.data)} " +
806      p"op ${Hexadecimal(io.uncache.req.bits.cmd)} " +
807      p"mask ${Hexadecimal(io.uncache.req.bits.mask)}\n"
808    )
809  }
810
811  // (3) response from uncache channel: mark as datavalid
812  io.uncache.resp.ready := true.B
813
814  // (4) scalar store: writeback to ROB (and other units): mark as writebacked
815  io.mmioStout.valid := uncacheState === s_wb && !isVec(deqPtr)
816  io.mmioStout.bits.uop := uncacheUop
817  io.mmioStout.bits.uop.sqIdx := deqPtrExt(0)
818  io.mmioStout.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data) // dataModule.io.rdata.read(deqPtr)
819  io.mmioStout.bits.debug.isMMIO := true.B
820  io.mmioStout.bits.debug.paddr := DontCare
821  io.mmioStout.bits.debug.isPerfCnt := false.B
822  io.mmioStout.bits.debug.vaddr := DontCare
823  // Remove MMIO inst from store queue after MMIO request has been sent
824  // That inst will be tracked by the uncache state machine
825  when (io.mmioStout.fire) {
826    allocated(deqPtr) := false.B
827  }
828
829  // (4) or vector store:
830  // TODO: implement it!
831  io.vecmmioStout := DontCare
832  io.vecmmioStout.valid := uncacheState === s_wb && isVec(deqPtr)
833  io.vecmmioStout.bits.uop := uop(deqPtr)
834  io.vecmmioStout.bits.uop.sqIdx := deqPtrExt(0)
835  io.vecmmioStout.bits.data := shiftDataToLow(paddrModule.io.rdata(0), dataModule.io.rdata(0).data) // dataModule.io.rdata.read(deqPtr)
836  io.vecmmioStout.bits.debug.isMMIO := true.B
837  io.vecmmioStout.bits.debug.paddr := DontCare
838  io.vecmmioStout.bits.debug.isPerfCnt := false.B
839  io.vecmmioStout.bits.debug.vaddr := DontCare
840  // Remove MMIO inst from store queue after MMIO request has been sent
841  // That inst will be tracked by the uncache state machine
842  when (io.vecmmioStout.fire) {
843    allocated(deqPtr) := false.B
844  }
845
846  /**
847    * ROB commits store instructions (mark them as committed)
848    *
849    * (1) When store commits, mark it as committed.
850    * (2) They will not be cancelled and can be sent to lower level.
851    */
852  XSError(uncacheState =/= s_idle && uncacheState =/= s_wait && commitCount > 0.U,
853   "should not commit instruction when MMIO has not been finished\n")
854
855  val commitVec = WireInit(VecInit(Seq.fill(CommitWidth)(false.B)))
856  val needCancel = Wire(Vec(StoreQueueSize, Bool())) // Will be assigned later
857  dontTouch(commitVec)
858  // TODO: Deal with vector store mmio
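  // Commit up to CommitWidth stores per cycle, strictly in order: commitVec(i) is chained on
  // commitVec(i - 1), so entry i can only commit in a cycle where the older entries in this
  // window commit as well. Commit is blocked while the uncache FSM is busy with an MMIO store
  // (it may only proceed in s_idle, or in s_wait once scommit has arrived).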
859  for (i <- 0 until CommitWidth) {
860    when (allocated(cmtPtrExt(i).value) && isNotAfter(uop(cmtPtrExt(i).value).robIdx, RegNext(io.rob.pendingPtr)) && !needCancel(cmtPtrExt(i).value) && (!waitStoreS2(cmtPtrExt(i).value) || isVec(cmtPtrExt(i).value))) {
861      if (i == 0){
862        // TODO: fixme for vector mmio
863        when ((uncacheState === s_idle) || (uncacheState === s_wait && scommit > 0.U)){
864          when ((isVec(cmtPtrExt(i).value) && vecMbCommit(cmtPtrExt(i).value)) || !isVec(cmtPtrExt(i).value)) {
865            committed(cmtPtrExt(0).value) := true.B
866            commitVec(0) := true.B
867          }
868        }
869      } else {
870        when ((isVec(cmtPtrExt(i).value) && vecMbCommit(cmtPtrExt(i).value)) || !isVec(cmtPtrExt(i).value)) {
871          committed(cmtPtrExt(i).value) := commitVec(i - 1) || committed(cmtPtrExt(i).value)
872          commitVec(i) := commitVec(i - 1)
873        }
874      }
875    }
876  }
877
878  commitCount := PopCount(commitVec)
879  cmtPtrExt := cmtPtrExt.map(_ + commitCount)
880
881  // committed stores will not be cancelled and can be sent to lower level.
882  // remove retired insts from sq, add retired store to sbuffer
883
884  // Read data from data module
885  // As the store queue grows larger, the time needed to read data from the data
886  // module keeps increasing. We therefore give the data read a whole cycle.
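  // mmioStall keeps MMIO stores out of the sbuffer path: a port is stalled when its own entry
  // (or, for port 1, the preceding port's entry) is MMIO, since MMIO stores are drained through
  // the uncache state machine instead.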
887  for (i <- 0 until EnsbufferWidth) {
888    val ptr = rdataPtrExt(i).value
889    val mmioStall = if(i == 0) mmio(rdataPtrExt(0).value) else (mmio(rdataPtrExt(i).value) || mmio(rdataPtrExt(i-1).value))
890    dataBuffer.io.enq(i).valid := allocated(ptr) && committed(ptr) && ((!isVec(ptr) && (allvalid(ptr) || hasException(ptr))) || vecMbCommit(ptr)) && !mmioStall
891    // Note that store data/addr should both be valid after store's commit
892    assert(!dataBuffer.io.enq(i).valid || allvalid(ptr) || (allocated(ptr) && vecMbCommit(ptr)))
893    dataBuffer.io.enq(i).bits.addr     := paddrModule.io.rdata(i)
894    dataBuffer.io.enq(i).bits.vaddr    := vaddrModule.io.rdata(i)
895    dataBuffer.io.enq(i).bits.data     := dataModule.io.rdata(i).data
896    dataBuffer.io.enq(i).bits.mask     := dataModule.io.rdata(i).mask
897    dataBuffer.io.enq(i).bits.wline    := paddrModule.io.rlineflag(i)
898    dataBuffer.io.enq(i).bits.sqPtr    := rdataPtrExt(i)
899    dataBuffer.io.enq(i).bits.prefetch := prefetch(ptr)
900    // when a scalar store has an exception, it will also not be written into the sbuffer
901    dataBuffer.io.enq(i).bits.vecValid := (!isVec(ptr) || vecDataValid(ptr)) && !hasException(ptr)
902  }
903
904  // Send data stored in the dataBuffer to the sbuffer
905  for (i <- 0 until EnsbufferWidth) {
906    io.sbuffer(i).valid := dataBuffer.io.deq(i).valid
907    dataBuffer.io.deq(i).ready := io.sbuffer(i).ready
908    // A write-line request should have an all-ones mask
909    assert(!(io.sbuffer(i).valid && io.sbuffer(i).bits.wline && io.sbuffer(i).bits.vecValid && !io.sbuffer(i).bits.mask.andR))
910    io.sbuffer(i).bits := DontCare
911    io.sbuffer(i).bits.cmd   := MemoryOpConstants.M_XWR
912    io.sbuffer(i).bits.addr  := dataBuffer.io.deq(i).bits.addr
913    io.sbuffer(i).bits.vaddr := dataBuffer.io.deq(i).bits.vaddr
914    io.sbuffer(i).bits.data  := dataBuffer.io.deq(i).bits.data
915    io.sbuffer(i).bits.mask  := dataBuffer.io.deq(i).bits.mask
916    io.sbuffer(i).bits.wline := dataBuffer.io.deq(i).bits.wline && dataBuffer.io.deq(i).bits.vecValid
917    io.sbuffer(i).bits.prefetch := dataBuffer.io.deq(i).bits.prefetch
918    io.sbuffer(i).bits.vecValid := dataBuffer.io.deq(i).bits.vecValid
919    // io.sbuffer(i).fire is RegNexted, as sbuffer data write takes 2 cycles.
920    // Before the data write finishes, the sbuffer is unable to provide store-to-load
921    // forward data. As a workaround, the deqPtrExt and allocated flag updates
922    // are delayed so that loads can get the right data from the store queue.
923    val ptr = dataBuffer.io.deq(i).bits.sqPtr.value
924    when (RegNext(io.sbuffer(i).fire)) {
925      allocated(RegEnable(ptr, io.sbuffer(i).fire)) := false.B
926      XSDebug("sbuffer "+i+" fire: ptr %d\n", ptr)
927    }
928  }
929
930
931  // Default initialization when difftest is disabled.
932  for (i <- 0 until EnsbufferWidth) {
933    io.sbufferVecDifftestInfo(i) := DontCare
934  }
935  // Consistent with the logic above;
936  // only the signals required by the vector store difftest are kept separate from the RTL code.
937  if (env.EnableDifftest) {
938    for (i <- 0 until EnsbufferWidth) {
939      val ptr = rdataPtrExt(i).value
940      val mmioStall = if(i == 0) mmio(rdataPtrExt(0).value) else (mmio(rdataPtrExt(i).value) || mmio(rdataPtrExt(i-1).value))
941      difftestBuffer.get.io.enq(i).valid := allocated(ptr) && committed(ptr) && (!isVec(ptr) || vecMbCommit(ptr)) && !mmioStall
942      difftestBuffer.get.io.enq(i).bits := uop(ptr)
943    }
944    for (i <- 0 until EnsbufferWidth) {
945      io.sbufferVecDifftestInfo(i).valid := difftestBuffer.get.io.deq(i).valid
946      difftestBuffer.get.io.deq(i).ready := io.sbufferVecDifftestInfo(i).ready
947
948      io.sbufferVecDifftestInfo(i).bits := difftestBuffer.get.io.deq(i).bits
949    }
950  }
951
952  (1 until EnsbufferWidth).foreach(i => when(io.sbuffer(i).fire) { assert(io.sbuffer(i - 1).fire) })
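  // Without a real dcache (e.g. simulation configs where dcacheParametersOpt is empty),
  // committed non-MMIO stores are written directly into the difftest golden memory model.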
953  if (coreParams.dcacheParametersOpt.isEmpty) {
954    for (i <- 0 until EnsbufferWidth) {
955      val ptr = deqPtrExt(i).value
956      val ram = DifftestMem(64L * 1024 * 1024 * 1024, 8)
957      val wen = allocated(ptr) && committed(ptr) && !mmio(ptr)
958      val waddr = ((paddrModule.io.rdata(i) - "h80000000".U) >> 3).asUInt
959      val wdata = Mux(paddrModule.io.rdata(i)(3), dataModule.io.rdata(i).data(127, 64), dataModule.io.rdata(i).data(63, 0))
960      val wmask = Mux(paddrModule.io.rdata(i)(3), dataModule.io.rdata(i).mask(15, 8), dataModule.io.rdata(i).mask(7, 0))
961      when (wen) {
962        ram.write(waddr, wdata.asTypeOf(Vec(8, UInt(8.W))), wmask.asBools)
963      }
964    }
965  }
966
967  // Read vaddr for mem exception
968  io.exceptionAddr.vaddr  := exceptionBuffer.io.exceptionAddr.vaddr
969  io.exceptionAddr.gpaddr  := exceptionBuffer.io.exceptionAddr.gpaddr
970  io.exceptionAddr.vstart := exceptionBuffer.io.exceptionAddr.vstart
971  io.exceptionAddr.vl     := exceptionBuffer.io.exceptionAddr.vl
972
973  // vector commit or replay feedback from the vector store merge buffer
974  val vecCommittmp = Wire(Vec(StoreQueueSize, Vec(VecStorePipelineWidth, Bool())))
975  val vecCommit = Wire(Vec(StoreQueueSize, Bool()))
976  for (i <- 0 until StoreQueueSize) {
977    val fbk = io.vecFeedback
978    for (j <- 0 until VecStorePipelineWidth) {
979      vecCommittmp(i)(j) := fbk(j).valid && fbk(j).bits.isCommit && uop(i).robIdx === fbk(j).bits.robidx && uop(i).uopIdx === fbk(j).bits.uopidx && allocated(i)
980    }
981    vecCommit(i) := vecCommittmp(i).reduce(_ || _)
982
983    when (vecCommit(i)) {
984      vecMbCommit(i) := true.B
985    }
986  }
987
988  // misprediction recovery / exception redirect
989  // invalidate sq entries using robIdx
990  for (i <- 0 until StoreQueueSize) {
991    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i) && !committed(i)
992    when (needCancel(i)) {
993      allocated(i) := false.B
994    }
995  }
996
997  /**
998    * update pointers
999    */
1000  val enqCancelValid = canEnqueue.zip(io.enq.req).map{case (v , x) =>
1001    v && x.bits.robIdx.needFlush(io.brqRedirect)
1002  }
1003  val enqCancelNum = enqCancelValid.zip(io.enq.req).map{case (v, req) =>
1004    Mux(v, req.bits.numLsElem, 0.U)
1005  }
1006  val lastEnqCancel = RegNext(enqCancelNum.reduce(_ + _)) // 1 cycle after redirect
1007
1008  val lastCycleCancelCount = PopCount(RegNext(needCancel)) // 1 cycle after redirect
1009  val lastCycleRedirect = RegNext(io.brqRedirect.valid) // 1 cycle after redirect
1010  val enqNumber = validVStoreFlow.reduce(_ + _)
1011
1012  val lastlastCycleRedirect = RegNext(lastCycleRedirect) // 2 cycles after redirect
1013  val redirectCancelCount = RegEnable(lastCycleCancelCount + lastEnqCancel, lastCycleRedirect) // 2 cycles after redirect
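  // redirectCancelCount = entries flushed from the queue (needCancel) plus enqueues cancelled in
  // the redirect cycle (enqCancel); it is counted one cycle after the redirect and applied to
  // enqPtrExt one cycle after that.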
1014
1015  when (lastlastCycleRedirect) {
1016    // we recover the pointers 2 cycles after the redirect for better timing
1017    enqPtrExt := VecInit(enqPtrExt.map(_ - redirectCancelCount))
1018  }.otherwise {
1019    // lastCycleRedirect.valid or normal case
1020    // when lastCycleRedirect.valid, enqNumber === 0.U, enqPtrExt will not change
1021    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
1022  }
1023  assert(!(lastCycleRedirect && enqNumber =/= 0.U))
1024
1025  deqPtrExt := deqPtrExtNext
1026  rdataPtrExt := rdataPtrExtNext
1027
1028  // val dequeueCount = Mux(io.sbuffer(1).fire, 2.U, Mux(io.sbuffer(0).fire || io.mmioStout.fire, 1.U, 0.U))
1029
1030  // If redirect at T0, sqCancelCnt is at T2
1031  io.sqCancelCnt := redirectCancelCount
1032  val ForceWriteUpper = Wire(UInt(log2Up(StoreQueueSize + 1).W))
1033  ForceWriteUpper := Constantin.createRecord(s"ForceWriteUpper_${p(XSCoreParamsKey).HartId}", initValue = 60)
1034  val ForceWriteLower = Wire(UInt(log2Up(StoreQueueSize + 1).W))
1035  ForceWriteLower := Constantin.createRecord(s"ForceWriteLower_${p(XSCoreParamsKey).HartId}", initValue = 55)
1036
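  // force_write uses hysteresis: it is asserted once SQ occupancy reaches ForceWriteUpper and
  // stays asserted until occupancy drops below ForceWriteLower.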
1037  val valid_cnt = PopCount(allocated)
1038  io.force_write := RegNext(Mux(valid_cnt >= ForceWriteUpper, true.B, valid_cnt >= ForceWriteLower && io.force_write), init = false.B)
1039
1040  // io.sqEmpty will be used by the sbuffer.
1041  // We delay it for 1 cycle for better timing.
1042  // When the sbuffer needs to check whether it is empty, the pipeline is blocked, so delaying io.sqEmpty
1043  // by 1 cycle still guarantees that the SQ is empty in that cycle.
1044  io.sqEmpty := RegNext(
1045    enqPtrExt(0).value === deqPtrExt(0).value &&
1046    enqPtrExt(0).flag === deqPtrExt(0).flag
1047  )
1048  // perf counter
1049  QueuePerf(StoreQueueSize, validCount, !allowEnqueue)
1050  val vecValidVec = WireInit(VecInit((0 until StoreQueueSize).map(i => allocated(i) && isVec(i))))
1051  QueuePerf(StoreQueueSize, PopCount(vecValidVec), !allowEnqueue)
1052  io.sqFull := !allowEnqueue
1053  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // sq is busy dealing with uncache req
1054  XSPerfAccumulate("mmioCnt", io.uncache.req.fire)
1055  XSPerfAccumulate("mmio_wb_success", io.mmioStout.fire || io.vecmmioStout.fire)
1056  XSPerfAccumulate("mmio_wb_blocked", (io.mmioStout.valid && !io.mmioStout.ready) || (io.vecmmioStout.valid && !io.vecmmioStout.ready))
1057  XSPerfAccumulate("validEntryCnt", distanceBetween(enqPtrExt(0), deqPtrExt(0)))
1058  XSPerfAccumulate("cmtEntryCnt", distanceBetween(cmtPtrExt(0), deqPtrExt(0)))
1059  XSPerfAccumulate("nCmtEntryCnt", distanceBetween(enqPtrExt(0), cmtPtrExt(0)))
1060
1061  val perfValidCount = distanceBetween(enqPtrExt(0), deqPtrExt(0))
1062  val perfEvents = Seq(
1063    ("mmioCycle      ", uncacheState =/= s_idle),
1064    ("mmioCnt        ", io.uncache.req.fire),
1065    ("mmio_wb_success", io.mmioStout.fire || io.vecmmioStout.fire),
1066    ("mmio_wb_blocked", (io.mmioStout.valid && !io.mmioStout.ready) || (io.vecmmioStout.valid && !io.vecmmioStout.ready)),
1067    ("stq_1_4_valid  ", (perfValidCount < (StoreQueueSize.U/4.U))),
1068    ("stq_2_4_valid  ", (perfValidCount > (StoreQueueSize.U/4.U)) & (perfValidCount <= (StoreQueueSize.U/2.U))),
1069    ("stq_3_4_valid  ", (perfValidCount > (StoreQueueSize.U/2.U)) & (perfValidCount <= (StoreQueueSize.U*3.U/4.U))),
1070    ("stq_4_4_valid  ", (perfValidCount > (StoreQueueSize.U*3.U/4.U))),
1071  )
1072  generatePerfEvent()
1073
1074  // debug info
1075  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt(0).flag, deqPtr)
1076
1077  def PrintFlag(flag: Bool, name: String): Unit = {
1078    when(flag) {
1079      XSDebug(false, true.B, name)
1080    }.otherwise {
1081      XSDebug(false, true.B, " ")
1082    }
1083  }
1084
1085  for (i <- 0 until StoreQueueSize) {
1086    XSDebug(i + ": pc %x va %x pa %x data %x ",
1087      uop(i).pc,
1088      debug_vaddr(i),
1089      debug_paddr(i),
1090      debug_data(i)
1091    )
1092    PrintFlag(allocated(i), "a")
1093    PrintFlag(allocated(i) && addrvalid(i), "a")
1094    PrintFlag(allocated(i) && datavalid(i), "d")
1095    PrintFlag(allocated(i) && committed(i), "c")
1096    PrintFlag(allocated(i) && pending(i), "p")
1097    PrintFlag(allocated(i) && mmio(i), "m")
1098    XSDebug(false, true.B, "\n")
1099  }
1100
1101}
1102