xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/StoreQueue.scala (revision a273862e37f1d43bee748f2a6353320a2f52f6f4)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, MemoryOpConstants}
import xiangshan.backend.rob.{RobLsqIO, RobPtr}
import difftest._
import device.RAMHelper

class SqPtr(implicit p: Parameters) extends CircularQueuePtr[SqPtr](
  p => p(XSCoreParamsKey).StoreQueueSize
){
  override def cloneType = (new SqPtr).asInstanceOf[this.type]
}

object SqPtr {
  def apply(f: Bool, v: UInt)(implicit p: Parameters): SqPtr = {
    val ptr = Wire(new SqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}
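
// Illustrative sketch (not part of the design): an SqPtr is a circular queue
// pointer whose `flag` toggles on each wrap-around, so two pointers with the
// same `value` but different `flag` are one full queue apart. For example,
// with StoreQueueSize = 16:
//   val a = SqPtr(false.B, 3.U)   // entry 3, first lap
//   val b = SqPtr(true.B, 3.U)    // entry 3, one lap later
//   // isAfter(b, a) === true.B
// This is how the enqPtr/deqPtr comparisons below distinguish full from empty.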

class SqEnqIO(implicit p: Parameters) extends XSBundle {
  val canAccept = Output(Bool())
  val lqCanAccept = Input(Bool())
  val needAlloc = Vec(exuParameters.LsExuCnt, Input(Bool()))
  val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(exuParameters.LsExuCnt, Output(new SqPtr))
}

// Store Queue
class StoreQueue(implicit p: Parameters) extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    val enq = new SqEnqIO
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // store addr; store data is not included
    val storeInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // store addr again, with mmio and exception info updated one cycle later
    val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreDataBundle))) // store data, sent to sq from rs
    val sbuffer = Vec(StorePipelineWidth, Decoupled(new DCacheWordReqWithVaddr)) // write committed store to sbuffer
    val mmioStout = DecoupledIO(new ExuOutput) // writeback uncached store
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    val rob = Flipped(new RobLsqIO)
    val uncache = new DCacheWordIO
    // val refill = Flipped(Valid(new DCacheLineReq ))
    val exceptionAddr = new ExceptionAddrIO
    val sqempty = Output(Bool())
    val issuePtrExt = Output(new SqPtr) // used to wake up delayed load/store
    val sqFull = Output(Bool())
  })

  println("StoreQueue: size: " + StoreQueueSize)

  // data modules
  val uop = Reg(Vec(StoreQueueSize, new MicroOp))
  // val data = Reg(Vec(StoreQueueSize, new LsqEntry))
  val dataModule = Module(new SQDataModule(
    numEntries = StoreQueueSize,
    numRead = StorePipelineWidth,
    numWrite = StorePipelineWidth,
    numForward = StorePipelineWidth
  ))
  dataModule.io := DontCare
  val paddrModule = Module(new SQAddrModule(
    dataWidth = PAddrBits,
    numEntries = StoreQueueSize,
    numRead = StorePipelineWidth,
    numWrite = StorePipelineWidth,
    numForward = StorePipelineWidth
  ))
  paddrModule.io := DontCare
  val vaddrModule = Module(new SQAddrModule(
    dataWidth = VAddrBits,
    numEntries = StoreQueueSize,
    numRead = StorePipelineWidth + 1, // sbuffer reads 2 + badvaddr reads 1 (TODO)
    numWrite = StorePipelineWidth,
    numForward = StorePipelineWidth
  ))
  vaddrModule.io := DontCare
  val debug_paddr = Reg(Vec(StoreQueueSize, UInt((PAddrBits).W)))
  val debug_vaddr = Reg(Vec(StoreQueueSize, UInt((VAddrBits).W)))
  val debug_data = Reg(Vec(StoreQueueSize, UInt((XLEN).W)))

  // state & misc
  val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated
  val addrvalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio addr is valid
  val datavalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio data is valid
  val allvalid  = VecInit((0 until StoreQueueSize).map(i => addrvalid(i) && datavalid(i))) // non-mmio addr & data are both valid
  val commited = Reg(Vec(StoreQueueSize, Bool())) // inst has been committed by rob
  val pending = Reg(Vec(StoreQueueSize, Bool())) // mmio pending: inst is an mmio inst and will not be executed until it reaches the end of rob
  val mmio = Reg(Vec(StoreQueueSize, Bool())) // mmio: inst is an mmio inst

  // ptr
  val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new SqPtr))))
  val deqPtrExt = RegInit(VecInit((0 until StorePipelineWidth).map(_.U.asTypeOf(new SqPtr))))
  val cmtPtrExt = RegInit(VecInit((0 until CommitWidth).map(_.U.asTypeOf(new SqPtr))))
  val issuePtrExt = RegInit(0.U.asTypeOf(new SqPtr))
  val validCounter = RegInit(0.U(log2Ceil(StoreQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt(0).value
  val cmtPtr = cmtPtrExt(0).value

  val deqMask = UIntToMask(deqPtr, StoreQueueSize)
  val enqMask = UIntToMask(enqPtr, StoreQueueSize)
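
  // Illustrative note (assumption: UIntToMask(ptr, n) sets exactly the bits
  // strictly below ptr): with StoreQueueSize = 8 and deqPtr = 3,
  //   deqMask = UIntToMask(3.U, 8)   // "b00000111".U, i.e. entries 0..2
  // The xor/complement tricks in the forward logic below use these masks to
  // carve out the circular range between deqPtr and a load's sqIdx.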

  val commitCount = RegNext(io.rob.scommit)

  // Read dataModule
  // the entries at deqPtrExtNext(0) and deqPtrExtNext(1) will be read from dataModule
  // if !sbuffer.fire(), read the same ptr
  // if sbuffer.fire(), read the next ptr
  val deqPtrExtNext = WireInit(Mux(io.sbuffer(1).fire(),
    VecInit(deqPtrExt.map(_ + 2.U)),
    Mux(io.sbuffer(0).fire() || io.mmioStout.fire(),
      VecInit(deqPtrExt.map(_ + 1.U)),
      deqPtrExt
    )
  ))
  for (i <- 0 until StorePipelineWidth) {
    dataModule.io.raddr(i) := deqPtrExtNext(i).value
    paddrModule.io.raddr(i) := deqPtrExtNext(i).value
    vaddrModule.io.raddr(i) := deqPtrExtNext(i).value
  }
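
  // Dequeue-read sketch (illustrative): the read address tracks next cycle's
  // deqPtr, so read data is ready in the cycle the sbuffer request can fire:
  //   sbuffer(1) fires       -> both entries leave, read from deqPtr + 2
  //   sbuffer(0)/mmio fires  -> one entry leaves,  read from deqPtr + 1
  //   nothing fires          -> re-read the same deqPtr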

  // no inst will be committed 1 cycle before tval update
  vaddrModule.io.raddr(StorePipelineWidth) := (cmtPtrExt(0) + commitCount).value

  /**
    * Enqueue at dispatch
    *
    * Currently, StoreQueue only allows enqueue when #emptyEntries > EnqWidth
    */
  io.enq.canAccept := allowEnqueue
  for (i <- 0 until io.enq.req.length) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val sqIdx = enqPtrExt(offset)
    val index = sqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.lqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      addrvalid(index) := false.B
      commited(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := sqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")
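
  // Enqueue-offset sketch (illustrative): each port skips the entries claimed
  // by lower-indexed ports via PopCount of needAlloc. E.g. with
  // needAlloc = (1, 0, 1), port 2 computes offset = PopCount(Seq(1, 0)) = 1,
  // so it allocates at enqPtrExt(1) while port 0 takes enqPtrExt(0).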

  /**
    * Update issuePtr when issue from rs
    */
  // update issuePtr
  val IssuePtrMoveStride = 4
  require(IssuePtrMoveStride >= 2)

  val issueLookupVec = (0 until IssuePtrMoveStride).map(issuePtrExt + _.U)
  val issueLookup = issueLookupVec.map(ptr => allocated(ptr.value) && addrvalid(ptr.value) && datavalid(ptr.value) && ptr =/= enqPtrExt(0))
  val nextIssuePtr = issuePtrExt + PriorityEncoder(VecInit(issueLookup.map(!_) :+ true.B))
  issuePtrExt := nextIssuePtr
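
  // Stride-lookup sketch (illustrative): issuePtr advances past the leading
  // run of fully-issued entries, at most IssuePtrMoveStride per cycle. E.g. if
  // issueLookup = (true, true, false, true), the appended true.B terminates
  // the priority encode at the first non-ready slot: PriorityEncoder yields 2,
  // so the pointer moves by 2 (the trailing ready entry waits for the gap).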

  when (io.brqRedirect.valid) {
    issuePtrExt := Mux(
      isAfter(cmtPtrExt(0), deqPtrExt(0)),
      cmtPtrExt(0),
      deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr
    )
  }
  // send issuePtrExt to rs
  // io.issuePtrExt := cmtPtrExt(0)
  io.issuePtrExt := issuePtrExt

  /**
    * Writeback store from store units
    *
    * Most store instructions write back to the regfile in the previous cycle.
    * However,
    *   (1) For an mmio instruction with exceptions, we need to mark it as addrvalid
    * (in this way it will trigger an exception when it reaches ROB's head)
    * instead of pending, to avoid sending it to the lower level.
    *   (2) For an mmio instruction without exceptions, we mark it as pending.
    * When the instruction reaches ROB's head, StoreQueue sends it to the uncache channel.
    * Upon receiving the response, StoreQueue writes back the instruction
    * through the arbiter shared with the store units. It will later commit as a normal instruction.
    */

  // Write addr to sq
  for (i <- 0 until StorePipelineWidth) {
    paddrModule.io.wen(i) := false.B
    vaddrModule.io.wen(i) := false.B
    dataModule.io.mask.wen(i) := false.B
    val stWbIndex = io.storeIn(i).bits.uop.sqIdx.value
    when (io.storeIn(i).fire()) {
      addrvalid(stWbIndex) := true.B // !io.storeIn(i).bits.mmio
      // pending(stWbIndex) := io.storeIn(i).bits.mmio

      dataModule.io.mask.waddr(i) := stWbIndex
      dataModule.io.mask.wdata(i) := io.storeIn(i).bits.mask
      dataModule.io.mask.wen(i) := true.B

      paddrModule.io.waddr(i) := stWbIndex
      paddrModule.io.wdata(i) := io.storeIn(i).bits.paddr
      paddrModule.io.wlineflag(i) := io.storeIn(i).bits.wlineflag
      paddrModule.io.wen(i) := true.B

      vaddrModule.io.waddr(i) := stWbIndex
      vaddrModule.io.wdata(i) := io.storeIn(i).bits.vaddr
      vaddrModule.io.wlineflag(i) := io.storeIn(i).bits.wlineflag
      vaddrModule.io.wen(i) := true.B

      debug_paddr(paddrModule.io.waddr(i)) := paddrModule.io.wdata(i)

      // mmio(stWbIndex) := io.storeIn(i).bits.mmio

      uop(stWbIndex).debugInfo := io.storeIn(i).bits.uop.debugInfo
      XSInfo("store addr write to sq idx %d pc 0x%x vaddr %x paddr %x mmio %x\n",
        io.storeIn(i).bits.uop.sqIdx.value,
        io.storeIn(i).bits.uop.cf.pc,
        io.storeIn(i).bits.vaddr,
        io.storeIn(i).bits.paddr,
        io.storeIn(i).bits.mmio
      )
    }

    // re-replenish mmio info: the pma/pmp check returns the mmio flag one cycle later
    val storeInFireReg = RegNext(io.storeIn(i).fire())
    val stWbIndexReg = RegNext(stWbIndex)
    when (storeInFireReg) {
      pending(stWbIndexReg) := io.storeInRe(i).mmio
      mmio(stWbIndexReg) := io.storeInRe(i).mmio
    }

    when (vaddrModule.io.wen(i)) {
      debug_vaddr(vaddrModule.io.waddr(i)) := vaddrModule.io.wdata(i)
    }
  }
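
  // Timing sketch (illustrative): for a store arriving on storeIn in cycle T,
  //   T:   addr/mask are written and addrvalid is set
  //   T+1: storeInRe delivers the pma/pmp result; pending/mmio are updated
  // so the mmio status always lags address writeback by exactly one cycle.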

  // Write data to sq
  for (i <- 0 until StorePipelineWidth) {
    dataModule.io.data.wen(i) := false.B
    io.rob.storeDataRobWb(i).valid := false.B
    io.rob.storeDataRobWb(i).bits := DontCare
    val stWbIndex = io.storeDataIn(i).bits.uop.sqIdx.value
    when (io.storeDataIn(i).fire()) {
      datavalid(stWbIndex) := true.B

      dataModule.io.data.waddr(i) := stWbIndex
      dataModule.io.data.wdata(i) := Mux(io.storeDataIn(i).bits.uop.ctrl.fuOpType === LSUOpType.cbo_zero,
        0.U,
        genWdata(io.storeDataIn(i).bits.data, io.storeDataIn(i).bits.uop.ctrl.fuOpType(1,0))
      )
      dataModule.io.data.wen(i) := true.B

      debug_data(dataModule.io.data.waddr(i)) := dataModule.io.data.wdata(i)

      io.rob.storeDataRobWb(i).valid := true.B
      io.rob.storeDataRobWb(i).bits := io.storeDataIn(i).bits.uop.robIdx

      XSInfo("store data write to sq idx %d pc 0x%x data %x -> %x\n",
        io.storeDataIn(i).bits.uop.sqIdx.value,
        io.storeDataIn(i).bits.uop.cf.pc,
        io.storeDataIn(i).bits.data,
        dataModule.io.data.wdata(i)
      )
    }
  }

  /**
    * load forward query
    *
    * Check the store queue for instructions that are older than the load.
    * The response will be valid at the next cycle after req.
    */
  // check over all sq entries and forward data from the first matched store
  for (i <- 0 until LoadPipelineWidth) {
    // Compare deqPtr (deqPtr) and forward.sqIdx, we have two cases:
    // (1) if they have the same flag, we need to check range(tail, sqIdx)
    // (2) if they have different flags, we need to check range(tail, StoreQueueSize) and range(0, sqIdx)
    // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, StoreQueueSize))
    // Forward2: Mux(same_flag, 0.U,                range(0, sqIdx))
    // i.e. forward1 covers the target entries with the same flag bits and forward2 the others
    val differentFlag = deqPtrExt(0).flag =/= io.forward(i).sqIdx.flag
    val forwardMask = io.forward(i).sqIdxMask
    // all addrvalid terms need to be checked
    val addrValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && allocated(j))))
    val dataValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => datavalid(j))))
    val allValidVec = WireInit(VecInit((0 until StoreQueueSize).map(j => addrvalid(j) && datavalid(j) && allocated(j))))
    val canForward1 = Mux(differentFlag, ~deqMask, deqMask ^ forwardMask) & allValidVec.asUInt
    val canForward2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W)) & allValidVec.asUInt
    val needForward = Mux(differentFlag, ~deqMask | forwardMask, deqMask ^ forwardMask)
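
    // Mask sketch (illustrative, StoreQueueSize = 8): with deqPtr = 2 and a
    // load sqIdx = 5 on the same lap (same flag),
    //   needForward = deqMask ^ forwardMask = "b00000011" ^ "b00011111" = "b00011100"
    // i.e. entries 2..4. Across a wrap (different flag), deqPtr = 5, sqIdx = 2:
    //   needForward = ~deqMask | forwardMask = "b11100000" | "b00000011" = "b11100011"
    // i.e. entries 5..7 plus 0..1, exactly the stores older than the load.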

    XSDebug(p"$i f1 ${Binary(canForward1)} f2 ${Binary(canForward2)} " +
      p"sqIdx ${io.forward(i).sqIdx} pa ${Hexadecimal(io.forward(i).paddr)}\n"
    )

    // do real fwd query (cam lookup in load_s1)
    dataModule.io.needForward(i)(0) := canForward1 & vaddrModule.io.forwardMmask(i).asUInt
    dataModule.io.needForward(i)(1) := canForward2 & vaddrModule.io.forwardMmask(i).asUInt

    vaddrModule.io.forwardMdata(i) := io.forward(i).vaddr
    paddrModule.io.forwardMdata(i) := io.forward(i).paddr

    // if the vaddr cam result does not match the paddr cam result,
    // a replay is needed
    // val vpmaskNotEqual = ((paddrModule.io.forwardMmask(i).asUInt ^ vaddrModule.io.forwardMmask(i).asUInt) & needForward) =/= 0.U
    // val vaddrMatchFailed = vpmaskNotEqual && io.forward(i).valid
    val vpmaskNotEqual = ((RegNext(paddrModule.io.forwardMmask(i).asUInt) ^ RegNext(vaddrModule.io.forwardMmask(i).asUInt)) & RegNext(needForward)) =/= 0.U
    val vaddrMatchFailed = vpmaskNotEqual && RegNext(io.forward(i).valid)
    when (vaddrMatchFailed) {
      XSInfo("vaddrMatchFailed: pc %x pmask %x vmask %x\n",
        RegNext(io.forward(i).uop.cf.pc),
        RegNext(needForward & paddrModule.io.forwardMmask(i).asUInt),
        RegNext(needForward & vaddrModule.io.forwardMmask(i).asUInt)
      )
    }
    XSPerfAccumulate("vaddr_match_failed", vpmaskNotEqual)
    XSPerfAccumulate("vaddr_match_really_failed", vaddrMatchFailed)

    // Fast forward mask will be generated immediately (load_s1)
    io.forward(i).forwardMaskFast := dataModule.io.forwardMaskFast(i)

    // Forward result will be generated 1 cycle later (load_s2)
    io.forward(i).forwardMask := dataModule.io.forwardMask(i)
    io.forward(i).forwardData := dataModule.io.forwardData(i)

    // If addr matches but data is not ready, mark it as dataInvalid
    // load_s1: generate dataInvalid in load_s1 to set fastUop
    io.forward(i).dataInvalidFast := (addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & needForward).orR
    val dataInvalidSqIdxReg = RegNext(OHToUInt(addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & needForward))
    // load_s2
    io.forward(i).dataInvalid := RegNext(io.forward(i).dataInvalidFast)

    // load_s2
    // check if vaddr forward mismatched
    io.forward(i).matchInvalid := vaddrMatchFailed
    io.forward(i).dataInvalidSqIdx := dataInvalidSqIdxReg
  }
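
  // dataInvalid sketch (illustrative): if a load hits entry 5, whose address
  // is valid but whose data has not yet arrived from rs, the masked vector is
  // one-hot at bit 5, so dataInvalidSqIdx = OHToUInt(...) = 5 and the load
  // replays once that store's data is written. Note the encode assumes a
  // one-hot input; the common case is a single blocking store.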

  /**
    * Memory mapped IO / other uncached operations
    *
    * States:
    * (1) writeback from store units: mark as pending
    * (2) when they reach ROB's head, they can be sent to uncache channel
    * (3) response from uncache channel: mark as datavalid
    * (4) writeback to ROB (and other units): mark as writebacked
    * (5) ROB commits the instruction: same as normal instructions
    */
  // (2) when they reach ROB's head, they can be sent to uncache channel
  val s_idle :: s_req :: s_resp :: s_wb :: s_wait :: Nil = Enum(5)
  val uncacheState = RegInit(s_idle)
  switch(uncacheState) {
    is(s_idle) {
      when(io.rob.pendingst && pending(deqPtr) && allocated(deqPtr) && datavalid(deqPtr) && addrvalid(deqPtr)) {
        uncacheState := s_req
      }
    }
    is(s_req) {
      when(io.uncache.req.fire()) {
        uncacheState := s_resp
      }
    }
    is(s_resp) {
      when(io.uncache.resp.fire()) {
        uncacheState := s_wb
      }
    }
    is(s_wb) {
      when (io.mmioStout.fire()) {
        uncacheState := s_wait
      }
    }
    is(s_wait) {
      when(commitCount > 0.U) {
        uncacheState := s_idle // ready for next mmio
      }
    }
  }
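
  // FSM sketch (illustrative): lifetime of one mmio store at deqPtr:
  //   s_idle -> s_req  : store is pending and at ROB's head (io.rob.pendingst)
  //   s_req  -> s_resp : uncache write request accepted
  //   s_resp -> s_wb   : uncache response received
  //   s_wb   -> s_wait : result written back to ROB via mmioStout
  //   s_wait -> s_idle : ROB commits the store (commitCount > 0)
  // Only one uncached store is in flight at a time.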
  io.uncache.req.valid := uncacheState === s_req

  io.uncache.req.bits.cmd  := MemoryOpConstants.M_XWR
  io.uncache.req.bits.addr := paddrModule.io.rdata(0) // data(deqPtr) -> rdata(0)
  io.uncache.req.bits.data := dataModule.io.rdata(0).data
  io.uncache.req.bits.mask := dataModule.io.rdata(0).mask

  // CBO op type check can be delayed for 1 cycle,
  // as an uncache op will not start in s_idle
  val cbo_mmio_addr = paddrModule.io.rdata(0) >> 2 << 2 // clear lowest 2 bits for op
  val cbo_mmio_op = 0.U // TODO
  val cbo_mmio_data = cbo_mmio_addr | cbo_mmio_op
  when(RegNext(LSUOpType.isCbo(uop(deqPtr).ctrl.fuOpType))){
    io.uncache.req.bits.addr := DontCare // TODO
    io.uncache.req.bits.data := paddrModule.io.rdata(0)
    io.uncache.req.bits.mask := DontCare // TODO
  }

  io.uncache.req.bits.id   := DontCare
  io.uncache.req.bits.instrtype   := DontCare

  when(io.uncache.req.fire()){
    // mmio store should not be committed until the uncache req is sent
    pending(deqPtr) := false.B

    XSDebug(
      p"uncache req: pc ${Hexadecimal(uop(deqPtr).cf.pc)} " +
      p"addr ${Hexadecimal(io.uncache.req.bits.addr)} " +
      p"data ${Hexadecimal(io.uncache.req.bits.data)} " +
      p"op ${Hexadecimal(io.uncache.req.bits.cmd)} " +
      p"mask ${Hexadecimal(io.uncache.req.bits.mask)}\n"
    )
  }
  // (3) response from uncache channel: mark as datavalid
  io.uncache.resp.ready := true.B

  // (4) writeback to ROB (and other units): mark as writebacked
  io.mmioStout.valid := uncacheState === s_wb
  io.mmioStout.bits.uop := uop(deqPtr)
  io.mmioStout.bits.uop.sqIdx := deqPtrExt(0)
  io.mmioStout.bits.data := dataModule.io.rdata(0).data // dataModule.io.rdata.read(deqPtr)
  io.mmioStout.bits.redirectValid := false.B
  io.mmioStout.bits.redirect := DontCare
  io.mmioStout.bits.debug.isMMIO := true.B
  io.mmioStout.bits.debug.paddr := DontCare
  io.mmioStout.bits.debug.isPerfCnt := false.B
  io.mmioStout.bits.fflags := DontCare
  // Remove the MMIO inst from the store queue after its request has been sent;
  // from then on it is tracked by the uncache state machine
  when (io.mmioStout.fire()) {
    allocated(deqPtr) := false.B
  }

  /**
    * ROB commits store instructions (mark them as committed)
    *
    * (1) When a store commits, mark it as committed.
    * (2) Committed stores will not be cancelled and can be sent to the lower level.
    */
  XSError(uncacheState =/= s_idle && uncacheState =/= s_wait && commitCount > 0.U,
    "should not commit instruction when MMIO has not been finished\n")
  for (i <- 0 until CommitWidth) {
    when (commitCount > i.U) { // MMIO inst is not in progress
      if (i == 0) {
        // MMIO inst should not update the commited flag
        // Note that commit count has been delayed for 1 cycle
        when (uncacheState === s_idle) {
          commited(cmtPtrExt(0).value) := true.B
        }
      } else {
        commited(cmtPtrExt(i).value) := true.B
      }
    }
  }
  cmtPtrExt := cmtPtrExt.map(_ + commitCount)
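
  // Commit sketch (illustrative): with commitCount = 2 (delayed one cycle from
  // io.rob.scommit), the entries at cmtPtrExt(0) and cmtPtrExt(1) are marked
  // and cmtPtrExt advances by 2. Slot 0 is guarded by uncacheState === s_idle
  // so a just-finished mmio store, already retired via mmioStout, is not
  // re-marked as committed.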

  // Committed stores will not be cancelled and can be sent to the lower level.
  // remove retired insts from sq, add retired stores to sbuffer
  for (i <- 0 until StorePipelineWidth) {
    // We use RegNext to prepare data for sbuffer
    val ptr = deqPtrExt(i).value
    // if !sbuffer.fire(), read the same ptr
    // if sbuffer.fire(), read next
    io.sbuffer(i).valid := allocated(ptr) && commited(ptr) && !mmio(ptr)
    // Note that store data/addr should both be valid after the store's commit
    assert(!io.sbuffer(i).valid || allvalid(ptr))
    // Write line request should have an all-ones mask
    assert(!(io.sbuffer(i).valid && io.sbuffer(i).bits.wline && !io.sbuffer(i).bits.mask.andR))
    io.sbuffer(i).bits.cmd   := MemoryOpConstants.M_XWR
    io.sbuffer(i).bits.addr  := paddrModule.io.rdata(i)
    io.sbuffer(i).bits.vaddr := vaddrModule.io.rdata(i)
    io.sbuffer(i).bits.data  := dataModule.io.rdata(i).data
    io.sbuffer(i).bits.mask  := dataModule.io.rdata(i).mask
    io.sbuffer(i).bits.wline := paddrModule.io.rlineflag(i)
    io.sbuffer(i).bits.id    := DontCare
    io.sbuffer(i).bits.instrtype    := DontCare

    when (io.sbuffer(i).fire()) {
      allocated(ptr) := false.B
      XSDebug("sbuffer " + i + " fire: ptr %d\n", ptr)
    }
  }
  when (io.sbuffer(1).fire()) {
    assert(io.sbuffer(0).fire())
  }
  if (coreParams.dcacheParametersOpt.isEmpty) {
    for (i <- 0 until StorePipelineWidth) {
      val ptr = deqPtrExt(i).value
      val fakeRAM = Module(new RAMHelper(64L * 1024 * 1024 * 1024))
      fakeRAM.clk   := clock
      fakeRAM.en    := allocated(ptr) && commited(ptr) && !mmio(ptr)
      fakeRAM.rIdx  := 0.U
      fakeRAM.wIdx  := (paddrModule.io.rdata(i) - "h80000000".U) >> 3
      fakeRAM.wdata := dataModule.io.rdata(i).data
      fakeRAM.wmask := MaskExpand(dataModule.io.rdata(i).mask)
      fakeRAM.wen   := allocated(ptr) && commited(ptr) && !mmio(ptr)
    }
  }
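
  // Address-mapping sketch (illustrative): the fake RAM appears to model
  // 64 GiB of memory based at 0x8000_0000, indexed in 64-bit words. A
  // committed store to paddr 0x8000_1040 would therefore write word index
  //   (0x80001040 - 0x80000000) >> 3 = 0x208
  // with wmask expanded from a byte mask to a bit mask by MaskExpand.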

  if (!env.FPGAPlatform) {
    for (i <- 0 until StorePipelineWidth) {
      val storeCommit = io.sbuffer(i).fire()
      val waddr = SignExt(io.sbuffer(i).bits.addr, 64)
      val wdata = io.sbuffer(i).bits.data & MaskExpand(io.sbuffer(i).bits.mask)
      val wmask = io.sbuffer(i).bits.mask

      val difftest = Module(new DifftestStoreEvent)
      difftest.io.clock       := clock
      difftest.io.coreid      := hardId.U
      difftest.io.index       := i.U
      difftest.io.valid       := storeCommit
      difftest.io.storeAddr   := waddr
      difftest.io.storeData   := wdata
      difftest.io.storeMask   := wmask
    }
  }

  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := vaddrModule.io.rdata(StorePipelineWidth)

  // misprediction recovery / exception redirect
  // invalidate sq entries using robIdx
  val needCancel = Wire(Vec(StoreQueueSize, Bool()))
  for (i <- 0 until StoreQueueSize) {
    needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleRedirect = RegNext(io.brqRedirect.valid)
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though it may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.lqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }
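
  // Recovery sketch (illustrative): if a redirect in cycle T flushes 3
  // uncommitted entries, needCancel has 3 bits set; in T+1 lastCycleRedirect
  // holds and every enqPtrExt element steps back by lastCycleCancelCount = 3,
  // reclaiming the flushed slots in a single cycle.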

  deqPtrExt := deqPtrExtNext

  val dequeueCount = Mux(io.sbuffer(1).fire(), 2.U, Mux(io.sbuffer(0).fire() || io.mmioStout.fire(), 1.U, 0.U))
  val validCount = distanceBetween(enqPtrExt(0), deqPtrExt(0))

  allowEnqueue := validCount + enqNumber <= (StoreQueueSize - io.enq.req.length).U

  // io.sqempty will be used by sbuffer
  // We delay it for 1 cycle for better timing
  // When sbuffer needs to check whether it is empty, the pipeline is blocked, so delaying
  // io.sqempty for 1 cycle still promises that sq is empty in that cycle
  io.sqempty := RegNext(enqPtrExt(0).value === deqPtrExt(0).value && enqPtrExt(0).flag === deqPtrExt(0).flag)

  // perf counter
  QueuePerf(StoreQueueSize, validCount, !allowEnqueue)
  io.sqFull := !allowEnqueue
  XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // sq is busy dealing with an uncache req
  XSPerfAccumulate("mmioCnt", io.uncache.req.fire())
  XSPerfAccumulate("mmio_wb_success", io.mmioStout.fire())
  XSPerfAccumulate("mmio_wb_blocked", io.mmioStout.valid && !io.mmioStout.ready)
  XSPerfAccumulate("validEntryCnt", distanceBetween(enqPtrExt(0), deqPtrExt(0)))
  XSPerfAccumulate("cmtEntryCnt", distanceBetween(cmtPtrExt(0), deqPtrExt(0)))
  XSPerfAccumulate("nCmtEntryCnt", distanceBetween(enqPtrExt(0), cmtPtrExt(0)))

  val perfinfo = IO(new Bundle(){
    val perfEvents = Output(new PerfEventsBundle(8))
  })
  val perfEvents = Seq(
    ("mmioCycle         ", uncacheState =/= s_idle),
    ("mmioCnt           ", io.uncache.req.fire()),
    ("mmio_wb_success   ", io.mmioStout.fire()),
    ("mmio_wb_blocked   ", io.mmioStout.valid && !io.mmioStout.ready),
    ("stq_1/4_valid     ", (distanceBetween(enqPtrExt(0), deqPtrExt(0)) < (StoreQueueSize.U/4.U))),
    ("stq_2/4_valid     ", (distanceBetween(enqPtrExt(0), deqPtrExt(0)) > (StoreQueueSize.U/4.U)) & (distanceBetween(enqPtrExt(0), deqPtrExt(0)) <= (StoreQueueSize.U/2.U))),
    ("stq_3/4_valid     ", (distanceBetween(enqPtrExt(0), deqPtrExt(0)) > (StoreQueueSize.U/2.U)) & (distanceBetween(enqPtrExt(0), deqPtrExt(0)) <= (StoreQueueSize.U*3.U/4.U))),
    ("stq_4/4_valid     ", (distanceBetween(enqPtrExt(0), deqPtrExt(0)) > (StoreQueueSize.U*3.U/4.U))),
  )

  for (((perf_out, (perf_name, perf)), i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) {
    perf_out.incr_step := RegNext(perf)
  }
  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt(0).flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until StoreQueueSize) {
    XSDebug(i + ": pc %x va %x pa %x data %x ",
      uop(i).cf.pc,
      debug_vaddr(i),
      debug_paddr(i),
      debug_data(i)
    )
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && addrvalid(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "d")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && pending(i), "p")
    PrintFlag(allocated(i) && mmio(i), "m")
    XSDebug(false, true.B, "\n")
  }

}