// XiangShan/src/main/scala/xiangshan/mem/lsqueue/LSQWrapper.scala (revision 3088616cbf0793407bb68460b2db89b7de80c12a)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.Bundles.{DynInst, MemExuOutput}
import xiangshan.cache._
import xiangshan.cache.{DCacheWordIO, DCacheLineIO, MemoryOpConstants}
import xiangshan.cache.mmu.{TlbRequestIO, TlbHintIO}
import xiangshan.mem._
import xiangshan.backend._
import xiangshan.backend.rob.RobLsqIO
import coupledL2.{CMOReq, CMOResp}

class ExceptionAddrIO(implicit p: Parameters) extends XSBundle {
  val isStore = Input(Bool())
  val vaddr = Output(UInt(VAddrBits.W))
  val vstart = Output(UInt((log2Up(VLEN) + 1).W))
  val vl = Output(UInt((log2Up(VLEN) + 1).W))
  val gpaddr = Output(UInt(GPAddrBits.W))
}

class FwdEntry extends Bundle {
  val validFast = Bool() // validFast is generated in the same cycle as the query
  val valid = Bool()     // valid is generated 1 cycle after the query request
  val data = UInt(8.W)   // data is generated 1 cycle after the query request
}

// in-flight miss block requests
class InflightBlockInfo(implicit p: Parameters) extends XSBundle {
  val block_addr = UInt(PAddrBits.W)
  val valid = Bool()
}

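// Enqueue interface between dispatch and the LSQ. needAlloc is a 2-bit request
// per dispatch slot: bit 0 asks for a load queue entry, bit 1 for a store
// queue entry (see the wiring in LsqWrapper's io.enq loop below).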
class LsqEnqIO(implicit p: Parameters) extends MemBlockBundle {
  val canAccept = Output(Bool())
  val needAlloc = Vec(LSQEnqWidth, Input(UInt(2.W)))
  val req       = Vec(LSQEnqWidth, Flipped(ValidIO(new DynInst)))
  val resp      = Vec(LSQEnqWidth, Output(new LSIdx))
}

// Load / Store Queue Wrapper for XiangShan Out of Order LSU
class LsqWrapper(implicit p: Parameters) extends XSModule with HasDCacheParameters with HasPerfEvents {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(hartIdLen.W))
    val brqRedirect = Flipped(ValidIO(new Redirect))
    val stvecFeedback = Vec(VecStorePipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
    val ldvecFeedback = Vec(VecLoadPipelineWidth, Flipped(ValidIO(new FeedbackToLsqIO)))
    val enq = new LsqEnqIO
    val ldu = new Bundle() {
      val stld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldld_nuke_query = Vec(LoadPipelineWidth, Flipped(new LoadNukeQueryIO)) // from load_s2
      val ldin = Vec(LoadPipelineWidth, Flipped(Decoupled(new LqWriteBundle))) // from load_s3
    }
    val sta = new Bundle() {
      val storeMaskIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreMaskBundle))) // from store_s0: store mask, sent to sq from rs
      val storeAddrIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // from store_s1
      val storeAddrInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // from store_s2
    }
    val std = new Bundle() {
      val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new MemExuOutput(isVector = true)))) // from store_s0: store data, sent to sq from rs
    }
    val ldout = Vec(LoadPipelineWidth, DecoupledIO(new MemExuOutput))
    val ld_raw_data = Vec(LoadPipelineWidth, Output(new LoadDataFromLQBundle))
    val replay = Vec(LoadPipelineWidth, Decoupled(new LsPipelineBundle))
    val sbuffer = Vec(EnsbufferWidth, Decoupled(new DCacheWordReqWithVaddrAndPfFlag))
    val sbufferVecDifftestInfo = Vec(EnsbufferWidth, Decoupled(new DynInst)) // uop info needed by the vector store difftest
    val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO))
    val rob = Flipped(new RobLsqIO)
    val nuke_rollback = Vec(StorePipelineWidth, Output(Valid(new Redirect)))
    val nack_rollback = Output(Valid(new Redirect))
    val release = Flipped(Valid(new Release))
    // val refill = Flipped(Valid(new Refill))
    val tl_d_channel  = Input(new DcacheToLduForwardIO)
    val maControl     = Flipped(new StoreMaBufToSqControlIO)
    val uncacheOutstanding = Input(Bool())
    val uncache = new UncacheWordIO
    val mmioStout = DecoupledIO(new MemExuOutput) // writeback uncached store
    // TODO: implement vector store
    val vecmmioStout = DecoupledIO(new MemExuOutput(isVector = true)) // vec writeback uncached store
    val sqEmpty = Output(Bool())
    val lq_rep_full = Output(Bool())
    val sqFull = Output(Bool())
    val lqFull = Output(Bool())
    val sqCancelCnt = Output(UInt(log2Up(StoreQueueSize+1).W))
    val lqCancelCnt = Output(UInt(log2Up(VirtualLoadQueueSize+1).W))
    val lqDeq = Output(UInt(log2Up(CommitWidth + 1).W))
    val sqDeq = Output(UInt(log2Ceil(EnsbufferWidth + 1).W))
    val lqCanAccept = Output(Bool())
    val sqCanAccept = Output(Bool())
    val lqDeqPtr = Output(new LqPtr)
    val sqDeqPtr = Output(new SqPtr)
    val exceptionAddr = new ExceptionAddrIO
    val flushFrmMaBuf = Input(Bool())
    val issuePtrExt = Output(new SqPtr)
    val l2_hint = Input(Valid(new L2ToL1Hint()))
    val tlb_hint = Flipped(new TlbHintIO)
    val cmoOpReq  = DecoupledIO(new CMOReq)
    val cmoOpResp = Flipped(DecoupledIO(new CMOResp))
    val flushSbuffer = new SbufferFlushBundle
    val force_write = Output(Bool())
    val lqEmpty = Output(Bool())

    // top-down
    val debugTopDown = new LoadQueueTopDownIO
  })

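  // LsqWrapper instantiates the load queue and the store queue and arbitrates
  // the resources they share: enqueue bandwidth, the uncache port, and the
  // exception-address interface.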
  val loadQueue = Module(new LoadQueue)
  val storeQueue = Module(new StoreQueue)

  storeQueue.io.hartId := io.hartId
  storeQueue.io.uncacheOutstanding := io.uncacheOutstanding

  dontTouch(loadQueue.io.tlbReplayDelayCycleCtrl)
  // Todo: imm
  val tlbReplayDelayCycleCtrl = WireInit(VecInit(Seq(14.U(ReSelectLen.W), 0.U(ReSelectLen.W), 125.U(ReSelectLen.W), 0.U(ReSelectLen.W))))
  loadQueue.io.tlbReplayDelayCycleCtrl := tlbReplayDelayCycleCtrl

  // io.enq logic
  // LSQ: send out canAccept when both load queue and store queue are ready
  // Dispatch: send instructions to LSQ only when they are ready
  io.enq.canAccept := loadQueue.io.enq.canAccept && storeQueue.io.enq.canAccept
  io.lqCanAccept := loadQueue.io.enq.canAccept
  io.sqCanAccept := storeQueue.io.enq.canAccept
  loadQueue.io.enq.sqCanAccept := storeQueue.io.enq.canAccept
  storeQueue.io.enq.lqCanAccept := loadQueue.io.enq.canAccept
  io.lqDeqPtr := loadQueue.io.lqDeqPtr
  io.sqDeqPtr := storeQueue.io.sqDeqPtr
  for (i <- io.enq.req.indices) {
    loadQueue.io.enq.needAlloc(i)      := io.enq.needAlloc(i)(0)
    loadQueue.io.enq.req(i).valid      := io.enq.needAlloc(i)(0) && io.enq.req(i).valid
    loadQueue.io.enq.req(i).bits       := io.enq.req(i).bits
    loadQueue.io.enq.req(i).bits.sqIdx := storeQueue.io.enq.resp(i)

    storeQueue.io.enq.needAlloc(i)      := io.enq.needAlloc(i)(1)
    storeQueue.io.enq.req(i).valid      := io.enq.needAlloc(i)(1) && io.enq.req(i).valid
    storeQueue.io.enq.req(i).bits       := io.enq.req(i).bits
    storeQueue.io.enq.req(i).bits.lqIdx := loadQueue.io.enq.resp(i)

    io.enq.resp(i).lqIdx := loadQueue.io.enq.resp(i)
    io.enq.resp(i).sqIdx := storeQueue.io.enq.resp(i)
  }
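  // Note the cross-wiring above: each enqueuing uop records both indices, with
  // the load queue storing the sqIdx allocated by the store queue and vice
  // versa, so loads and stores can later be ordered against each other.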

  // store queue wiring
  storeQueue.io.brqRedirect <> io.brqRedirect
  storeQueue.io.vecFeedback <> io.stvecFeedback
  storeQueue.io.storeAddrIn <> io.sta.storeAddrIn // from store_s1
  storeQueue.io.storeAddrInRe <> io.sta.storeAddrInRe // from store_s2
  storeQueue.io.storeDataIn <> io.std.storeDataIn // from store_s0
  storeQueue.io.storeMaskIn <> io.sta.storeMaskIn // from store_s0
  storeQueue.io.sbuffer     <> io.sbuffer
  storeQueue.io.sbufferVecDifftestInfo <> io.sbufferVecDifftestInfo
  storeQueue.io.mmioStout   <> io.mmioStout
  storeQueue.io.vecmmioStout <> io.vecmmioStout
  storeQueue.io.rob         <> io.rob
  storeQueue.io.exceptionAddr.isStore := DontCare
  storeQueue.io.sqCancelCnt  <> io.sqCancelCnt
  storeQueue.io.sqDeq        <> io.sqDeq
  storeQueue.io.sqEmpty      <> io.sqEmpty
  storeQueue.io.sqFull       <> io.sqFull
  storeQueue.io.forward      <> io.forward // overlap forwardMask & forwardData, DO NOT CHANGE SEQUENCE
  storeQueue.io.force_write  <> io.force_write
  storeQueue.io.cmoOpReq     <> io.cmoOpReq
  storeQueue.io.cmoOpResp    <> io.cmoOpResp
  storeQueue.io.flushSbuffer <> io.flushSbuffer
  storeQueue.io.maControl    <> io.maControl

  /* <------- DANGEROUS: Don't change sequence here ! -------> */

  // load queue wiring
  loadQueue.io.redirect            <> io.brqRedirect
  loadQueue.io.vecFeedback         <> io.ldvecFeedback
  loadQueue.io.ldu                 <> io.ldu
  loadQueue.io.ldout               <> io.ldout
  loadQueue.io.ld_raw_data         <> io.ld_raw_data
  loadQueue.io.rob                 <> io.rob
  loadQueue.io.nuke_rollback       <> io.nuke_rollback
  loadQueue.io.nack_rollback       <> io.nack_rollback
  loadQueue.io.replay              <> io.replay
  // loadQueue.io.refill              <> io.refill
  loadQueue.io.tl_d_channel        <> io.tl_d_channel
  loadQueue.io.release             <> io.release
  loadQueue.io.exceptionAddr.isStore := DontCare
  loadQueue.io.flushFrmMaBuf       := io.flushFrmMaBuf
  loadQueue.io.lqCancelCnt         <> io.lqCancelCnt
  loadQueue.io.sq.stAddrReadySqPtr <> storeQueue.io.stAddrReadySqPtr
  loadQueue.io.sq.stAddrReadyVec   <> storeQueue.io.stAddrReadyVec
  loadQueue.io.sq.stDataReadySqPtr <> storeQueue.io.stDataReadySqPtr
  loadQueue.io.sq.stDataReadyVec   <> storeQueue.io.stDataReadyVec
  loadQueue.io.sq.stIssuePtr       <> storeQueue.io.stIssuePtr
  loadQueue.io.sq.sqEmpty          <> storeQueue.io.sqEmpty
  loadQueue.io.sta.storeAddrIn     <> io.sta.storeAddrIn // store_s1
  loadQueue.io.std.storeDataIn     <> io.std.storeDataIn // store_s0
  loadQueue.io.lqFull              <> io.lqFull
  loadQueue.io.lq_rep_full         <> io.lq_rep_full
  loadQueue.io.lqDeq               <> io.lqDeq
  loadQueue.io.l2_hint             <> io.l2_hint
  loadQueue.io.tlb_hint            <> io.tlb_hint
  loadQueue.io.lqEmpty             <> io.lqEmpty

  // ROB commits reach the LSQ two cycles late, which delays the deqPtr update in lq/sq:
  // s0: commit
  // s1:               exception found
  // s2:               exception triggered
  // s3: ptr updated & new address
  // the address is used in the cycle after the exception is triggered
  io.exceptionAddr.vaddr := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vaddr, loadQueue.io.exceptionAddr.vaddr)
  io.exceptionAddr.vstart := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vstart, loadQueue.io.exceptionAddr.vstart)
  io.exceptionAddr.vl     := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.vl, loadQueue.io.exceptionAddr.vl)
  io.exceptionAddr.gpaddr := Mux(RegNext(io.exceptionAddr.isStore), storeQueue.io.exceptionAddr.gpaddr, loadQueue.io.exceptionAddr.gpaddr)
  io.issuePtrExt := storeQueue.io.stAddrReadySqPtr

  // naive uncache arbiter
  val s_idle :: s_load :: s_store :: Nil = Enum(3)
  val pendingstate = RegInit(s_idle)
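  // pendingstate tracks which queue owns the in-flight uncached access:
  // s_load / s_store while a response is pending, s_idle otherwise. When
  // uncacheOutstanding is set, responses are routed straight to the load
  // queue (see below), so the FSM state is effectively ignored.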

  switch(pendingstate){
    is(s_idle){
      when(io.uncache.req.fire){
        pendingstate := Mux(loadQueue.io.uncache.req.valid, s_load,
                          Mux(io.uncacheOutstanding, s_idle, s_store))
      }
    }
    is(s_load){
      when(io.uncache.resp.fire){
        pendingstate := s_idle
      }
    }
    is(s_store){
      when(io.uncache.resp.fire){
        pendingstate := s_idle
      }
    }
  }

  loadQueue.io.uncache := DontCare
  storeQueue.io.uncache := DontCare
  loadQueue.io.uncache.req.ready := false.B
  storeQueue.io.uncache.req.ready := false.B
  loadQueue.io.uncache.resp.valid := false.B
  storeQueue.io.uncache.resp.valid := false.B
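  // Request channel: the load queue has priority; the assertions at the bottom
  // of the module guarantee that both queues never request at the same time.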
  when(loadQueue.io.uncache.req.valid){
    io.uncache.req <> loadQueue.io.uncache.req
  }.otherwise{
    io.uncache.req <> storeQueue.io.uncache.req
  }
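  // Response channel: with outstanding uncached accesses enabled, responses
  // bypass the FSM and always go to the load queue; otherwise pendingstate
  // selects the queue that issued the pending request.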
  when (io.uncacheOutstanding) {
    io.uncache.resp <> loadQueue.io.uncache.resp
  } .otherwise {
    when(pendingstate === s_load){
      io.uncache.resp <> loadQueue.io.uncache.resp
    }.otherwise{
      io.uncache.resp <> storeQueue.io.uncache.resp
    }
  }

  loadQueue.io.debugTopDown <> io.debugTopDown

  assert(!(loadQueue.io.uncache.req.valid && storeQueue.io.uncache.req.valid))
  assert(!(loadQueue.io.uncache.resp.valid && storeQueue.io.uncache.resp.valid))
  when (!io.uncacheOutstanding) {
    assert(!((loadQueue.io.uncache.resp.valid || storeQueue.io.uncache.resp.valid) && pendingstate === s_idle))
  }

  val perfEvents = Seq(loadQueue, storeQueue).flatMap(_.getPerfEvents)
  generatePerfEvent()
}

class LsqEnqCtrl(implicit p: Parameters) extends XSModule
  with HasVLSUParameters {
  val io = IO(new Bundle {
    val redirect = Flipped(ValidIO(new Redirect))
    // to dispatch
    val enq = new LsqEnqIO
    // from `memBlock.io.lqDeq`
    val lcommit = Input(UInt(log2Up(CommitWidth + 1).W))
    // from `memBlock.io.sqDeq`
    val scommit = Input(UInt(log2Ceil(EnsbufferWidth + 1).W))
    // from/to lsq
    val lqCancelCnt = Input(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val sqCancelCnt = Input(UInt(log2Up(StoreQueueSize + 1).W))
    val lqFreeCount = Output(UInt(log2Up(VirtualLoadQueueSize + 1).W))
    val sqFreeCount = Output(UInt(log2Up(StoreQueueSize + 1).W))
    val enqLsq = Flipped(new LsqEnqIO)
  })

  val lqPtr = RegInit(0.U.asTypeOf(new LqPtr))
  val sqPtr = RegInit(0.U.asTypeOf(new SqPtr))
  val lqCounter = RegInit(VirtualLoadQueueSize.U(log2Up(VirtualLoadQueueSize + 1).W))
  val sqCounter = RegInit(StoreQueueSize.U(log2Up(StoreQueueSize + 1).W))
  val canAccept = RegInit(false.B)

  val loadEnqVec = io.enq.req.zip(io.enq.needAlloc).map(x => x._1.valid && x._2(0))
  val storeEnqVec = io.enq.req.zip(io.enq.needAlloc).map(x => x._1.valid && x._2(1))
  val isLastUopVec = io.enq.req.map(_.bits.lastUop)
  val vLoadFlow = io.enq.req.map(_.bits.numLsElem)
  val vStoreFlow = io.enq.req.map(_.bits.numLsElem)
  val validVLoadFlow = vLoadFlow.zipWithIndex.map{case (vLoadFlowNumItem, index) => Mux(loadEnqVec(index), vLoadFlowNumItem, 0.U)}
  val validVStoreFlow = vStoreFlow.zipWithIndex.map{case (vStoreFlowNumItem, index) => Mux(storeEnqVec(index), vStoreFlowNumItem, 0.U)}
  val enqVLoadOffsetNumber = validVLoadFlow.reduce(_ + _)
  val enqVStoreOffsetNumber = validVStoreFlow.reduce(_ + _)
  val validVLoadOffset = 0.U +: vLoadFlow.zip(io.enq.needAlloc)
                                .map{case (flow, needAllocItem) => Mux(needAllocItem(0).asBool, flow, 0.U)}
                                .slice(0, validVLoadFlow.length - 1)
  val validVStoreOffset = 0.U +: vStoreFlow.zip(io.enq.needAlloc)
                                .map{case (flow, needAllocItem) => Mux(needAllocItem(1).asBool, flow, 0.U)}
                                .slice(0, validVStoreFlow.length - 1)
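  // validV{Load,Store}Offset is the per-slot flow count shifted right by one
  // (a 0 prepended, the last element dropped). Prefix-summing it below gives
  // slot i the number of queue entries claimed by slots 0..i-1, i.e. its
  // offset from the current lqPtr/sqPtr. For example, load flows (2, 1, 3)
  // with all slots allocating become (0, 2, 1), whose prefix sums (0, 2, 3)
  // yield resp indices lqPtr+0, lqPtr+2, lqPtr+3.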
  val lqAllocNumber = enqVLoadOffsetNumber
  val sqAllocNumber = enqVStoreOffsetNumber

  io.lqFreeCount  := lqCounter
  io.sqFreeCount  := sqCounter
  // How to update ptr and counter:
  // (1) by default, update according to enq/commit
  // (2) after a redirect, once no enqueue is pending, resync with the cancel counts from the LSQ
  val t1_redirect = RegNext(io.redirect.valid)
  val t2_redirect = RegNext(t1_redirect)
  val t2_update = t2_redirect && !VecInit(io.enq.needAlloc.map(_.orR)).asUInt.orR
  val t3_update = RegNext(t2_update)
  val t3_lqCancelCnt = GatedRegNext(io.lqCancelCnt)
  val t3_sqCancelCnt = GatedRegNext(io.sqCancelCnt)
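  // t1/t2/t3 name cycles relative to redirect.valid. The cancel counts are
  // sampled at t2 and applied at t3, aligned with t3_update; presumably t2 is
  // the cycle in which lqCancelCnt/sqCancelCnt from the LSQ are final.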
  when (t3_update) {
    lqPtr := lqPtr - t3_lqCancelCnt
    lqCounter := lqCounter + io.lcommit + t3_lqCancelCnt
    sqPtr := sqPtr - t3_sqCancelCnt
    sqCounter := sqCounter + io.scommit + t3_sqCancelCnt
  }.elsewhen (!io.redirect.valid && io.enq.canAccept) {
    lqPtr := lqPtr + lqAllocNumber
    lqCounter := lqCounter + io.lcommit - lqAllocNumber
    sqPtr := sqPtr + sqAllocNumber
    sqCounter := sqCounter + io.scommit - sqAllocNumber
  }.otherwise {
    lqCounter := lqCounter + io.lcommit
    sqCounter := sqCounter + io.scommit
  }
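  // Example: with 2 committed loads (lcommit = 2) and 3 newly allocated load
  // uops (lqAllocNumber = 3) in the same cycle, lqCounter decreases by 1.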

  // TODO: MaxAllocate and the widths of lqOffset/sqOffset need to be discussed
  val lqMaxAllocate = LSQLdEnqWidth
  val sqMaxAllocate = LSQStEnqWidth
  val maxAllocate = lqMaxAllocate max sqMaxAllocate
  val ldCanAccept = lqCounter >= lqAllocNumber +& lqMaxAllocate.U
  val sqCanAccept = sqCounter >= sqAllocNumber +& sqMaxAllocate.U
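  // +& is Chisel's width-expanding add, so the comparison cannot overflow.
  // Reserving an extra lq/sqMaxAllocate entries appears to compensate for
  // canAccept being registered (one cycle stale) below.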
  // It is possible that t3_update and enq are true in the same clock cycle.
  // For example, if redirect.valid lasts more than one cycle, new instructions
  // may enqueue after the last redirect before the previous redirect has been
  // resolved (i.e., before the cancel counts from the LSQ have been applied).
  // To solve this easily, we block enqueue while t3_update, which is RegNext(t2_update).
  io.enq.canAccept := RegNext(ldCanAccept && sqCanAccept && !t2_update)
  val lqOffset = Wire(Vec(io.enq.resp.length, UInt(lqPtr.value.getWidth.W)))
  val sqOffset = Wire(Vec(io.enq.resp.length, UInt(sqPtr.value.getWidth.W)))
  for ((resp, i) <- io.enq.resp.zipWithIndex) {
    lqOffset(i) := validVLoadOffset.take(i + 1).reduce(_ + _)
    resp.lqIdx := lqPtr + lqOffset(i)
    sqOffset(i) := validVStoreOffset.take(i + 1).reduce(_ + _)
    resp.sqIdx := sqPtr + sqOffset(i)
  }

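  // The handshake with the LSQ is delayed by one cycle (RegNext/RegEnable
  // below), so the LSQ sees a registered copy of each accepted request
  // together with the indices assigned above.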
  io.enqLsq.needAlloc := RegNext(io.enq.needAlloc)
  io.enqLsq.req.zip(io.enq.req).zip(io.enq.resp).foreach{ case ((toLsq, enq), resp) =>
    val do_enq = enq.valid && !io.redirect.valid && io.enq.canAccept
    toLsq.valid := RegNext(do_enq)
    toLsq.bits := RegEnable(enq.bits, do_enq)
    toLsq.bits.lqIdx := RegEnable(resp.lqIdx, do_enq)
    toLsq.bits.sqIdx := RegEnable(resp.sqIdx, do_enq)
  }
}