xref: /XiangShan/src/main/scala/xiangshan/mem/lsqueue/LSQWrapper.scala (revision 49272fa467f97c3293eb9ed685e99ecf79691182)
1package xiangshan.mem
2
3import chisel3._
4import chisel3.util._
5import utils._
6import xiangshan._
7import xiangshan.cache._
8import xiangshan.cache.{DCacheWordIO, DCacheLineIO, TlbRequestIO, MemoryOpConstants}
9import xiangshan.backend.LSUOpType
10import xiangshan.mem._
11import xiangshan.backend.roq.RoqPtr
12
// Query interface used to look up the virtual address of an excepting
// load/store instruction by its queue index (for exception reporting).
class ExceptionAddrIO extends XSBundle {
  val lsIdx = Input(new LSIdx)          // LQ/SQ index of the excepting instruction
  val isStore = Input(Bool())           // true: look up store queue; false: load queue
  val vaddr = Output(UInt(VAddrBits.W)) // virtual address recorded for that entry
}
18
19
// One data entry of the load/store queue storage (see LSQueueData).
class LsqEntry extends XSBundle {
  val vaddr = UInt(VAddrBits.W) // TODO: need opt
  val paddr = UInt(PAddrBits.W)
  val mask = UInt(8.W)          // byte-enable mask for the 8-byte data word
  val data = UInt(XLEN.W)
  val exception = UInt(16.W) // TODO: opt size
  val mmio = Bool()             // entry targets an uncached (MMIO) address
  val fwdMask = Vec(8, Bool())  // which bytes were obtained via store-to-load forwarding
  val fwdData = Vec(8, UInt(8.W)) // the forwarded byte values (valid where fwdMask is set)
}
30
// Per-byte forwarding result: mask(i) says byte i is forwarded, data(i) holds it.
class FwdEntry extends XSBundle {
  val mask = Vec(8, Bool())
  val data = Vec(8, UInt(8.W))
}
35
36
/**
 * Backing data array shared by the load/store queue implementations.
 *
 * Holds `size` LsqEntry records and provides:
 *  - `nchannel` pipeline writeback write ports (io.wb)
 *  - a data-only write port for completed uncached accesses (io.uncache)
 *  - per-entry dcache refill writes that merge in previously forwarded bytes
 *  - `nchannel` store-to-load forwarding query ports (io.forward)
 *  - a combinational read of all entries (io.rdata)
 */
class LSQueueData(size: Int, nchannel: Int) extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper {
  val io = IO(new Bundle() {
    // per-channel writeback write port
    val wb = Vec(nchannel, new Bundle() {
      val wen = Input(Bool())
      val index = Input(UInt(log2Up(size).W))
      val wdata = Input(new LsqEntry)
    })
    // data-only write port, used when an uncached access returns
    val uncache = new Bundle() {
      val wen = Input(Bool())
      val index = Input(UInt(log2Up(size).W))
      val wdata = Input(UInt(XLEN.W))
    }
    // per-entry refill strobes plus the incoming dcache line
    val refill = new Bundle() {
      val wen = Input(Vec(size, Bool()))
      val dcache = Input(new DCacheLineResp)
    }
    // two entry-select masks per channel; split in two to handle circular-queue
    // wrap-around (see the forwarding comments in the module body)
    val needForward = Input(Vec(nchannel, Vec(2, UInt(size.W))))
    val forward = Vec(nchannel, Flipped(new LoadForwardQueryIO))
    val rdata = Output(Vec(size, new LsqEntry))

    // val debug = new Bundle() {
    //   val debug_data = Vec(LoadQueueSize, new LsqEntry)
    // }

    // Convenience helper: drive writeback port `channel` (caller must also
    // assert wen, see note below).
    def wbWrite(channel: Int, index: UInt, wdata: LsqEntry): Unit = {
      require(channel < nchannel && channel >= 0)
      // need extra "this.wb(channel).wen := true.B"
      this.wb(channel).index := index
      this.wb(channel).wdata := wdata
    }

    // Convenience helper: drive the uncache write port (caller asserts wen).
    def uncacheWrite(index: UInt, wdata: UInt): Unit = {
      // need extra "this.uncache.wen := true.B"
      this.uncache.index := index
      this.uncache.wdata := wdata
    }

    // Convenience helper: issue a forwarding query on `channel`.
    def forwardQuery(channel: Int, paddr: UInt, needForward1: Data, needForward2: Data): Unit = {
      this.needForward(channel)(0) := needForward1
      this.needForward(channel)(1) := needForward2
      this.forward(channel).paddr := paddr
    }

    // def refillWrite(ldIdx: Int): Unit = {
    // }
    // use "this.refill.wen(ldIdx) := true.B" instead
  })

  io := DontCare

  val data = Reg(Vec(size, new LsqEntry))

  // writeback to lq/sq
  // BUGFIX: the loop bound was hard-coded to 2, which silently ignores extra
  // write channels whenever nchannel > 2; iterate over all channels instead
  // (identical hardware for the current nchannel == 2 instantiations).
  (0 until nchannel).map(i => {
    when(io.wb(i).wen){
      data(io.wb(i).index) := io.wb(i).wdata
    }
  })

  when(io.uncache.wen){
    data(io.uncache.index).data := io.uncache.wdata
  }

  // refill missed load
  // Byte-wise merge: keep bytes already obtained by store-to-load forwarding
  // (fwdMask set) and take the remaining bytes from the refilled word.
  // NOTE: assumes an 8-byte data word (XLEN == 64).
  def mergeRefillData(refill: UInt, fwd: UInt, fwdMask: UInt): UInt = {
    val res = Wire(Vec(8, UInt(8.W)))
    (0 until 8).foreach(i => {
      res(i) := Mux(fwdMask(i), fwd(8 * (i + 1) - 1, 8 * i), refill(8 * (i + 1) - 1, 8 * i))
    })
    res.asUInt
  }

  // split dcache result into words
  val words = VecInit((0 until blockWords) map { i =>
    io.refill.dcache.data(DataBits * (i + 1) - 1, DataBits * i)
  })

  // For every entry whose refill strobe is set, pick the word of the refilled
  // line addressed by the entry's paddr and merge it with forwarded bytes.
  (0 until size).map(i => {
    when(io.refill.wen(i) ){
      val refillData = words(get_word(data(i).paddr))
      data(i).data := mergeRefillData(refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
      XSDebug("miss resp: pos %d addr %x data %x + %x(%b)\n", i.U, data(i).paddr, refillData, data(i).fwdData.asUInt, data(i).fwdMask.asUInt)
    }
  })

  // forwarding
  // Compare ringBufferTail (deqPtr) and forward.sqIdx, we have two cases:
  // (1) if they have the same flag, we need to check range(tail, sqIdx)
  // (2) if they have different flags, we need to check range(tail, LoadQueueSize) and range(0, sqIdx)
  // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, LoadQueueSize))
  // Forward2: Mux(same_flag, 0.U,                   range(0, sqIdx)    )
  // i.e. forward1 is the target entries with the same flag bits and forward2 otherwise

  // entry with larger index should have higher priority since it's data is younger

  // FIXME: old fwd logic for assertion, remove when rtl freeze
  (0 until nchannel).map(i => {

    val forwardMask1 = WireInit(VecInit(Seq.fill(8)(false.B)))
    val forwardData1 = WireInit(VecInit(Seq.fill(8)(0.U(8.W))))
    val forwardMask2 = WireInit(VecInit(Seq.fill(8)(false.B)))
    val forwardData2 = WireInit(VecInit(Seq.fill(8)(0.U(8.W))))

    // Reference (sequential-priority) forwarding network, kept only to
    // cross-check the parallel implementation below.
    for (j <- 0 until size) {
      // candidate matches when it covers the same 8-byte-aligned address
      val needCheck = io.forward(i).paddr(PAddrBits - 1, 3) === data(j).paddr(PAddrBits - 1, 3)
      (0 until XLEN / 8).foreach(k => {
        when (needCheck && data(j).mask(k)) {
          when (io.needForward(i)(0)(j)) {
            forwardMask1(k) := true.B
            forwardData1(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
          }
          when (io.needForward(i)(1)(j)) {
            forwardMask2(k) := true.B
            forwardData2(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
          }
          XSDebug(io.needForward(i)(0)(j) || io.needForward(i)(1)(j),
            p"forwarding $k-th byte ${Hexadecimal(data(j).data(8 * (k + 1) - 1, 8 * k))} " +
            p"from ptr $j\n")
        }
      })
    }

    // merge forward lookup results
    // forward2 is younger than forward1 and should have higher priority
    val oldFwdResult = Wire(new FwdEntry)
    (0 until XLEN / 8).map(k => {
      oldFwdResult.mask(k) := RegNext(forwardMask1(k) || forwardMask2(k))
      oldFwdResult.data(k) := RegNext(Mux(forwardMask2(k), forwardData2(k), forwardData1(k)))
    })

    // parallel fwd logic
    val paddrMatch = Wire(Vec(size, Bool()))
    val matchResultVec = Wire(Vec(size * 2, new FwdEntry))

    // Reduction operator: merge two FwdEntry candidates, the right (younger)
    // operand wins per byte. ParallelOperation builds a balanced tree, so the
    // element order of `xs` encodes age priority.
    def parallelFwd(xs: Seq[Data]): Data = {
      ParallelOperation(xs, (a: Data, b: Data) => {
        val l = a.asTypeOf(new FwdEntry)
        val r = b.asTypeOf(new FwdEntry)
        val res = Wire(new FwdEntry)
        (0 until 8).map(p => {
          res.mask(p) := l.mask(p) || r.mask(p)
          res.data(p) := Mux(r.mask(p), r.data(p), l.data(p))
        })
        res
      })
    }

    for (j <- 0 until size) {
      paddrMatch(j) := io.forward(i).paddr(PAddrBits - 1, 3) === data(j).paddr(PAddrBits - 1, 3)
    }

    // Entries 0..size-1 carry forward1 (older wrap segment) candidates,
    // entries size..2*size-1 carry forward2 (younger segment), so the
    // reduction's right-wins rule gives forward2 priority over forward1.
    for (j <- 0 until size) {
      val needCheck0 = RegNext(paddrMatch(j) && io.needForward(i)(0)(j))
      val needCheck1 = RegNext(paddrMatch(j) && io.needForward(i)(1)(j))
      (0 until XLEN / 8).foreach(k => {
        matchResultVec(j).mask(k) := needCheck0 && data(j).mask(k)
        matchResultVec(j).data(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
        matchResultVec(size + j).mask(k) := needCheck1 && data(j).mask(k)
        matchResultVec(size + j).data(k) := data(j).data(8 * (k + 1) - 1, 8 * k)
      })
    }

    val parallelFwdResult = parallelFwd(matchResultVec).asTypeOf(new FwdEntry)

    io.forward(i).forwardMask := parallelFwdResult.mask
    io.forward(i).forwardData := parallelFwdResult.data

    // Runtime cross-check of the parallel network against the reference one.
    when(
      oldFwdResult.mask.asUInt =/= parallelFwdResult.mask.asUInt
    ){
      printf("%d: mask error: right: %b false %b\n", GTimer(), oldFwdResult.mask.asUInt, parallelFwdResult.mask.asUInt)
    }

    for (p <- 0 until 8) {
      when(
        oldFwdResult.data(p) =/= parallelFwdResult.data(p) && oldFwdResult.mask(p)
      ){
        printf("%d: data "+p+" error: right: %x false %x\n", GTimer(), oldFwdResult.data(p), parallelFwdResult.data(p))
      }
    }

  })

  // data read
  io.rdata := data
  // io.debug.debug_data := data
}
225
// inflight miss block reqs
// Tracks one outstanding cache-miss request by its block address.
class InflightBlockInfo extends XSBundle {
  val block_addr = UInt(PAddrBits.W)
  val valid = Bool()
}
231
// Dispatch-to-LSQ enqueue interface (RenameWidth instructions per cycle).
class LsqEnqIO extends XSBundle {
  val canAccept = Output(Bool())                 // LSQ has room for the whole group
  val needAlloc = Vec(RenameWidth, Input(Bool())) // which slots request an entry
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LSIdx)) // allocated LQ/SQ indices
}
238
// Load / Store Queue Wrapper for XiangShan Out of Order LSU
// Glues LoadQueue and StoreQueue together behind one interface: dispatch
// enqueue, pipeline writeback, store-to-load forwarding, commit/rollback,
// dcache refill and a shared (arbitrated) uncache port.
// NOTE(review): class name has a typo ("Wrappper"); kept as-is since renaming
// would break external references.
class LsqWrappper extends XSModule with HasDCacheParameters {
  val io = IO(new Bundle() {
    val enq = new LsqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val sbuffer = Vec(StorePipelineWidth, Decoupled(new DCacheWordReq))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback load
    val mmioStout = DecoupledIO(new ExuOutput) // writeback uncached store
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // memory-order-violation redirect
    val dcache = new DCacheLineIO
    val uncache = new DCacheWordIO // single port shared by LQ and SQ (see arbiter below)
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val loadQueue = Module(new LoadQueue)
  val storeQueue = Module(new StoreQueue)

  // io.enq logic
  // LSQ: send out canAccept when both load queue and store queue are ready
  // Dispatch: send instructions to LSQ only when they are ready
  io.enq.canAccept := loadQueue.io.enq.canAccept && storeQueue.io.enq.canAccept
  loadQueue.io.enq.sqCanAccept := storeQueue.io.enq.canAccept
  storeQueue.io.enq.lqCanAccept := loadQueue.io.enq.canAccept
  for (i <- 0 until RenameWidth) {
    // steer each dispatched uop to LQ or SQ according to its commit type
    val isStore = CommitType.lsInstIsStore(io.enq.req(i).bits.ctrl.commitType)

    loadQueue.io.enq.needAlloc(i) := io.enq.needAlloc(i) && !isStore
    loadQueue.io.enq.req(i).valid  := !isStore && io.enq.req(i).valid
    loadQueue.io.enq.req(i).bits  := io.enq.req(i).bits

    storeQueue.io.enq.needAlloc(i) := io.enq.needAlloc(i) && isStore
    storeQueue.io.enq.req(i).valid :=  isStore && io.enq.req(i).valid
    storeQueue.io.enq.req(i).bits := io.enq.req(i).bits

    // respond with both indices; dispatch keeps whichever is relevant
    io.enq.resp(i).lqIdx := loadQueue.io.enq.resp(i)
    io.enq.resp(i).sqIdx := storeQueue.io.enq.resp(i)
  }

  // load queue wiring
  loadQueue.io.brqRedirect <> io.brqRedirect
  loadQueue.io.loadIn <> io.loadIn
  loadQueue.io.storeIn <> io.storeIn
  loadQueue.io.ldout <> io.ldout
  loadQueue.io.commits <> io.commits
  loadQueue.io.rollback <> io.rollback
  loadQueue.io.dcache <> io.dcache
  loadQueue.io.roqDeqPtr <> io.roqDeqPtr
  loadQueue.io.exceptionAddr.lsIdx := io.exceptionAddr.lsIdx
  loadQueue.io.exceptionAddr.isStore := DontCare

  // store queue wiring
  // storeQueue.io <> DontCare
  storeQueue.io.brqRedirect <> io.brqRedirect
  storeQueue.io.storeIn <> io.storeIn
  storeQueue.io.sbuffer <> io.sbuffer
  storeQueue.io.mmioStout <> io.mmioStout
  storeQueue.io.commits <> io.commits
  storeQueue.io.roqDeqPtr <> io.roqDeqPtr
  storeQueue.io.exceptionAddr.lsIdx := io.exceptionAddr.lsIdx
  storeQueue.io.exceptionAddr.isStore := DontCare

  loadQueue.io.load_s1 <> io.forward
  storeQueue.io.forward <> io.forward // overlap forwardMask & forwardData, DO NOT CHANGE SEQUENCE

  // mux the excepting address from whichever queue the caller asked about
  io.exceptionAddr.vaddr := Mux(io.exceptionAddr.isStore, storeQueue.io.exceptionAddr.vaddr, loadQueue.io.exceptionAddr.vaddr)

  // naive uncache arbiter
  // s_idle: no outstanding uncached access (LQ request wins over SQ);
  // s_load / s_store: one request in flight, records which queue gets the resp
  val s_idle :: s_load :: s_store :: Nil = Enum(3)
  val uncacheState = RegInit(s_idle)

  switch(uncacheState){
    is(s_idle){
      when(io.uncache.req.fire()){
        uncacheState := Mux(loadQueue.io.uncache.req.valid, s_load, s_store)
      }
    }
    is(s_load){
      when(io.uncache.resp.fire()){
        uncacheState := s_idle
      }
    }
    is(s_store){
      when(io.uncache.resp.fire()){
        uncacheState := s_idle
      }
    }
  }

  // defaults; overridden below by the winning queue (Chisel last-connect)
  loadQueue.io.uncache := DontCare
  storeQueue.io.uncache := DontCare
  loadQueue.io.uncache.resp.valid := false.B
  storeQueue.io.uncache.resp.valid := false.B
  when(loadQueue.io.uncache.req.valid){
    io.uncache.req <> loadQueue.io.uncache.req
  }.otherwise{
    io.uncache.req <> storeQueue.io.uncache.req
  }
  when(uncacheState === s_load){
    io.uncache.resp <> loadQueue.io.uncache.resp
  }.otherwise{
    io.uncache.resp <> storeQueue.io.uncache.resp
  }

  // LQ and SQ must never compete for the single uncache port in one cycle,
  // and no response may arrive while the arbiter believes it is idle
  assert(!(loadQueue.io.uncache.req.valid && storeQueue.io.uncache.req.valid))
  assert(!(loadQueue.io.uncache.resp.valid && storeQueue.io.uncache.resp.valid))
  assert(!((loadQueue.io.uncache.resp.valid || storeQueue.io.uncache.resp.valid) && uncacheState === s_idle))

}
352