xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/mainpipe/MainPipe.scala (revision a273862e37f1d43bee748f2a6353320a2f52f6f4)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.cache
18
19import chipsalliance.rocketchip.config.Parameters
20import chisel3._
21import chisel3.internal.firrtl.Port
22import chisel3.util._
23import freechips.rocketchip.tilelink.ClientStates._
24import freechips.rocketchip.tilelink.MemoryOpCategories._
25import freechips.rocketchip.tilelink.TLPermissions._
26import freechips.rocketchip.tilelink.{ClientMetadata, ClientStates, TLPermissions}
27import utils._
28
/** Request bundle consumed by the DCache main pipeline.
 *
 *  One bundle type serves three request flavours, discriminated by the
 *  `miss` / `probe` flags and the `source` field:
 *    - refill  (miss = true): only AMO misses are refilled via the main pipe
 *    - probe   (probe = true): inbound TileLink Probe
 *    - normal  (neither flag): store or AMO selected by `source`
 *
 *  NOTE: this is a Chisel Bundle — field declaration order fixes the
 *  hardware bit layout, so fields must not be reordered.
 */
29class MainPipeReq(implicit p: Parameters) extends DCacheBundle {
30  val miss = Bool() // only amo miss will refill in main pipe
31  val miss_id = UInt(log2Up(cfg.nMissEntries).W)
  // TileLink permission-transfer param of the refill (Grant param width)
32  val miss_param = UInt(TLPermissions.bdWidth.W)
  // whether the refilled data arrived dirty
33  val miss_dirty = Bool()
34
  // probe request fields (valid when probe is set)
35  val probe = Bool()
  // TileLink cap/shrink param carried by the Probe
36  val probe_param = UInt(TLPermissions.bdWidth.W)
  // probe must return data (ProbeAckData) even if permissions allow silent ack
37  val probe_need_data = Bool()
38
39  // request info
40  // reqs from Store, AMO use this
41  // probe does not use this
42  val source = UInt(sourceTypeWidth.W)
  // memory op encoding (M_* constants, M_SZ bits wide)
43  val cmd = UInt(M_SZ.W)
44  // if dcache size > 32KB, vaddr is also needed for store
45  // vaddr is used to get extra index bits
46  val vaddr  = UInt(VAddrBits.W)
47  // must be aligned to block
48  val addr   = UInt(PAddrBits.W)
49
50  // store payload: a full cache block of data plus a per-byte mask
51  val store_data = UInt((cfg.blockBytes * 8).W)
52  val store_mask = UInt(cfg.blockBytes.W)
53
54  // which word does amo work on?
55  val word_idx = UInt(log2Up(cfg.blockBytes * 8 / DataBits).W)
  // AMO operand and its byte mask (one data word wide)
56  val amo_data   = UInt(DataBits.W)
57  val amo_mask   = UInt((DataBits / 8).W)
58
  // request id echoed back in responses so the issuer can match them up
59  val id = UInt(reqIdWidth.W)
60
  // source-type discriminators (meaningful only for non-probe requests)
61  def isLoad: Bool = source === LOAD_SOURCE.U
62  def isStore: Bool = source === STORE_SOURCE.U
63  def isAMO: Bool = source === AMO_SOURCE.U
64
  /** Convert a store-buffer line request into a MainPipeReq.
   *
   *  Clears the miss/probe flags, tags the request as STORE_SOURCE, and
   *  copies cmd/addr/vaddr/data/mask/id from the store request. All other
   *  fields (miss_id, miss_param, probe_param, word_idx, amo_*) are left
   *  as DontCare — they are unused on the store path.
   */
65  def convertStoreReq(store: DCacheLineReq): MainPipeReq = {
66    val req = Wire(new MainPipeReq)
67    req := DontCare
68    req.miss := false.B
69    req.miss_dirty := false.B
70    req.probe := false.B
71    req.probe_need_data := false.B
72    req.source := STORE_SOURCE.U
73    req.cmd := store.cmd
74    req.addr := store.addr
75    req.vaddr := store.vaddr
76    req.store_data := store.data
77    req.store_mask := store.mask
78    req.id := store.id
79    req
80  }
81}
82
/** DCache main pipeline: a 4-stage (s0-s3) pipe that serves stores, AMOs,
 *  probes and AMO-miss refills.
 *
 *  Stage overview (from the code below):
 *    s0 — arbitrate atomic/store/probe requests; issue meta + tag reads
 *    s1 — receive meta/tag, do tag match, pick hit way or replacement way;
 *         issue banked data read if needed
 *    s2 — receive data; store hit/AMO hit continue to s3, store/AMO miss
 *         is redirected to the miss queue (io.miss)
 *    s3 — perform the coherence-state update, AMO ALU op, LR/SC tracking,
 *         and the actual data/meta/tag writes plus writeback (io.wb)
 *
 *  Same-set requests in flight block s0 (set_conflict) so the read/modify/
 *  write of one set is never interleaved.
 *
 *  NOTE(review): Chisel last-connect semantics are load-bearing here — e.g.
 *  the `io.invalid_resv_set` when-block intentionally overrides the earlier
 *  lrsc_count updates. Do not reorder connections casually.
 */
83class MainPipe(implicit p: Parameters) extends DCacheModule {
  // width of the raw (unencoded) meta, and of the ECC-encoded meta with the
  // tag portion stripped off (meta and tag share one ECC codeword)
84  val metaBits = (new Meta).getWidth
85  val encMetaBits = cacheParams.tagCode.width((new MetaAndTag).getWidth) - tagBits
86
87  val io = IO(new Bundle() {
88    // probe queue
89    val probe_req = Flipped(DecoupledIO(new MainPipeReq))
90    // store miss go to miss queue
91    val miss = DecoupledIO(new MissReq)
92    // store buffer
93    val store_req = Flipped(DecoupledIO(new DCacheLineReq))
94    val store_replay_resp = ValidIO(new DCacheLineResp)
95    val store_hit_resp = ValidIO(new DCacheLineResp)
    // notifies release/writeback machinery of data updated while in flight
96    val release_update = ValidIO(new ReleaseUpdate)
97    // atomics
98    val atomic_req = Flipped(DecoupledIO(new MainPipeReq))
99    val atomic_resp = ValidIO(new AtomicsResp)
100    // write-back queue
101    val wb = DecoupledIO(new WritebackReq)
102
    // banked data array read/write ports
103    val data_read = DecoupledIO(new L1BankedDataReadLineReq)
104    val data_resp = Input(Vec(DCacheBanks, new L1BankedDataReadResult()))
105    val data_write = DecoupledIO(new L1BankedDataWriteReq)
106
    // meta array ports (per-way ECC-encoded meta on the response)
107    val meta_read = DecoupledIO(new MetaReadReq)
108    val meta_resp = Input(Vec(nWays, UInt(encMetaBits.W)))
109    val meta_write = DecoupledIO(new MetaWriteReq)
110
    // tag array ports
111    val tag_read = DecoupledIO(new TagReadReq)
112    val tag_resp = Input(Vec(nWays, UInt(tagBits.W)))
113    val tag_write = DecoupledIO(new TagWriteReq)
114
115    // update state vec in replacement algo
116    val replace_access = ValidIO(new ReplacementAccessBundle)
117    // find the way to be replaced
118    val replace_way = new ReplacementWayReqIO
119
    // per-stage set/way occupancy, exported so other pipes can avoid conflicts
120    val status = new Bundle() {
121      val s0_set = ValidIO(UInt(idxBits.W))
122      val s1, s2, s3 = ValidIO(new Bundle() {
123        val set = UInt(idxBits.W)
124        val way_en = UInt(nWays.W)
125      })
126    }
127
128    // lrsc locked block should block probe
129    val lrsc_locked_block = Output(Valid(UInt(PAddrBits.W)))
    // external request to kill the current LR reservation (e.g. on release)
130    val invalid_resv_set = Input(Bool())
131  })
132
133  // meta array is made of regs, so meta write or read should always be ready
134  assert(RegNext(io.meta_read.ready))
135  assert(RegNext(io.meta_write.ready))
136
  // NOTE(review): "conlict" is a long-standing identifier typo for "conflict";
  // kept as-is here since this is a documentation-only pass.
137  val s1_s0_set_conflict, s2_s0_set_conlict, s3_s0_set_conflict = Wire(Bool())
  // s0 must stall while any in-flight stage works on the same set
138  val set_conflict = s1_s0_set_conflict || s2_s0_set_conlict || s3_s0_set_conflict
139  val s1_ready, s2_ready, s3_ready = Wire(Bool())
140
141  // convert store req to main pipe req, and select a req from store and probe
142  val store_req = Wire(DecoupledIO(new MainPipeReq))
143  store_req.bits := (new MainPipeReq).convertStoreReq(io.store_req.bits)
144  store_req.valid := io.store_req.valid
145  io.store_req.ready := store_req.ready
  // fixed-priority arbiter: atomics > stores > probes
146  val req_arb = Module(new Arbiter(new MainPipeReq, 3))
147  req_arb.io.in(0) <> io.atomic_req
148  req_arb.io.in(1) <> store_req
149  req_arb.io.in(2) <> io.probe_req
150
151  // s0: read meta and tag
152  val req = Wire(DecoupledIO(new MainPipeReq))
153  req <> req_arb.io.out
154  val s0_req = req.bits
  // index comes from vaddr (extra index bits beyond the page offset)
155  val s0_idx = get_idx(s0_req.vaddr)
156  val s0_can_go = io.meta_read.ready && io.tag_read.ready && s1_ready && !set_conflict
157  val s0_fire = req.valid && s0_can_go
158
  // per-bank store coverage: which banks are written at all / fully written
159  val bank_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).orR)).asUInt
160  val bank_full_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).andR)).asUInt
161  val banks_full_overwrite = bank_full_write.andR
162
  // a store only needs to READ banks it writes partially (read-modify-write)
163  val banked_store_rmask = bank_write & ~bank_full_write
164  val banked_full_rmask = ~0.U(DCacheBanks.W)
165  val banked_none_rmask = 0.U(DCacheBanks.W)
166
167  val store_need_data = !s0_req.probe && s0_req.isStore && banked_store_rmask.orR
168  val probe_need_data = s0_req.probe
169  val amo_need_data = !s0_req.probe && s0_req.isAMO
170  val miss_need_data = s0_req.miss
171
172  val banked_need_data = store_need_data || probe_need_data || amo_need_data || miss_need_data
173
  // read mask for s1's data read: partial banks for stores, whole line for
  // probe/AMO/miss, nothing otherwise
174  val s0_banked_rmask = Mux(store_need_data, banked_store_rmask,
175    Mux(probe_need_data || amo_need_data || miss_need_data,
176      banked_full_rmask,
177      banked_none_rmask
178    ))
179
180  // generate wmask here and use it in stage 2
181  val banked_store_wmask = bank_write
182  val banked_full_wmask = ~0.U(DCacheBanks.W)
183  val banked_none_wmask = 0.U(DCacheBanks.W)
184
185  // s1: read data
186  val s1_valid = RegInit(false.B)
187  val s1_need_data = RegEnable(banked_need_data, s0_fire)
188  val s1_req = RegEnable(s0_req, s0_fire)
189  val s1_banked_rmask = RegEnable(s0_banked_rmask, s0_fire)
190  val s1_banked_store_wmask = RegEnable(banked_store_wmask, s0_fire)
  // s1 advances when s2 can accept and the data read port is available
  // (or no data is needed)
191  val s1_can_go = s2_ready && (io.data_read.ready || !s1_need_data)
192  val s1_fire = s1_valid && s1_can_go
193  val s1_idx = get_idx(s1_req.vaddr)
194  when (s0_fire) {
195    s1_valid := true.B
196  }.elsewhen (s1_fire) {
197    s1_valid := false.B
198  }
199  s1_ready := !s1_valid || s1_can_go
200  s1_s0_set_conflict := s1_valid && s0_idx === s1_idx
201
  // strip the ECC check bits, keeping only the raw meta payload
202  def getMeta(encMeta: UInt): UInt = {
203    require(encMeta.getWidth == encMetaBits)
204    encMeta(metaBits - 1, 0)
205  }
206
  // hold registers: capture tag/meta responses the cycle after s0 fires and
  // keep them stable while s1 stalls (Mux selects fresh resp vs. held value)
207  val tag_resp = Wire(Vec(nWays, UInt(tagBits.W)))
208  val ecc_meta_resp = Wire(Vec(nWays, UInt(encMetaBits.W)))
209  tag_resp := Mux(RegNext(s0_fire), io.tag_resp, RegNext(tag_resp))
210  ecc_meta_resp := Mux(RegNext(s0_fire), io.meta_resp, RegNext(ecc_meta_resp))
211  val meta_resp = ecc_meta_resp.map(getMeta(_))
212
212  // tag match: a way hits only if tags are equal AND its coherence state is valid
213  def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
214  val s1_tag_eq_way = wayMap((w: Int) => tag_resp(w) === get_tag(s1_req.addr)).asUInt
215  val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && Meta(meta_resp(w)).coh.isValid()).asUInt
216  val s1_tag_match = s1_tag_match_way.orR
217
  // on a miss these fall back to the request's own tag / Nothing-state coh
218  val s1_hit_tag = Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => tag_resp(w))), get_tag(s1_req.addr))
219  val s1_hit_coh = ClientMetadata(Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => meta_resp(w))), 0.U))
220
221  // replacement policy
  // same hold-register idiom as tag_resp: latch the victim way chosen the
  // cycle after s0 fired, hold it across s1 stalls
222  val s1_repl_way_en = WireInit(0.U(nWays.W))
223  s1_repl_way_en := Mux(RegNext(s0_fire), UIntToOH(io.replace_way.way), RegNext(s1_repl_way_en))
224  val s1_repl_tag = Mux1H(s1_repl_way_en, wayMap(w => tag_resp(w)))
225  val s1_repl_coh = Mux1H(s1_repl_way_en, wayMap(w => meta_resp(w))).asTypeOf(new ClientMetadata)
226
  // only refills and (non-probe) stores allocate a way on a tag mismatch
227  val s1_need_replacement = (s1_req.miss || s1_req.isStore && !s1_req.probe) && !s1_tag_match
228  val s1_way_en = Mux(s1_need_replacement, s1_repl_way_en, s1_tag_match_way)
229  val s1_tag = Mux(s1_need_replacement, s1_repl_tag, s1_hit_tag)
230  val s1_coh = Mux(s1_need_replacement, s1_repl_coh, s1_hit_coh)
231
232  // s2: select data, return resp if this is a store miss
233  val s2_valid = RegInit(false.B)
234  val s2_req = RegEnable(s1_req, s1_fire)
235  val s2_tag_match = RegEnable(s1_tag_match, s1_fire)
236  val s2_hit_coh = RegEnable(s1_hit_coh, s1_fire)
  // does the current coherence state already permit this cmd, and what would
  // the state become on a hit
237  val (s2_has_permission, _, s2_new_hit_coh) = s2_hit_coh.onAccess(s2_req.cmd)
238  val s2_repl_way_en = RegEnable(s1_repl_way_en, s1_fire)
239  val s2_repl_tag = RegEnable(s1_repl_tag, s1_fire)
240  val s2_repl_coh = RegEnable(s1_repl_coh, s1_fire)
241  val s2_need_replacement = RegEnable(s1_need_replacement, s1_fire)
242  val s2_idx = get_idx(s2_req.vaddr)
243  val s2_way_en = RegEnable(s1_way_en, s1_fire)
244  val s2_tag = RegEnable(s1_tag, s1_fire)
245  val s2_coh = RegEnable(s1_coh, s1_fire)
246  val s2_banked_store_wmask = RegEnable(s1_banked_store_wmask, s1_fire)
247
  // a hit requires both a tag match and sufficient permissions
248  val s2_hit = s2_tag_match && s2_has_permission
249  val s2_amo_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isAMO
250  val s2_store_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isStore
251
252  s2_s0_set_conlict := s2_valid && s0_idx === s2_idx
253
254  // For a store req, it either hits and goes to s3, or miss and enter miss queue immediately
255  val s2_can_go_to_s3 = (s2_req.probe || s2_req.miss || (s2_req.isStore || s2_req.isAMO) && s2_hit) && s3_ready
256  val s2_can_go_to_mq = !s2_req.probe && !s2_req.miss && (s2_req.isStore || s2_req.isAMO) && !s2_hit
  // the two exits must be mutually exclusive
257  assert(RegNext(!(s2_valid && s2_can_go_to_s3 && s2_can_go_to_mq)))
258  val s2_can_go = s2_can_go_to_s3 || s2_can_go_to_mq
259  val s2_fire = s2_valid && s2_can_go
260  val s2_fire_to_s3 = s2_valid && s2_can_go_to_s3
261  when (s1_fire) {
262    s2_valid := true.B
263  }.elsewhen (s2_fire) {
264    s2_valid := false.B
265  }
266  s2_ready := !s2_valid || s2_can_go
  // miss queue full -> the missing store/AMO must be replayed by the issuer
267  val replay = !io.miss.ready
268
  // hold register for the banked data response (same idiom as tag/meta above)
269  val data_resp = Wire(io.data_resp.cloneType)
270  data_resp := Mux(RegNext(s1_fire), io.data_resp, RegNext(data_resp))
271  val s2_store_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
272
  // byte-granular merge: keep old_data where wmask is 0, take new_data where 1
273  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
274    val full_wmask = FillInterleaved(8, wmask)
275    ((~full_wmask & old_data) | (full_wmask & new_data))
276  }
277
  // decode ECC but currently only the raw data is used; error reporting is TODO
278  val s2_data = WireInit(VecInit((0 until DCacheBanks).map(i => {
279    val decoded = cacheParams.dataCode.decode(data_resp(i).asECCData())
280    // assert(!RegNext(s2_valid && s2_hit && decoded.uncorrectable))
281    // TODO: trigger ecc error
282    data_resp(i).raw_data
283  })))
284
  // merge store data into the read-out line, bank by bank
285  for (i <- 0 until DCacheBanks) {
286    val old_data = s2_data(i)
287    val new_data = get_data_of_bank(i, s2_req.store_data)
288    // for amo hit, we should use read out SRAM data
289    // do not merge with store data
290    val wmask = Mux(s2_amo_hit, 0.U(wordBytes.W), get_mask_of_bank(i, s2_req.store_mask))
291    s2_store_data_merged(i) := mergePutData(old_data, new_data, wmask)
292  }
293
  // the single word the AMO operates on
294  val s2_data_word = s2_store_data_merged(s2_req.word_idx)
295
296  // s3: write data, meta and tag
297  val s3_valid = RegInit(false.B)
298  val s3_req = RegEnable(s2_req, s2_fire_to_s3)
299  val s3_idx = get_idx(s3_req.vaddr)
300  val s3_tag = RegEnable(s2_tag, s2_fire_to_s3)
301  val s3_tag_match = RegEnable(s2_tag_match, s2_fire_to_s3)
302  val s3_coh = RegEnable(s2_coh, s2_fire_to_s3)
303  val s3_hit = RegEnable(s2_hit, s2_fire_to_s3)
304  val s3_amo_hit = RegEnable(s2_amo_hit, s2_fire_to_s3)
305  val s3_store_hit = RegEnable(s2_store_hit, s2_fire_to_s3)
306  val s3_hit_coh = RegEnable(s2_hit_coh, s2_fire_to_s3)
307  val s3_new_hit_coh = RegEnable(s2_new_hit_coh, s2_fire_to_s3)
308  val s3_way_en = RegEnable(s2_way_en, s2_fire_to_s3)
309  val s3_banked_store_wmask = RegEnable(s2_banked_store_wmask, s2_fire_to_s3)
310  val s3_store_data_merged = RegEnable(s2_store_data_merged, s2_fire_to_s3)
311  val s3_data_word = RegEnable(s2_data_word, s2_fire_to_s3)
312  val s3_data = RegEnable(s2_data, s2_fire_to_s3)
  // effect of the probe on our coherence state (shrink param for ProbeAck)
313  val (probe_has_dirty_data, probe_shrink_param, probe_new_coh) = s3_coh.onProbe(s3_req.probe_param)
314  val s3_need_replacement = RegEnable(s2_need_replacement, s2_fire_to_s3)
315
  // which request kinds must update the meta array this cycle
316  val miss_update_meta = s3_req.miss
317  val probe_update_meta = s3_req.probe && s3_tag_match && s3_coh =/= probe_new_coh
318  val store_update_meta = s3_req.isStore && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh
319  val amo_update_meta = s3_req.isAMO && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh
320  val update_meta = miss_update_meta || probe_update_meta || store_update_meta || amo_update_meta
321
  // new coherence state after a refill, derived from the op category, the
  // granted TileLink permission, and whether the grant carried dirty data
322  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
323    val c = categorize(cmd)
324    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
325      //(effect param) -> (next)
326      Cat(rd, toB, false.B)  -> Branch,
327      Cat(rd, toB, true.B)   -> Branch,
328      Cat(rd, toT, false.B)  -> Trunk,
329      Cat(rd, toT, true.B)   -> Dirty,
330      Cat(wi, toT, false.B)  -> Trunk,
331      Cat(wi, toT, true.B)   -> Dirty,
332      Cat(wr, toT, false.B)  -> Dirty,
333      Cat(wr, toT, true.B)   -> Dirty))
334  }
335  val miss_new_coh = ClientMetadata(missCohGen(s3_req.cmd, s3_req.miss_param, s3_req.miss_dirty))
336
  // priority: refill > probe > store/AMO upgrade; default reset state
337  val new_coh = Mux(
338    miss_update_meta,
339    miss_new_coh,
340    Mux(
341      probe_update_meta,
342      probe_new_coh,
343      Mux(
344        store_update_meta || amo_update_meta,
345        s3_new_hit_coh,
346        ClientMetadata.onReset
347      )
348    )
349  )
350
351  // LR, SC and AMO
  // debug counters to catch pathological SC failure loops (see assert below)
352  val debug_sc_fail_addr = RegInit(0.U)
353  val debug_sc_fail_cnt  = RegInit(0.U(8.W))
354
  // LR reservation: counts down each cycle; the reservation is honoured only
  // while the count exceeds the backoff threshold
355  val lrsc_count = RegInit(0.U(log2Ceil(lrscCycles).W))
356  val lrsc_valid = lrsc_count > lrscBackoff.U
357  val lrsc_addr  = Reg(UInt())
358  val s3_lr = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XLR
359  val s3_sc = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XSC
360  val s3_lrsc_addr_match = lrsc_valid && lrsc_addr === get_block_addr(s3_req.addr)
361  val s3_sc_fail = s3_sc && !s3_lrsc_addr_match
  // SC result register value: 0 = success, 1 = failure (RISC-V convention)
362  val s3_sc_resp = Mux(s3_sc_fail, 1.U, 0.U)
363
  // AMOs execute either on an s3 hit or as the refill of an AMO miss
364  val s3_can_do_amo = (s3_req.miss && !s3_req.probe && s3_req.source === AMO_SOURCE.U) || s3_amo_hit
365  val s3_can_do_amo_write = s3_can_do_amo && isWrite(s3_req.cmd) && !s3_sc_fail
366
  // LR (re)arms the reservation; any SC — success or fail — clears it;
  // otherwise the reservation simply decays
367  when (s3_valid && (s3_lr || s3_sc)) {
368    when (s3_can_do_amo && s3_lr) {
369      lrsc_count := (lrscCycles - 1).U
370      lrsc_addr := get_block_addr(s3_req.addr)
371    } .otherwise {
372      lrsc_count := 0.U
373    }
374  } .elsewhen (lrsc_count > 0.U) {
375    lrsc_count := lrsc_count - 1.U
376  }
377
378  io.lrsc_locked_block.valid := lrsc_valid
379  io.lrsc_locked_block.bits  := lrsc_addr
380
381  // when we release this block,
382  // we invalidate this reservation set
  // NOTE: placed after the LR/SC when-block on purpose — Chisel last-connect
  // gives this clear priority over the updates above
383  when (io.invalid_resv_set) {
384    lrsc_count := 0.U
385  }
386
  // track consecutive SC failures to the same address for the sanity assert
387  when (s3_valid) {
388    when (s3_req.addr === debug_sc_fail_addr) {
389      when (s3_sc_fail) {
390        debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U
391      } .elsewhen (s3_sc) {
392        debug_sc_fail_cnt := 0.U
393      }
394    } .otherwise {
395      when (s3_sc_fail) {
396        debug_sc_fail_addr := s3_req.addr
397        debug_sc_fail_cnt  := 1.U
398      }
399    }
400  }
401  assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row")
402
403
  // data-array write mask: whole line on refill, store banks on store hit,
  // the single AMO bank on an AMO write, nothing otherwise
404  val banked_amo_wmask = UIntToOH(s3_req.word_idx)
405//  val banked_wmask = s3_banked_store_wmask
406  val banked_wmask = Mux(
407    s3_req.miss,
408    banked_full_wmask,
409    Mux(
410      s3_store_hit,
411      s3_banked_store_wmask,
412      Mux(
413        s3_can_do_amo_write,
414        banked_amo_wmask,
415        banked_none_wmask
416      )
417    )
418  )
419  val update_data = banked_wmask.asUInt.orR
420
421  // generate write data
422  // AMO hits
423  val amoalu   = Module(new AMOALU(wordBits))
424  amoalu.io.mask := s3_req.amo_mask
425  amoalu.io.cmd  := s3_req.cmd
426  amoalu.io.lhs  := s3_data_word
427  amoalu.io.rhs  := s3_req.amo_data
428
429  // merge amo write data
  // overlay the AMO ALU result onto the store-merged line, only in the bank
  // holding word_idx, and only when the AMO write may proceed
430  val s3_amo_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
431  for (i <- 0 until DCacheBanks) {
432    val old_data = s3_store_data_merged(i)
433    val new_data = amoalu.io.out
434    val wmask = Mux(s3_can_do_amo_write && s3_req.word_idx === i.U,
435      ~0.U(wordBytes.W), 0.U(wordBytes.W))
436    s3_amo_data_merged(i) := mergePutData(old_data, new_data, wmask)
437  }
438
  // writeback is needed when a refill evicts a valid victim, or on any probe
439  val miss_wb = s3_req.miss && s3_need_replacement && s3_coh.state =/= ClientStates.Nothing
440  val probe_wb = s3_req.probe
441  val need_wb = miss_wb || probe_wb
442
  // eviction uses the shrink param of a full flush; probes use onProbe's param
443  val (_, miss_shrink_param, _) = s3_coh.onCacheControl(M_FLUSH)
444  val writeback_param = Mux(miss_wb, miss_shrink_param, probe_shrink_param)
445  val writeback_data = if (dcacheParameters.alwaysReleaseData) {
446    s3_tag_match && s3_req.probe && s3_req.probe_need_data ||
447      s3_coh === ClientStates.Dirty || miss_wb && s3_coh.state =/= ClientStates.Nothing
448  } else {
449    s3_tag_match && s3_req.probe && s3_req.probe_need_data || s3_coh === ClientStates.Dirty
450  }
451
  // s3 exit conditions: each flavour waits only on the resources it will use
452  val s3_probe_can_go = s3_req.probe && io.wb.ready && (io.meta_write.ready || !probe_update_meta)
453  val s3_store_can_go = s3_req.isStore && !s3_req.probe && (io.meta_write.ready || !store_update_meta) && (io.data_write.ready || !update_data)
454  val s3_amo_can_go = s3_amo_hit && (io.meta_write.ready || !amo_update_meta) && (io.data_write.ready || !update_data)
455  val s3_miss_can_go = s3_req.miss &&
456    (io.meta_write.ready || !amo_update_meta) &&
457    (io.data_write.ready || !update_data) &&
458    io.tag_write.ready &&
459    io.wb.ready
460  val s3_can_go = s3_probe_can_go || s3_store_can_go || s3_amo_can_go || s3_miss_can_go
461  val s3_fire = s3_valid && s3_can_go
462  when (s2_fire_to_s3) {
463    s3_valid := true.B
464  }.elsewhen (s3_fire) {
465    s3_valid := false.B
466  }
467  s3_ready := !s3_valid || s3_can_go
468  s3_s0_set_conflict := s3_valid && s3_idx === s0_idx
469  assert(RegNext(!s3_valid || !(s3_req.isStore && !s3_req.probe) || s3_hit)) // miss store should never come to s3
470
471
472  req.ready := s0_can_go
473
  // s0 reads all ways of meta and tag in parallel
474  io.meta_read.valid := req.valid && s1_ready && !set_conflict
475  io.meta_read.bits.idx := get_idx(s0_req.vaddr)
476  io.meta_read.bits.way_en := ~0.U(nWays.W)
477
478  io.tag_read.valid := req.valid && s1_ready && !set_conflict
479  io.tag_read.bits.idx := get_idx(s0_req.vaddr)
480  io.tag_read.bits.way_en := ~0.U(nWays.W)
481
  // s1 banked data read of the selected way
482  io.data_read.valid := s1_valid && s1_need_data && s2_ready
483  io.data_read.bits.rmask := s1_banked_rmask
484  io.data_read.bits.way_en := s1_way_en
485  io.data_read.bits.addr := s1_req.vaddr
486
  // s2 miss: hand the request over to the miss queue
487  io.miss.valid := s2_valid && s2_can_go_to_mq
488  val miss = io.miss.bits
489  miss := DontCare
490  miss.source := s2_req.source
491  miss.cmd := s2_req.cmd
492  miss.addr := s2_req.addr
493  miss.vaddr := s2_req.vaddr
494  miss.way_en := s2_way_en
495  miss.store_data := s2_req.store_data
496  miss.store_mask := s2_req.store_mask
497  miss.word_idx := s2_req.word_idx
498  miss.amo_data := s2_req.amo_data
499  miss.amo_mask := s2_req.amo_mask
500  miss.req_coh := s2_hit_coh
501  miss.replace_coh := s2_repl_coh
502  miss.replace_tag := s2_repl_tag
503  miss.id := s2_req.id
504
  // store miss while the miss queue is full: tell the store buffer to replay
505  io.store_replay_resp.valid := s2_valid && s2_can_go_to_mq && replay && s2_req.isStore
506  io.store_replay_resp.bits.data := DontCare
507  io.store_replay_resp.bits.miss := true.B
508  io.store_replay_resp.bits.replay := true.B
509  io.store_replay_resp.bits.id := s2_req.id
510
  // store hit completes in s3
511  io.store_hit_resp.valid := s3_valid && s3_store_can_go
512  io.store_hit_resp.bits.data := DontCare
513  io.store_hit_resp.bits.miss := false.B
514  io.store_hit_resp.bits.replay := false.B
515  io.store_hit_resp.bits.id := s3_req.id
516
  // inform the release path of data written by a hitting store/AMO
517  io.release_update.valid := s3_valid && (s3_store_can_go || s3_amo_can_go) && s3_hit && update_data
518  io.release_update.bits.addr := s3_req.addr
519  io.release_update.bits.mask := Mux(s3_store_hit, s3_banked_store_wmask, banked_amo_wmask)
520  io.release_update.bits.data := s3_amo_data_merged.asUInt
521
  // atomics response: hit/refill path returns the old word (or SC status);
  // the id field carries lrsc_valid back to the issuer
522  val atomic_hit_resp = Wire(new AtomicsResp)
523  atomic_hit_resp.data := Mux(s3_sc, s3_sc_resp, s3_data_word)
524  atomic_hit_resp.miss := false.B
525  atomic_hit_resp.miss_id := s3_req.miss_id
526  atomic_hit_resp.replay := false.B
527  atomic_hit_resp.ack_miss_queue := s3_req.miss
528  atomic_hit_resp.id := lrsc_valid
  // AMO miss while the miss queue is full: ask for a replay
529  val atomic_replay_resp = Wire(new AtomicsResp)
530  atomic_replay_resp.data := DontCare
531  atomic_replay_resp.miss := true.B
532  atomic_replay_resp.miss_id := DontCare
533  atomic_replay_resp.replay := true.B
534  atomic_replay_resp.ack_miss_queue := false.B
535  atomic_replay_resp.id := DontCare
536  val atomic_replay_resp_valid = s2_valid && s2_can_go_to_mq && replay && s2_req.isAMO
537  val atomic_hit_resp_valid = s3_valid && (s3_amo_can_go || s3_miss_can_go && s3_req.isAMO)
538  io.atomic_resp.valid := atomic_replay_resp_valid || atomic_hit_resp_valid
539  io.atomic_resp.bits := Mux(atomic_replay_resp_valid, atomic_replay_resp, atomic_hit_resp)
540
  // s3 array writes, all gated by s3_fire so they happen exactly once
541  io.meta_write.valid := s3_fire && update_meta
542  io.meta_write.bits.idx := s3_idx
543  io.meta_write.bits.way_en := s3_way_en
544  io.meta_write.bits.tag := get_tag(s3_req.addr)
545  io.meta_write.bits.meta.coh := new_coh
546
  // tag is only rewritten on a refill (the way may have been reallocated)
547  io.tag_write.valid := s3_fire && s3_req.miss
548  io.tag_write.bits.idx := s3_idx
549  io.tag_write.bits.way_en := s3_way_en
550  io.tag_write.bits.tag := get_tag(s3_req.addr)
551
552  io.data_write.valid := s3_fire && update_data
553  io.data_write.bits.way_en := s3_way_en
554  io.data_write.bits.addr := s3_req.vaddr
555  io.data_write.bits.wmask := banked_wmask
556  io.data_write.bits.data := s3_amo_data_merged
557
  // writeback request: fires when the flavour-specific resources are ready
  // and a writeback is actually needed (victim eviction or probe ack)
558  io.wb.valid := s3_valid && (
559    // probe can go to wbq
560    s3_req.probe && (io.meta_write.ready || !probe_update_meta) ||
561      // amo miss can go to wbq
562      s3_req.miss &&
563        (io.meta_write.ready || !amo_update_meta) &&
564        (io.data_write.ready || !update_data) &&
565        io.tag_write.ready
566    ) && need_wb
  // reconstruct the victim's physical block address from its tag + vaddr offset
567  io.wb.bits.addr := get_block_addr(Cat(s3_tag, get_untag(s3_req.vaddr)))
568  io.wb.bits.param := writeback_param
569  io.wb.bits.voluntary := s3_req.miss
570  io.wb.bits.hasData := writeback_data
571  io.wb.bits.dirty := s3_coh === ClientStates.Dirty
572  io.wb.bits.data := s3_data.asUInt()
573  io.wb.bits.delay_release := false.B
574  io.wb.bits.miss_id := DontCare
575
  // notify the replacer of a hitting store/AMO access (registered to align
  // with the s2 timing of s2_idx)
576  io.replace_access.valid := RegNext(s1_fire && (s1_req.isAMO || s1_req.isStore) && !s1_req.probe && s1_tag_match)
577  io.replace_access.bits.set := s2_idx
578  io.replace_access.bits.way := RegNext(OHToUInt(s1_way_en))
579
  // ask the replacer for a victim way for the set that just entered s1
580  io.replace_way.set.valid := RegNext(s0_fire)
581  io.replace_way.set.bits := s1_idx
582
583  // TODO: consider block policy of a finer granularity
584  io.status.s0_set.valid := req.valid
585  io.status.s0_set.bits := get_idx(s0_req.vaddr)
586  io.status.s1.valid := s1_valid
587  io.status.s1.bits.set := s1_idx
588  io.status.s1.bits.way_en := s1_way_en
589  io.status.s2.valid := s2_valid
590  io.status.s2.bits.set := s2_idx
591  io.status.s2.bits.way_en := s2_way_en
592  io.status.s3.valid := s3_valid
593  io.status.s3.bits.set := s3_idx
594  io.status.s3.bits.way_en := s3_way_en
595
  // performance counters: request rate and cycles with any stage occupied
596  val perfinfo = IO(new Bundle(){
597    val perfEvents = Output(new PerfEventsBundle(2))
598  })
599  val perfEvents = Seq(
600    ("dcache_mp_req                    ", s0_fire                                                                     ),
601    ("dcache_mp_total_penalty          ", (PopCount(VecInit(Seq(s0_fire, s1_valid, s2_valid, s3_valid))))             ),
602  )
603
604  for (((perf_out,(perf_name,perf)),i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) {
605    perf_out.incr_step := RegNext(perf)
606  }
607}
608