xref: /XiangShan/src/main/scala/xiangshan/cache/dcache/mainpipe/MainPipe.scala (revision 1b5e3cda2e8bbc4254b900b0321cbc4d396ef041)
1/***************************************************************************************
2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3* Copyright (c) 2020-2021 Peng Cheng Laboratory
4*
5* XiangShan is licensed under Mulan PSL v2.
6* You can use this software according to the terms and conditions of the Mulan PSL v2.
7* You may obtain a copy of Mulan PSL v2 at:
8*          http://license.coscl.org.cn/MulanPSL2
9*
10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13*
14* See the Mulan PSL v2 for more details.
15***************************************************************************************/
16
17package xiangshan.cache
18
19import chipsalliance.rocketchip.config.Parameters
20import chisel3._
21import chisel3.util._
22import freechips.rocketchip.tilelink.ClientStates._
23import freechips.rocketchip.tilelink.MemoryOpCategories._
24import freechips.rocketchip.tilelink.TLPermissions._
25import freechips.rocketchip.tilelink.{ClientMetadata, ClientStates, TLPermissions}
26import utils._
27
// MainPipeReq is the unified request format for the DCache main pipeline.
// All four request sources (store buffer, probe queue, atomics, replace)
// use this bundle; each source only populates the fields relevant to it
// (see convertStoreReq below for the store case).
28class MainPipeReq(implicit p: Parameters) extends DCacheBundle {
29  val miss = Bool() // only amo miss will refill in main pipe
  // index of the associated miss queue entry (width matches cfg.nMissEntries)
30  val miss_id = UInt(log2Up(cfg.nMissEntries).W)
  // TileLink permission-transition param carried with the refill (bdWidth encoding)
31  val miss_param = UInt(TLPermissions.bdWidth.W)
  // whether the refilled block is already dirty
32  val miss_dirty = Bool()
33
34  val probe = Bool()
  // TileLink Probe permission param (bdWidth encoding), fed to coh.onProbe
35  val probe_param = UInt(TLPermissions.bdWidth.W)
  // probe must return data even if the block will not be kept
36  val probe_need_data = Bool()
37
38  // request info
39  // reqs from Store, AMO use this
40  // probe does not use this
41  val source = UInt(sourceTypeWidth.W)
42  val cmd = UInt(M_SZ.W)
43  // if dcache size > 32KB, vaddr is also needed for store
44  // vaddr is used to get extra index bits
45  val vaddr  = UInt(VAddrBits.W)
46  // must be aligned to block
47  val addr   = UInt(PAddrBits.W)
48
49  // store
50  val store_data = UInt((cfg.blockBytes * 8).W)
51  val store_mask = UInt(cfg.blockBytes.W)
52
53  // which word does amo work on?
54  val word_idx = UInt(log2Up(cfg.blockBytes * 8 / DataBits).W)
55  val amo_data   = UInt(DataBits.W)
56  val amo_mask   = UInt((DataBits / 8).W)
57
58  // replace
59  val replace = Bool()
  // one-hot selector of the way to be replaced
60  val replace_way_en = UInt(DCacheWays.W)
61
  // request id, echoed back in responses so the sender can match them up
62  val id = UInt(reqIdWidth.W)
63
  // source discriminators (exactly one should hold for a given request)
64  def isLoad: Bool = source === LOAD_SOURCE.U
65  def isStore: Bool = source === STORE_SOURCE.U
66  def isAMO: Bool = source === AMO_SOURCE.U
67
  // Build a MainPipeReq from a store-buffer line request.
  // Any field not explicitly assigned below is left as DontCare
  // (e.g. miss_id, miss_param, probe_param, amo_* fields).
68  def convertStoreReq(store: DCacheLineReq): MainPipeReq = {
69    val req = Wire(new MainPipeReq)
70    req := DontCare
71    req.miss := false.B
72    req.miss_dirty := false.B
73    req.probe := false.B
74    req.probe_need_data := false.B
75    req.source := STORE_SOURCE.U
76    req.cmd := store.cmd
77    req.addr := store.addr
78    req.vaddr := store.vaddr
79    req.store_data := store.data
80    req.store_mask := store.mask
81    req.replace := false.B
82    req.id := store.id
83    req
84  }
85}
86
// MainPipe is the DCache main pipeline: a 4-stage (s0-s3) pipeline serving
// four request sources (store buffer, probe queue, atomics, replace):
//   s0: arbitrate requests; issue meta/tag array reads
//   s1: tag match; pick hit way or replacement way; issue data array read
//   s2: merge store data with read data; a store/AMO miss exits here to the
//       miss queue, everything else advances to s3
//   s3: run the AMO ALU, handle LR/SC reservations, write data/meta/tag
//       arrays, and issue writeback requests
// A request in s1/s2/s3 blocks any s0 request to the same set (set_conflict)
// so array reads/writes to one set are never in flight concurrently.
87class MainPipe(implicit p: Parameters) extends DCacheModule {
88  val metaBits = (new Meta).getWidth
  // meta is ECC-encoded together with the tag; subtract tagBits to get the
  // encoded-meta-only width the meta array returns
89  val encMetaBits = cacheParams.tagCode.width((new MetaAndTag).getWidth) - tagBits
90
91  val io = IO(new Bundle() {
92    // probe queue
93    val probe_req = Flipped(DecoupledIO(new MainPipeReq))
94    // store misses go to the miss queue
95    val miss_req = DecoupledIO(new MissReq)
96    // store buffer
97    val store_req = Flipped(DecoupledIO(new DCacheLineReq))
98    val store_replay_resp = ValidIO(new DCacheLineResp)
99    val store_hit_resp = ValidIO(new DCacheLineResp)
100    val release_update = ValidIO(new ReleaseUpdate)
101    // atomics
102    val atomic_req = Flipped(DecoupledIO(new MainPipeReq))
103    val atomic_resp = ValidIO(new AtomicsResp)
104    // replace
105    val replace_req = Flipped(DecoupledIO(new MainPipeReq))
106    val replace_resp = ValidIO(UInt(log2Up(cfg.nMissEntries).W))
107    // write-back queue
108    val wb = DecoupledIO(new WritebackReq)
109
110    val data_read = DecoupledIO(new L1BankedDataReadLineReq)
111    val data_resp = Input(Vec(DCacheBanks, new L1BankedDataReadResult()))
112    val data_write = DecoupledIO(new L1BankedDataWriteReq)
113
114    val meta_read = DecoupledIO(new MetaReadReq)
115    val meta_resp = Input(Vec(nWays, UInt(encMetaBits.W)))
116    val meta_write = DecoupledIO(new MetaWriteReq)
117
118    val tag_read = DecoupledIO(new TagReadReq)
119    val tag_resp = Input(Vec(nWays, UInt(tagBits.W)))
120    val tag_write = DecoupledIO(new TagWriteReq)
121
122    // update state vec in replacement algo
123    val replace_access = ValidIO(new ReplacementAccessBundle)
124    // find the way to be replaced
125    val replace_way = new ReplacementWayReqIO
126
    // per-stage occupancy info exported for external conflict checks
127    val status = new Bundle() {
128      val s0_set = ValidIO(UInt(idxBits.W))
129      val s1, s2, s3 = ValidIO(new Bundle() {
130        val set = UInt(idxBits.W)
131        val way_en = UInt(nWays.W)
132      })
133    }
134
135    // lrsc locked block should block probe
136    val lrsc_locked_block = Output(Valid(UInt(PAddrBits.W)))
137    val invalid_resv_set = Input(Bool())
138    val update_resv_set = Output(Bool())
139  })
140
141  // meta array is made of regs, so meta write or read should always be ready
142  assert(RegNext(io.meta_read.ready))
143  assert(RegNext(io.meta_write.ready))
144
  // same-set conflict wires: an in-flight request in s1/s2/s3 blocks a new s0
  // request to the same set
  // NOTE(review): "s2_s0_set_conlict" is a typo of "conflict"; name kept as-is
145  val s1_s0_set_conflict, s2_s0_set_conlict, s3_s0_set_conflict = Wire(Bool())
146  val set_conflict = s1_s0_set_conflict || s2_s0_set_conlict || s3_s0_set_conflict
147  val s1_ready, s2_ready, s3_ready = Wire(Bool())
148
149  // convert store req to main pipe req, and select a req from store and probe
150  val store_req = Wire(DecoupledIO(new MainPipeReq))
151  store_req.bits := (new MainPipeReq).convertStoreReq(io.store_req.bits)
152  store_req.valid := io.store_req.valid
153  io.store_req.ready := store_req.ready
154
155  // s0: read meta and tag
  // priority order is the Seq order below: store > probe > atomic > replace
156  val req = Wire(DecoupledIO(new MainPipeReq))
157  arbiter(
158    in = Seq(
159      store_req,
160      io.probe_req,
161      io.atomic_req,
162      io.replace_req
163    ),
164    out = req,
165    name = Some("main_pipe_req")
166  )
167  val s0_req = req.bits
168  val s0_idx = get_idx(s0_req.vaddr)
169  val s0_can_go = io.meta_read.ready && io.tag_read.ready && s1_ready && !set_conflict
170  val s0_fire = req.valid && s0_can_go
171
  // per-bank store mask summary: which banks are written at all, and which are
  // written in full (full-bank writes need no read-modify-write)
172  val bank_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).orR)).asUInt
173  val bank_full_write = VecInit((0 until DCacheBanks).map(i => get_mask_of_bank(i, s0_req.store_mask).andR)).asUInt
  // NOTE(review): banks_full_overwrite appears unused in this file -- candidate
  // for removal, or intended for a full-line-store fast path; confirm
174  val banks_full_overwrite = bank_full_write.andR
175
  // only partially-written banks must be read for merging
176  val banked_store_rmask = bank_write & ~bank_full_write
177  val banked_full_rmask = ~0.U(DCacheBanks.W)
178  val banked_none_rmask = 0.U(DCacheBanks.W)
179
180  val store_need_data = !s0_req.probe && s0_req.isStore && banked_store_rmask.orR
181  val probe_need_data = s0_req.probe
182  val amo_need_data = !s0_req.probe && s0_req.isAMO
183  val miss_need_data = s0_req.miss
184  val replace_need_data = s0_req.replace
185
186  val banked_need_data = store_need_data || probe_need_data || amo_need_data || miss_need_data || replace_need_data
187
188  val s0_banked_rmask = Mux(store_need_data, banked_store_rmask,
189    Mux(probe_need_data || amo_need_data || miss_need_data || replace_need_data,
190      banked_full_rmask,
191      banked_none_rmask
192    ))
193
194  // generate wmask here and use it in stage 2
195  val banked_store_wmask = bank_write
196  val banked_full_wmask = ~0.U(DCacheBanks.W)
197  val banked_none_wmask = 0.U(DCacheBanks.W)
198
199  // s1: read data
200  val s1_valid = RegInit(false.B)
201  val s1_need_data = RegEnable(banked_need_data, s0_fire)
202  val s1_req = RegEnable(s0_req, s0_fire)
203  val s1_banked_rmask = RegEnable(s0_banked_rmask, s0_fire)
204  val s1_banked_store_wmask = RegEnable(banked_store_wmask, s0_fire)
205  val s1_can_go = s2_ready && (io.data_read.ready || !s1_need_data)
206  val s1_fire = s1_valid && s1_can_go
207  val s1_idx = get_idx(s1_req.vaddr)
208  when (s0_fire) {
209    s1_valid := true.B
210  }.elsewhen (s1_fire) {
211    s1_valid := false.B
212  }
213  s1_ready := !s1_valid || s1_can_go
214  s1_s0_set_conflict := s1_valid && s0_idx === s1_idx
215
  // strip the ECC check bits from an encoded meta entry (plain meta occupies
  // the low metaBits bits)
216  def getMeta(encMeta: UInt): UInt = {
217    require(encMeta.getWidth == encMetaBits)
218    encMeta(metaBits - 1, 0)
219  }
220
  // tag/meta arrays respond one cycle after the read fired; when s1 stalls,
  // these wires hold the last response (capture on RegNext(s0_fire), else
  // recirculate the registered previous value)
221  val tag_resp = Wire(Vec(nWays, UInt(tagBits.W)))
222  val ecc_meta_resp = Wire(Vec(nWays, UInt(encMetaBits.W)))
223  tag_resp := Mux(RegNext(s0_fire), io.tag_resp, RegNext(tag_resp))
224  ecc_meta_resp := Mux(RegNext(s0_fire), io.meta_resp, RegNext(ecc_meta_resp))
225  val meta_resp = ecc_meta_resp.map(getMeta(_))
226
227  def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f))
  // a way "matches" only if its tag is equal AND its coherence state is valid
228  val s1_tag_eq_way = wayMap((w: Int) => tag_resp(w) === get_tag(s1_req.addr)).asUInt
229  val s1_tag_match_way = wayMap((w: Int) => s1_tag_eq_way(w) && Meta(meta_resp(w)).coh.isValid()).asUInt
230  val s1_tag_match = s1_tag_match_way.orR
231
232  val s1_hit_tag = Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => tag_resp(w))), get_tag(s1_req.addr))
233  val s1_hit_coh = ClientMetadata(Mux(s1_tag_match, Mux1H(s1_tag_match_way, wayMap(w => meta_resp(w))), 0.U))
234
235  // replacement policy
  // same hold pattern as tag_resp: latch the replacement way chosen the cycle
  // after s0 fired, and keep it stable while s1 stalls
236  val s1_repl_way_en = WireInit(0.U(nWays.W))
237  s1_repl_way_en := Mux(RegNext(s0_fire), UIntToOH(io.replace_way.way), RegNext(s1_repl_way_en))
238  val s1_repl_tag = Mux1H(s1_repl_way_en, wayMap(w => tag_resp(w)))
239  val s1_repl_coh = Mux1H(s1_repl_way_en, wayMap(w => meta_resp(w))).asTypeOf(new ClientMetadata)
240
241  val s1_need_replacement = (s1_req.miss || s1_req.isStore && !s1_req.probe) && !s1_tag_match
  // way/tag/coh to operate on: replace reqs name their own way; otherwise use
  // the replacement way on a miss, or the matching way on a hit
242  val s1_way_en = Mux(s1_req.replace, s1_req.replace_way_en, Mux(s1_need_replacement, s1_repl_way_en, s1_tag_match_way))
243  val s1_tag = Mux(s1_req.replace, get_tag(s1_req.addr), Mux(s1_need_replacement, s1_repl_tag, s1_hit_tag))
244  val s1_coh = Mux(
245    s1_req.replace,
246    Mux1H(s1_req.replace_way_en, meta_resp.map(ClientMetadata(_))),
247    Mux(s1_need_replacement, s1_repl_coh, s1_hit_coh)
248  )
249
250  val s1_has_permission = s1_hit_coh.onAccess(s1_req.cmd)._1
251  val s1_hit = s1_tag_match && s1_has_permission
  // pre-computed in s1 for timing: store/AMO reqs that miss leave for the
  // miss queue from s2 instead of going to s3
252  val s1_pregen_can_go_to_mq = !s1_req.replace && !s1_req.probe && !s1_req.miss && (s1_req.isStore || s1_req.isAMO) && !s1_hit
253
254  // s2: select data, return resp if this is a store miss
255  val s2_valid = RegInit(false.B)
256  val s2_req = RegEnable(s1_req, s1_fire)
257  val s2_tag_match = RegEnable(s1_tag_match, s1_fire)
258  val s2_hit_coh = RegEnable(s1_hit_coh, s1_fire)
259  val (s2_has_permission, _, s2_new_hit_coh) = s2_hit_coh.onAccess(s2_req.cmd)
260  val s2_repl_way_en = RegEnable(s1_repl_way_en, s1_fire)
261  val s2_repl_tag = RegEnable(s1_repl_tag, s1_fire)
262  val s2_repl_coh = RegEnable(s1_repl_coh, s1_fire)
263  val s2_need_replacement = RegEnable(s1_need_replacement, s1_fire)
264  val s2_idx = get_idx(s2_req.vaddr)
265  val s2_way_en = RegEnable(s1_way_en, s1_fire)
266  val s2_tag = RegEnable(s1_tag, s1_fire)
267  val s2_coh = RegEnable(s1_coh, s1_fire)
268  val s2_banked_store_wmask = RegEnable(s1_banked_store_wmask, s1_fire)
269
270  val s2_hit = s2_tag_match && s2_has_permission
271  val s2_amo_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isAMO
272  val s2_store_hit = s2_hit && !s2_req.probe && !s2_req.miss && s2_req.isStore
273
274  s2_s0_set_conlict := s2_valid && s0_idx === s2_idx
275
276  // For a store req, it either hits and goes to s3, or misses and enters the miss queue immediately
277  val s2_can_go_to_s3 = (s2_req.replace || s2_req.probe || s2_req.miss || (s2_req.isStore || s2_req.isAMO) && s2_hit) && s3_ready
278  val s2_can_go_to_mq = RegEnable(s1_pregen_can_go_to_mq, s1_fire)
  // the two exits from s2 are mutually exclusive by construction
279  assert(RegNext(!(s2_valid && s2_can_go_to_s3 && s2_can_go_to_mq)))
280  val s2_can_go = s2_can_go_to_s3 || s2_can_go_to_mq
281  val s2_fire = s2_valid && s2_can_go
282  val s2_fire_to_s3 = s2_valid && s2_can_go_to_s3
283  when (s1_fire) {
284    s2_valid := true.B
285  }.elsewhen (s2_fire) {
286    s2_valid := false.B
287  }
288  s2_ready := !s2_valid || s2_can_go
  // miss queue full: the requester must replay the request later
289  val replay = !io.miss_req.ready
290
  // data array responds one cycle after the read fired in s1; hold the last
  // response while s2 stalls (same pattern as tag_resp above)
291  val data_resp = Wire(io.data_resp.cloneType)
292  data_resp := Mux(RegNext(s1_fire), io.data_resp, RegNext(data_resp))
293  val s2_store_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
294
  // byte-granular merge: take new_data bytes where wmask is set, old_data
  // bytes elsewhere
295  def mergePutData(old_data: UInt, new_data: UInt, wmask: UInt): UInt = {
296    val full_wmask = FillInterleaved(8, wmask)
297    ((~full_wmask & old_data) | (full_wmask & new_data))
298  }
299
300  val s2_data = WireInit(VecInit((0 until DCacheBanks).map(i => {
301    val decoded = cacheParams.dataCode.decode(data_resp(i).asECCData())
302    // assert(!RegNext(s2_valid && s2_hit && decoded.uncorrectable))
303    // TODO: trigger ecc error
304    data_resp(i).raw_data
305  })))
306
307  for (i <- 0 until DCacheBanks) {
308    val old_data = s2_data(i)
309    val new_data = get_data_of_bank(i, s2_req.store_data)
310    // for amo hit, we should use read out SRAM data
311    // do not merge with store data
312    val wmask = Mux(s2_amo_hit, 0.U(wordBytes.W), get_mask_of_bank(i, s2_req.store_mask))
313    s2_store_data_merged(i) := mergePutData(old_data, new_data, wmask)
314  }
315
  // the word an AMO operates on, fed to the AMOALU in s3
316  val s2_data_word = s2_store_data_merged(s2_req.word_idx)
317
318  // s3: write data, meta and tag
319  val s3_valid = RegInit(false.B)
320  val s3_req = RegEnable(s2_req, s2_fire_to_s3)
321  val s3_idx = get_idx(s3_req.vaddr)
322  val s3_tag = RegEnable(s2_tag, s2_fire_to_s3)
323  val s3_tag_match = RegEnable(s2_tag_match, s2_fire_to_s3)
324  val s3_coh = RegEnable(s2_coh, s2_fire_to_s3)
325  val s3_hit = RegEnable(s2_hit, s2_fire_to_s3)
326  val s3_amo_hit = RegEnable(s2_amo_hit, s2_fire_to_s3)
327  val s3_store_hit = RegEnable(s2_store_hit, s2_fire_to_s3)
328  val s3_hit_coh = RegEnable(s2_hit_coh, s2_fire_to_s3)
329  val s3_new_hit_coh = RegEnable(s2_new_hit_coh, s2_fire_to_s3)
330  val s3_way_en = RegEnable(s2_way_en, s2_fire_to_s3)
331  val s3_banked_store_wmask = RegEnable(s2_banked_store_wmask, s2_fire_to_s3)
332  val s3_store_data_merged = RegEnable(s2_store_data_merged, s2_fire_to_s3)
333  val s3_data_word = RegEnable(s2_data_word, s2_fire_to_s3)
334  val s3_data = RegEnable(s2_data, s2_fire_to_s3)
335  val (probe_has_dirty_data, probe_shrink_param, probe_new_coh) = s3_coh.onProbe(s3_req.probe_param)
336  val s3_need_replacement = RegEnable(s2_need_replacement, s2_fire_to_s3)
337
  // which request classes update the meta (coherence state) array
338  val miss_update_meta = s3_req.miss
339  val probe_update_meta = s3_req.probe && s3_tag_match && s3_coh =/= probe_new_coh
340  val store_update_meta = s3_req.isStore && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh
341  val amo_update_meta = s3_req.isAMO && !s3_req.probe && s3_hit_coh =/= s3_new_hit_coh
  // AMOs other than LR/SC must pass through the AMOALU (one extra s3 cycle)
  // before their result can be written
342  val amo_wait_amoalu = s3_req.isAMO && s3_req.cmd =/= M_XLR && s3_req.cmd =/= M_XSC
343  val update_meta = (miss_update_meta || probe_update_meta || store_update_meta || amo_update_meta) && !s3_req.replace
344
  // next coherence state after a refill, from the access category, the
  // granted TileLink permission, and whether the granted data was dirty
345  def missCohGen(cmd: UInt, param: UInt, dirty: Bool) = {
346    val c = categorize(cmd)
347    MuxLookup(Cat(c, param, dirty), Nothing, Seq(
348      //(effect param) -> (next)
349      Cat(rd, toB, false.B)  -> Branch,
350      Cat(rd, toB, true.B)   -> Branch,
351      Cat(rd, toT, false.B)  -> Trunk,
352      Cat(rd, toT, true.B)   -> Dirty,
353      Cat(wi, toT, false.B)  -> Trunk,
354      Cat(wi, toT, true.B)   -> Dirty,
355      Cat(wr, toT, false.B)  -> Dirty,
356      Cat(wr, toT, true.B)   -> Dirty))
357  }
358  val miss_new_coh = ClientMetadata(missCohGen(s3_req.cmd, s3_req.miss_param, s3_req.miss_dirty))
359
  // priority: refill > probe > store/amo permission upgrade; reset otherwise
360  val new_coh = Mux(
361    miss_update_meta,
362    miss_new_coh,
363    Mux(
364      probe_update_meta,
365      probe_new_coh,
366      Mux(
367        store_update_meta || amo_update_meta,
368        s3_new_hit_coh,
369        ClientMetadata.onReset
370      )
371    )
372  )
373
374  // LR, SC and AMO
375  val debug_sc_fail_addr = RegInit(0.U)
376  val debug_sc_fail_cnt  = RegInit(0.U(8.W))
377
  // LR reservation: lrsc_count is armed to lrscCycles-1 on a successful LR
  // and decays every cycle; the reservation is only honored while the count
  // still exceeds the lrscBackoff threshold
378  val lrsc_count = RegInit(0.U(log2Ceil(lrscCycles).W))
379  val lrsc_valid = lrsc_count > lrscBackoff.U
380  val lrsc_addr  = Reg(UInt())
381  val s3_lr = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XLR
382  val s3_sc = !s3_req.probe && s3_req.isAMO && s3_req.cmd === M_XSC
383  val s3_lrsc_addr_match = lrsc_valid && lrsc_addr === get_block_addr(s3_req.addr)
384  val s3_sc_fail = s3_sc && !s3_lrsc_addr_match
  // SC result word: 0 on success, 1 on failure
385  val s3_sc_resp = Mux(s3_sc_fail, 1.U, 0.U)
386
387  val s3_can_do_amo = (s3_req.miss && !s3_req.probe && s3_req.source === AMO_SOURCE.U) || s3_amo_hit
388  val s3_can_do_amo_write = s3_can_do_amo && isWrite(s3_req.cmd) && !s3_sc_fail
389
  // any LR/SC reaching s3 either (re)arms the reservation (LR) or clears it
390  when (s3_valid && (s3_lr || s3_sc)) {
391    when (s3_can_do_amo && s3_lr) {
392      lrsc_count := (lrscCycles - 1).U
393      lrsc_addr := get_block_addr(s3_req.addr)
394    } .otherwise {
395      lrsc_count := 0.U
396    }
397  } .elsewhen (lrsc_count > 0.U) {
398    lrsc_count := lrsc_count - 1.U
399  }
400
401  io.lrsc_locked_block.valid := lrsc_valid
402  io.lrsc_locked_block.bits  := lrsc_addr
403
404  // When we update the reservation set, block all probe reqs in the next cycle
405  // It should give Probe reservation set addr compare an independent cycle,
406  // which will lead to better timing
407  io.update_resv_set := s3_valid && s3_lr && s3_can_do_amo
408
409  // when we release this block,
410  // we invalidate this reservation set
  // (last-connect: this overrides the lrsc_count updates above)
411  when (io.invalid_resv_set) {
412    lrsc_count := 0.U
413  }
414
  // debug bookkeeping: count consecutive SC failures to the same address
415  when (s3_valid) {
416    when (s3_req.addr === debug_sc_fail_addr) {
417      when (s3_sc_fail) {
418        debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U
419      } .elsewhen (s3_sc) {
420        debug_sc_fail_cnt := 0.U
421      }
422    } .otherwise {
423      when (s3_sc_fail) {
424        debug_sc_fail_addr := s3_req.addr
425        debug_sc_fail_cnt  := 1.U
426      }
427    }
428  }
429  assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row")
430
431
  // final per-bank write mask: whole line for a refill, store mask for a
  // store hit, single AMO word for an AMO write, nothing otherwise
432  val banked_amo_wmask = UIntToOH(s3_req.word_idx)
433//  val banked_wmask = s3_banked_store_wmask
434  val banked_wmask = Mux(
435    s3_req.miss,
436    banked_full_wmask,
437    Mux(
438      s3_store_hit,
439      s3_banked_store_wmask,
440      Mux(
441        s3_can_do_amo_write,
442        banked_amo_wmask,
443        banked_none_wmask
444      )
445    )
446  )
447  val update_data = banked_wmask.asUInt.orR
448
449  // generate write data
450  // AMO hits
  // s3_s_amoalu tracks whether the AMOALU pass has been taken for the
  // current s3 request; cleared again when s3 fires (see below)
451  val s3_s_amoalu = RegInit(false.B)
452  val do_amoalu = amo_wait_amoalu && s3_valid && !s3_s_amoalu
453  val amoalu   = Module(new AMOALU(wordBits))
454  amoalu.io.mask := s3_req.amo_mask
455  amoalu.io.cmd  := s3_req.cmd
456  amoalu.io.lhs  := s3_data_word
457  amoalu.io.rhs  := s3_req.amo_data
458
459  // merge amo write data
460//  val amo_bitmask = FillInterleaved(8, s3_req.amo_mask)
461  val s3_amo_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
462  val s3_sc_data_merged = Wire(Vec(DCacheBanks, UInt(DCacheSRAMRowBits.W)))
463  for (i <- 0 until DCacheBanks) {
464    val old_data = s3_store_data_merged(i)
465    val new_data = amoalu.io.out
    // AMOALU result only replaces the bank holding the target word
466    val wmask = Mux(
467      s3_req.word_idx === i.U,
468      ~0.U(wordBytes.W),
469      0.U(wordBytes.W)
470    )
471    s3_amo_data_merged(i) := mergePutData(old_data, new_data, wmask)
472//    s3_sc_data_merged(i) := amo_bitmask & s3_req.amo_data | ~amo_bitmask & old_data
    // SC writes its store data only when the reservation check passed
473    s3_sc_data_merged(i) := mergePutData(old_data, s3_req.amo_data,
474      Mux(s3_req.word_idx === i.U && !s3_sc_fail, s3_req.amo_mask, 0.U(wordBytes.W))
475    )
476  }
477  val s3_amo_data_merged_reg = RegEnable(s3_amo_data_merged, do_amoalu)
478  when(do_amoalu){
479    s3_s_amoalu := true.B
480  }
481
  // writeback needed when evicting a non-Nothing victim (miss or replace)
  // or when serving a probe
482  val miss_wb = s3_req.miss && s3_need_replacement && s3_coh.state =/= ClientStates.Nothing
483  val probe_wb = s3_req.probe
484  val replace_wb = s3_req.replace
485  val need_wb = miss_wb || probe_wb || replace_wb
486
487  val (_, miss_shrink_param, _) = s3_coh.onCacheControl(M_FLUSH)
488  val writeback_param = Mux(probe_wb, probe_shrink_param, miss_shrink_param)
489  val writeback_data = if (dcacheParameters.alwaysReleaseData) {
490    s3_tag_match && s3_req.probe && s3_req.probe_need_data ||
491      s3_coh === ClientStates.Dirty || (miss_wb || replace_wb) && s3_coh.state =/= ClientStates.Nothing
492  } else {
493    s3_tag_match && s3_req.probe && s3_req.probe_need_data || s3_coh === ClientStates.Dirty
494  }
495
  // s3 completion conditions: each request class waits for exactly the array
  // write ports / wbq slot it needs this cycle
496  val s3_probe_can_go = s3_req.probe && io.wb.ready && (io.meta_write.ready || !probe_update_meta)
497  val s3_store_can_go = s3_req.isStore && !s3_req.probe && (io.meta_write.ready || !store_update_meta) && (io.data_write.ready || !update_data)
498  val s3_amo_can_go = s3_amo_hit && (io.meta_write.ready || !amo_update_meta) && (io.data_write.ready || !update_data) && (s3_s_amoalu || !amo_wait_amoalu)
499  val s3_miss_can_go = s3_req.miss &&
500    (io.meta_write.ready || !amo_update_meta) &&
501    (io.data_write.ready || !update_data) &&
502    (s3_s_amoalu || !amo_wait_amoalu) &&
503    io.tag_write.ready &&
504    io.wb.ready
  // replacing an invalid way needs no writeback at all
505  val s3_replace_nothing = s3_req.replace && s3_coh.state === ClientStates.Nothing
506  val s3_replace_can_go = s3_req.replace && (s3_replace_nothing || io.wb.ready)
507  val s3_can_go = s3_probe_can_go || s3_store_can_go || s3_amo_can_go || s3_miss_can_go || s3_replace_can_go
508  val s3_fire = s3_valid && s3_can_go
509  when (s2_fire_to_s3) {
510    s3_valid := true.B
511  }.elsewhen (s3_fire) {
512    s3_valid := false.B
513  }
514  s3_ready := !s3_valid || s3_can_go
515  s3_s0_set_conflict := s3_valid && s3_idx === s0_idx
516  assert(RegNext(!s3_valid || !(s3_req.isStore && !s3_req.probe) || s3_hit)) // miss store should never come to s3
517
518  when(s3_fire) {
519    s3_s_amoalu := false.B
520  }
521
522  req.ready := s0_can_go
523
  // replace reqs read only their named way; everything else reads all ways
524  io.meta_read.valid := req.valid && s1_ready && !set_conflict
525  io.meta_read.bits.idx := get_idx(s0_req.vaddr)
526  io.meta_read.bits.way_en := Mux(s0_req.replace, s0_req.replace_way_en, ~0.U(nWays.W))
527
  // replace reqs carry their own tag, so they skip the tag read
528  io.tag_read.valid := req.valid && s1_ready && !set_conflict && !s0_req.replace
529  io.tag_read.bits.idx := get_idx(s0_req.vaddr)
530  io.tag_read.bits.way_en := ~0.U(nWays.W)
531
532  io.data_read.valid := s1_valid && s1_need_data && s2_ready
533  io.data_read.bits.rmask := s1_banked_rmask
534  io.data_read.bits.way_en := s1_way_en
535  io.data_read.bits.addr := s1_req.vaddr
536
537  io.miss_req.valid := s2_valid && s2_can_go_to_mq
538  val miss_req = io.miss_req.bits
539  miss_req := DontCare
540  miss_req.source := s2_req.source
541  miss_req.cmd := s2_req.cmd
542  miss_req.addr := s2_req.addr
543  miss_req.vaddr := s2_req.vaddr
544  miss_req.way_en := s2_way_en
545  miss_req.store_data := s2_req.store_data
546  miss_req.store_mask := s2_req.store_mask
547  miss_req.word_idx := s2_req.word_idx
548  miss_req.amo_data := s2_req.amo_data
549  miss_req.amo_mask := s2_req.amo_mask
550  miss_req.req_coh := s2_hit_coh
551  miss_req.replace_coh := s2_repl_coh
552  miss_req.replace_tag := s2_repl_tag
553  miss_req.id := s2_req.id
554  miss_req.cancel := false.B
555
  // store that missed while the miss queue was full: tell the sender to replay
556  io.store_replay_resp.valid := s2_valid && s2_can_go_to_mq && replay && s2_req.isStore
557  io.store_replay_resp.bits.data := DontCare
558  io.store_replay_resp.bits.miss := true.B
559  io.store_replay_resp.bits.replay := true.B
560  io.store_replay_resp.bits.id := s2_req.id
561
562  io.store_hit_resp.valid := s3_valid && s3_store_can_go
563  io.store_hit_resp.bits.data := DontCare
564  io.store_hit_resp.bits.miss := false.B
565  io.store_hit_resp.bits.replay := false.B
566  io.store_hit_resp.bits.id := s3_req.id
567
  // forward the freshly-written data so in-flight releases see the update
568  io.release_update.valid := s3_valid && (s3_store_can_go || s3_amo_can_go) && s3_hit && update_data
569  io.release_update.bits.addr := s3_req.addr
570  io.release_update.bits.mask := Mux(s3_store_hit, s3_banked_store_wmask, banked_amo_wmask)
571  io.release_update.bits.data := Mux(
572    amo_wait_amoalu,
573    s3_amo_data_merged_reg,
574    Mux(
575      s3_sc,
576      s3_sc_data_merged,
577      s3_store_data_merged
578    )
579  ).asUInt
580
581  val atomic_hit_resp = Wire(new AtomicsResp)
582  atomic_hit_resp.data := Mux(s3_sc, s3_sc_resp, s3_data_word)
583  atomic_hit_resp.miss := false.B
584  atomic_hit_resp.miss_id := s3_req.miss_id
585  atomic_hit_resp.replay := false.B
586  atomic_hit_resp.ack_miss_queue := s3_req.miss
  // NOTE(review): 'id' is driven by lrsc_valid (a Bool) -- apparently the id
  // field is reused to report LR reservation status; confirm with the
  // AtomicsResp consumer before relying on this
587  atomic_hit_resp.id := lrsc_valid
588  val atomic_replay_resp = Wire(new AtomicsResp)
589  atomic_replay_resp.data := DontCare
590  atomic_replay_resp.miss := true.B
591  atomic_replay_resp.miss_id := DontCare
592  atomic_replay_resp.replay := true.B
593  atomic_replay_resp.ack_miss_queue := false.B
594  atomic_replay_resp.id := DontCare
595  val atomic_replay_resp_valid = s2_valid && s2_can_go_to_mq && replay && s2_req.isAMO
596  val atomic_hit_resp_valid = s3_valid && (s3_amo_can_go || s3_miss_can_go && s3_req.isAMO)
597  io.atomic_resp.valid := atomic_replay_resp_valid || atomic_hit_resp_valid
598  io.atomic_resp.bits := Mux(atomic_replay_resp_valid, atomic_replay_resp, atomic_hit_resp)
599
600  io.replace_resp.valid := s3_fire && s3_req.replace
601  io.replace_resp.bits := s3_req.miss_id
602
603  io.meta_write.valid := s3_fire && update_meta
604  io.meta_write.bits.idx := s3_idx
605  io.meta_write.bits.way_en := s3_way_en
606  io.meta_write.bits.tag := get_tag(s3_req.addr)
607  io.meta_write.bits.meta.coh := new_coh
608
  // tag is only (re)written on a refill
609  io.tag_write.valid := s3_fire && s3_req.miss
610  io.tag_write.bits.idx := s3_idx
611  io.tag_write.bits.way_en := s3_way_en
612  io.tag_write.bits.tag := get_tag(s3_req.addr)
613
614  io.data_write.valid := s3_fire && update_data
615  io.data_write.bits.way_en := s3_way_en
616  io.data_write.bits.addr := s3_req.vaddr
617  io.data_write.bits.wmask := banked_wmask
618  io.data_write.bits.data := Mux(
619    amo_wait_amoalu,
620    s3_amo_data_merged_reg,
621    Mux(
622      s3_sc,
623      s3_sc_data_merged,
624      s3_store_data_merged
625    )
626  )
  // replace reqs never touch the arrays; they only hand data to the wbq
627  assert(RegNext(!io.meta_write.valid || !s3_req.replace))
628  assert(RegNext(!io.tag_write.valid || !s3_req.replace))
629  assert(RegNext(!io.data_write.valid || !s3_req.replace))
630
631  io.wb.valid := s3_valid && (
632    // replace
633    s3_req.replace && !s3_replace_nothing ||
634    // probe can go to wbq
635    s3_req.probe && (io.meta_write.ready || !probe_update_meta) ||
636      // amo miss can go to wbq
637      s3_req.miss &&
638        (io.meta_write.ready || !amo_update_meta) &&
639        (io.data_write.ready || !update_data) &&
640        (s3_s_amoalu || !amo_wait_amoalu) &&
641        io.tag_write.ready
642    ) && need_wb
  // paddr of the victim/probed block: victim tag + untagged vaddr bits
643  io.wb.bits.addr := get_block_addr(Cat(s3_tag, get_untag(s3_req.vaddr)))
644  io.wb.bits.param := writeback_param
645  io.wb.bits.voluntary := s3_req.miss || s3_req.replace
646  io.wb.bits.hasData := writeback_data
647  io.wb.bits.dirty := s3_coh === ClientStates.Dirty
648  io.wb.bits.data := s3_data.asUInt()
649  io.wb.bits.delay_release := s3_req.replace
650  io.wb.bits.miss_id := s3_req.miss_id
651
  // notify the replacer of a store/AMO touch; reported one cycle after s1
  // fires, using the s2-stage set index
652  io.replace_access.valid := RegNext(s1_fire && (s1_req.isAMO || s1_req.isStore) && !s1_req.probe && s1_tag_match)
653  io.replace_access.bits.set := s2_idx
654  io.replace_access.bits.way := RegNext(OHToUInt(s1_way_en))
655
  // ask the replacer for a victim way the cycle after s0 fires (s1 time)
656  io.replace_way.set.valid := RegNext(s0_fire)
657  io.replace_way.set.bits := s1_idx
658
659  // TODO: consider block policy of a finer granularity
660  io.status.s0_set.valid := req.valid
661  io.status.s0_set.bits := get_idx(s0_req.vaddr)
662  io.status.s1.valid := s1_valid
663  io.status.s1.bits.set := s1_idx
664  io.status.s1.bits.way_en := s1_way_en
665  io.status.s2.valid := s2_valid && !s2_req.replace
666  io.status.s2.bits.set := s2_idx
667  io.status.s2.bits.way_en := s2_way_en
668  io.status.s3.valid := s3_valid && !s3_req.replace
669  io.status.s3.bits.set := s3_idx
670  io.status.s3.bits.way_en := s3_way_en
671
  // performance counters: request count and aggregate pipeline occupancy
672  val perfinfo = IO(new Bundle(){
673    val perfEvents = Output(new PerfEventsBundle(2))
674  })
675  val perfEvents = Seq(
676    ("dcache_mp_req                    ", s0_fire                                                                     ),
677    ("dcache_mp_total_penalty          ", (PopCount(VecInit(Seq(s0_fire, s1_valid, s2_valid, s3_valid))))             ),
678  )
679
680  for (((perf_out,(perf_name,perf)),i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) {
681    perf_out.incr_step := RegNext(perf)
682  }
683}
684