/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chisel3._
import chisel3.experimental.ExtModule
import chisel3.util._
import coupledL2.VaddrField
import coupledL2.IsKeywordField
import coupledL2.IsKeywordKey
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util.BundleFieldBase
import huancun.{AliasField, PrefetchField}
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.backend.Bundles.DynInst
import xiangshan.backend.rob.RobDebugRollingIO
import xiangshan.cache.wpu._
import xiangshan.mem.{AddPipelineReg, HasL1PrefetchSourceParameter}
import xiangshan.mem.prefetch._
import xiangshan.mem.LqPtr

// DCache specific parameters
case class DCacheParameters
(
  nSets: Int = 128,
  nWays: Int = 8,
  rowBits: Int = 64,
  tagECC: Option[String] = None,
  dataECC: Option[String] = None,
  replacer: Option[String] = Some("setplru"),
  updateReplaceOn2ndmiss: Boolean = true,
  nMissEntries: Int = 1,
  nProbeEntries: Int = 1,
  nReleaseEntries: Int = 1,
  nMMIOEntries: Int = 1,
  nMMIOs: Int = 1,
  blockBytes: Int = 64,
  nMaxPrefetchEntry: Int = 1,
  alwaysReleaseData: Boolean = false,
  isKeywordBitsOpt: Option[Boolean] = Some(true),
  enableDataEcc: Boolean = false,
  enableTagEcc: Boolean = false,
  cacheCtrlAddressOpt: Option[AddressSet] = None,
) extends L1CacheParameters {
  // If nSets * blockBytes > 4KB (the page size), virtual-index aliasing can
  // occur; we avoid it by recording additional alias bits in the L2 cache.
  val setBytes = nSets * blockBytes
  val aliasBitsOpt = if(setBytes > pageSize) Some(log2Ceil(setBytes / pageSize)) else None
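  // Worked example (a sketch, assuming the usual 4KB pageSize): with the
  // defaults nSets = 128 and blockBytes = 64, setBytes = 8KB > pageSize, so one
  // index bit lies above the page offset and aliasBitsOpt = Some(1).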

  def tagCode: Code = Code.fromString(tagECC)

  def dataCode: Code = Code.fromString(dataECC)
}

//           Physical Address
// --------------------------------------
// |   Physical Tag |  PIndex  | Offset |
// --------------------------------------
//                  |
//                  DCacheTagOffset
//
//           Virtual Address
// --------------------------------------
// | Above index  | Set | Bank | Offset |
// --------------------------------------
//                |     |      |        |
//                |     |      |        0
//                |     |      DCacheBankOffset
//                |     DCacheSetOffset
//                DCacheAboveIndexOffset

// Default DCache size = 128 sets * 8 ways * 8 banks * 8 Byte = 64K Byte
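
// Worked example of the offsets below (derived from the defaults: 8-byte rows,
// 8 banks, 128 sets): DCacheBankOffset = log2(8) = 3, DCacheSetOffset = 3 +
// log2(8) = 6, DCacheAboveIndexOffset = 6 + log2(128) = 13, and DCacheTagOffset
// = min(13, 12) = 12, i.e. the physical tag starts right at the page offset.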

trait HasDCacheParameters extends HasL1CacheParameters with HasL1PrefetchSourceParameter {
  val cacheParams = dcacheParameters
  val cfg = cacheParams

  def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant

  def nSourceType = 10
  def sourceTypeWidth = log2Up(nSourceType)
  // non-prefetch source < 3
  def LOAD_SOURCE = 0
  def STORE_SOURCE = 1
  def AMO_SOURCE = 2
  // prefetch source >= 3
  def DCACHE_PREFETCH_SOURCE = 3
  def SOFT_PREFETCH = 4
  // the following sources are only used inside SMS
  def HW_PREFETCH_AGT = 5
  def HW_PREFETCH_PHT_CUR = 6
  def HW_PREFETCH_PHT_INC = 7
  def HW_PREFETCH_PHT_DEC = 8
  def HW_PREFETCH_BOP = 9
  def HW_PREFETCH_STRIDE = 10

  def BLOOM_FILTER_ENTRY_NUM = 4096

  // each source uses an id to distinguish its multiple reqs
  def reqIdWidth = log2Up(nEntries) max log2Up(StoreBufferSize)

  require(isPow2(cfg.nMissEntries)) // TODO
  // require(isPow2(cfg.nReleaseEntries))
  require(cfg.nMissEntries < cfg.nReleaseEntries)
  val nEntries = cfg.nMissEntries + cfg.nReleaseEntries + 1 // nMissEntries + nReleaseEntries + 1 CMO entry
  val releaseIdBase = cfg.nMissEntries + 1
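  // Resulting source-id layout (a sketch, not normative): ids [0, nMissEntries)
  // are miss entries, id nMissEntries is the CMO entry, and ids starting at
  // releaseIdBase are release entries.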
  val EnableDataEcc = cacheParams.enableDataEcc
  val EnableTagEcc = cacheParams.enableTagEcc

  // banked dcache support
  val DCacheSetDiv = 1
  val DCacheSets = cacheParams.nSets
  val DCacheWays = cacheParams.nWays
  val DCacheBanks = 8 // hardcoded
  val DCacheDupNum = 16
  val DCacheSRAMRowBits = cacheParams.rowBits // hardcoded
  val DCacheWordBits = 64 // hardcoded
  val DCacheWordBytes = DCacheWordBits / 8
  val MaxPrefetchEntry = cacheParams.nMaxPrefetchEntry
  val DCacheVWordBytes = VLEN / 8
  require(DCacheSRAMRowBits == 64)

  val DCacheSetDivBits = log2Ceil(DCacheSetDiv)
  val DCacheSetBits = log2Ceil(DCacheSets)
  val DCacheSizeBits = DCacheSRAMRowBits * DCacheBanks * DCacheWays * DCacheSets
  val DCacheSizeBytes = DCacheSizeBits / 8
  val DCacheSizeWords = DCacheSizeBits / 64 // TODO

  val DCacheSameVPAddrLength = 12

  val DCacheSRAMRowBytes = DCacheSRAMRowBits / 8
  val DCacheWordOffset = log2Up(DCacheWordBytes)
  val DCacheVWordOffset = log2Up(DCacheVWordBytes)

  val DCacheBankOffset = log2Up(DCacheSRAMRowBytes)
  val DCacheSetOffset = DCacheBankOffset + log2Up(DCacheBanks)
  val DCacheAboveIndexOffset = DCacheSetOffset + log2Up(DCacheSets)
  val DCacheTagOffset = DCacheAboveIndexOffset min DCacheSameVPAddrLength
  val DCacheLineOffset = DCacheSetOffset

  def encWordBits = cacheParams.dataCode.width(wordBits)
  def encRowBits  = encWordBits * rowWords // for DuplicatedDataArray only
  def eccBits     = encWordBits - wordBits

  def encTagBits = if (EnableTagEcc) cacheParams.tagCode.width(tagBits) else tagBits
  def tagECCBits = encTagBits - tagBits

  def encDataBits = if (EnableDataEcc) cacheParams.dataCode.width(DCacheSRAMRowBits) else DCacheSRAMRowBits
  def dataECCBits = encDataBits - DCacheSRAMRowBits
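
  // Example (a sketch, assuming dataECC = Some("secded") and enableDataEcc =
  // true): a SECDED code widens a 64-bit row to encDataBits = 72, so
  // dataECCBits = 8 check bits travel with each bank row; with the ECC options
  // left at None, the enc* widths collapse back to the raw widths.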

  // L1 DCache controller
  val cacheCtrlParamsOpt  = OptionWrapper(
                              cacheParams.cacheCtrlAddressOpt.nonEmpty,
                              L1CacheCtrlParams(cacheParams.cacheCtrlAddressOpt.get)
                            )
  // uncache
  val uncacheIdxBits = log2Up(VirtualLoadQueueMaxStoreQueueSize + 1)
  // hardware prefetch parameters
  // high confidence hardware prefetch port
  val HighConfHWPFLoadPort = LoadPipelineWidth - 1 // use the last load port by default
  val IgnorePrefetchConfidence = false

  // parameters about duplicating regs to solve fanout
  // In Main Pipe:
    // tag_write.ready -> data_write.valid * 8 banks
    // tag_write.ready -> meta_write.valid
    // tag_write.ready -> tag_write.valid
    // tag_write.ready -> err_write.valid
    // tag_write.ready -> wb.valid
  val nDupTagWriteReady = DCacheBanks + 4
  // In Main Pipe:
    // data_write.ready -> data_write.valid * 8 banks
    // data_write.ready -> meta_write.valid
    // data_write.ready -> tag_write.valid
    // data_write.ready -> err_write.valid
    // data_write.ready -> wb.valid
  val nDupDataWriteReady = DCacheBanks + 4
  val nDupWbReady = DCacheBanks + 4
  val nDupStatus = nDupTagWriteReady + nDupDataWriteReady
  val dataWritePort = 0
  val metaWritePort = DCacheBanks
  val tagWritePort = metaWritePort + 1
  val errWritePort = tagWritePort + 1
  val wbPort = errWritePort + 1

  def set_to_dcache_div(set: UInt) = {
    require(set.getWidth >= DCacheSetBits)
    if (DCacheSetDivBits == 0) 0.U else set(DCacheSetDivBits-1, 0)
  }

  def set_to_dcache_div_set(set: UInt) = {
    require(set.getWidth >= DCacheSetBits)
    set(DCacheSetBits - 1, DCacheSetDivBits)
  }

  def addr_to_dcache_bank(addr: UInt) = {
    require(addr.getWidth >= DCacheSetOffset)
    addr(DCacheSetOffset-1, DCacheBankOffset)
  }

  def addr_to_dcache_div(addr: UInt) = {
    require(addr.getWidth >= DCacheAboveIndexOffset)
    if(DCacheSetDivBits == 0) 0.U else addr(DCacheSetOffset + DCacheSetDivBits - 1, DCacheSetOffset)
  }

  def addr_to_dcache_div_set(addr: UInt) = {
    require(addr.getWidth >= DCacheAboveIndexOffset)
    addr(DCacheAboveIndexOffset - 1, DCacheSetOffset + DCacheSetDivBits)
  }

  def addr_to_dcache_set(addr: UInt) = {
    require(addr.getWidth >= DCacheAboveIndexOffset)
    addr(DCacheAboveIndexOffset-1, DCacheSetOffset)
  }
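
  // Example slices (a sketch with the defaults, where DCacheSetDiv = 1 makes
  // DCacheSetDivBits = 0): addr_to_dcache_bank(addr) = addr(5, 3),
  // addr_to_dcache_set(addr) = addr(12, 6), and the *_div helpers degenerate
  // to 0.U and the plain set slice.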

  def get_data_of_bank(bank: Int, data: UInt) = {
    require(data.getWidth >= (bank+1)*DCacheSRAMRowBits)
    data(DCacheSRAMRowBits * (bank + 1) - 1, DCacheSRAMRowBits * bank)
  }

  def get_mask_of_bank(bank: Int, data: UInt) = {
    require(data.getWidth >= (bank+1)*DCacheSRAMRowBytes)
    data(DCacheSRAMRowBytes * (bank + 1) - 1, DCacheSRAMRowBytes * bank)
  }
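
  // Usage sketch (hypothetical caller): slicing a 512-bit line into per-bank
  // rows gives get_data_of_bank(2, line) = line(191, 128), with byte mask
  // get_mask_of_bank(2, mask) = mask(23, 16).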

  def get_alias(vaddr: UInt): UInt = {
    // require(blockOffBits + idxBits > pgIdxBits)
    if (blockOffBits + idxBits > pgIdxBits) {
      vaddr(blockOffBits + idxBits - 1, pgIdxBits)
    } else {
      0.U
    }
  }

  def is_alias_match(vaddr0: UInt, vaddr1: UInt): Bool = {
    require(vaddr0.getWidth == VAddrBits && vaddr1.getWidth == VAddrBits)
    if (blockOffBits + idxBits > pgIdxBits) {
      vaddr0(blockOffBits + idxBits - 1, pgIdxBits) === vaddr1(blockOffBits + idxBits - 1, pgIdxBits)
    } else {
      // no alias problem
      true.B
    }
  }

  def get_direct_map_way(addr: UInt): UInt = {
    addr(DCacheAboveIndexOffset + log2Up(DCacheWays) - 1, DCacheAboveIndexOffset)
  }

  def arbiter[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new Arbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    out <> arb.io.out
  }
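
  // Usage sketch (hypothetical ports): funnel two requesters into one port
  // with fixed priority on index 0, e.g.
  //   arbiter(Seq(ldMissReq, stMissReq), missReqOut, Some("miss_req"))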

  def arbiter_with_pipereg[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new Arbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    AddPipelineReg(arb.io.out, out, false.B)
  }

  def arbiter_with_pipereg_N_dup[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    dups: Seq[DecoupledIO[T]],
    name: Option[String] = None): Unit = {
    val arb = Module(new Arbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    for (dup <- dups) {
      AddPipelineReg(arb.io.out, dup, false.B)
    }
    AddPipelineReg(arb.io.out, out, false.B)
  }

  def rrArbiter[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new RRArbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    out <> arb.io.out
  }

  def fastArbiter[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new FastArbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    out <> arb.io.out
  }

  val numReplaceRespPorts = 2

  require(isPow2(nSets), s"nSets($nSets) must be pow2")
  require(isPow2(nWays), s"nWays($nWays) must be pow2")
  require(full_divide(rowBits, wordBits), s"rowBits($rowBits) must be multiple of wordBits($wordBits)")
  require(full_divide(beatBits, rowBits), s"beatBits($beatBits) must be multiple of rowBits($rowBits)")
}

abstract class DCacheModule(implicit p: Parameters) extends L1CacheModule
  with HasDCacheParameters

abstract class DCacheBundle(implicit p: Parameters) extends L1CacheBundle
  with HasDCacheParameters

class ReplacementAccessBundle(implicit p: Parameters) extends DCacheBundle {
  val set = UInt(log2Up(nSets).W)
  val way = UInt(log2Up(nWays).W)
}

class ReplacementWayReqIO(implicit p: Parameters) extends DCacheBundle {
  val set = ValidIO(UInt(log2Up(nSets).W))
  val dmWay = Output(UInt(log2Up(nWays).W))
  val way = Input(UInt(log2Up(nWays).W))
}

class DCacheExtraMeta(implicit p: Parameters) extends DCacheBundle
{
  val error = Bool() // cache line has been marked as corrupted by l2 / ecc error detected when storing
  val prefetch = UInt(L1PfSourceBits.W) // cache line is first required by prefetch
  val access = Bool() // cache line has been accessed by load / store

  // val debug_access_timestamp = UInt(64.W) // last time a load / store / refill accessed that cacheline
}

// memory request in word granularity (load, mmio, lr/sc, atomics)
class DCacheWordReq(implicit p: Parameters) extends DCacheBundle
{
  val cmd    = UInt(M_SZ.W)
  val vaddr  = UInt(VAddrBits.W)
  val data   = UInt(VLEN.W)
  val mask   = UInt((VLEN/8).W)
  val id     = UInt(reqIdWidth.W)
  val instrtype   = UInt(sourceTypeWidth.W)
  val isFirstIssue = Bool()
  val replayCarry = new ReplayCarry(nWays)
  val lqIdx = new LqPtr

  val debug_robIdx = UInt(log2Ceil(RobSize).W)
  def dump(cond: Bool) = {
    XSDebug(cond, "DCacheWordReq: cmd: %x vaddr: %x data: %x mask: %x id: %d\n",
      cmd, vaddr, data, mask, id)
  }
}

// memory request in cache line granularity (store)
class DCacheLineReq(implicit p: Parameters) extends DCacheBundle
{
  val cmd    = UInt(M_SZ.W)
  val vaddr  = UInt(VAddrBits.W)
  val addr   = UInt(PAddrBits.W)
  val data   = UInt((cfg.blockBytes * 8).W)
  val mask   = UInt(cfg.blockBytes.W)
  val id     = UInt(reqIdWidth.W)
  def dump(cond: Bool) = {
    XSDebug(cond, "DCacheLineReq: cmd: %x addr: %x data: %x mask: %x id: %d\n",
      cmd, addr, data, mask, id)
  }
  def idx: UInt = get_idx(vaddr)
}

class DCacheWordReqWithVaddr(implicit p: Parameters) extends DCacheWordReq {
  val addr = UInt(PAddrBits.W)
  val wline = Bool()
}

class DCacheWordReqWithVaddrAndPfFlag(implicit p: Parameters) extends DCacheWordReqWithVaddr {
  val prefetch = Bool()
  val vecValid = Bool()
  val sqNeedDeq = Bool()

  def toDCacheWordReqWithVaddr() = {
    val res = Wire(new DCacheWordReqWithVaddr)
    res.vaddr := vaddr
    res.wline := wline
    res.cmd := cmd
    res.addr := addr
    res.data := data
    res.mask := mask
    res.id := id
    res.instrtype := instrtype
    res.replayCarry := replayCarry
    res.isFirstIssue := isFirstIssue
    res.debug_robIdx := debug_robIdx

    res
  }
}

class BaseDCacheWordResp(implicit p: Parameters) extends DCacheBundle
{
  // read in s2
  val data = UInt(VLEN.W)
  // select in s3
  val data_delayed = UInt(VLEN.W)
  val id     = UInt(reqIdWidth.W)
  // cache req missed, send it to miss queue
  val miss   = Bool()
  // cache miss, and failed to enter the missqueue, replay from RS is needed
  val replay = Bool()
  val replayCarry = new ReplayCarry(nWays)
  // data has been corrupted
  val tag_error = Bool() // tag error
  val mshr_id = UInt(log2Up(cfg.nMissEntries).W)

  val debug_robIdx = UInt(log2Ceil(RobSize).W)
  def dump(cond: Bool) = {
    XSDebug(cond, "DCacheWordResp: data: %x id: %d miss: %b replay: %b\n",
      data, id, miss, replay)
  }
}

class DCacheWordResp(implicit p: Parameters) extends BaseDCacheWordResp
{
  val meta_prefetch = UInt(L1PfSourceBits.W)
  val meta_access = Bool()
  // s2
  val handled = Bool()
  val real_miss = Bool()
  // s3: 1 cycle after data resp
  val error_delayed = Bool() // all kinds of errors, including tag error
  val replacementUpdated = Bool()
}

class BankedDCacheWordResp(implicit p: Parameters) extends DCacheWordResp
{
  val bank_data = Vec(DCacheBanks, Bits(DCacheSRAMRowBits.W))
  val bank_oh = UInt(DCacheBanks.W)
}

class DCacheWordRespWithError(implicit p: Parameters) extends BaseDCacheWordResp
{
  val error = Bool() // all kinds of errors, including tag error
  val nderr = Bool()
}

class DCacheLineResp(implicit p: Parameters) extends DCacheBundle
{
  val data   = UInt((cfg.blockBytes * 8).W)
  // cache req missed, send it to miss queue
  val miss   = Bool()
  // cache req nacked, replay it later
  val replay = Bool()
  val id     = UInt(reqIdWidth.W)
  def dump(cond: Bool) = {
    XSDebug(cond, "DCacheLineResp: data: %x id: %d miss: %b replay: %b\n",
      data, id, miss, replay)
  }
}

class Refill(implicit p: Parameters) extends DCacheBundle
{
  val addr   = UInt(PAddrBits.W)
  val data   = UInt(l1BusDataWidth.W)
  val error  = Bool() // refilled data has been corrupted
  // for debug usage
  val data_raw = UInt((cfg.blockBytes * 8).W)
  val hasdata = Bool()
  val refill_done = Bool()
  def dump(cond: Bool) = {
    XSDebug(cond, "Refill: addr: %x data: %x\n", addr, data)
  }
  val id     = UInt(log2Up(cfg.nMissEntries).W)
}

class Release(implicit p: Parameters) extends DCacheBundle
{
  val paddr  = UInt(PAddrBits.W)
  def dump(cond: Bool) = {
    XSDebug(cond, "Release: paddr: %x\n", paddr(PAddrBits-1, DCacheTagOffset))
  }
}

class DCacheWordIO(implicit p: Parameters) extends DCacheBundle
{
  val req  = DecoupledIO(new DCacheWordReq)
  val resp = Flipped(DecoupledIO(new DCacheWordResp))
}


class UncacheWordReq(implicit p: Parameters) extends DCacheBundle
{
  val cmd  = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W) // for uncache buffer forwarding
  val data = UInt(XLEN.W)
  val mask = UInt((XLEN/8).W)
  val id   = UInt(uncacheIdxBits.W)
  val instrtype = UInt(sourceTypeWidth.W)
  val atomic = Bool()
  val nc = Bool()
  val memBackTypeMM = Bool()
  val isFirstIssue = Bool()
  val replayCarry = new ReplayCarry(nWays)

  def dump(cond: Bool) = {
    XSDebug(cond, "UncacheWordReq: cmd: %x addr: %x data: %x mask: %x id: %d\n",
      cmd, addr, data, mask, id)
  }
}

class UncacheWordResp(implicit p: Parameters) extends DCacheBundle
{
  val data      = UInt(XLEN.W)
  val data_delayed = UInt(XLEN.W)
  val id        = UInt(uncacheIdxBits.W) // resp identified signals
  val nc        = Bool() // resp identified signals
  val is2lq     = Bool() // resp identified signals
  val miss      = Bool()
  val replay    = Bool()
  val tag_error = Bool()
  val error     = Bool()
  val nderr     = Bool()
  val replayCarry = new ReplayCarry(nWays)
  val mshr_id = UInt(log2Up(cfg.nMissEntries).W)  // FIXME: why uncacheWordResp is not merged to baseDcacheResp

  val debug_robIdx = UInt(log2Ceil(RobSize).W)
  def dump(cond: Bool) = {
    XSDebug(cond, "UncacheWordResp: data: %x id: %d miss: %b replay: %b, tag_error: %b, error: %b\n",
      data, id, miss, replay, tag_error, error)
  }
}

class UncacheWordIO(implicit p: Parameters) extends DCacheBundle
{
  val req  = DecoupledIO(new UncacheWordReq)
  val resp = Flipped(DecoupledIO(new UncacheWordResp))
}

class MainPipeResp(implicit p: Parameters) extends DCacheBundle {
  // distinguish amo
  val source  = UInt(sourceTypeWidth.W)
  val data    = UInt(QuadWordBits.W)
  val miss    = Bool()
  val miss_id = UInt(log2Up(cfg.nMissEntries).W)
  val replay  = Bool()
  val error   = Bool()

  val ack_miss_queue = Bool()

  val id     = UInt(reqIdWidth.W)

  def isAMO: Bool = source === AMO_SOURCE.U
  def isStore: Bool = source === STORE_SOURCE.U
}

class AtomicWordIO(implicit p: Parameters) extends DCacheBundle
{
  val req  = DecoupledIO(new MainPipeReq)
  val resp = Flipped(ValidIO(new MainPipeResp))
  val block_lr = Input(Bool())
}

class CMOReq(implicit p: Parameters) extends Bundle {
  val opcode = UInt(3.W)   // 0-cbo.clean, 1-cbo.flush, 2-cbo.inval, 3-cbo.zero
  val address = UInt(64.W)
}

class CMOResp(implicit p: Parameters) extends Bundle {
  val address = UInt(64.W)
}

// used by load unit
class DCacheLoadIO(implicit p: Parameters) extends DCacheWordIO
{
  // kill previous cycle's req
  val s1_kill_data_read = Output(Bool()) // only kill bankedDataRead at s1
  val s1_kill           = Output(Bool()) // kill loadpipe req at s1
  val s2_kill           = Output(Bool())
  val s0_pc             = Output(UInt(VAddrBits.W))
  val s1_pc             = Output(UInt(VAddrBits.W))
  val s2_pc             = Output(UInt(VAddrBits.W))
  // cycle 0: load has updated replacement before
  val replacementUpdated = Output(Bool())
  val is128Req = Bool()
  // cycle 0: prefetch source bits
  val pf_source = Output(UInt(L1PfSourceBits.W))
  // cycle 0: load microop
  // val s0_uop = Output(new MicroOp)
  // cycle 0: virtual address: req.addr
  // cycle 1: physical address: s1_paddr
  val s1_paddr_dup_lsu = Output(UInt(PAddrBits.W)) // lsu side paddr
  val s1_paddr_dup_dcache = Output(UInt(PAddrBits.W)) // dcache side paddr
  val s1_disable_fast_wakeup = Input(Bool())
  // cycle 2: hit signal
  val s2_hit = Input(Bool()) // hit signal for lsu
  val s2_first_hit = Input(Bool())
  val s2_bank_conflict = Input(Bool())
  val s2_wpu_pred_fail = Input(Bool())
  val s2_mq_nack = Input(Bool())

  // debug
  val debug_s1_hit_way = Input(UInt(nWays.W))
  val debug_s2_pred_way_num = Input(UInt(XLEN.W))
  val debug_s2_dm_way_num = Input(UInt(XLEN.W))
  val debug_s2_real_way_num = Input(UInt(XLEN.W))
}

class DCacheLineIO(implicit p: Parameters) extends DCacheBundle
{
  val req  = DecoupledIO(new DCacheLineReq)
  val resp = Flipped(DecoupledIO(new DCacheLineResp))
}

class DCacheToSbufferIO(implicit p: Parameters) extends DCacheBundle {
  // sbuffer will directly send request to dcache main pipe
  val req = Flipped(Decoupled(new DCacheLineReq))

  val main_pipe_hit_resp = ValidIO(new DCacheLineResp)
  //val refill_hit_resp = ValidIO(new DCacheLineResp)

  val replay_resp = ValidIO(new DCacheLineResp)

  //def hit_resps: Seq[ValidIO[DCacheLineResp]] = Seq(main_pipe_hit_resp, refill_hit_resp)
  def hit_resps: Seq[ValidIO[DCacheLineResp]] = Seq(main_pipe_hit_resp)
}

// forward tilelink channel D's data to ldu
class DcacheToLduForwardIO(implicit p: Parameters) extends DCacheBundle {
  val valid = Bool()
  val data = UInt(l1BusDataWidth.W)
  val mshrid = UInt(log2Up(cfg.nMissEntries).W)
  val last = Bool()
  val corrupt = Bool()

  def apply(d: DecoupledIO[TLBundleD], edge: TLEdgeOut) = {
    val isKeyword = d.bits.echo.lift(IsKeywordKey).getOrElse(false.B)
    val (_, _, done, _) = edge.count(d)
    valid := d.valid
    data := d.bits.data
    mshrid := d.bits.source
    last := isKeyword ^ done
    corrupt := d.bits.corrupt || d.bits.denied
  }

  def dontCare() = {
    valid := false.B
    data := DontCare
    mshrid := DontCare
    last := DontCare
    corrupt := false.B
  }

  def forward(req_valid : Bool, req_mshr_id : UInt, req_paddr : UInt) = {
    val all_match = req_valid && valid &&
                req_mshr_id === mshrid &&
                req_paddr(log2Up(refillBytes)) === last
    val forward_D = RegInit(false.B)
    val forwardData = RegInit(VecInit(List.fill(VLEN/8)(0.U(8.W))))

    val block_idx = req_paddr(log2Up(refillBytes) - 1, 3)
    val block_data = Wire(Vec(l1BusDataWidth / 64, UInt(64.W)))
    (0 until l1BusDataWidth / 64).map(i => {
      block_data(i) := data(64 * i + 63, 64 * i)
    })
    val selected_data = Wire(UInt(128.W))
    selected_data := Mux(req_paddr(3), Fill(2, block_data(block_idx)), Cat(block_data(block_idx + 1.U), block_data(block_idx)))

    forward_D := all_match
    for (i <- 0 until VLEN/8) {
      when (all_match) {
        forwardData(i) := selected_data(8 * i + 7, 8 * i)
      }
    }

    (forward_D, forwardData, corrupt)
  }
}
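
// A worked sketch of forward() above, assuming refillBytes = l1BusDataWidth / 8
// = 32 (a 256-bit bus): req_paddr(5) selects the beat and is matched against
// `last`, block_idx = req_paddr(4, 3) picks a 64-bit word within the beat, and
// req_paddr(3) either duplicates that word or concatenates it with its upper
// neighbor to fill the 128-bit forwarding window.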

class MissEntryForwardIO(implicit p: Parameters) extends DCacheBundle {
  val inflight = Bool()
  val paddr = UInt(PAddrBits.W)
  val raw_data = Vec(blockRows, UInt(rowBits.W))
  val firstbeat_valid = Bool()
  val lastbeat_valid = Bool()
  val corrupt = Bool()

  // check if we can forward from mshr or D channel
  def check(req_valid : Bool, req_paddr : UInt) = {
    RegNext(req_valid && inflight && req_paddr(PAddrBits - 1, blockOffBits) === paddr(PAddrBits - 1, blockOffBits)) // TODO: clock gate(1-bit)
  }

  def forward(req_valid : Bool, req_paddr : UInt) = {
    val all_match = (req_paddr(log2Up(refillBytes)) === 0.U && firstbeat_valid) ||
                    (req_paddr(log2Up(refillBytes)) === 1.U && lastbeat_valid)

    val forward_mshr = RegInit(false.B)
    val forwardData = RegInit(VecInit(List.fill(VLEN/8)(0.U(8.W))))

    val block_idx = req_paddr(log2Up(refillBytes), 3)
    val block_data = raw_data

    val selected_data = Wire(UInt(128.W))
    selected_data := Mux(req_paddr(3), Fill(2, block_data(block_idx)), Cat(block_data(block_idx + 1.U), block_data(block_idx)))

    forward_mshr := all_match
    for (i <- 0 until VLEN/8) {
      forwardData(i) := selected_data(8 * i + 7, 8 * i)
    }

    (forward_mshr, forwardData)
  }
}

// forward mshr's data to ldu
class LduToMissqueueForwardIO(implicit p: Parameters) extends DCacheBundle {
  // TODO: use separate Bundles for req and resp
  // req
  val valid = Input(Bool())
  val mshrid = Input(UInt(log2Up(cfg.nMissEntries).W))
  val paddr = Input(UInt(PAddrBits.W))
  // resp
  val forward_mshr = Output(Bool())
  val forwardData = Output(Vec(VLEN/8, UInt(8.W)))
  val forward_result_valid = Output(Bool())
  val corrupt = Output(Bool())

  // TODO: document why this manual `connect` helper is needed
  def connect(sink: LduToMissqueueForwardIO) = {
    sink.valid := valid
    sink.mshrid := mshrid
    sink.paddr := paddr
    forward_mshr := sink.forward_mshr
    forwardData := sink.forwardData
    forward_result_valid := sink.forward_result_valid
    corrupt := sink.corrupt
  }

  def forward() = {
    (forward_result_valid, forward_mshr, forwardData, corrupt)
  }
}

class StorePrefetchReq(implicit p: Parameters) extends DCacheBundle {
  val paddr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
}

class DCacheToLsuIO(implicit p: Parameters) extends DCacheBundle {
  val load  = Vec(LoadPipelineWidth, Flipped(new DCacheLoadIO)) // for speculative load
  val sta   = Vec(StorePipelineWidth, Flipped(new DCacheStoreIO)) // for non-blocking store
  //val lsq = ValidIO(new Refill)  // refill to load queue, wake up load misses
  val tl_d_channel = Output(new DcacheToLduForwardIO)
  val store = new DCacheToSbufferIO // for sbuffer
  val atomics  = Flipped(new AtomicWordIO)  // atomics reqs
  val release = ValidIO(new Release) // cacheline release hint for ld-ld violation check
  val forward_D = Output(Vec(LoadPipelineWidth, new DcacheToLduForwardIO))
  val forward_mshr = Vec(LoadPipelineWidth, new LduToMissqueueForwardIO)
}

class DCacheTopDownIO(implicit p: Parameters) extends DCacheBundle {
  val robHeadVaddr = Flipped(Valid(UInt(VAddrBits.W)))
  val robHeadMissInDCache = Output(Bool())
  val robHeadOtherReplay = Input(Bool())
}

class DCacheIO(implicit p: Parameters) extends DCacheBundle {
  val hartId = Input(UInt(hartIdLen.W))
  val l2_pf_store_only = Input(Bool())
  val lsu = new DCacheToLsuIO
  val error = ValidIO(new L1CacheErrorInfo)
  val mshrFull = Output(Bool())
  val memSetPattenDetected = Output(Bool())
  val lqEmpty = Input(Bool())
  val pf_ctrl = Output(new PrefetchControlBundle)
  val force_write = Input(Bool())
  val sms_agt_evict_req = DecoupledIO(new AGTEvictReq)
  val debugTopDown = new DCacheTopDownIO
  val debugRolling = Flipped(new RobDebugRollingIO)
  val l2_hint = Input(Valid(new L2ToL1Hint()))
  val cmoOpReq = Flipped(DecoupledIO(new CMOReq))
  val cmoOpResp = DecoupledIO(new CMOResp)
}

private object ArbiterCtrl {
  def apply(request: Seq[Bool]): Seq[Bool] = request.length match {
    case 0 => Seq()
    case 1 => Seq(true.B)
    case _ => true.B +: request.tail.init.scanLeft(request.head)(_ || _).map(!_)
  }
}
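
// Worked example of ArbiterCtrl (a sketch for n = 3): the grant vector is
// Seq(true.B, !req(0), !(req(0) || req(1))) -- requester i is granted only
// when no lower-indexed requester is valid, i.e. fixed priority on index 0.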

class TreeArbiter[T <: MissReqWoStoreData](val gen: T, val n: Int) extends Module {
  val io = IO(new ArbiterIO(gen, n))

  def selectTree(in: Vec[Valid[T]], sIdx: UInt): Tuple2[UInt, T] = {
    if (in.length == 1) {
      (sIdx, in(0).bits)
    } else if (in.length == 2) {
      (
        Mux(in(0).valid, sIdx, sIdx + 1.U),
        Mux(in(0).valid, in(0).bits, in(1).bits)
      )
    } else {
      val half = in.length / 2
      val leftValid = in.slice(0, half).map(_.valid).reduce(_ || _)
      val (leftIdx, leftSel) = selectTree(VecInit(in.slice(0, half)), sIdx)
      val (rightIdx, rightSel) = selectTree(VecInit(in.slice(half, in.length)), sIdx + half.U)
      (
        Mux(leftValid, leftIdx, rightIdx),
        Mux(leftValid, leftSel, rightSel)
      )
    }
  }
  val ins = Wire(Vec(n, Valid(gen)))
  for (i <- 0 until n) {
    ins(i).valid := io.in(i).valid
    ins(i).bits  := io.in(i).bits
  }
  val (idx, sel) = selectTree(ins, 0.U)
  // NOTE: io.chosen is very slow, don't use it
  io.chosen := idx
  io.out.bits := sel

  val grant = ArbiterCtrl(io.in.map(_.valid))
  for ((in, g) <- io.in.zip(grant))
    in.ready := g && io.out.ready
  io.out.valid := !grant.last || io.in.last.valid
}
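
// Selection sketch for n = 4 (an illustration, not new behavior): selectTree
// splits the inputs into halves {0, 1} and {2, 3}; whenever anything in the
// left half is valid its result wins, so the payload mux realizes the same
// index-0-first priority that ArbiterCtrl applies to the ready/grant signals.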

class DCacheMEQueryIOBundle(implicit p: Parameters) extends DCacheBundle
{
  val req              = ValidIO(new MissReqWoStoreData)
  val primary_ready    = Input(Bool())
  val secondary_ready  = Input(Bool())
  val secondary_reject = Input(Bool())
}

class DCacheMQQueryIOBundle(implicit p: Parameters) extends DCacheBundle
{
  val req    = ValidIO(new MissReq)
  val ready  = Input(Bool())
}

class MissReadyGen(val n: Int)(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val in = Vec(n, Flipped(DecoupledIO(new MissReq)))
    val queryMQ = Vec(n, new DCacheMQQueryIOBundle)
  })

  val mqReadyVec = io.queryMQ.map(_.ready)

  io.queryMQ.zipWithIndex.foreach {
    case (q, idx) => {
      q.req.valid := io.in(idx).valid
      q.req.bits  := io.in(idx).bits
    }
  }
  io.in.zipWithIndex.foreach {
    case (r, idx) => {
      if (idx == 0) {
        r.ready := mqReadyVec(idx)
      } else {
        r.ready := mqReadyVec(idx) && !Cat(io.in.slice(0, idx).map(_.valid)).orR
      }
    }
  }
}
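
// Ready-generation sketch for MissReadyGen (an illustration): in(0) is ready
// whenever the miss queue reports ready; in(i) for i > 0 is ready only when
// the queue is ready and no lower-indexed input is valid, so the ready each
// requester sees agrees with the fixed-priority grant of TreeArbiter above.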

class DCache()(implicit p: Parameters) extends LazyModule with HasDCacheParameters {
  override def shouldBeInlined: Boolean = false

  val reqFields: Seq[BundleFieldBase] = Seq(
    PrefetchField(),
    ReqSourceField(),
    VaddrField(VAddrBits - blockOffBits),
  //  IsKeywordField()
  ) ++ cacheParams.aliasBitsOpt.map(AliasField)
  val echoFields: Seq[BundleFieldBase] = Seq(
    IsKeywordField()
  )

  val clientParameters = TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(
      name = "dcache",
      sourceId = IdRange(0, nEntries + 1),
      supportsProbe = TransferSizes(cfg.blockBytes)
    )),
    requestFields = reqFields,
    echoFields = echoFields
  )

  val clientNode = TLClientNode(Seq(clientParameters))
  val cacheCtrlOpt = cacheCtrlParamsOpt.map(params => LazyModule(new CtrlUnit(params)))

  lazy val module = new DCacheImp(this)
}


class DCacheImp(outer: DCache) extends LazyModuleImp(outer) with HasDCacheParameters with HasPerfEvents with HasL1PrefetchSourceParameter {

  val io = IO(new DCacheIO)

  val (bus, edge) = outer.clientNode.out.head
  require(bus.d.bits.data.getWidth == l1BusDataWidth, "DCache: tilelink width does not match")

  println("DCache:")
  println("  DCacheSets: " + DCacheSets)
  println("  DCacheSetDiv: " + DCacheSetDiv)
  println("  DCacheWays: " + DCacheWays)
  println("  DCacheBanks: " + DCacheBanks)
  println("  DCacheSRAMRowBits: " + DCacheSRAMRowBits)
  println("  DCacheWordOffset: " + DCacheWordOffset)
  println("  DCacheBankOffset: " + DCacheBankOffset)
  println("  DCacheSetOffset: " + DCacheSetOffset)
  println("  DCacheTagOffset: " + DCacheTagOffset)
  println("  DCacheAboveIndexOffset: " + DCacheAboveIndexOffset)
  println("  DcacheMaxPrefetchEntry: " + MaxPrefetchEntry)
  println("  WPUEnable: " + dwpuParam.enWPU)
  println("  WPUEnableCfPred: " + dwpuParam.enCfPred)
  println("  WPUAlgorithm: " + dwpuParam.algoName)
  println("  HasCMO: " + HasCMO)

  // Enable L1 Store prefetch
  val StorePrefetchL1Enabled = EnableStorePrefetchAtCommit || EnableStorePrefetchAtIssue || EnableStorePrefetchSPB
  val MetaReadPort =
        if (StorePrefetchL1Enabled)
          1 + backendParams.LduCnt + backendParams.StaCnt + backendParams.HyuCnt
        else
          1 + backendParams.LduCnt + backendParams.HyuCnt
  val TagReadPort =
        if (StorePrefetchL1Enabled)
          1 + backendParams.LduCnt + backendParams.StaCnt + backendParams.HyuCnt
        else
          1 + backendParams.LduCnt + backendParams.HyuCnt

  // Enable L1 Load prefetch
  val LoadPrefetchL1Enabled = true
  val AccessArrayReadPort = if(LoadPrefetchL1Enabled) LoadPipelineWidth + 1 + 1 else LoadPipelineWidth + 1
  val PrefetchArrayReadPort = if(LoadPrefetchL1Enabled) LoadPipelineWidth + 1 + 1 else LoadPipelineWidth + 1

  //----------------------------------------
  // core data structures
  val bankedDataArray = if(dwpuParam.enWPU) Module(new SramedDataArray) else Module(new BankedDataArray)
  val metaArray = Module(new L1CohMetaArray(readPorts = LoadPipelineWidth + 1, writePorts = 1))
  val errorArray = Module(new L1FlagMetaArray(readPorts = LoadPipelineWidth + 1, writePorts = 1))
  val prefetchArray = Module(new L1PrefetchSourceArray(readPorts = PrefetchArrayReadPort, writePorts = 1 + LoadPipelineWidth)) // prefetch flag array
  val accessArray = Module(new L1FlagMetaArray(readPorts = AccessArrayReadPort, writePorts = LoadPipelineWidth + 1))
  val tagArray = Module(new DuplicatedTagArray(readPorts = TagReadPort))
  val prefetcherMonitor = Module(new PrefetcherMonitor)
  val fdpMonitor = Module(new FDPrefetcherMonitor)
  val bloomFilter = Module(new BloomFilter(BLOOM_FILTER_ENTRY_NUM, true))
  val counterFilter = Module(new CounterFilter)
  bankedDataArray.dump()

  //----------------------------------------
  // miss queue
  // missReqArb port:
  // enableStorePrefetch: main pipe * 1 + load pipe * 2 + store pipe * 1 +
  // hybrid * 1; disable: main pipe * 1 + load pipe * 2 + hybrid * 1
  // higher priority is given to lower indices
  val MissReqPortCount = if(StorePrefetchL1Enabled) 1 + backendParams.LduCnt + backendParams.StaCnt + backendParams.HyuCnt else 1 + backendParams.LduCnt + backendParams.HyuCnt
  val MainPipeMissReqPort = 0
  val HybridMissReqBase = MissReqPortCount - backendParams.HyuCnt

  //----------------------------------------
  // core modules
  val ldu = Seq.tabulate(LoadPipelineWidth)({ i => Module(new LoadPipe(i))})
  val stu = Seq.tabulate(StorePipelineWidth)({ i => Module(new StorePipe(i))})
  val mainPipe     = Module(new MainPipe)
  // val refillPipe   = Module(new RefillPipe)
  val missQueue    = Module(new MissQueue(edge, MissReqPortCount))
  val probeQueue   = Module(new ProbeQueue(edge))
  val wb           = Module(new WritebackQueue(edge))

  missQueue.io.lqEmpty := io.lqEmpty
  missQueue.io.hartId := io.hartId
  missQueue.io.l2_pf_store_only := RegNext(io.l2_pf_store_only, false.B)
  missQueue.io.debugTopDown <> io.debugTopDown
  missQueue.io.l2_hint <> RegNext(io.l2_hint)
  missQueue.io.mainpipe_info := mainPipe.io.mainpipe_info
  mainPipe.io.refill_info := missQueue.io.refill_info
  mainPipe.io.replace_block := missQueue.io.replace_block
  mainPipe.io.sms_agt_evict_req <> io.sms_agt_evict_req
  io.memSetPattenDetected := missQueue.io.memSetPattenDetected

  // l1 dcache controller
  outer.cacheCtrlOpt.foreach {
    case mod =>
      mod.module.io_pseudoError.foreach {
        case x => x.ready := false.B
      }
  }
  ldu.foreach {
    case mod =>
      mod.io.pseudo_error.valid := false.B
      mod.io.pseudo_error.bits := DontCare
  }
  mainPipe.io.pseudo_error.valid := false.B
  mainPipe.io.pseudo_error.bits  := DontCare
  bankedDataArray.io.pseudo_error.valid := false.B
  bankedDataArray.io.pseudo_error.bits  := DontCare

  // pseudo tag ecc error
  if (outer.cacheCtrlOpt.nonEmpty && EnableTagEcc) {
    val ctrlUnit = outer.cacheCtrlOpt.head.module
    ldu.map(mod => mod.io.pseudo_error <> ctrlUnit.io_pseudoError(0))
    mainPipe.io.pseudo_error <> ctrlUnit.io_pseudoError(0)
    ctrlUnit.io_pseudoError(0).ready := mainPipe.io.pseudo_tag_error_inj_done ||
                                        ldu.map(_.io.pseudo_tag_error_inj_done).reduce(_|_)
  }

  // pseudo data ecc error
  if (outer.cacheCtrlOpt.nonEmpty && EnableDataEcc) {
    val ctrlUnit = outer.cacheCtrlOpt.head.module
    bankedDataArray.io.pseudo_error <> ctrlUnit.io_pseudoError(1)
    ctrlUnit.io_pseudoError(1).ready := bankedDataArray.io.pseudo_error.ready &&
                                        (mainPipe.io.pseudo_data_error_inj_done ||
                                         ldu.map(_.io.pseudo_data_error_inj_done).reduce(_|_))
  }

  val errors = ldu.map(_.io.error) ++ // load error
    Seq(mainPipe.io.error) // store / misc error
  val error_valid = errors.map(e => e.valid).reduce(_|_)
  io.error.bits <> RegEnable(
    Mux1H(errors.map(e => RegNext(e.valid) -> RegEnable(e.bits, e.valid))),
    RegNext(error_valid))
  io.error.valid := RegNext(RegNext(error_valid, init = false.B), init = false.B)

  //----------------------------------------
  // meta array
  val HybridLoadReadBase = LoadPipelineWidth - backendParams.HyuCnt
  val HybridStoreReadBase = StorePipelineWidth - backendParams.HyuCnt

  val hybrid_meta_read_ports = Wire(Vec(backendParams.HyuCnt, DecoupledIO(new MetaReadReq)))
  val hybrid_meta_resp_ports = Wire(Vec(backendParams.HyuCnt, ldu(0).io.meta_resp.cloneType))
  for (i <- 0 until backendParams.HyuCnt) {
    val HybridLoadMetaReadPort = HybridLoadReadBase + i
    val HybridStoreMetaReadPort = HybridStoreReadBase + i

    hybrid_meta_read_ports(i).valid := ldu(HybridLoadMetaReadPort).io.meta_read.valid ||
                                       (stu(HybridStoreMetaReadPort).io.meta_read.valid && StorePrefetchL1Enabled.B)
    hybrid_meta_read_ports(i).bits := Mux(ldu(HybridLoadMetaReadPort).io.meta_read.valid, ldu(HybridLoadMetaReadPort).io.meta_read.bits,
                                          stu(HybridStoreMetaReadPort).io.meta_read.bits)

    ldu(HybridLoadMetaReadPort).io.meta_read.ready := hybrid_meta_read_ports(i).ready
    stu(HybridStoreMetaReadPort).io.meta_read.ready := hybrid_meta_read_ports(i).ready && StorePrefetchL1Enabled.B

    ldu(HybridLoadMetaReadPort).io.meta_resp := hybrid_meta_resp_ports(i)
    stu(HybridStoreMetaReadPort).io.meta_resp := hybrid_meta_resp_ports(i)
  }

  // read / write coh meta
  val meta_read_ports = ldu.map(_.io.meta_read).take(HybridLoadReadBase) ++
    Seq(mainPipe.io.meta_read) ++
    stu.map(_.io.meta_read).take(HybridStoreReadBase) ++ hybrid_meta_read_ports

  val meta_resp_ports = ldu.map(_.io.meta_resp).take(HybridLoadReadBase) ++
    Seq(mainPipe.io.meta_resp) ++
    stu.map(_.io.meta_resp).take(HybridStoreReadBase) ++ hybrid_meta_resp_ports

  val meta_write_ports = Seq(
    mainPipe.io.meta_write
    // refillPipe.io.meta_write
  )
  if(StorePrefetchL1Enabled) {
    meta_read_ports.zip(metaArray.io.read).foreach { case (p, r) => r <> p }
    meta_resp_ports.zip(metaArray.io.resp).foreach { case (p, r) => p := r }
  } else {
    (meta_read_ports.take(HybridLoadReadBase + 1) ++
     meta_read_ports.takeRight(backendParams.HyuCnt)).zip(metaArray.io.read).foreach { case (p, r) => r <> p }
    (meta_resp_ports.take(HybridLoadReadBase + 1) ++
     meta_resp_ports.takeRight(backendParams.HyuCnt)).zip(metaArray.io.resp).foreach { case (p, r) => p := r }

    meta_read_ports.drop(HybridLoadReadBase + 1).take(HybridStoreReadBase).foreach { case p => p.ready := false.B }
    meta_resp_ports.drop(HybridLoadReadBase + 1).take(HybridStoreReadBase).foreach { case p => p := 0.U.asTypeOf(p) }
  }
  meta_write_ports.zip(metaArray.io.write).foreach { case (p, w) => w <> p }

  // read extra meta (exclude stu)
  (meta_read_ports.take(HybridLoadReadBase + 1) ++
   meta_read_ports.takeRight(backendParams.HyuCnt)).zip(errorArray.io.read).foreach { case (p, r) => r <> p }
  (meta_read_ports.take(HybridLoadReadBase + 1) ++
   meta_read_ports.takeRight(backendParams.HyuCnt)).zip(prefetchArray.io.read).foreach { case (p, r) => r <> p }
  (meta_read_ports.take(HybridLoadReadBase + 1) ++
   meta_read_ports.takeRight(backendParams.HyuCnt)).zip(accessArray.io.read).foreach { case (p, r) => r <> p }
  val extra_meta_resp_ports = ldu.map(_.io.extra_meta_resp).take(HybridLoadReadBase) ++
    Seq(mainPipe.io.extra_meta_resp) ++
    ldu.map(_.io.extra_meta_resp).takeRight(backendParams.HyuCnt)
  extra_meta_resp_ports.zip(errorArray.io.resp).foreach { case (p, r) => {
    (0 until nWays).map(i => { p(i).error := r(i) })
  }}
  extra_meta_resp_ports.zip(prefetchArray.io.resp).foreach { case (p, r) => {
    (0 until nWays).map(i => { p(i).prefetch := r(i) })
  }}
  extra_meta_resp_ports.zip(accessArray.io.resp).foreach { case (p, r) => {
    (0 until nWays).map(i => { p(i).access := r(i) })
  }}

  if(LoadPrefetchL1Enabled) {
    // use last port to read prefetch and access flag
//    prefetchArray.io.read.last.valid := refillPipe.io.prefetch_flag_write.valid
//    prefetchArray.io.read.last.bits.idx := refillPipe.io.prefetch_flag_write.bits.idx
//    prefetchArray.io.read.last.bits.way_en := refillPipe.io.prefetch_flag_write.bits.way_en
//
//    accessArray.io.read.last.valid := refillPipe.io.prefetch_flag_write.valid
//    accessArray.io.read.last.bits.idx := refillPipe.io.prefetch_flag_write.bits.idx
//    accessArray.io.read.last.bits.way_en := refillPipe.io.prefetch_flag_write.bits.way_en
    prefetchArray.io.read.last.valid := mainPipe.io.prefetch_flag_write.valid
    prefetchArray.io.read.last.bits.idx := mainPipe.io.prefetch_flag_write.bits.idx
    prefetchArray.io.read.last.bits.way_en := mainPipe.io.prefetch_flag_write.bits.way_en

    accessArray.io.read.last.valid := mainPipe.io.prefetch_flag_write.valid
    accessArray.io.read.last.bits.idx := mainPipe.io.prefetch_flag_write.bits.idx
    accessArray.io.read.last.bits.way_en := mainPipe.io.prefetch_flag_write.bits.way_en

    val extra_flag_valid = RegNext(mainPipe.io.prefetch_flag_write.valid)
    val extra_flag_way_en = RegEnable(mainPipe.io.prefetch_flag_write.bits.way_en, mainPipe.io.prefetch_flag_write.valid)
    val extra_flag_prefetch = Mux1H(extra_flag_way_en, prefetchArray.io.resp.last)
    val extra_flag_access = Mux1H(extra_flag_way_en, accessArray.io.resp.last)

    prefetcherMonitor.io.validity.good_prefetch := extra_flag_valid && isPrefetchRelated(extra_flag_prefetch) && extra_flag_access
    prefetcherMonitor.io.validity.bad_prefetch := extra_flag_valid && isPrefetchRelated(extra_flag_prefetch) && !extra_flag_access
  }

  // write extra meta
  val error_flag_write_ports = Seq(
    mainPipe.io.error_flag_write // error flag generated by corrupted store
    // refillPipe.io.error_flag_write // corrupted signal from l2
  )
  error_flag_write_ports.zip(errorArray.io.write).foreach { case (p, w) => w <> p }

  val prefetch_flag_write_ports = ldu.map(_.io.prefetch_flag_write) ++ Seq(
    mainPipe.io.prefetch_flag_write // set prefetch_flag to false if coh is set to Nothing
    // refillPipe.io.prefetch_flag_write // refill required by prefetch will set prefetch_flag
  )
  prefetch_flag_write_ports.zip(prefetchArray.io.write).foreach { case (p, w) => w <> p }

  // FIXME: add hybrid unit?
  val same_cycle_update_pf_flag = ldu(0).io.prefetch_flag_write.valid && ldu(1).io.prefetch_flag_write.valid && (ldu(0).io.prefetch_flag_write.bits.idx === ldu(1).io.prefetch_flag_write.bits.idx) && (ldu(0).io.prefetch_flag_write.bits.way_en === ldu(1).io.prefetch_flag_write.bits.way_en)
  XSPerfAccumulate("same_cycle_update_pf_flag", same_cycle_update_pf_flag)

  val access_flag_write_ports = ldu.map(_.io.access_flag_write) ++ Seq(
    mainPipe.io.access_flag_write
    // refillPipe.io.access_flag_write
  )
  access_flag_write_ports.zip(accessArray.io.write).foreach { case (p, w) => w <> p }

  //----------------------------------------
  // tag array
  if(StorePrefetchL1Enabled) {
    require(tagArray.io.read.size == (LoadPipelineWidth + StorePipelineWidth - backendParams.HyuCnt + 1))
  } else {
    require(tagArray.io.read.size == (LoadPipelineWidth + 1))
  }
  // val tag_write_intend = missQueue.io.refill_pipe_req.valid || mainPipe.io.tag_write_intend
  val tag_write_intend = mainPipe.io.tag_write_intend
  assert(!RegNext(!tag_write_intend && tagArray.io.write.valid))
  ldu.take(HybridLoadReadBase).zipWithIndex.foreach {
    case (ld, i) =>
      tagArray.io.read(i) <> ld.io.tag_read
      ld.io.tag_resp := tagArray.io.resp(i)
      ld.io.tag_read.ready := !tag_write_intend
  }
  if(StorePrefetchL1Enabled) {
    stu.take(HybridStoreReadBase).zipWithIndex.foreach {
      case (st, i) =>
        tagArray.io.read(HybridLoadReadBase + i) <> st.io.tag_read
        st.io.tag_resp := tagArray.io.resp(HybridLoadReadBase + i)
        st.io.tag_read.ready := !tag_write_intend
    }
  } else {
    stu.foreach {
      case st =>
        st.io.tag_read.ready := false.B
        st.io.tag_resp := 0.U.asTypeOf(st.io.tag_resp)
    }
  }
  for (i <- 0 until backendParams.HyuCnt) {
    val HybridLoadTagReadPort = HybridLoadReadBase + i
    val HybridStoreTagReadPort = HybridStoreReadBase + i
    val TagReadPort =
      if (EnableStorePrefetchSPB)
        HybridLoadReadBase + HybridStoreReadBase + i
      else
        HybridLoadReadBase + i

    // read tag
    ldu(HybridLoadTagReadPort).io.tag_read.ready := false.B
    stu(HybridStoreTagReadPort).io.tag_read.ready := false.B

    if (StorePrefetchL1Enabled) {
      when (ldu(HybridLoadTagReadPort).io.tag_read.valid) {
        tagArray.io.read(TagReadPort) <> ldu(HybridLoadTagReadPort).io.tag_read
        ldu(HybridLoadTagReadPort).io.tag_read.ready := !tag_write_intend
      } .otherwise {
        tagArray.io.read(TagReadPort) <> stu(HybridStoreTagReadPort).io.tag_read
        stu(HybridStoreTagReadPort).io.tag_read.ready := !tag_write_intend
      }
    } else {
      tagArray.io.read(TagReadPort) <> ldu(HybridLoadTagReadPort).io.tag_read
      ldu(HybridLoadTagReadPort).io.tag_read.ready := !tag_write_intend
    }

    // tag resp
    ldu(HybridLoadTagReadPort).io.tag_resp := tagArray.io.resp(TagReadPort)
    stu(HybridStoreTagReadPort).io.tag_resp := tagArray.io.resp(TagReadPort)
  }
  tagArray.io.read.last <> mainPipe.io.tag_read
  mainPipe.io.tag_resp := tagArray.io.resp.last

  val fake_tag_read_conflict_this_cycle = PopCount(ldu.map(ld => ld.io.tag_read.valid))
  XSPerfAccumulate("fake_tag_read_conflict", fake_tag_read_conflict_this_cycle)

  val tag_write_arb = Module(new Arbiter(new TagWriteReq, 1))
  // tag_write_arb.io.in(0) <> refillPipe.io.tag_write
  tag_write_arb.io.in(0) <> mainPipe.io.tag_write
  tagArray.io.write <> tag_write_arb.io.out

  ldu.map(m => {
    m.io.vtag_update.valid := tagArray.io.write.valid
    m.io.vtag_update.bits := tagArray.io.write.bits
  })

  //----------------------------------------
  // data array
  mainPipe.io.data_read.zip(ldu).map(x => x._1 := x._2.io.lsu.req.valid)

  val dataWriteArb = Module(new Arbiter(new L1BankedDataWriteReq, 1))
  // dataWriteArb.io.in(0) <> refillPipe.io.data_write
  dataWriteArb.io.in(0) <> mainPipe.io.data_write

  bankedDataArray.io.write <> dataWriteArb.io.out

  for (bank <- 0 until DCacheBanks) {
    val dataWriteArb_dup = Module(new Arbiter(new L1BankedDataWriteReqCtrl, 1))
    // dataWriteArb_dup.io.in(0).valid := refillPipe.io.data_write_dup(bank).valid
    // dataWriteArb_dup.io.in(0).bits := refillPipe.io.data_write_dup(bank).bits
    dataWriteArb_dup.io.in(0).valid := mainPipe.io.data_write_dup(bank).valid
    dataWriteArb_dup.io.in(0).bits := mainPipe.io.data_write_dup(bank).bits

    bankedDataArray.io.write_dup(bank) <> dataWriteArb_dup.io.out
  }

  bankedDataArray.io.readline <> mainPipe.io.data_readline
  bankedDataArray.io.readline_intend := mainPipe.io.data_read_intend
  mainPipe.io.readline_error_delayed := bankedDataArray.io.readline_error_delayed
  mainPipe.io.data_resp := bankedDataArray.io.readline_resp

  (0 until LoadPipelineWidth).map(i => {
    bankedDataArray.io.read(i) <> ldu(i).io.banked_data_read
    bankedDataArray.io.is128Req(i) <> ldu(i).io.is128Req
    bankedDataArray.io.read_error_delayed(i) <> ldu(i).io.read_error_delayed

    ldu(i).io.banked_data_resp := bankedDataArray.io.read_resp(i)

    ldu(i).io.bank_conflict_slow := bankedDataArray.io.bank_conflict_slow(i)
  })

  (0 until LoadPipelineWidth).map(i => {
    when(bus.d.bits.opcode === TLMessages.GrantData) {
      io.lsu.forward_D(i).apply(bus.d, edge)
    }.otherwise {
      io.lsu.forward_D(i).dontCare()
    }
  })
  // tl D channel wakeup
  when (bus.d.bits.opcode === TLMessages.GrantData || bus.d.bits.opcode === TLMessages.Grant) {
    io.lsu.tl_d_channel.apply(bus.d, edge)
  } .otherwise {
    io.lsu.tl_d_channel.dontCare()
  }
  mainPipe.io.force_write <> io.force_write

  /** dwpu */
  if (dwpuParam.enWPU) {
    val dwpu = Module(new DCacheWpuWrapper(LoadPipelineWidth))
    for (i <- 0 until LoadPipelineWidth) {
      dwpu.io.req(i) <> ldu(i).io.dwpu.req(0)
      dwpu.io.resp(i) <> ldu(i).io.dwpu.resp(0)
      dwpu.io.lookup_upd(i) <> ldu(i).io.dwpu.lookup_upd(0)
      dwpu.io.cfpred(i) <> ldu(i).io.dwpu.cfpred(0)
    }
    dwpu.io.tagwrite_upd.valid := tagArray.io.write.valid
    dwpu.io.tagwrite_upd.bits.vaddr := tagArray.io.write.bits.vaddr
    dwpu.io.tagwrite_upd.bits.s1_real_way_en := tagArray.io.write.bits.way_en
  } else {
    for (i <- 0 until LoadPipelineWidth) {
      ldu(i).io.dwpu.req(0).ready := true.B
      ldu(i).io.dwpu.resp(0).valid := false.B
      ldu(i).io.dwpu.resp(0).bits := DontCare
    }
  }

  //----------------------------------------
  // load pipe
  // the s1 kill signal
  // only lsu uses this, replay never kills
  for (w <- 0 until LoadPipelineWidth) {
    ldu(w).io.lsu <> io.lsu.load(w)

    // TODO: when we have load128Req
    ldu(w).io.load128Req := io.lsu.load(w).is128Req

    // replay and nack not needed anymore
    // TODO: remove replay and nack
    ldu(w).io.nack := false.B

    ldu(w).io.disable_ld_fast_wakeup :=
      bankedDataArray.io.disable_ld_fast_wakeup(w) // load pipe fast wake up should be disabled when bank conflict
  }

  prefetcherMonitor.io.timely.total_prefetch := ldu.map(_.io.prefetch_info.naive.total_prefetch).reduce(_ || _)
  prefetcherMonitor.io.timely.late_hit_prefetch := ldu.map(_.io.prefetch_info.naive.late_hit_prefetch).reduce(_ || _)
  prefetcherMonitor.io.timely.late_miss_prefetch := missQueue.io.prefetch_info.naive.late_miss_prefetch
  prefetcherMonitor.io.timely.prefetch_hit := PopCount(ldu.map(_.io.prefetch_info.naive.prefetch_hit))
  io.pf_ctrl <> prefetcherMonitor.io.pf_ctrl
  XSPerfAccumulate("useless_prefetch", ldu.map(_.io.prefetch_info.naive.total_prefetch).reduce(_ || _) && !(ldu.map(_.io.prefetch_info.naive.useful_prefetch).reduce(_ || _)))
  XSPerfAccumulate("useful_prefetch", ldu.map(_.io.prefetch_info.naive.useful_prefetch).reduce(_ || _))
  XSPerfAccumulate("late_prefetch_hit", ldu.map(_.io.prefetch_info.naive.late_prefetch_hit).reduce(_ || _))
  XSPerfAccumulate("late_load_hit", ldu.map(_.io.prefetch_info.naive.late_load_hit).reduce(_ || _))
1353
1354  /** LoadMissDB: record load miss state */
1355  val hartId = p(XSCoreParamsKey).HartId
1356  val isWriteLoadMissTable = Constantin.createRecord(s"isWriteLoadMissTable$hartId")
1357  val isFirstHitWrite = Constantin.createRecord(s"isFirstHitWrite$hartId")
1358  val tableName = s"LoadMissDB$hartId"
1359  val siteName = s"DcacheWrapper$hartId"
1360  val loadMissTable = ChiselDB.createTable(tableName, new LoadMissEntry)
1361  for( i <- 0 until LoadPipelineWidth){
1362    val loadMissEntry = Wire(new LoadMissEntry)
1363    val loadMissWriteEn =
1364      (!ldu(i).io.lsu.resp.bits.replay && ldu(i).io.miss_req.fire) ||
1365      (ldu(i).io.lsu.s2_first_hit && ldu(i).io.lsu.resp.valid && isFirstHitWrite.orR)
1366    loadMissEntry.timeCnt := GTimer()
1367    loadMissEntry.robIdx := ldu(i).io.lsu.resp.bits.debug_robIdx
1368    loadMissEntry.paddr := ldu(i).io.miss_req.bits.addr
1369    loadMissEntry.vaddr := ldu(i).io.miss_req.bits.vaddr
1370    loadMissEntry.missState := OHToUInt(Cat(Seq(
1371      ldu(i).io.miss_req.fire & ldu(i).io.miss_resp.merged,
1372      ldu(i).io.miss_req.fire & !ldu(i).io.miss_resp.merged,
1373      ldu(i).io.lsu.s2_first_hit && ldu(i).io.lsu.resp.valid
1374    )))
1375    loadMissTable.log(
1376      data = loadMissEntry,
1377      en = isWriteLoadMissTable.orR && loadMissWriteEn,
1378      site = siteName,
1379      clock = clock,
1380      reset = reset
1381    )
1382  }
1383
1384  val isWriteLoadAccessTable = Constantin.createRecord(s"isWriteLoadAccessTable$hartId")
1385  val loadAccessTable = ChiselDB.createTable(s"LoadAccessDB$hartId", new LoadAccessEntry)
1386  for (i <- 0 until LoadPipelineWidth) {
1387    val loadAccessEntry = Wire(new LoadAccessEntry)
1388    loadAccessEntry.timeCnt := GTimer()
1389    loadAccessEntry.robIdx := ldu(i).io.lsu.resp.bits.debug_robIdx
1390    loadAccessEntry.paddr := ldu(i).io.miss_req.bits.addr
1391    loadAccessEntry.vaddr := ldu(i).io.miss_req.bits.vaddr
1392    loadAccessEntry.missState := OHToUInt(Cat(Seq(
1393      ldu(i).io.miss_req.fire && ldu(i).io.miss_resp.merged,
1394      ldu(i).io.miss_req.fire && !ldu(i).io.miss_resp.merged,
1395      ldu(i).io.lsu.s2_first_hit && ldu(i).io.lsu.resp.valid
1396    )))
1397    loadAccessEntry.pred_way_num := ldu(i).io.lsu.debug_s2_pred_way_num
1398    loadAccessEntry.real_way_num := ldu(i).io.lsu.debug_s2_real_way_num
1399    loadAccessEntry.dm_way_num := ldu(i).io.lsu.debug_s2_dm_way_num
1400    loadAccessTable.log(
1401      data = loadAccessEntry,
1402      en = isWriteLoadAccessTable.orR && ldu(i).io.lsu.resp.valid,
1403      site = siteName + "_loadpipe" + i.toString,
1404      clock = clock,
1405      reset = reset
1406    )
1407  }
1408
1409  //----------------------------------------
1410  // Sta pipe
1411  for (w <- 0 until StorePipelineWidth) {
1412    stu(w).io.lsu <> io.lsu.sta(w)
1413  }
1414
1415  //----------------------------------------
1416  // atomics
1417  // atomics not finished yet
1418  val atomic_resp_valid = mainPipe.io.atomic_resp.valid && mainPipe.io.atomic_resp.bits.isAMO
1419  io.lsu.atomics.resp.valid := RegNext(atomic_resp_valid)
1420  io.lsu.atomics.resp.bits := RegEnable(mainPipe.io.atomic_resp.bits, atomic_resp_valid)
1421  io.lsu.atomics.block_lr := mainPipe.io.block_lr
1422
1423  // miss requests
1424  val missReqArb = Module(new TreeArbiter(new MissReq, MissReqPortCount))
1425  // separately generate the miss queue enq ready signal for better timing
1426  val missReadyGen = Module(new MissReadyGen(MissReqPortCount))
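      // arbiter port map, as wired below: MainPipeMissReqPort carries the main
      // pipe's miss requests, ports 1 .. LduCnt the load pipes, ports
      // 1 + LduCnt onward the store pipes (only when StorePrefetchL1Enabled),
      // and HybridMissReqBase onward the hybrid pipes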
1427
1428  missReqArb.io.in(MainPipeMissReqPort) <> mainPipe.io.miss_req
1429  missReadyGen.io.in(MainPipeMissReqPort) <> mainPipe.io.miss_req
1430  for (w <- 0 until backendParams.LduCnt) {
1431    missReqArb.io.in(w + 1) <> ldu(w).io.miss_req
1432    missReadyGen.io.in(w + 1) <> ldu(w).io.miss_req
1433  }
1434
1435  for (w <- 0 until LoadPipelineWidth) { ldu(w).io.miss_resp := missQueue.io.resp }
1436  mainPipe.io.miss_resp := missQueue.io.resp
1437
1438  if(StorePrefetchL1Enabled) {
1439    for (w <- 0 until backendParams.StaCnt) {
1440      missReqArb.io.in(1 + backendParams.LduCnt + w) <> stu(w).io.miss_req
1441      missReadyGen.io.in(1 + backendParams.LduCnt + w) <> stu(w).io.miss_req
1442    }
1443  } else {
1444    for (w <- 0 until backendParams.StaCnt) { stu(w).io.miss_req.ready := false.B }
1445  }
1446
1447  for (i <- 0 until backendParams.HyuCnt) {
1448    val HybridLoadReqPort = HybridLoadReadBase + i
1449    val HybridStoreReqPort = HybridStoreReadBase + i
1450    val HybridMissReqPort = HybridMissReqBase + i
1451
1452    ldu(HybridLoadReqPort).io.miss_req.ready := false.B
1453    stu(HybridStoreReqPort).io.miss_req.ready := false.B
1454
1455    if (StorePrefetchL1Enabled) {
1456      when (ldu(HybridLoadReqPort).io.miss_req.valid) {
1457        missReqArb.io.in(HybridMissReqPort) <> ldu(HybridLoadReqPort).io.miss_req
1458        missReadyGen.io.in(HybridMissReqPort) <> ldu(HybridLoadReqPort).io.miss_req
1459      } .otherwise {
1460        missReqArb.io.in(HybridMissReqPort) <> stu(HybridStoreReqPort).io.miss_req
1461        missReadyGen.io.in(HybridMissReqPort) <> stu(HybridStoreReqPort).io.miss_req
1462      }
1463    } else {
1464      missReqArb.io.in(HybridMissReqPort) <> ldu(HybridLoadReqPort).io.miss_req
1465      missReadyGen.io.in(HybridMissReqPort) <> ldu(HybridLoadReqPort).io.miss_req
1466    }
1467  }
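      // for the hybrid pipes, the when/otherwise above muxes the shared
      // arbiter port, so in any given cycle a hybrid load miss request wins
      // over the store-prefetch miss request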
1468
1469  for(w <- 0 until LoadPipelineWidth) {
1470    wb.io.miss_req_conflict_check(w) := ldu(w).io.wbq_conflict_check
1471    ldu(w).io.wbq_block_miss_req     := wb.io.block_miss_req(w)
1472  }
1473
1474  wb.io.miss_req_conflict_check(3) := mainPipe.io.wbq_conflict_check
1475  mainPipe.io.wbq_block_miss_req   := wb.io.block_miss_req(3)
1476
1477  wb.io.miss_req_conflict_check(4).valid := missReqArb.io.out.valid
1478  wb.io.miss_req_conflict_check(4).bits  := missReqArb.io.out.bits.addr
1479  missQueue.io.wbq_block_miss_req := wb.io.block_miss_req(4)
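      // writeback-queue conflict-check port map: ports 0 .. LoadPipelineWidth-1
      // are driven by the load pipes above; the literal indices 3 and 4 are the
      // main pipe and the miss-req arbiter output, which assumes
      // LoadPipelineWidth == 3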
1480
1481  missReqArb.io.out <> missQueue.io.req
1482  missReadyGen.io.queryMQ <> missQueue.io.queryMQ
1483  io.cmoOpReq <> missQueue.io.cmo_req
1484  io.cmoOpResp <> missQueue.io.cmo_resp
1485
1486  for (w <- 0 until LoadPipelineWidth) { ldu(w).io.mq_enq_cancel := missQueue.io.mq_enq_cancel }
1487
1488  XSPerfAccumulate("miss_queue_fire", PopCount(VecInit(missReqArb.io.in.map(_.fire))) >= 1.U)
1489  XSPerfAccumulate("miss_queue_multi_fire", PopCount(VecInit(missReqArb.io.in.map(_.fire))) > 1.U)
1490
1491  XSPerfAccumulate("miss_queue_has_enq_req", PopCount(VecInit(missReqArb.io.in.map(_.valid))) >= 1.U)
1492  XSPerfAccumulate("miss_queue_has_multi_enq_req", PopCount(VecInit(missReqArb.io.in.map(_.valid))) > 1.U)
1493  XSPerfAccumulate("miss_queue_has_multi_enq_but_not_fire", PopCount(VecInit(missReqArb.io.in.map(_.valid))) > 1.U && PopCount(VecInit(missReqArb.io.in.map(_.fire))) === 0.U)
1494
1495  // forward missqueue
1496  (0 until LoadPipelineWidth).foreach(i => io.lsu.forward_mshr(i).connect(missQueue.io.forward(i)))
1497
1498  // refill to load queue
1499  // io.lsu.lsq <> missQueue.io.refill_to_ldq
1500
1501  // tilelink stuff
1502  bus.a <> missQueue.io.mem_acquire
1503  bus.e <> missQueue.io.mem_finish
1504  missQueue.io.probe_addr := bus.b.bits.address
1505  missQueue.io.replace_addr := mainPipe.io.replace_addr
1506
1507  missQueue.io.main_pipe_resp.valid := RegNext(mainPipe.io.atomic_resp.valid)
1508  missQueue.io.main_pipe_resp.bits := RegEnable(mainPipe.io.atomic_resp.bits, mainPipe.io.atomic_resp.valid)
1509
1510  //----------------------------------------
1511  // probe
1512  // probeQueue.io.mem_probe <> bus.b
1513  block_decoupled(bus.b, probeQueue.io.mem_probe, missQueue.io.probe_block)
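      // bus.b is gated by missQueue.io.probe_block (via block_decoupled,
      // defined near the end of this module), stalling a probe while the miss
      // queue reports a conflict for the probed address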
1514  probeQueue.io.lrsc_locked_block <> mainPipe.io.lrsc_locked_block
1515  probeQueue.io.update_resv_set <> mainPipe.io.update_resv_set
1516
1517  val refill_req = RegNext(missQueue.io.main_pipe_req.valid && (missQueue.io.main_pipe_req.bits.isLoad || missQueue.io.main_pipe_req.bits.isStore))
1518  //----------------------------------------
1519  // mainPipe
1520  // when a req enters the main pipe, it is blocked there if it has a set
1521  // conflict with the replace pipe or refill pipe
1522  probeQueue.io.pipe_req <> mainPipe.io.probe_req
1523  io.lsu.store.req <> mainPipe.io.store_req
1524
1525  io.lsu.store.replay_resp.valid := RegNext(mainPipe.io.store_replay_resp.valid)
1526  io.lsu.store.replay_resp.bits := RegEnable(mainPipe.io.store_replay_resp.bits, mainPipe.io.store_replay_resp.valid)
1527  io.lsu.store.main_pipe_hit_resp := mainPipe.io.store_hit_resp
1528
1529  mainPipe.io.atomic_req <> io.lsu.atomics.req
1530
1531  mainPipe.io.invalid_resv_set := RegNext(
1532    wb.io.req.fire &&
1533    wb.io.req.bits.addr === mainPipe.io.lrsc_locked_block.bits &&
1534    mainPipe.io.lrsc_locked_block.valid
1535  )
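      // the LR/SC reservation is cleared one cycle after a writeback request
      // to the currently reserved block fires, so releasing a block cannot
      // leave a stale reservation behind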
1536
1537  //----------------------------------------
1538  // replace (main pipe)
1539  val mpStatus = mainPipe.io.status
1540  mainPipe.io.refill_req <> missQueue.io.main_pipe_req
1541
1542  mainPipe.io.data_write_ready_dup := VecInit(Seq.fill(nDupDataWriteReady)(true.B))
1543  mainPipe.io.tag_write_ready_dup := VecInit(Seq.fill(nDupDataWriteReady)(true.B))
1544  mainPipe.io.wb_ready_dup := wb.io.req_ready_dup
1545
1546  //----------------------------------------
1547  // wb
1548  // add a queue between MainPipe and WritebackUnit to reduce MainPipe stalls when the WritebackUnit is busy
1549
1550  wb.io.req <> mainPipe.io.wb
1551  bus.c     <> wb.io.mem_release
1552  // wb.io.release_wakeup := refillPipe.io.release_wakeup
1553  // wb.io.release_update := mainPipe.io.release_update
1554  //wb.io.probe_ttob_check_req <> mainPipe.io.probe_ttob_check_req
1555  //wb.io.probe_ttob_check_resp <> mainPipe.io.probe_ttob_check_resp
1556
1557  io.lsu.release.valid := RegNext(wb.io.req.fire)
1558  io.lsu.release.bits.paddr := RegEnable(wb.io.req.bits.addr, wb.io.req.fire)
1559  // Note: RegNext() is required by:
1560  // * load queue released flag update logic
1561  // * load / load violation check logic
1562  // * and timing requirements
1563  // CHANGE IT WITH CARE
1564
1565  // connect bus d
1566  missQueue.io.mem_grant.valid := false.B
1567  missQueue.io.mem_grant.bits  := DontCare
1568
1569  wb.io.mem_grant.valid := false.B
1570  wb.io.mem_grant.bits  := DontCare
1571
1572  // in L1DCache, we only expect Grant[Data], ReleaseAck and CBOAck
1573  bus.d.ready := false.B
1574  when (bus.d.bits.opcode === TLMessages.Grant || bus.d.bits.opcode === TLMessages.GrantData || bus.d.bits.opcode === TLMessages.CBOAck) {
1575    missQueue.io.mem_grant <> bus.d
1576  } .elsewhen (bus.d.bits.opcode === TLMessages.ReleaseAck) {
1577    wb.io.mem_grant <> bus.d
1578  } .otherwise {
1579    assert (!bus.d.fire)
1580  }
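      // bus.d demux by opcode: Grant/GrantData/CBOAck go to the miss queue
      // (acquire flow), ReleaseAck goes to the writeback unit (release flow);
      // the default assignments above keep bus.d.ready low for anything else,
      // and the assertion flags unexpected messages. Roughly equivalent to
      // (a sketch, with hypothetical shorthands isGrantLike / isReleaseAck):
      //   bus.d.ready := Mux(isGrantLike, missQueue.io.mem_grant.ready,
      //                      Mux(isReleaseAck, wb.io.mem_grant.ready, false.B))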
1581
1582  //----------------------------------------
1583  // Feedback Direct Prefetch Monitor
1584  fdpMonitor.io.refill := missQueue.io.prefetch_info.fdp.prefetch_monitor_cnt
1585  fdpMonitor.io.timely.late_prefetch := missQueue.io.prefetch_info.fdp.late_miss_prefetch
1586  fdpMonitor.io.accuracy.total_prefetch := missQueue.io.prefetch_info.fdp.total_prefetch
1587  for (w <- 0 until LoadPipelineWidth)  {
1588    if (w == 0) {
1589      fdpMonitor.io.accuracy.useful_prefetch(w) := ldu(w).io.prefetch_info.fdp.useful_prefetch
1590    } else {
1591      fdpMonitor.io.accuracy.useful_prefetch(w) := Mux(same_cycle_update_pf_flag, false.B, ldu(w).io.prefetch_info.fdp.useful_prefetch)
1592    }
1593  }
1594  for (w <- 0 until LoadPipelineWidth)  { fdpMonitor.io.pollution.cache_pollution(w) := ldu(w).io.prefetch_info.fdp.pollution }
1595  for (w <- 0 until LoadPipelineWidth)  { fdpMonitor.io.pollution.demand_miss(w) := ldu(w).io.prefetch_info.fdp.demand_miss }
1596  fdpMonitor.io.debugRolling := io.debugRolling
1597
1598  //----------------------------------------
1599  // Bloom Filter
1600  // bloomFilter.io.set <> missQueue.io.bloom_filter_query.set
1601  // bloomFilter.io.clr <> missQueue.io.bloom_filter_query.clr
1602  bloomFilter.io.set <> mainPipe.io.bloom_filter_query.set
1603  bloomFilter.io.clr <> mainPipe.io.bloom_filter_query.clr
1604
1605  for (w <- 0 until LoadPipelineWidth)  { bloomFilter.io.query(w) <> ldu(w).io.bloom_filter_query.query }
1606  for (w <- 0 until LoadPipelineWidth)  { bloomFilter.io.resp(w) <> ldu(w).io.bloom_filter_query.resp }
1607
1608  for (w <- 0 until LoadPipelineWidth)  { counterFilter.io.ld_in(w) <> ldu(w).io.counter_filter_enq }
1609  for (w <- 0 until LoadPipelineWidth)  { counterFilter.io.query(w) <> ldu(w).io.counter_filter_query }
1610
1611  //----------------------------------------
1612  // replacement algorithm
1613  val replacer = ReplacementPolicy.fromString(cacheParams.replacer, nWays, nSets)
1614  val replWayReqs = ldu.map(_.io.replace_way) ++ Seq(mainPipe.io.replace_way) ++ stu.map(_.io.replace_way)
1615
1616  if (dwpuParam.enCfPred) {
1617    val victimList = VictimList(nSets)
1618    replWayReqs.foreach {
1619      case req =>
1620        req.way := DontCare
1621        when(req.set.valid) {
1622          when(victimList.whether_sa(req.set.bits)) {
1623            req.way := replacer.way(req.set.bits)
1624          }.otherwise {
1625            req.way := req.dmWay
1626          }
1627        }
1628    }
1629  } else {
1630    replWayReqs.foreach {
1631      case req =>
1632        req.way := DontCare
1633        when(req.set.valid) {
1634          req.way := replacer.way(req.set.bits)
1635        }
1636    }
1637  }
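      // judging by the names: victimList.whether_sa marks sets that behave
      // set-associatively, and only those use the replacer's choice; the
      // remaining sets fall back to the direct-mapped way (req.dmWay)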
1638
1639  val replAccessReqs = ldu.map(_.io.replace_access) ++ Seq(
1640    mainPipe.io.replace_access
1641  ) ++ stu.map(_.io.replace_access)
1642  val touchWays = Seq.fill(replAccessReqs.size)(Wire(ValidIO(UInt(log2Up(nWays).W))))
1643  touchWays.zip(replAccessReqs).foreach {
1644    case (w, req) =>
1645      w.valid := req.valid
1646      w.bits := req.bits.way
1647  }
1648  val touchSets = replAccessReqs.map(_.bits.set)
1649  replacer.access(touchSets, touchWays)
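      // every replacement access (from the load pipes, main pipe and store
      // pipes) touches its set/way so the replacer state (set-PLRU by default,
      // per cacheParams.replacer) stays up to date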
1650
1651  //----------------------------------------
1652  // assertions
1653  // dcache should only deal with DRAM addresses
1654  import freechips.rocketchip.util._
1655  when (bus.a.fire) {
1656    assert(PmemRanges.map(_.cover(bus.a.bits.address)).reduce(_ || _))
1657  }
1658  when (bus.b.fire) {
1659    assert(PmemRanges.map(_.cover(bus.b.bits.address)).reduce(_ || _))
1660  }
1661  when (bus.c.fire) {
1662    assert(PmemRanges.map(_.cover(bus.c.bits.address)).reduce(_ || _))
1663  }
1664
1665  //----------------------------------------
1666  // utility functions
1667  def block_decoupled[T <: Data](source: DecoupledIO[T], sink: DecoupledIO[T], block_signal: Bool) = {
1668    sink.valid   := source.valid && !block_signal
1669    source.ready := sink.ready   && !block_signal
1670    sink.bits    := source.bits
1671  }
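      // block_decoupled cuts both valid and ready while block_signal is held,
      // so no beat can transfer; ready is gated combinationally, so
      // block_signal must not itself depend on sink.ready. Used above as:
      //   block_decoupled(bus.b, probeQueue.io.mem_probe, missQueue.io.probe_block)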
1672
1673  //----------------------------------------
1674  // performance counters
1675  val num_loads = PopCount(ldu.map(e => e.io.lsu.req.fire))
1676  XSPerfAccumulate("num_loads", num_loads)
1677
1678  io.mshrFull := missQueue.io.full
1679
1680  // performance counter
1681  // val ld_access = Wire(Vec(LoadPipelineWidth, missQueue.io.debug_early_replace.last.cloneType))
1682  // val st_access = Wire(ld_access.last.cloneType)
1683  // ld_access.zip(ldu).foreach {
1684  //   case (a, u) =>
1685  //     a.valid := RegNext(u.io.lsu.req.fire) && !u.io.lsu.s1_kill
1686  //     a.bits.idx := RegEnable(get_idx(u.io.lsu.req.bits.vaddr), u.io.lsu.req.fire)
1687  //     a.bits.tag := get_tag(u.io.lsu.s1_paddr_dup_dcache)
1688  // }
1689  // st_access.valid := RegNext(mainPipe.io.store_req.fire)
1690  // st_access.bits.idx := RegEnable(get_idx(mainPipe.io.store_req.bits.vaddr), mainPipe.io.store_req.fire)
1691  // st_access.bits.tag := RegEnable(get_tag(mainPipe.io.store_req.bits.addr), mainPipe.io.store_req.fire)
1692  // val access_info = ld_access.toSeq ++ Seq(st_access)
1693  // val early_replace = RegNext(missQueue.io.debug_early_replace) // TODO: clock gate
1694  // val access_early_replace = access_info.map {
1695  //   case acc =>
1696  //     Cat(early_replace.map {
1697  //       case r =>
1698  //         acc.valid && r.valid &&
1699  //           acc.bits.tag === r.bits.tag &&
1700  //           acc.bits.idx === r.bits.idx
1701  //     })
1702  // }
1703  // XSPerfAccumulate("access_early_replace", PopCount(Cat(access_early_replace)))
1704
1705  val perfEvents = (Seq(wb, mainPipe, missQueue, probeQueue) ++ ldu).flatMap(_.getPerfEvents)
1706  generatePerfEvent()
1707}
1708
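    // AMOHelper is a simulation blackbox (ExtModule); its AMO behavior is
    // expected to be supplied externally (e.g. via DPI-C), not in RTL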
1709class AMOHelper() extends ExtModule {
1710  val clock  = IO(Input(Clock()))
1711  val enable = IO(Input(Bool()))
1712  val cmd    = IO(Input(UInt(5.W)))
1713  val addr   = IO(Input(UInt(64.W)))
1714  val wdata  = IO(Input(UInt(64.W)))
1715  val mask   = IO(Input(UInt(8.W)))
1716  val rdata  = IO(Output(UInt(64.W)))
1717}
1718
1719class DCacheWrapper()(implicit p: Parameters) extends LazyModule
1720  with HasXSParameter
1721  with HasDCacheParameters
1722{
1723  override def shouldBeInlined: Boolean = false
1724
1725  val useDcache = coreParams.dcacheParametersOpt.nonEmpty
1726  val clientNode = if (useDcache) TLIdentityNode() else null
1727  val dcache = if (useDcache) LazyModule(new DCache()) else null
1728  if (useDcache) {
1729    clientNode := dcache.clientNode
1730  }
1731  val uncacheNode = OptionWrapper(cacheCtrlParamsOpt.isDefined, TLIdentityNode())
1732  require(
1733    uncacheNode.isDefined == dcache.cacheCtrlOpt.isDefined,
1734    "uncacheNode and cacheCtrlOpt must be both defined or both empty!")
1735  if (uncacheNode.isDefined && dcache.cacheCtrlOpt.isDefined) {
1736    dcache.cacheCtrlOpt.get.node := uncacheNode.get
1737  }
1738
1739  class DCacheWrapperImp(wrapper: LazyModule) extends LazyModuleImp(wrapper) with HasPerfEvents {
1740    val io = IO(new DCacheIO)
1741    val perfEvents = if (!useDcache) {
1742      // a fake dcache which uses DPI-C to access memory, for debug usage only!
1743      val fake_dcache = Module(new FakeDCache())
1744      io <> fake_dcache.io
1745      Seq()
1746    }
1747    else {
1748      io <> dcache.module.io
1749      dcache.module.getPerfEvents
1750    }
1751    generatePerfEvent()
1752  }
1753
1754  lazy val module = new DCacheWrapperImp(this)
1755}