/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chisel3._
import chisel3.experimental.ExtModule
import chisel3.util._
import coupledL2.VaddrField
import coupledL2.IsKeywordField
import coupledL2.IsKeywordKey
import freechips.rocketchip.diplomacy.{IdRange, LazyModule, LazyModuleImp, TransferSizes}
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util.BundleFieldBase
import huancun.{AliasField, PrefetchField}
import org.chipsalliance.cde.config.Parameters
import utility._
import utils._
import xiangshan._
import xiangshan.backend.Bundles.DynInst
import xiangshan.backend.rob.RobDebugRollingIO
import xiangshan.cache.wpu._
import xiangshan.mem.{AddPipelineReg, HasL1PrefetchSourceParameter}
import xiangshan.mem.prefetch._
import xiangshan.mem.LqPtr

// DCache specific parameters
case class DCacheParameters
(
  nSets: Int = 128,
  nWays: Int = 8,
  rowBits: Int = 64,
  tagECC: Option[String] = None,
  dataECC: Option[String] = None,
  replacer: Option[String] = Some("setplru"),
  updateReplaceOn2ndmiss: Boolean = true,
  nMissEntries: Int = 1,
  nProbeEntries: Int = 1,
  nReleaseEntries: Int = 1,
  nMMIOEntries: Int = 1,
  nMMIOs: Int = 1,
  blockBytes: Int = 64,
  nMaxPrefetchEntry: Int = 1,
  alwaysReleaseData: Boolean = false,
  isKeywordBitsOpt: Option[Boolean] = Some(true),
  enableDataEcc: Boolean = false,
  enableTagEcc: Boolean = false
) extends L1CacheParameters {
  // if sets * blockBytes > 4KB (page size),
  // cache aliasing will happen,
  // we need to avoid this by recording additional bits in the L2 cache
  val setBytes = nSets * blockBytes
  val aliasBitsOpt = if(setBytes > pageSize) Some(log2Ceil(setBytes / pageSize)) else None

  def tagCode: Code = Code.fromString(tagECC)

  def dataCode: Code = Code.fromString(dataECC)
}

// Physical Address
// --------------------------------------
// |  Physical Tag  |  PIndex  | Offset |
// --------------------------------------
//                  |
//                  DCacheTagOffset
//
// Virtual Address
// --------------------------------------
// | Above index  | Set | Bank | Offset |
// --------------------------------------
//                |     |      |        |
//                |     |      |        0
//                |     |      DCacheBankOffset
//                |     DCacheSetOffset
//                DCacheAboveIndexOffset

// Default DCache size = 64 sets * 8 ways * 8 banks * 8 Byte = 32K Byte
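// A worked example, assuming the DCacheParameters defaults above
// (nSets = 128, blockBytes = 64, i.e. 8 banks of 8-byte rows per line):
//   DCacheBankOffset       = log2Up(8)       = 3   // byte offset within a bank row
//   DCacheSetOffset        = 3 + log2Up(8)   = 6   // bank index = addr(5, 3)
//   DCacheAboveIndexOffset = 6 + log2Up(128) = 13  // set index  = addr(12, 6)
//   DCacheTagOffset        = min(13, 12)     = 12  // tag starts at the page-offset boundary
// Since setBytes = 128 * 64 = 8 KiB exceeds the 4 KiB page size, one alias bit
// (aliasBitsOpt = Some(1)) has to be tracked by L2, as noted in DCacheParameters.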
trait HasDCacheParameters extends HasL1CacheParameters with HasL1PrefetchSourceParameter {
  val cacheParams = dcacheParameters
  val cfg = cacheParams

  def blockProbeAfterGrantCycles = 8 // give the processor some time to issue a request after a grant

  def nSourceType = 10
  def sourceTypeWidth = log2Up(nSourceType)
  // non-prefetch source < 3
  def LOAD_SOURCE = 0
  def STORE_SOURCE = 1
  def AMO_SOURCE = 2
  // prefetch source >= 3
  def DCACHE_PREFETCH_SOURCE = 3
  def SOFT_PREFETCH = 4
  // the following sources are only used inside SMS
  def HW_PREFETCH_AGT = 5
  def HW_PREFETCH_PHT_CUR = 6
  def HW_PREFETCH_PHT_INC = 7
  def HW_PREFETCH_PHT_DEC = 8
  def HW_PREFETCH_BOP = 9
  def HW_PREFETCH_STRIDE = 10

  def BLOOM_FILTER_ENTRY_NUM = 4096

  // each source uses an id to distinguish its multiple reqs
  def reqIdWidth = log2Up(nEntries) max log2Up(StoreBufferSize)

  require(isPow2(cfg.nMissEntries)) // TODO
  // require(isPow2(cfg.nReleaseEntries))
  require(cfg.nMissEntries < cfg.nReleaseEntries)
  val nEntries = cfg.nMissEntries + cfg.nReleaseEntries
  val releaseIdBase = cfg.nMissEntries
  val EnableDataEcc = cacheParams.enableDataEcc
  val EnableTagEcc = cacheParams.enableTagEcc

  // banked dcache support
  val DCacheSetDiv = 1
  val DCacheSets = cacheParams.nSets
  val DCacheWays = cacheParams.nWays
  val DCacheBanks = 8 // hardcoded
  val DCacheDupNum = 16
  val DCacheSRAMRowBits = cacheParams.rowBits // hardcoded
  val DCacheWordBits = 64 // hardcoded
  val DCacheWordBytes = DCacheWordBits / 8
  val MaxPrefetchEntry = cacheParams.nMaxPrefetchEntry
  val DCacheVWordBytes = VLEN / 8
  require(DCacheSRAMRowBits == 64)

  val DCacheSetDivBits = log2Ceil(DCacheSetDiv)
  val DCacheSetBits = log2Ceil(DCacheSets)
  val DCacheSizeBits = DCacheSRAMRowBits * DCacheBanks * DCacheWays * DCacheSets
  val DCacheSizeBytes = DCacheSizeBits / 8
  val DCacheSizeWords = DCacheSizeBits / 64 // TODO

  val DCacheSameVPAddrLength = 12

  val DCacheSRAMRowBytes = DCacheSRAMRowBits / 8
  val DCacheWordOffset = log2Up(DCacheWordBytes)
  val DCacheVWordOffset = log2Up(DCacheVWordBytes)

  val DCacheBankOffset = log2Up(DCacheSRAMRowBytes)
  val DCacheSetOffset = DCacheBankOffset + log2Up(DCacheBanks)
  val DCacheAboveIndexOffset = DCacheSetOffset + log2Up(DCacheSets)
  val DCacheTagOffset = DCacheAboveIndexOffset min DCacheSameVPAddrLength
  val DCacheLineOffset = DCacheSetOffset

  def encWordBits = cacheParams.dataCode.width(wordBits)
  def encRowBits = encWordBits * rowWords // for DuplicatedDataArray only
  def eccBits = encWordBits - wordBits

  def encTagBits = if (EnableTagEcc) cacheParams.tagCode.width(tagBits) else tagBits
  def tagECCBits = encTagBits - tagBits

  def encDataBits = if (EnableDataEcc) cacheParams.dataCode.width(DCacheSRAMRowBits) else DCacheSRAMRowBits
  def dataECCBits = encDataBits - DCacheSRAMRowBits
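  // A concrete sketch of the ECC widths above, assuming the rocket-chip
  // "secded" code were selected (the defaults here are None, i.e. no ECC):
  //   dataECC = Some("secded") encodes each 64-bit SRAM row into 72 bits,
  //   so encDataBits = 72 and dataECCBits = 8 check bits per row; encTagBits
  //   grows above tagBits analogously when enableTagEcc is set.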
  // uncache
  val uncacheIdxBits = log2Up(VirtualLoadQueueMaxStoreQueueSize + 1)
  // hardware prefetch parameters
  // high confidence hardware prefetch port
  val HighConfHWPFLoadPort = LoadPipelineWidth - 1 // use the last load port by default
  val IgnorePrefetchConfidence = false

  // parameters about duplicating regs to solve fanout
  // In Main Pipe:
  //   tag_write.ready -> data_write.valid * 8 banks
  //   tag_write.ready -> meta_write.valid
  //   tag_write.ready -> tag_write.valid
  //   tag_write.ready -> err_write.valid
  //   tag_write.ready -> wb.valid
  val nDupTagWriteReady = DCacheBanks + 4
  // In Main Pipe:
  //   data_write.ready -> data_write.valid * 8 banks
  //   data_write.ready -> meta_write.valid
  //   data_write.ready -> tag_write.valid
  //   data_write.ready -> err_write.valid
  //   data_write.ready -> wb.valid
  val nDupDataWriteReady = DCacheBanks + 4
  val nDupWbReady = DCacheBanks + 4
  val nDupStatus = nDupTagWriteReady + nDupDataWriteReady
  val dataWritePort = 0
  val metaWritePort = DCacheBanks
  val tagWritePort = metaWritePort + 1
  val errWritePort = tagWritePort + 1
  val wbPort = errWritePort + 1

  def set_to_dcache_div(set: UInt) = {
    require(set.getWidth >= DCacheSetBits)
    if (DCacheSetDivBits == 0) 0.U else set(DCacheSetDivBits-1, 0)
  }

  def set_to_dcache_div_set(set: UInt) = {
    require(set.getWidth >= DCacheSetBits)
    set(DCacheSetBits - 1, DCacheSetDivBits)
  }

  def addr_to_dcache_bank(addr: UInt) = {
    require(addr.getWidth >= DCacheSetOffset)
    addr(DCacheSetOffset-1, DCacheBankOffset)
  }

  def addr_to_dcache_div(addr: UInt) = {
    require(addr.getWidth >= DCacheAboveIndexOffset)
    if(DCacheSetDivBits == 0) 0.U else addr(DCacheSetOffset + DCacheSetDivBits - 1, DCacheSetOffset)
  }

  def addr_to_dcache_div_set(addr: UInt) = {
    require(addr.getWidth >= DCacheAboveIndexOffset)
    addr(DCacheAboveIndexOffset - 1, DCacheSetOffset + DCacheSetDivBits)
  }

  def addr_to_dcache_set(addr: UInt) = {
    require(addr.getWidth >= DCacheAboveIndexOffset)
    addr(DCacheAboveIndexOffset-1, DCacheSetOffset)
  }

  def get_data_of_bank(bank: Int, data: UInt) = {
    require(data.getWidth >= (bank+1)*DCacheSRAMRowBits)
    data(DCacheSRAMRowBits * (bank + 1) - 1, DCacheSRAMRowBits * bank)
  }

  def get_mask_of_bank(bank: Int, data: UInt) = {
    require(data.getWidth >= (bank+1)*DCacheSRAMRowBytes)
    data(DCacheSRAMRowBytes * (bank + 1) - 1, DCacheSRAMRowBytes * bank)
  }

  def get_alias(vaddr: UInt): UInt = {
    // require(blockOffBits + idxBits > pgIdxBits)
    if(blockOffBits + idxBits > pgIdxBits) {
      vaddr(blockOffBits + idxBits - 1, pgIdxBits)
    } else {
      0.U
    }
  }

  def is_alias_match(vaddr0: UInt, vaddr1: UInt): Bool = {
    require(vaddr0.getWidth == VAddrBits && vaddr1.getWidth == VAddrBits)
    if(blockOffBits + idxBits > pgIdxBits) {
      vaddr0(blockOffBits + idxBits - 1, pgIdxBits) === vaddr1(blockOffBits + idxBits - 1, pgIdxBits)
    } else {
      // no alias problem
      true.B
    }
  }

  def get_direct_map_way(addr: UInt): UInt = {
    addr(DCacheAboveIndexOffset + log2Up(DCacheWays) - 1, DCacheAboveIndexOffset)
  }
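  // Under the default geometry (see the worked example above the trait),
  // these helpers slice the following bits:
  //   addr_to_dcache_bank(addr) -> addr(5, 3)   // one of the 8 banks
  //   addr_to_dcache_set(addr)  -> addr(12, 6)  // one of the 128 sets
  //   get_alias(vaddr)          -> vaddr(12)    // the single alias bit
  // With DCacheSetDiv = 1 (DCacheSetDivBits = 0), the *_div helpers
  // degenerate to constants.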
  def arbiter[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new Arbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    out <> arb.io.out
  }

  def arbiter_with_pipereg[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new Arbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    AddPipelineReg(arb.io.out, out, false.B)
  }

  def arbiter_with_pipereg_N_dup[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    dups: Seq[DecoupledIO[T]],
    name: Option[String] = None): Unit = {
    val arb = Module(new Arbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    for (dup <- dups) {
      AddPipelineReg(arb.io.out, dup, false.B)
    }
    AddPipelineReg(arb.io.out, out, false.B)
  }

  def rrArbiter[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new RRArbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    out <> arb.io.out
  }

  def fastArbiter[T <: Bundle](
    in: Seq[DecoupledIO[T]],
    out: DecoupledIO[T],
    name: Option[String] = None): Unit = {
    val arb = Module(new FastArbiter[T](chiselTypeOf(out.bits), in.size))
    if (name.nonEmpty) { arb.suggestName(s"${name.get}_arb") }
    for ((a, req) <- arb.io.in.zip(in)) {
      a <> req
    }
    out <> arb.io.out
  }

  val numReplaceRespPorts = 2

  require(isPow2(nSets), s"nSets($nSets) must be pow2")
  require(isPow2(nWays), s"nWays($nWays) must be pow2")
  require(full_divide(rowBits, wordBits), s"rowBits($rowBits) must be multiple of wordBits($wordBits)")
  require(full_divide(beatBits, rowBits), s"beatBits($beatBits) must be multiple of rowBits($rowBits)")
}
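// Illustrative only, never instantiated: a minimal sketch of how the arbiter
// helpers above are meant to be used (the 2-input width and port names here
// are assumptions, not part of the real design).
class ArbiterUsageExample(implicit p: Parameters) extends DCacheModule {
  val io = IO(new Bundle {
    val in = Vec(2, Flipped(DecoupledIO(new DCacheWordReq)))
    val out = DecoupledIO(new DCacheWordReq)
  })
  // in(0) has the highest priority; use arbiter_with_pipereg instead to put
  // a pipeline register on the output and cut the valid/ready path.
  arbiter(io.in, io.out, Some("example"))
}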
abstract class DCacheModule(implicit p: Parameters) extends L1CacheModule
  with HasDCacheParameters

abstract class DCacheBundle(implicit p: Parameters) extends L1CacheBundle
  with HasDCacheParameters

class ReplacementAccessBundle(implicit p: Parameters) extends DCacheBundle {
  val set = UInt(log2Up(nSets).W)
  val way = UInt(log2Up(nWays).W)
}

class ReplacementWayReqIO(implicit p: Parameters) extends DCacheBundle {
  val set = ValidIO(UInt(log2Up(nSets).W))
  val dmWay = Output(UInt(log2Up(nWays).W))
  val way = Input(UInt(log2Up(nWays).W))
}

class DCacheExtraMeta(implicit p: Parameters) extends DCacheBundle
{
  val error = Bool() // cache line has been marked as corrupted by l2 / ecc error detected when store
  val prefetch = UInt(L1PfSourceBits.W) // cache line was first required by prefetch
  val access = Bool() // cache line has been accessed by load / store

  // val debug_access_timestamp = UInt(64.W) // last time a load / store / refill accessed that cacheline
}

// memory request in word granularity (load, mmio, lr/sc, atomics)
class DCacheWordReq(implicit p: Parameters) extends DCacheBundle
{
  val cmd = UInt(M_SZ.W)
  val vaddr = UInt(VAddrBits.W)
  val data = UInt(VLEN.W)
  val mask = UInt((VLEN/8).W)
  val id = UInt(reqIdWidth.W)
  val instrtype = UInt(sourceTypeWidth.W)
  val isFirstIssue = Bool()
  val replayCarry = new ReplayCarry(nWays)
  val lqIdx = new LqPtr

  val debug_robIdx = UInt(log2Ceil(RobSize).W)
  def dump() = {
    XSDebug("DCacheWordReq: cmd: %x vaddr: %x data: %x mask: %x id: %d\n",
      cmd, vaddr, data, mask, id)
  }
}

// memory request in cache line granularity (store)
class DCacheLineReq(implicit p: Parameters) extends DCacheBundle
{
  val cmd = UInt(M_SZ.W)
  val vaddr = UInt(VAddrBits.W)
  val addr = UInt(PAddrBits.W)
  val data = UInt((cfg.blockBytes * 8).W)
  val mask = UInt(cfg.blockBytes.W)
  val id = UInt(reqIdWidth.W)
  def dump() = {
    XSDebug("DCacheLineReq: cmd: %x addr: %x data: %x mask: %x id: %d\n",
      cmd, addr, data, mask, id)
  }
  def idx: UInt = get_idx(vaddr)
}

class DCacheWordReqWithVaddr(implicit p: Parameters) extends DCacheWordReq {
  val addr = UInt(PAddrBits.W)
  val wline = Bool()
}

class DCacheWordReqWithVaddrAndPfFlag(implicit p: Parameters) extends DCacheWordReqWithVaddr {
  val prefetch = Bool()
  val vecValid = Bool()

  def toDCacheWordReqWithVaddr() = {
    val res = Wire(new DCacheWordReqWithVaddr)
    res.vaddr := vaddr
    res.wline := wline
    res.cmd := cmd
    res.addr := addr
    res.data := data
    res.mask := mask
    res.id := id
    res.instrtype := instrtype
    res.replayCarry := replayCarry
    res.isFirstIssue := isFirstIssue
    res.debug_robIdx := debug_robIdx

    res
  }
}

class BaseDCacheWordResp(implicit p: Parameters) extends DCacheBundle
{
  // read in s2
  val data = UInt(VLEN.W)
  // select in s3
  val data_delayed = UInt(VLEN.W)
  val id = UInt(reqIdWidth.W)
  // cache req missed, send it to miss queue
  val miss = Bool()
  // cache miss, and failed to enter the missqueue, replay from RS is needed
  val replay = Bool()
  val replayCarry = new ReplayCarry(nWays)
  // data has been corrupted
  val tag_error = Bool() // tag error
  val mshr_id = UInt(log2Up(cfg.nMissEntries).W)

  val debug_robIdx = UInt(log2Ceil(RobSize).W)
  def dump() = {
    XSDebug("DCacheWordResp: data: %x id: %d miss: %b replay: %b\n",
      data, id, miss, replay)
  }
}

class DCacheWordResp(implicit p: Parameters) extends BaseDCacheWordResp
{
  val meta_prefetch = UInt(L1PfSourceBits.W)
  val meta_access = Bool()
  // s2
  val handled = Bool()
  val real_miss = Bool()
  // s3: 1 cycle after data resp
  val error_delayed = Bool() // all kinds of errors, include tag error
  val replacementUpdated = Bool()
}

class BankedDCacheWordResp(implicit p: Parameters) extends DCacheWordResp
{
  val bank_data = Vec(DCacheBanks, Bits(DCacheSRAMRowBits.W))
  val bank_oh = UInt(DCacheBanks.W)
}

class DCacheWordRespWithError(implicit p: Parameters) extends BaseDCacheWordResp
{
  val error = Bool() // all kinds of errors, include tag error
  val nderr = Bool()
}

class DCacheLineResp(implicit p: Parameters) extends DCacheBundle
{
  val data = UInt((cfg.blockBytes * 8).W)
  // cache req missed, send it to miss queue
  val miss = Bool()
  // cache req nacked, replay it later
  val replay = Bool()
  val id = UInt(reqIdWidth.W)
  def dump() = {
    XSDebug("DCacheLineResp: data: %x id: %d miss: %b replay: %b\n",
      data, id, miss, replay)
  }
}
class Refill(implicit p: Parameters) extends DCacheBundle
{
  val addr = UInt(PAddrBits.W)
  val data = UInt(l1BusDataWidth.W)
  val error = Bool() // refilled data has been corrupted
  // for debug usage
  val data_raw = UInt((cfg.blockBytes * 8).W)
  val hasdata = Bool()
  val refill_done = Bool()
  def dump() = {
    XSDebug("Refill: addr: %x data: %x\n", addr, data)
  }
  val id = UInt(log2Up(cfg.nMissEntries).W)
}

class Release(implicit p: Parameters) extends DCacheBundle
{
  val paddr = UInt(PAddrBits.W)
  def dump() = {
    XSDebug("Release: paddr: %x\n", paddr(PAddrBits-1, DCacheTagOffset))
  }
}

class DCacheWordIO(implicit p: Parameters) extends DCacheBundle
{
  val req = DecoupledIO(new DCacheWordReq)
  val resp = Flipped(DecoupledIO(new DCacheWordResp))
}


class UncacheWordReq(implicit p: Parameters) extends DCacheBundle
{
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val data = UInt(XLEN.W)
  val mask = UInt((XLEN/8).W)
  val id = UInt(uncacheIdxBits.W)
  val instrtype = UInt(sourceTypeWidth.W)
  val atomic = Bool()
  val isFirstIssue = Bool()
  val replayCarry = new ReplayCarry(nWays)

  def dump() = {
    XSDebug("UncacheWordReq: cmd: %x addr: %x data: %x mask: %x id: %d\n",
      cmd, addr, data, mask, id)
  }
}

class UncacheWordResp(implicit p: Parameters) extends DCacheBundle
{
  val data = UInt(XLEN.W)
  val data_delayed = UInt(XLEN.W)
  val id = UInt(uncacheIdxBits.W)
  val miss = Bool()
  val replay = Bool()
  val tag_error = Bool()
  val error = Bool()
  val nderr = Bool()
  val replayCarry = new ReplayCarry(nWays)
  val mshr_id = UInt(log2Up(cfg.nMissEntries).W) // FIXME: why uncacheWordResp is not merged to baseDcacheResp

  val debug_robIdx = UInt(log2Ceil(RobSize).W)
  def dump() = {
    XSDebug("UncacheWordResp: data: %x id: %d miss: %b replay: %b, tag_error: %b, error: %b\n",
      data, id, miss, replay, tag_error, error)
  }
}

class UncacheWordIO(implicit p: Parameters) extends DCacheBundle
{
  val req = DecoupledIO(new UncacheWordReq)
  val resp = Flipped(DecoupledIO(new UncacheWordResp))
}

class MainPipeResp(implicit p: Parameters) extends DCacheBundle {
  // distinguish amo
  val source = UInt(sourceTypeWidth.W)
  val data = UInt(DataBits.W)
  val miss = Bool()
  val miss_id = UInt(log2Up(cfg.nMissEntries).W)
  val replay = Bool()
  val error = Bool()

  val ack_miss_queue = Bool()

  val id = UInt(reqIdWidth.W)

  def isAMO: Bool = source === AMO_SOURCE.U
  def isStore: Bool = source === STORE_SOURCE.U
}

class AtomicWordIO(implicit p: Parameters) extends DCacheBundle
{
  val req = DecoupledIO(new MainPipeReq)
  val resp = Flipped(ValidIO(new MainPipeResp))
  val block_lr = Input(Bool())
}

// used by load unit
class DCacheLoadIO(implicit p: Parameters) extends DCacheWordIO
{
  // kill previous cycle's req
  val s1_kill_data_read = Output(Bool()) // only kill bankedDataRead at s1
  val s1_kill = Output(Bool()) // kill loadpipe req at s1
  val s2_kill = Output(Bool())
  val s0_pc = Output(UInt(VAddrBits.W))
  val s1_pc = Output(UInt(VAddrBits.W))
  val s2_pc = Output(UInt(VAddrBits.W))
  // cycle 0: load has updated replacement before
  val replacementUpdated = Output(Bool())
  val is128Req = Bool()
  // cycle 0: prefetch source bits
  val pf_source = Output(UInt(L1PfSourceBits.W))
  // cycle 0: load microop
  // val s0_uop = Output(new MicroOp)
  // cycle 0: virtual address: req.addr
  // cycle 1: physical address: s1_paddr
  val s1_paddr_dup_lsu = Output(UInt(PAddrBits.W)) // lsu side paddr
  val s1_paddr_dup_dcache = Output(UInt(PAddrBits.W)) // dcache side paddr
  val s1_disable_fast_wakeup = Input(Bool())
  // cycle 2: hit signal
  val s2_hit = Input(Bool()) // hit signal for lsu
  val s2_first_hit = Input(Bool())
  val s2_bank_conflict = Input(Bool())
  val s2_wpu_pred_fail = Input(Bool())
  val s2_mq_nack = Input(Bool())

  // debug
  val debug_s1_hit_way = Input(UInt(nWays.W))
  val debug_s2_pred_way_num = Input(UInt(XLEN.W))
  val debug_s2_dm_way_num = Input(UInt(XLEN.W))
  val debug_s2_real_way_num = Input(UInt(XLEN.W))
}
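// Load request timeline implied by DCacheLoadIO above:
//   s0: lsu sends req (virtual address in req.bits.vaddr) together with s0_pc
//   s1: the physical address shows up on s1_paddr_dup_lsu / s1_paddr_dup_dcache;
//       lsu may cancel the whole req via s1_kill, or only the banked data read
//       via s1_kill_data_read
//   s2: hit / bank-conflict / wpu-misprediction / mshr-nack feedback comes back;
//       lsu may still cancel via s2_kill
//   s3: data_delayed in the resp becomes valid (see BaseDCacheWordResp)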
class DCacheLineIO(implicit p: Parameters) extends DCacheBundle
{
  val req = DecoupledIO(new DCacheLineReq)
  val resp = Flipped(DecoupledIO(new DCacheLineResp))
}

class DCacheToSbufferIO(implicit p: Parameters) extends DCacheBundle {
  // sbuffer will directly send request to dcache main pipe
  val req = Flipped(Decoupled(new DCacheLineReq))

  val main_pipe_hit_resp = ValidIO(new DCacheLineResp)
  //val refill_hit_resp = ValidIO(new DCacheLineResp)

  val replay_resp = ValidIO(new DCacheLineResp)

  //def hit_resps: Seq[ValidIO[DCacheLineResp]] = Seq(main_pipe_hit_resp, refill_hit_resp)
  def hit_resps: Seq[ValidIO[DCacheLineResp]] = Seq(main_pipe_hit_resp)
}

// forward tilelink channel D's data to ldu
class DcacheToLduForwardIO(implicit p: Parameters) extends DCacheBundle {
  val valid = Bool()
  val data = UInt(l1BusDataWidth.W)
  val mshrid = UInt(log2Up(cfg.nMissEntries).W)
  val last = Bool()

  def apply(req_valid: Bool, req_data: UInt, req_mshrid: UInt, req_last: Bool) = {
    valid := req_valid
    data := req_data
    mshrid := req_mshrid
    last := req_last
  }

  def dontCare() = {
    valid := false.B
    data := DontCare
    mshrid := DontCare
    last := DontCare
  }

  def forward(req_valid: Bool, req_mshr_id: UInt, req_paddr: UInt) = {
    val all_match = req_valid && valid &&
      req_mshr_id === mshrid &&
      req_paddr(log2Up(refillBytes)) === last
    val forward_D = RegInit(false.B)
    val forwardData = RegInit(VecInit(List.fill(VLEN/8)(0.U(8.W))))

    val block_idx = req_paddr(log2Up(refillBytes) - 1, 3)
    val block_data = Wire(Vec(l1BusDataWidth / 64, UInt(64.W)))
    (0 until l1BusDataWidth / 64).map(i => {
      block_data(i) := data(64 * i + 63, 64 * i)
    })
    val selected_data = Wire(UInt(128.W))
    selected_data := Mux(req_paddr(3), Fill(2, block_data(block_idx)), Cat(block_data(block_idx + 1.U), block_data(block_idx)))

    forward_D := all_match
    for (i <- 0 until VLEN/8) {
      when (all_match) {
        forwardData(i) := selected_data(8 * i + 7, 8 * i)
      }
    }

    (forward_D, forwardData)
  }
}

class MissEntryForwardIO(implicit p: Parameters) extends DCacheBundle {
  val inflight = Bool()
  val paddr = UInt(PAddrBits.W)
  val raw_data = Vec(blockRows, UInt(rowBits.W))
  val firstbeat_valid = Bool()
  val lastbeat_valid = Bool()

  def apply(mshr_valid: Bool, mshr_paddr: UInt, mshr_rawdata: Vec[UInt], mshr_first_valid: Bool, mshr_last_valid: Bool) = {
    inflight := mshr_valid
    paddr := mshr_paddr
    raw_data := mshr_rawdata
    firstbeat_valid := mshr_first_valid
    lastbeat_valid := mshr_last_valid
  }

  // check if we can forward from mshr or D channel
  def check(req_valid: Bool, req_paddr: UInt) = {
    RegNext(req_valid && inflight && req_paddr(PAddrBits - 1, blockOffBits) === paddr(PAddrBits - 1, blockOffBits)) // TODO: clock gate(1-bit)
  }

  def forward(req_valid: Bool, req_paddr: UInt) = {
    val all_match = (req_paddr(log2Up(refillBytes)) === 0.U && firstbeat_valid) ||
      (req_paddr(log2Up(refillBytes)) === 1.U && lastbeat_valid)

    val forward_mshr = RegInit(false.B)
    val forwardData = RegInit(VecInit(List.fill(VLEN/8)(0.U(8.W))))

    val block_idx = req_paddr(log2Up(refillBytes), 3)
    val block_data = raw_data

    val selected_data = Wire(UInt(128.W))
    selected_data := Mux(req_paddr(3), Fill(2, block_data(block_idx)), Cat(block_data(block_idx + 1.U), block_data(block_idx)))

    forward_mshr := all_match
    for (i <- 0 until VLEN/8) {
      forwardData(i) := selected_data(8 * i + 7, 8 * i)
    }

    (forward_mshr, forwardData)
  }
}
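// How the 128-bit selection in the two forward() methods above works: a refill
// arrives in beats of l1BusDataWidth, req_paddr(log2Up(refillBytes)) selects
// the beat (first/last), and block_idx selects a 64-bit word inside it. When
// req_paddr(3) is set the requested word sits at an odd 64-bit index, so that
// word is replicated into both halves of selected_data (a full 128-bit access
// is 16-byte aligned and would have paddr(3) = 0); otherwise two adjacent
// words are concatenated. Note the result is registered, so the forwarded
// data is valid one cycle after the match.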
// forward mshr's data to ldu
class LduToMissqueueForwardIO(implicit p: Parameters) extends DCacheBundle {
  // req
  val valid = Input(Bool())
  val mshrid = Input(UInt(log2Up(cfg.nMissEntries).W))
  val paddr = Input(UInt(PAddrBits.W))
  // resp
  val forward_mshr = Output(Bool())
  val forwardData = Output(Vec(VLEN/8, UInt(8.W)))
  val forward_result_valid = Output(Bool())

  def connect(sink: LduToMissqueueForwardIO) = {
    sink.valid := valid
    sink.mshrid := mshrid
    sink.paddr := paddr
    forward_mshr := sink.forward_mshr
    forwardData := sink.forwardData
    forward_result_valid := sink.forward_result_valid
  }

  def forward() = {
    (forward_result_valid, forward_mshr, forwardData)
  }
}

class StorePrefetchReq(implicit p: Parameters) extends DCacheBundle {
  val paddr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
}

class DCacheToLsuIO(implicit p: Parameters) extends DCacheBundle {
  val load = Vec(LoadPipelineWidth, Flipped(new DCacheLoadIO)) // for speculative load
  val sta = Vec(StorePipelineWidth, Flipped(new DCacheStoreIO)) // for non-blocking store
  //val lsq = ValidIO(new Refill) // refill to load queue, wake up load misses
  val tl_d_channel = Output(new DcacheToLduForwardIO)
  val store = new DCacheToSbufferIO // for sbuffer
  val atomics = Flipped(new AtomicWordIO) // atomics reqs
  val release = ValidIO(new Release) // cacheline release hint for ld-ld violation check
  val forward_D = Output(Vec(LoadPipelineWidth, new DcacheToLduForwardIO))
  val forward_mshr = Vec(LoadPipelineWidth, new LduToMissqueueForwardIO)
}

class DCacheTopDownIO(implicit p: Parameters) extends DCacheBundle {
  val robHeadVaddr = Flipped(Valid(UInt(VAddrBits.W)))
  val robHeadMissInDCache = Output(Bool())
  val robHeadOtherReplay = Input(Bool())
}

class DCacheIO(implicit p: Parameters) extends DCacheBundle {
  val hartId = Input(UInt(hartIdLen.W))
  val l2_pf_store_only = Input(Bool())
  val lsu = new DCacheToLsuIO
  val csr = new L1CacheToCsrIO
  val error = ValidIO(new L1CacheErrorInfo)
  val mshrFull = Output(Bool())
  val memSetPattenDetected = Output(Bool())
  val lqEmpty = Input(Bool())
  val pf_ctrl = Output(new PrefetchControlBundle)
  val force_write = Input(Bool())
  val sms_agt_evict_req = DecoupledIO(new AGTEvictReq)
  val debugTopDown = new DCacheTopDownIO
  val debugRolling = Flipped(new RobDebugRollingIO)
  val l2_hint = Input(Valid(new L2ToL1Hint()))
}

private object ArbiterCtrl {
  def apply(request: Seq[Bool]): Seq[Bool] = request.length match {
    case 0 => Seq()
    case 1 => Seq(true.B)
    case _ => true.B +: request.tail.init.scanLeft(request.head)(_ || _).map(!_)
  }
}
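// Example: for three request valids (r0, r1, r2), the scanLeft above yields
//   grants = (true.B, !r0, !(r0 || r1))
// i.e. classic fixed-priority grant generation with index 0 highest, as used
// by TreeArbiter below.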
class TreeArbiter[T <: MissReqWoStoreData](val gen: T, val n: Int) extends Module {
  val io = IO(new ArbiterIO(gen, n))

  def selectTree(in: Vec[Valid[T]], sIdx: UInt): Tuple2[UInt, T] = {
    if (in.length == 1) {
      (sIdx, in(0).bits)
    } else if (in.length == 2) {
      (
        Mux(in(0).valid, sIdx, sIdx + 1.U),
        Mux(in(0).valid, in(0).bits, in(1).bits)
      )
    } else {
      val half = in.length / 2
      val leftValid = in.slice(0, half).map(_.valid).reduce(_ || _)
      val (leftIdx, leftSel) = selectTree(VecInit(in.slice(0, half)), sIdx)
      val (rightIdx, rightSel) = selectTree(VecInit(in.slice(half, in.length)), sIdx + half.U)
      (
        Mux(leftValid, leftIdx, rightIdx),
        Mux(leftValid, leftSel, rightSel)
      )
    }
  }
  val ins = Wire(Vec(n, Valid(gen)))
  for (i <- 0 until n) {
    ins(i).valid := io.in(i).valid
    ins(i).bits := io.in(i).bits
  }
  val (idx, sel) = selectTree(ins, 0.U)
  // NOTE: io.chosen is very slow, don't use it
  io.chosen := idx
  io.out.bits := sel

  val grant = ArbiterCtrl(io.in.map(_.valid))
  for ((in, g) <- io.in.zip(grant))
    in.ready := g && io.out.ready
  io.out.valid := !grant.last || io.in.last.valid
}
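// selectTree picks the lowest-indexed valid input through a balanced mux tree
// (log2(n) levels) rather than a linear priority chain. E.g. for n = 4 with
// valids (0, 1, 1, 0):
//   left half (in 0, 1) -> idx 1 (in(0) invalid); right half (in 2, 3) -> idx 2;
//   leftValid is true, so the root picks idx 1 / in(1).bits.
// If nothing is valid the selection is a don't-care; io.out.valid (driven from
// the grant chain) is what gates it.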
class DCacheMEQueryIOBundle(implicit p: Parameters) extends DCacheBundle
{
  val req = ValidIO(new MissReqWoStoreData)
  val primary_ready = Input(Bool())
  val secondary_ready = Input(Bool())
  val secondary_reject = Input(Bool())
}

class DCacheMQQueryIOBundle(implicit p: Parameters) extends DCacheBundle
{
  val req = ValidIO(new MissReq)
  val ready = Input(Bool())
}

class MissReadyGen(val n: Int)(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle {
    val in = Vec(n, Flipped(DecoupledIO(new MissReq)))
    val queryMQ = Vec(n, new DCacheMQQueryIOBundle)
  })

  val mqReadyVec = io.queryMQ.map(_.ready)

  io.queryMQ.zipWithIndex.foreach {
    case (q, idx) => {
      q.req.valid := io.in(idx).valid
      q.req.bits := io.in(idx).bits
    }
  }
  io.in.zipWithIndex.map {
    case (r, idx) => {
      if (idx == 0) {
        r.ready := mqReadyVec(idx)
      } else {
        r.ready := mqReadyVec(idx) && !Cat(io.in.slice(0, idx).map(_.valid)).orR
      }
    }
  }

}

class DCache()(implicit p: Parameters) extends LazyModule with HasDCacheParameters {
  override def shouldBeInlined: Boolean = false

  val reqFields: Seq[BundleFieldBase] = Seq(
    PrefetchField(),
    ReqSourceField(),
    VaddrField(VAddrBits - blockOffBits),
    // IsKeywordField()
  ) ++ cacheParams.aliasBitsOpt.map(AliasField)
  val echoFields: Seq[BundleFieldBase] = Seq(
    IsKeywordField()
  )

  val clientParameters = TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(
      name = "dcache",
      sourceId = IdRange(0, nEntries + 1),
      supportsProbe = TransferSizes(cfg.blockBytes)
    )),
    requestFields = reqFields,
    echoFields = echoFields
  )

  val clientNode = TLClientNode(Seq(clientParameters))

  lazy val module = new DCacheImp(this)
}


class DCacheImp(outer: DCache) extends LazyModuleImp(outer) with HasDCacheParameters with HasPerfEvents with HasL1PrefetchSourceParameter {

  val io = IO(new DCacheIO)

  val (bus, edge) = outer.clientNode.out.head
  require(bus.d.bits.data.getWidth == l1BusDataWidth, "DCache: tilelink width does not match")

  println("DCache:")
  println("  DCacheSets: " + DCacheSets)
  println("  DCacheSetDiv: " + DCacheSetDiv)
  println("  DCacheWays: " + DCacheWays)
  println("  DCacheBanks: " + DCacheBanks)
  println("  DCacheSRAMRowBits: " + DCacheSRAMRowBits)
  println("  DCacheWordOffset: " + DCacheWordOffset)
  println("  DCacheBankOffset: " + DCacheBankOffset)
  println("  DCacheSetOffset: " + DCacheSetOffset)
  println("  DCacheTagOffset: " + DCacheTagOffset)
  println("  DCacheAboveIndexOffset: " + DCacheAboveIndexOffset)
  println("  DcacheMaxPrefetchEntry: " + MaxPrefetchEntry)
  println("  WPUEnable: " + dwpuParam.enWPU)
  println("  WPUEnableCfPred: " + dwpuParam.enCfPred)
  println("  WPUAlgorithm: " + dwpuParam.algoName)
  println("  HasCMO: " + HasCMO)

  // Enable L1 Store prefetch
  val StorePrefetchL1Enabled = EnableStorePrefetchAtCommit || EnableStorePrefetchAtIssue || EnableStorePrefetchSPB
  val MetaReadPort =
    if (StorePrefetchL1Enabled)
      1 + backendParams.LduCnt + backendParams.StaCnt + backendParams.HyuCnt
    else
      1 + backendParams.LduCnt + backendParams.HyuCnt
  val TagReadPort =
    if (StorePrefetchL1Enabled)
      1 + backendParams.LduCnt + backendParams.StaCnt + backendParams.HyuCnt
    else
      1 + backendParams.LduCnt + backendParams.HyuCnt

  // Enable L1 Load prefetch
  val LoadPrefetchL1Enabled = true
  val AccessArrayReadPort = if(LoadPrefetchL1Enabled) LoadPipelineWidth + 1 + 1 else LoadPipelineWidth + 1
  val PrefetchArrayReadPort = if(LoadPrefetchL1Enabled) LoadPipelineWidth + 1 + 1 else LoadPipelineWidth + 1
  //----------------------------------------
  // core data structures
  val bankedDataArray = if(dwpuParam.enWPU) Module(new SramedDataArray) else Module(new BankedDataArray)
  val metaArray = Module(new L1CohMetaArray(readPorts = LoadPipelineWidth + 1, writePorts = 1))
  val errorArray = Module(new L1FlagMetaArray(readPorts = LoadPipelineWidth + 1, writePorts = 1))
  val prefetchArray = Module(new L1PrefetchSourceArray(readPorts = PrefetchArrayReadPort, writePorts = 1 + LoadPipelineWidth)) // prefetch flag array
  val accessArray = Module(new L1FlagMetaArray(readPorts = AccessArrayReadPort, writePorts = LoadPipelineWidth + 1))
  val tagArray = Module(new DuplicatedTagArray(readPorts = TagReadPort))
  val prefetcherMonitor = Module(new PrefetcherMonitor)
  val fdpMonitor = Module(new FDPrefetcherMonitor)
  val bloomFilter = Module(new BloomFilter(BLOOM_FILTER_ENTRY_NUM, true))
  val counterFilter = Module(new CounterFilter)
  bankedDataArray.dump()

  //----------------------------------------
  // miss queue
  // missReqArb port:
  // enableStorePrefetch: main pipe * 1 + load pipe * 2 + store pipe * 1 +
  // hybrid * 1; disable: main pipe * 1 + load pipe * 2 + hybrid * 1
  // higher priority is given to lower indices
  val MissReqPortCount = if(StorePrefetchL1Enabled) 1 + backendParams.LduCnt + backendParams.StaCnt + backendParams.HyuCnt else 1 + backendParams.LduCnt + backendParams.HyuCnt
  val MainPipeMissReqPort = 0
  val HybridMissReqBase = MissReqPortCount - backendParams.HyuCnt

  //----------------------------------------
  // core modules
  val ldu = Seq.tabulate(LoadPipelineWidth)({ i => Module(new LoadPipe(i)) })
  val stu = Seq.tabulate(StorePipelineWidth)({ i => Module(new StorePipe(i)) })
  val mainPipe = Module(new MainPipe)
  // val refillPipe = Module(new RefillPipe)
  val missQueue = Module(new MissQueue(edge, MissReqPortCount))
  val probeQueue = Module(new ProbeQueue(edge))
  val wb = Module(new WritebackQueue(edge))

  missQueue.io.lqEmpty := io.lqEmpty
  missQueue.io.hartId := io.hartId
  missQueue.io.l2_pf_store_only := RegNext(io.l2_pf_store_only, false.B)
  missQueue.io.debugTopDown <> io.debugTopDown
  missQueue.io.l2_hint <> RegNext(io.l2_hint)
  missQueue.io.mainpipe_info := mainPipe.io.mainpipe_info
  mainPipe.io.refill_info := missQueue.io.refill_info
  mainPipe.io.replace_block := missQueue.io.replace_block
  mainPipe.io.sms_agt_evict_req <> io.sms_agt_evict_req
  io.memSetPattenDetected := missQueue.io.memSetPattenDetected

  val errors = ldu.map(_.io.error) ++ // load error
    Seq(mainPipe.io.error) // store / misc error
  val error_valid = errors.map(e => e.valid).reduce(_|_)
  io.error.bits <> RegEnable(
    Mux1H(errors.map(e => RegNext(e.valid) -> RegEnable(e.bits, e.valid))),
    RegNext(error_valid))
  io.error.valid := RegNext(RegNext(error_valid, init = false.B), init = false.B)

  //----------------------------------------
  // meta array
  val HybridLoadReadBase = LoadPipelineWidth - backendParams.HyuCnt
  val HybridStoreReadBase = StorePipelineWidth - backendParams.HyuCnt

  val hybrid_meta_read_ports = Wire(Vec(backendParams.HyuCnt, DecoupledIO(new MetaReadReq)))
  val hybrid_meta_resp_ports = Wire(Vec(backendParams.HyuCnt, ldu(0).io.meta_resp.cloneType))
  for (i <- 0 until backendParams.HyuCnt) {
    val HybridLoadMetaReadPort = HybridLoadReadBase + i
    val HybridStoreMetaReadPort = HybridStoreReadBase + i

    hybrid_meta_read_ports(i).valid := ldu(HybridLoadMetaReadPort).io.meta_read.valid ||
      (stu(HybridStoreMetaReadPort).io.meta_read.valid && StorePrefetchL1Enabled.B)
    hybrid_meta_read_ports(i).bits := Mux(ldu(HybridLoadMetaReadPort).io.meta_read.valid, ldu(HybridLoadMetaReadPort).io.meta_read.bits,
      stu(HybridStoreMetaReadPort).io.meta_read.bits)

    ldu(HybridLoadMetaReadPort).io.meta_read.ready := hybrid_meta_read_ports(i).ready
    stu(HybridStoreMetaReadPort).io.meta_read.ready := hybrid_meta_read_ports(i).ready && StorePrefetchL1Enabled.B

    ldu(HybridLoadMetaReadPort).io.meta_resp := hybrid_meta_resp_ports(i)
    stu(HybridStoreMetaReadPort).io.meta_resp := hybrid_meta_resp_ports(i)
  }
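  // Note on the shared hybrid meta port above: when the hybrid load and hybrid
  // store pipes request the meta array in the same cycle, the Mux statically
  // prefers the load side and the store read is silently superseded. This is
  // presumably acceptable because the store-side meta read only serves store
  // prefetch training, not correctness.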
  // read / write coh meta
  val meta_read_ports = ldu.map(_.io.meta_read).take(HybridLoadReadBase) ++
    Seq(mainPipe.io.meta_read) ++
    stu.map(_.io.meta_read).take(HybridStoreReadBase) ++ hybrid_meta_read_ports

  val meta_resp_ports = ldu.map(_.io.meta_resp).take(HybridLoadReadBase) ++
    Seq(mainPipe.io.meta_resp) ++
    stu.map(_.io.meta_resp).take(HybridStoreReadBase) ++ hybrid_meta_resp_ports

  val meta_write_ports = Seq(
    mainPipe.io.meta_write
    // refillPipe.io.meta_write
  )
  if(StorePrefetchL1Enabled) {
    meta_read_ports.zip(metaArray.io.read).foreach { case (p, r) => r <> p }
    meta_resp_ports.zip(metaArray.io.resp).foreach { case (p, r) => p := r }
  } else {
    (meta_read_ports.take(HybridLoadReadBase + 1) ++
     meta_read_ports.takeRight(backendParams.HyuCnt)).zip(metaArray.io.read).foreach { case (p, r) => r <> p }
    (meta_resp_ports.take(HybridLoadReadBase + 1) ++
     meta_resp_ports.takeRight(backendParams.HyuCnt)).zip(metaArray.io.resp).foreach { case (p, r) => p := r }

    meta_read_ports.drop(HybridLoadReadBase + 1).take(HybridStoreReadBase).foreach { case p => p.ready := false.B }
    meta_resp_ports.drop(HybridLoadReadBase + 1).take(HybridStoreReadBase).foreach { case p => p := 0.U.asTypeOf(p) }
  }
  meta_write_ports.zip(metaArray.io.write).foreach { case (p, w) => w <> p }

  // read extra meta (exclude stu)
  (meta_read_ports.take(HybridLoadReadBase + 1) ++
   meta_read_ports.takeRight(backendParams.HyuCnt)).zip(errorArray.io.read).foreach { case (p, r) => r <> p }
  (meta_read_ports.take(HybridLoadReadBase + 1) ++
   meta_read_ports.takeRight(backendParams.HyuCnt)).zip(prefetchArray.io.read).foreach { case (p, r) => r <> p }
  (meta_read_ports.take(HybridLoadReadBase + 1) ++
   meta_read_ports.takeRight(backendParams.HyuCnt)).zip(accessArray.io.read).foreach { case (p, r) => r <> p }
  val extra_meta_resp_ports = ldu.map(_.io.extra_meta_resp).take(HybridLoadReadBase) ++
    Seq(mainPipe.io.extra_meta_resp) ++
    ldu.map(_.io.extra_meta_resp).takeRight(backendParams.HyuCnt)
  extra_meta_resp_ports.zip(errorArray.io.resp).foreach { case (p, r) => {
    (0 until nWays).map(i => { p(i).error := r(i) })
  }}
  extra_meta_resp_ports.zip(prefetchArray.io.resp).foreach { case (p, r) => {
    (0 until nWays).map(i => { p(i).prefetch := r(i) })
  }}
  extra_meta_resp_ports.zip(accessArray.io.resp).foreach { case (p, r) => {
    (0 until nWays).map(i => { p(i).access := r(i) })
  }}

  if(LoadPrefetchL1Enabled) {
    // use last port to read prefetch and access flag
    // prefetchArray.io.read.last.valid := refillPipe.io.prefetch_flag_write.valid
    // prefetchArray.io.read.last.bits.idx := refillPipe.io.prefetch_flag_write.bits.idx
    // prefetchArray.io.read.last.bits.way_en := refillPipe.io.prefetch_flag_write.bits.way_en
    //
    // accessArray.io.read.last.valid := refillPipe.io.prefetch_flag_write.valid
    // accessArray.io.read.last.bits.idx := refillPipe.io.prefetch_flag_write.bits.idx
    // accessArray.io.read.last.bits.way_en := refillPipe.io.prefetch_flag_write.bits.way_en
    prefetchArray.io.read.last.valid := mainPipe.io.prefetch_flag_write.valid
    prefetchArray.io.read.last.bits.idx := mainPipe.io.prefetch_flag_write.bits.idx
    prefetchArray.io.read.last.bits.way_en := mainPipe.io.prefetch_flag_write.bits.way_en

    accessArray.io.read.last.valid := mainPipe.io.prefetch_flag_write.valid
    accessArray.io.read.last.bits.idx := mainPipe.io.prefetch_flag_write.bits.idx
    accessArray.io.read.last.bits.way_en := mainPipe.io.prefetch_flag_write.bits.way_en

    val extra_flag_valid = RegNext(mainPipe.io.prefetch_flag_write.valid)
    val extra_flag_way_en = RegEnable(mainPipe.io.prefetch_flag_write.bits.way_en, mainPipe.io.prefetch_flag_write.valid)
    val extra_flag_prefetch = Mux1H(extra_flag_way_en, prefetchArray.io.resp.last)
    val extra_flag_access = Mux1H(extra_flag_way_en, accessArray.io.resp.last)

    prefetcherMonitor.io.validity.good_prefetch := extra_flag_valid && isPrefetchRelated(extra_flag_prefetch) && extra_flag_access
    prefetcherMonitor.io.validity.bad_prefetch := extra_flag_valid && isPrefetchRelated(extra_flag_prefetch) && !extra_flag_access
  }

  // write extra meta
  val error_flag_write_ports = Seq(
    mainPipe.io.error_flag_write // error flag generated by corrupted store
    // refillPipe.io.error_flag_write // corrupted signal from l2
  )
  error_flag_write_ports.zip(errorArray.io.write).foreach { case (p, w) => w <> p }

  val prefetch_flag_write_ports = ldu.map(_.io.prefetch_flag_write) ++ Seq(
    mainPipe.io.prefetch_flag_write // set prefetch_flag to false if coh is set to Nothing
    // refillPipe.io.prefetch_flag_write // refill required by prefetch will set prefetch_flag
  )
  prefetch_flag_write_ports.zip(prefetchArray.io.write).foreach { case (p, w) => w <> p }
  // FIXME: add hybrid unit?
  val same_cycle_update_pf_flag = ldu(0).io.prefetch_flag_write.valid && ldu(1).io.prefetch_flag_write.valid && (ldu(0).io.prefetch_flag_write.bits.idx === ldu(1).io.prefetch_flag_write.bits.idx) && (ldu(0).io.prefetch_flag_write.bits.way_en === ldu(1).io.prefetch_flag_write.bits.way_en)
  XSPerfAccumulate("same_cycle_update_pf_flag", same_cycle_update_pf_flag)

  val access_flag_write_ports = ldu.map(_.io.access_flag_write) ++ Seq(
    mainPipe.io.access_flag_write
    // refillPipe.io.access_flag_write
  )
  access_flag_write_ports.zip(accessArray.io.write).foreach { case (p, w) => w <> p }

  //----------------------------------------
  // tag array
  if(StorePrefetchL1Enabled) {
    require(tagArray.io.read.size == (LoadPipelineWidth + StorePipelineWidth - backendParams.HyuCnt + 1))
  } else {
    require(tagArray.io.read.size == (LoadPipelineWidth + 1))
  }
  // val tag_write_intend = missQueue.io.refill_pipe_req.valid || mainPipe.io.tag_write_intend
  val tag_write_intend = mainPipe.io.tag_write_intend
  assert(!RegNext(!tag_write_intend && tagArray.io.write.valid))
  ldu.take(HybridLoadReadBase).zipWithIndex.foreach {
    case (ld, i) =>
      tagArray.io.read(i) <> ld.io.tag_read
      ld.io.tag_resp := tagArray.io.resp(i)
      ld.io.tag_read.ready := !tag_write_intend
  }
  if(StorePrefetchL1Enabled) {
    stu.take(HybridStoreReadBase).zipWithIndex.foreach {
      case (st, i) =>
        tagArray.io.read(HybridLoadReadBase + i) <> st.io.tag_read
        st.io.tag_resp := tagArray.io.resp(HybridLoadReadBase + i)
        st.io.tag_read.ready := !tag_write_intend
    }
  } else {
    stu.foreach {
      case st =>
        st.io.tag_read.ready := false.B
        st.io.tag_resp := 0.U.asTypeOf(st.io.tag_resp)
    }
  }
  for (i <- 0 until backendParams.HyuCnt) {
    val HybridLoadTagReadPort = HybridLoadReadBase + i
    val HybridStoreTagReadPort = HybridStoreReadBase + i
    val TagReadPort =
      if (EnableStorePrefetchSPB)
        HybridLoadReadBase + HybridStoreReadBase + i
      else
        HybridLoadReadBase + i

    // read tag
    ldu(HybridLoadTagReadPort).io.tag_read.ready := false.B
    stu(HybridStoreTagReadPort).io.tag_read.ready := false.B

    if (StorePrefetchL1Enabled) {
      when (ldu(HybridLoadTagReadPort).io.tag_read.valid) {
        tagArray.io.read(TagReadPort) <> ldu(HybridLoadTagReadPort).io.tag_read
        ldu(HybridLoadTagReadPort).io.tag_read.ready := !tag_write_intend
      } .otherwise {
        tagArray.io.read(TagReadPort) <> stu(HybridStoreTagReadPort).io.tag_read
        stu(HybridStoreTagReadPort).io.tag_read.ready := !tag_write_intend
      }
    } else {
      tagArray.io.read(TagReadPort) <> ldu(HybridLoadTagReadPort).io.tag_read
      ldu(HybridLoadTagReadPort).io.tag_read.ready := !tag_write_intend
    }

    // tag resp
    ldu(HybridLoadTagReadPort).io.tag_resp := tagArray.io.resp(TagReadPort)
    stu(HybridStoreTagReadPort).io.tag_resp := tagArray.io.resp(TagReadPort)
  }
  tagArray.io.read.last <> mainPipe.io.tag_read
  mainPipe.io.tag_resp := tagArray.io.resp.last

  val fake_tag_read_conflict_this_cycle = PopCount(ldu.map(ld => ld.io.tag_read.valid))
  XSPerfAccumulate("fake_tag_read_conflict", fake_tag_read_conflict_this_cycle)

  val tag_write_arb = Module(new Arbiter(new TagWriteReq, 1))
  // tag_write_arb.io.in(0) <> refillPipe.io.tag_write
  tag_write_arb.io.in(0) <> mainPipe.io.tag_write
  tagArray.io.write <> tag_write_arb.io.out
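  // Note: with the refill pipe folded into the main pipe (see the commented-out
  // refillPipe connections in this file), mainPipe is the only remaining tag
  // writer, so this 1-input Arbiter is effectively a passthrough kept for
  // interface stability.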
  ldu.map(m => {
    m.io.vtag_update.valid := tagArray.io.write.valid
    m.io.vtag_update.bits := tagArray.io.write.bits
  })

  //----------------------------------------
  // data array
  mainPipe.io.data_read.zip(ldu).map(x => x._1 := x._2.io.lsu.req.valid)

  val dataWriteArb = Module(new Arbiter(new L1BankedDataWriteReq, 1))
  // dataWriteArb.io.in(0) <> refillPipe.io.data_write
  dataWriteArb.io.in(0) <> mainPipe.io.data_write

  bankedDataArray.io.write <> dataWriteArb.io.out

  for (bank <- 0 until DCacheBanks) {
    val dataWriteArb_dup = Module(new Arbiter(new L1BankedDataWriteReqCtrl, 1))
    // dataWriteArb_dup.io.in(0).valid := refillPipe.io.data_write_dup(bank).valid
    // dataWriteArb_dup.io.in(0).bits := refillPipe.io.data_write_dup(bank).bits
    dataWriteArb_dup.io.in(0).valid := mainPipe.io.data_write_dup(bank).valid
    dataWriteArb_dup.io.in(0).bits := mainPipe.io.data_write_dup(bank).bits

    bankedDataArray.io.write_dup(bank) <> dataWriteArb_dup.io.out
  }

  bankedDataArray.io.readline <> mainPipe.io.data_readline
  bankedDataArray.io.readline_intend := mainPipe.io.data_read_intend
  mainPipe.io.readline_error_delayed := bankedDataArray.io.readline_error_delayed
  mainPipe.io.data_resp := bankedDataArray.io.readline_resp

  (0 until LoadPipelineWidth).map(i => {
    bankedDataArray.io.read(i) <> ldu(i).io.banked_data_read
    bankedDataArray.io.is128Req(i) <> ldu(i).io.is128Req
    bankedDataArray.io.read_error_delayed(i) <> ldu(i).io.read_error_delayed

    ldu(i).io.banked_data_resp := bankedDataArray.io.read_resp(i)

    ldu(i).io.bank_conflict_slow := bankedDataArray.io.bank_conflict_slow(i)
  })
  val isKeyword = bus.d.bits.echo.lift(IsKeywordKey).getOrElse(false.B)
  (0 until LoadPipelineWidth).map(i => {
    val (_, _, done, _) = edge.count(bus.d)
    when(bus.d.bits.opcode === TLMessages.GrantData) {
      io.lsu.forward_D(i).apply(bus.d.valid, bus.d.bits.data, bus.d.bits.source, isKeyword ^ done)
      // io.lsu.forward_D(i).apply(bus.d.valid, bus.d.bits.data, bus.d.bits.source, done)
    }.otherwise {
      io.lsu.forward_D(i).dontCare()
    }
  })
  // tl D channel wakeup
  val (_, _, done, _) = edge.count(bus.d)
  when (bus.d.bits.opcode === TLMessages.GrantData || bus.d.bits.opcode === TLMessages.Grant) {
    io.lsu.tl_d_channel.apply(bus.d.valid, bus.d.bits.data, bus.d.bits.source, done)
  } .otherwise {
    io.lsu.tl_d_channel.dontCare()
  }
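  // isKeyword comes back on the D-channel echo field (IsKeywordKey): L2 may
  // deliver the critical (keyword) beat first. XORing it with `done` re-labels
  // which beat counts as the upper/lower half, so forward_D still hands the
  // load the half it actually asked for; the tl_d_channel wakeup path above
  // uses plain `done` because it forwards every beat.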
  mainPipe.io.force_write <> io.force_write

  /** dwpu */
  if (dwpuParam.enWPU) {
    val dwpu = Module(new DCacheWpuWrapper(LoadPipelineWidth))
    for (i <- 0 until LoadPipelineWidth) {
      dwpu.io.req(i) <> ldu(i).io.dwpu.req(0)
      dwpu.io.resp(i) <> ldu(i).io.dwpu.resp(0)
      dwpu.io.lookup_upd(i) <> ldu(i).io.dwpu.lookup_upd(0)
      dwpu.io.cfpred(i) <> ldu(i).io.dwpu.cfpred(0)
    }
    dwpu.io.tagwrite_upd.valid := tagArray.io.write.valid
    dwpu.io.tagwrite_upd.bits.vaddr := tagArray.io.write.bits.vaddr
    dwpu.io.tagwrite_upd.bits.s1_real_way_en := tagArray.io.write.bits.way_en
  } else {
    for (i <- 0 until LoadPipelineWidth) {
      ldu(i).io.dwpu.req(0).ready := true.B
      ldu(i).io.dwpu.resp(0).valid := false.B
      ldu(i).io.dwpu.resp(0).bits := DontCare
    }
  }

  //----------------------------------------
  // load pipe
  // the s1 kill signal
  // only lsu uses this, replay never kills
  for (w <- 0 until LoadPipelineWidth) {
    ldu(w).io.lsu <> io.lsu.load(w)

    // TODO: when we have load128Req
    ldu(w).io.load128Req := io.lsu.load(w).is128Req

    // replay and nack not needed anymore
    // TODO: remove replay and nack
    ldu(w).io.nack := false.B

    ldu(w).io.disable_ld_fast_wakeup :=
      bankedDataArray.io.disable_ld_fast_wakeup(w) // load pipe fast wake up should be disabled when bank conflict
  }

  prefetcherMonitor.io.timely.total_prefetch := ldu.map(_.io.prefetch_info.naive.total_prefetch).reduce(_ || _)
  prefetcherMonitor.io.timely.late_hit_prefetch := ldu.map(_.io.prefetch_info.naive.late_hit_prefetch).reduce(_ || _)
  prefetcherMonitor.io.timely.late_miss_prefetch := missQueue.io.prefetch_info.naive.late_miss_prefetch
  prefetcherMonitor.io.timely.prefetch_hit := PopCount(ldu.map(_.io.prefetch_info.naive.prefetch_hit))
  io.pf_ctrl <> prefetcherMonitor.io.pf_ctrl
  XSPerfAccumulate("useless_prefetch", ldu.map(_.io.prefetch_info.naive.total_prefetch).reduce(_ || _) && !(ldu.map(_.io.prefetch_info.naive.useful_prefetch).reduce(_ || _)))
  XSPerfAccumulate("useful_prefetch", ldu.map(_.io.prefetch_info.naive.useful_prefetch).reduce(_ || _))
  XSPerfAccumulate("late_prefetch_hit", ldu.map(_.io.prefetch_info.naive.late_prefetch_hit).reduce(_ || _))
  XSPerfAccumulate("late_load_hit", ldu.map(_.io.prefetch_info.naive.late_load_hit).reduce(_ || _))

  /** LoadMissDB: record load miss state */
  val hartId = p(XSCoreParamsKey).HartId
  val isWriteLoadMissTable = Constantin.createRecord(s"isWriteLoadMissTable$hartId")
  val isFirstHitWrite = Constantin.createRecord(s"isFirstHitWrite$hartId")
  val tableName = s"LoadMissDB$hartId"
  val siteName = s"DcacheWrapper$hartId"
  val loadMissTable = ChiselDB.createTable(tableName, new LoadMissEntry)
  for (i <- 0 until LoadPipelineWidth) {
    val loadMissEntry = Wire(new LoadMissEntry)
    val loadMissWriteEn =
      (!ldu(i).io.lsu.resp.bits.replay && ldu(i).io.miss_req.fire) ||
      (ldu(i).io.lsu.s2_first_hit && ldu(i).io.lsu.resp.valid && isFirstHitWrite.orR)
    loadMissEntry.timeCnt := GTimer()
    loadMissEntry.robIdx := ldu(i).io.lsu.resp.bits.debug_robIdx
    loadMissEntry.paddr := ldu(i).io.miss_req.bits.addr
    loadMissEntry.vaddr := ldu(i).io.miss_req.bits.vaddr
    loadMissEntry.missState := OHToUInt(Cat(Seq(
      ldu(i).io.miss_req.fire & ldu(i).io.miss_resp.merged,
      ldu(i).io.miss_req.fire & !ldu(i).io.miss_resp.merged,
      ldu(i).io.lsu.s2_first_hit && ldu(i).io.lsu.resp.valid
    )))
    loadMissTable.log(
      data = loadMissEntry,
      en = isWriteLoadMissTable.orR && loadMissWriteEn,
      site = siteName,
      clock = clock,
      reset = reset
    )
  }

  val isWriteLoadAccessTable = Constantin.createRecord(s"isWriteLoadAccessTable$hartId")
  val loadAccessTable = ChiselDB.createTable(s"LoadAccessDB$hartId", new LoadAccessEntry)
  for (i <- 0 until LoadPipelineWidth) {
    val loadAccessEntry = Wire(new LoadAccessEntry)
    loadAccessEntry.timeCnt := GTimer()
    loadAccessEntry.robIdx := ldu(i).io.lsu.resp.bits.debug_robIdx
    loadAccessEntry.paddr := ldu(i).io.miss_req.bits.addr
    loadAccessEntry.vaddr := ldu(i).io.miss_req.bits.vaddr
    loadAccessEntry.missState := OHToUInt(Cat(Seq(
      ldu(i).io.miss_req.fire & ldu(i).io.miss_resp.merged,
      ldu(i).io.miss_req.fire & !ldu(i).io.miss_resp.merged,
      ldu(i).io.lsu.s2_first_hit && ldu(i).io.lsu.resp.valid
    )))
    loadAccessEntry.pred_way_num := ldu(i).io.lsu.debug_s2_pred_way_num
    loadAccessEntry.real_way_num := ldu(i).io.lsu.debug_s2_real_way_num
    loadAccessEntry.dm_way_num := ldu(i).io.lsu.debug_s2_dm_way_num
    loadAccessTable.log(
      data = loadAccessEntry,
      en = isWriteLoadAccessTable.orR && ldu(i).io.lsu.resp.valid,
      site = siteName + "_loadpipe" + i.toString,
      clock = clock,
      reset = reset
    )
  }
  //----------------------------------------
  // Sta pipe
  for (w <- 0 until StorePipelineWidth) {
    stu(w).io.lsu <> io.lsu.sta(w)
  }

  //----------------------------------------
  // atomics
  // atomics not finished yet
  val atomic_resp_valid = mainPipe.io.atomic_resp.valid && mainPipe.io.atomic_resp.bits.isAMO
  io.lsu.atomics.resp.valid := RegNext(atomic_resp_valid)
  io.lsu.atomics.resp.bits := RegEnable(mainPipe.io.atomic_resp.bits, atomic_resp_valid)
  io.lsu.atomics.block_lr := mainPipe.io.block_lr
  // atomicsReplayUnit.io.pipe_resp := RegNext(mainPipe.io.atomic_resp)
  // atomicsReplayUnit.io.block_lr <> mainPipe.io.block_lr

  // Request
  val missReqArb = Module(new TreeArbiter(new MissReq, MissReqPortCount))
  // separately generating miss queue enq ready for better timing
  val missReadyGen = Module(new MissReadyGen(MissReqPortCount))

  missReqArb.io.in(MainPipeMissReqPort) <> mainPipe.io.miss_req
  missReadyGen.io.in(MainPipeMissReqPort) <> mainPipe.io.miss_req
  for (w <- 0 until backendParams.LduCnt) {
    missReqArb.io.in(w + 1) <> ldu(w).io.miss_req
    missReadyGen.io.in(w + 1) <> ldu(w).io.miss_req
  }

  for (w <- 0 until LoadPipelineWidth) { ldu(w).io.miss_resp := missQueue.io.resp }
  mainPipe.io.miss_resp := missQueue.io.resp

  if(StorePrefetchL1Enabled) {
    for (w <- 0 until backendParams.StaCnt) {
      missReqArb.io.in(1 + backendParams.LduCnt + w) <> stu(w).io.miss_req
      missReadyGen.io.in(1 + backendParams.LduCnt + w) <> stu(w).io.miss_req
    }
  } else {
    for (w <- 0 until backendParams.StaCnt) { stu(w).io.miss_req.ready := false.B }
  }

  for (i <- 0 until backendParams.HyuCnt) {
    val HybridLoadReqPort = HybridLoadReadBase + i
    val HybridStoreReqPort = HybridStoreReadBase + i
    val HybridMissReqPort = HybridMissReqBase + i

    ldu(HybridLoadReqPort).io.miss_req.ready := false.B
    stu(HybridStoreReqPort).io.miss_req.ready := false.B

    if (StorePrefetchL1Enabled) {
      when (ldu(HybridLoadReqPort).io.miss_req.valid) {
        missReqArb.io.in(HybridMissReqPort) <> ldu(HybridLoadReqPort).io.miss_req
        missReadyGen.io.in(HybridMissReqPort) <> ldu(HybridLoadReqPort).io.miss_req
      } .otherwise {
        missReqArb.io.in(HybridMissReqPort) <> stu(HybridStoreReqPort).io.miss_req
        missReadyGen.io.in(HybridMissReqPort) <> stu(HybridStoreReqPort).io.miss_req
      }
    } else {
      missReqArb.io.in(HybridMissReqPort) <> ldu(HybridLoadReqPort).io.miss_req
      missReadyGen.io.in(HybridMissReqPort) <> ldu(HybridLoadReqPort).io.miss_req
    }
  }

  for(w <- 0 until LoadPipelineWidth) {
    wb.io.miss_req_conflict_check(w) := ldu(w).io.wbq_conflict_check
    ldu(w).io.wbq_block_miss_req := wb.io.block_miss_req(w)
  }

  wb.io.miss_req_conflict_check(3) := mainPipe.io.wbq_conflict_check
  mainPipe.io.wbq_block_miss_req := wb.io.block_miss_req(3)

  wb.io.miss_req_conflict_check(4).valid := missReqArb.io.out.valid
  wb.io.miss_req_conflict_check(4).bits := missReqArb.io.out.bits.addr
  missQueue.io.wbq_block_miss_req := wb.io.block_miss_req(4)

  missReqArb.io.out <> missQueue.io.req
  missReadyGen.io.queryMQ <> missQueue.io.queryMQ

  for (w <- 0 until LoadPipelineWidth) { ldu(w).io.mq_enq_cancel := missQueue.io.mq_enq_cancel }

  XSPerfAccumulate("miss_queue_fire", PopCount(VecInit(missReqArb.io.in.map(_.fire))) >= 1.U)
  XSPerfAccumulate("miss_queue_muti_fire", PopCount(VecInit(missReqArb.io.in.map(_.fire))) > 1.U)

  XSPerfAccumulate("miss_queue_has_enq_req", PopCount(VecInit(missReqArb.io.in.map(_.valid))) >= 1.U)
  XSPerfAccumulate("miss_queue_has_muti_enq_req", PopCount(VecInit(missReqArb.io.in.map(_.valid))) > 1.U)
  XSPerfAccumulate("miss_queue_has_muti_enq_but_not_fire", PopCount(VecInit(missReqArb.io.in.map(_.valid))) > 1.U && PopCount(VecInit(missReqArb.io.in.map(_.fire))) === 0.U)
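  // Why two modules above: TreeArbiter (missReqArb) picks the winning miss
  // request with index 0 (the main pipe) at the highest priority, while
  // MissReadyGen derives each source's enq-ready straight from the miss queue
  // query results, so the wide valid-to-ready path bypasses the arbiter; this
  // is the "better timing" referred to in the comment at missReadyGen.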
  // forward missqueue
  (0 until LoadPipelineWidth).map(i => io.lsu.forward_mshr(i).connect(missQueue.io.forward(i)))

  // refill to load queue
  // io.lsu.lsq <> missQueue.io.refill_to_ldq

  // tilelink stuff
  bus.a <> missQueue.io.mem_acquire
  bus.e <> missQueue.io.mem_finish
  missQueue.io.probe_addr := bus.b.bits.address
  missQueue.io.replace_addr := mainPipe.io.replace_addr

  missQueue.io.main_pipe_resp.valid := RegNext(mainPipe.io.atomic_resp.valid)
  missQueue.io.main_pipe_resp.bits := RegEnable(mainPipe.io.atomic_resp.bits, mainPipe.io.atomic_resp.valid)

  //----------------------------------------
  // probe
  // probeQueue.io.mem_probe <> bus.b
  block_decoupled(bus.b, probeQueue.io.mem_probe, missQueue.io.probe_block)
  probeQueue.io.lrsc_locked_block <> mainPipe.io.lrsc_locked_block
  probeQueue.io.update_resv_set <> mainPipe.io.update_resv_set

  val refill_req = RegNext(missQueue.io.main_pipe_req.valid && ((missQueue.io.main_pipe_req.bits.isLoad) | (missQueue.io.main_pipe_req.bits.isStore)))
  //----------------------------------------
  // mainPipe
  // when a req enters main pipe, block it if it is set-conflicting with
  // the replace pipe or refill pipe
  probeQueue.io.pipe_req <> mainPipe.io.probe_req
  io.lsu.store.req <> mainPipe.io.store_req

  io.lsu.store.replay_resp.valid := RegNext(mainPipe.io.store_replay_resp.valid)
  io.lsu.store.replay_resp.bits := RegEnable(mainPipe.io.store_replay_resp.bits, mainPipe.io.store_replay_resp.valid)
  io.lsu.store.main_pipe_hit_resp := mainPipe.io.store_hit_resp

  mainPipe.io.atomic_req <> io.lsu.atomics.req

  mainPipe.io.invalid_resv_set := RegNext(
    wb.io.req.fire &&
    wb.io.req.bits.addr === mainPipe.io.lrsc_locked_block.bits &&
    mainPipe.io.lrsc_locked_block.valid
  )

  //----------------------------------------
  // replace (main pipe)
  val mpStatus = mainPipe.io.status
  mainPipe.io.refill_req <> missQueue.io.main_pipe_req

  mainPipe.io.data_write_ready_dup := VecInit(Seq.fill(nDupDataWriteReady)(true.B))
  mainPipe.io.tag_write_ready_dup := VecInit(Seq.fill(nDupDataWriteReady)(true.B))
  mainPipe.io.wb_ready_dup := wb.io.req_ready_dup

  //----------------------------------------
  // wb
  // add a queue between MainPipe and WritebackUnit to reduce MainPipe stalls due to WritebackUnit busy

  wb.io.req <> mainPipe.io.wb
  bus.c <> wb.io.mem_release
  // wb.io.release_wakeup := refillPipe.io.release_wakeup
  // wb.io.release_update := mainPipe.io.release_update
  // wb.io.probe_ttob_check_req <> mainPipe.io.probe_ttob_check_req
  // wb.io.probe_ttob_check_resp <> mainPipe.io.probe_ttob_check_resp

  io.lsu.release.valid := RegNext(wb.io.req.fire)
  io.lsu.release.bits.paddr := RegEnable(wb.io.req.bits.addr, wb.io.req.fire)
  // Note: RegNext() is required by:
  // * load queue released flag update logic
  // * load / load violation check logic
  // * and timing requirements
  // CHANGE IT WITH CARE

  // connect bus d
  missQueue.io.mem_grant.valid := false.B
  missQueue.io.mem_grant.bits := DontCare

  wb.io.mem_grant.valid := false.B
  wb.io.mem_grant.bits := DontCare

  // in the L1 DCache, we only expect Grant[Data] and ReleaseAck
  bus.d.ready := false.B
  when (bus.d.bits.opcode === TLMessages.Grant || bus.d.bits.opcode === TLMessages.GrantData) {
    missQueue.io.mem_grant <> bus.d
  } .elsewhen (bus.d.bits.opcode === TLMessages.ReleaseAck) {
    wb.io.mem_grant <> bus.d
  } .otherwise {
    assert(!bus.d.fire)
  }

  //----------------------------------------
  // Feedback Direct Prefetch Monitor
  fdpMonitor.io.refill := missQueue.io.prefetch_info.fdp.prefetch_monitor_cnt
  fdpMonitor.io.timely.late_prefetch := missQueue.io.prefetch_info.fdp.late_miss_prefetch
  fdpMonitor.io.accuracy.total_prefetch := missQueue.io.prefetch_info.fdp.total_prefetch
  for (w <- 0 until LoadPipelineWidth) {
    if (w == 0) {
      fdpMonitor.io.accuracy.useful_prefetch(w) := ldu(w).io.prefetch_info.fdp.useful_prefetch
    } else {
      fdpMonitor.io.accuracy.useful_prefetch(w) := Mux(same_cycle_update_pf_flag, false.B, ldu(w).io.prefetch_info.fdp.useful_prefetch)
    }
  }
  for (w <- 0 until LoadPipelineWidth) { fdpMonitor.io.pollution.cache_pollution(w) := ldu(w).io.prefetch_info.fdp.pollution }
  for (w <- 0 until LoadPipelineWidth) { fdpMonitor.io.pollution.demand_miss(w) := ldu(w).io.prefetch_info.fdp.demand_miss }
  fdpMonitor.io.debugRolling := io.debugRolling

  //----------------------------------------
  // Bloom Filter
  // bloomFilter.io.set <> missQueue.io.bloom_filter_query.set
  // bloomFilter.io.clr <> missQueue.io.bloom_filter_query.clr
  bloomFilter.io.set <> mainPipe.io.bloom_filter_query.set
  bloomFilter.io.clr <> mainPipe.io.bloom_filter_query.clr

  for (w <- 0 until LoadPipelineWidth) { bloomFilter.io.query(w) <> ldu(w).io.bloom_filter_query.query }
  for (w <- 0 until LoadPipelineWidth) { bloomFilter.io.resp(w) <> ldu(w).io.bloom_filter_query.resp }

  for (w <- 0 until LoadPipelineWidth) { counterFilter.io.ld_in(w) <> ldu(w).io.counter_filter_enq }
  for (w <- 0 until LoadPipelineWidth) { counterFilter.io.query(w) <> ldu(w).io.counter_filter_query }

  //----------------------------------------
  // replacement algorithm
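  // Way selection: each requester (load pipes, main pipe, sta pipes) submits a
  // victim-way query below. With DWPU confidence prediction (dwpuParam.enCfPred),
  // sets flagged by VictimList take the replacer's choice while the remaining
  // sets keep the direct-mapped way prediction (req.dmWay); otherwise the
  // configured replacer always chooses the victim way.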
  val replacer = ReplacementPolicy.fromString(cacheParams.replacer, nWays, nSets)
  val replWayReqs = ldu.map(_.io.replace_way) ++ Seq(mainPipe.io.replace_way) ++ stu.map(_.io.replace_way)

  if (dwpuParam.enCfPred) {
    val victimList = VictimList(nSets)
    replWayReqs.foreach {
      case req =>
        req.way := DontCare
        when(req.set.valid) {
          when(victimList.whether_sa(req.set.bits)) {
            req.way := replacer.way(req.set.bits)
          }.otherwise {
            req.way := req.dmWay
          }
        }
    }
  } else {
    replWayReqs.foreach {
      case req =>
        req.way := DontCare
        when(req.set.valid) {
          req.way := replacer.way(req.set.bits)
        }
    }
  }

  val replAccessReqs = ldu.map(_.io.replace_access) ++ Seq(
    mainPipe.io.replace_access
  ) ++ stu.map(_.io.replace_access)
  val touchWays = Seq.fill(replAccessReqs.size)(Wire(ValidIO(UInt(log2Up(nWays).W))))
  touchWays.zip(replAccessReqs).foreach {
    case (w, req) =>
      w.valid := req.valid
      w.bits := req.bits.way
  }
  val touchSets = replAccessReqs.map(_.bits.set)
  replacer.access(touchSets, touchWays)

  //----------------------------------------
  // assertions
  // dcache should only deal with DRAM addresses
  import freechips.rocketchip.util._
  when (bus.a.fire) {
    assert(PmemRanges.map(range => bus.a.bits.address.inRange(range._1.U, range._2.U)).reduce(_ || _))
  }
  when (bus.b.fire) {
    assert(PmemRanges.map(range => bus.b.bits.address.inRange(range._1.U, range._2.U)).reduce(_ || _))
  }
  when (bus.c.fire) {
    assert(PmemRanges.map(range => bus.c.bits.address.inRange(range._1.U, range._2.U)).reduce(_ || _))
  }

  //----------------------------------------
  // utility functions
  def block_decoupled[T <: Data](source: DecoupledIO[T], sink: DecoupledIO[T], block_signal: Bool) = {
    sink.valid := source.valid && !block_signal
    source.ready := sink.ready && !block_signal
    sink.bits := source.bits
  }
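  // block_decoupled gates both sides of the handshake: while block_signal is high
  // neither valid nor ready is observed, so no beat is lost. It is used above to
  // hold off incoming probes:
  //   block_decoupled(bus.b, probeQueue.io.mem_probe, missQueue.io.probe_block)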
  //----------------------------------------
  // Customized CSR cache op support
  val cacheOpDecoder = Module(new CSRCacheOpDecoder("dcache", CacheInstrucion.COP_ID_DCACHE))
  cacheOpDecoder.io.csr <> io.csr
  bankedDataArray.io.cacheOp.req := cacheOpDecoder.io.cache.req
  // dup cacheOp_req_valid
  bankedDataArray.io.cacheOp_req_dup.zipWithIndex.map { case (dup, i) => dup := cacheOpDecoder.io.cache_req_dup(i) }
  // dup cacheOp_req_bits_opCode
  bankedDataArray.io.cacheOp_req_bits_opCode_dup.zipWithIndex.map { case (dup, i) => dup := cacheOpDecoder.io.cacheOp_req_bits_opCode_dup(i) }

  tagArray.io.cacheOp.req := cacheOpDecoder.io.cache.req
  // dup cacheOp_req_valid
  tagArray.io.cacheOp_req_dup.zipWithIndex.map { case (dup, i) => dup := cacheOpDecoder.io.cache_req_dup(i) }
  // dup cacheOp_req_bits_opCode
  tagArray.io.cacheOp_req_bits_opCode_dup.zipWithIndex.map { case (dup, i) => dup := cacheOpDecoder.io.cacheOp_req_bits_opCode_dup(i) }

  cacheOpDecoder.io.cache.resp.valid := bankedDataArray.io.cacheOp.resp.valid ||
    tagArray.io.cacheOp.resp.valid
  cacheOpDecoder.io.cache.resp.bits := Mux1H(List(
    bankedDataArray.io.cacheOp.resp.valid -> bankedDataArray.io.cacheOp.resp.bits,
    tagArray.io.cacheOp.resp.valid -> tagArray.io.cacheOp.resp.bits,
  ))
  cacheOpDecoder.io.error := io.error
  // at most one of the data / tag arrays may respond in a given cycle
  assert(!((bankedDataArray.io.cacheOp.resp.valid +& tagArray.io.cacheOp.resp.valid) > 1.U))

  //----------------------------------------
  // performance counters
  val num_loads = PopCount(ldu.map(e => e.io.lsu.req.fire))
  XSPerfAccumulate("num_loads", num_loads)

  io.mshrFull := missQueue.io.full

  // performance counter
  // val ld_access = Wire(Vec(LoadPipelineWidth, missQueue.io.debug_early_replace.last.cloneType))
  // val st_access = Wire(ld_access.last.cloneType)
  // ld_access.zip(ldu).foreach {
  //   case (a, u) =>
  //     a.valid := RegNext(u.io.lsu.req.fire) && !u.io.lsu.s1_kill
  //     a.bits.idx := RegEnable(get_idx(u.io.lsu.req.bits.vaddr), u.io.lsu.req.fire)
  //     a.bits.tag := get_tag(u.io.lsu.s1_paddr_dup_dcache)
  // }
  // st_access.valid := RegNext(mainPipe.io.store_req.fire)
  // st_access.bits.idx := RegEnable(get_idx(mainPipe.io.store_req.bits.vaddr), mainPipe.io.store_req.fire)
  // st_access.bits.tag := RegEnable(get_tag(mainPipe.io.store_req.bits.addr), mainPipe.io.store_req.fire)
  // val access_info = ld_access.toSeq ++ Seq(st_access)
  // val early_replace = RegNext(missQueue.io.debug_early_replace) // TODO: clock gate
  // val access_early_replace = access_info.map {
  //   case acc =>
  //     Cat(early_replace.map {
  //       case r =>
  //         acc.valid && r.valid &&
  //         acc.bits.tag === r.bits.tag &&
  //         acc.bits.idx === r.bits.idx
  //     })
  // }
  // XSPerfAccumulate("access_early_replace", PopCount(Cat(access_early_replace)))

  val perfEvents = (Seq(wb, mainPipe, missQueue, probeQueue) ++ ldu).flatMap(_.getPerfEvents)
  generatePerfEvent()
}

class AMOHelper() extends ExtModule {
  val clock = IO(Input(Clock()))
  val enable = IO(Input(Bool()))
  val cmd = IO(Input(UInt(5.W)))
  val addr = IO(Input(UInt(64.W)))
  val wdata = IO(Input(UInt(64.W)))
  val mask = IO(Input(UInt(8.W)))
  val rdata = IO(Output(UInt(64.W)))
}

class DCacheWrapper()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false

  val useDcache = coreParams.dcacheParametersOpt.nonEmpty
  val clientNode = if (useDcache) TLIdentityNode() else null
  val dcache = if (useDcache) LazyModule(new DCache()) else null
  if (useDcache) {
    clientNode := dcache.clientNode
  }

  class DCacheWrapperImp(wrapper: LazyModule) extends LazyModuleImp(wrapper) with HasPerfEvents {
    val io = IO(new DCacheIO)
    val perfEvents = if (!useDcache) {
      // a fake dcache that uses DPI-C to access memory, for debug use only!
      val fake_dcache = Module(new FakeDCache())
      io <> fake_dcache.io
      Seq()
    } else {
      io <> dcache.module.io
      dcache.module.getPerfEvents
    }
    generatePerfEvent()
  }

  lazy val module = new DCacheWrapperImp(this)
}
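// Instantiation sketch (names hypothetical): DCacheWrapper is a diplomacy
// LazyModule, so the core side would create it and attach its TileLink client
// node roughly as follows:
//
//   val dcacheWrapper = LazyModule(new DCacheWrapper())
//   l2Xbar := dcacheWrapper.clientNode  // towards L2 / the memory system
//
// When coreParams.dcacheParametersOpt is empty, the wrapper elaborates
// FakeDCache instead, which models memory through DPI-C for debugging.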