1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.mem 18 19import chipsalliance.rocketchip.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import xiangshan._ 23import utils._ 24import xiangshan.cache._ 25import difftest._ 26 27class SbufferFlushBundle extends Bundle { 28 val valid = Output(Bool()) 29 val empty = Input(Bool()) 30} 31 32trait HasSbufferConst extends HasXSParameter { 33 val EvictCycles = 1 << 20 34 val SbufferReplayDelayCycles = 16 35 require(isPow2(EvictCycles)) 36 val EvictCountBits = log2Up(EvictCycles+1) 37 val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1 38 39 val SbufferIndexWidth: Int = log2Up(StoreBufferSize) 40 // paddr = ptag + offset 41 val CacheLineBytes: Int = CacheLineSize / 8 42 val CacheLineWords: Int = CacheLineBytes / DataBytes 43 val OffsetWidth: Int = log2Up(CacheLineBytes) 44 val WordsWidth: Int = log2Up(CacheLineWords) 45 val PTagWidth: Int = PAddrBits - OffsetWidth 46 val VTagWidth: Int = VAddrBits - OffsetWidth 47 val WordOffsetWidth: Int = PAddrBits - WordsWidth 48} 49 50class SbufferEntryState (implicit p: Parameters) extends SbufferBundle { 51 val state_valid = Bool() // this entry is active 52 val state_inflight = Bool() // sbuffer is trying to write this entry to dcache 53 val w_timeout = Bool() // with timeout resp, waiting for resend store pipeline req timeout 54 val w_sameblock_inflight = Bool() // same cache block dcache req is inflight 55 val s_recheck_inflight = Bool() // recheck if same cache block dcache req is inflight 56 57 def isInvalid(): Bool = !state_valid 58 def isValid(): Bool = state_valid 59 def isActive(): Bool = state_valid && !state_inflight 60 def isInflight(): Bool = state_inflight 61 def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight 62} 63 64class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst 65 66class DataWriteReq(implicit p: Parameters) extends SbufferBundle { 67 val idx = UInt(SbufferIndexWidth.W) 68 val mask = UInt((DataBits/8).W) 69 val data = UInt(DataBits.W) 70 val wordOffset = UInt(WordOffsetWidth.W) 71 val wline = Bool() 72} 73 74class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst { 75 val io = IO(new Bundle(){ 76 val writeReq = Vec(StorePipelineWidth, Flipped(ValidIO(new DataWriteReq))) 77 val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, UInt(8.W))))) 78 }) 79 80 val data = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, UInt(8.W))))) 81 82 val req = io.writeReq 83 84 for(i <- 0 until StorePipelineWidth) { 85 when(req(i).valid){ 86 for(word <- 0 until CacheLineWords){ 87 for(byte <- 0 until DataBytes){ 88 when( 89 req(i).bits.mask(byte) && (req(i).bits.wordOffset(WordsWidth-1, 0) === word.U) || 90 req(i).bits.wline 91 ){ 92 data(req(i).bits.idx)(word)(byte) := req(i).bits.data(byte*8+7, byte*8) 93 } 94 } 95 } 96 } 97 } 98 99 io.dataOut := data 100} 101 102class Sbuffer(implicit p: Parameters) extends DCacheModule with HasSbufferConst { 103 val io = IO(new Bundle() { 104 val hartId = Input(UInt(8.W)) 105 val in = Vec(StorePipelineWidth, Flipped(Decoupled(new DCacheWordReqWithVaddr))) //Todo: store logic only support Width == 2 now 106 val dcache = Flipped(new DCacheToSbufferIO) 107 val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO)) 108 val sqempty = Input(Bool()) 109 val flush = Flipped(new SbufferFlushBundle) 110 val csrCtrl = Flipped(new CustomCSRCtrlIO) 111 }) 112 113 val dataModule = Module(new SbufferData) 114 dataModule.io.writeReq <> DontCare 115 val writeReq = dataModule.io.writeReq 116 117 val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W))) 118 val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W))) 119 val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool())))) 120 val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W))) 121 val data = dataModule.io.dataOut 122 val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState)))) 123 val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W)))) 124 val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W)))) 125 126 val willSendDcacheReq = Wire(Bool()) 127 128 /* 129 idle --[flush] --> drain --[buf empty]--> idle 130 --[buf full]--> replace --[dcache resp]--> idle 131 */ 132 // x_drain_all: drain store queue and sbuffer 133 // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer write 134 val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4) 135 def needDrain(state: UInt): Bool = 136 state(1) 137 val sbuffer_state = RegInit(x_idle) 138 139 // ---------------------- Store Enq Sbuffer --------------------- 140 141 def getPTag(pa: UInt): UInt = 142 pa(PAddrBits - 1, PAddrBits - PTagWidth) 143 144 def getVTag(va: UInt): UInt = 145 va(VAddrBits - 1, VAddrBits - VTagWidth) 146 147 def getWord(pa: UInt): UInt = 148 pa(PAddrBits-1, 3) 149 150 def getWordOffset(pa: UInt): UInt = 151 pa(OffsetWidth-1, 3) 152 153 def getAddr(ptag: UInt): UInt = 154 Cat(ptag, 0.U((PAddrBits - PTagWidth).W)) 155 156 def getByteOffset(offect: UInt): UInt = 157 Cat(offect(OffsetWidth - 1, 3), 0.U(3.W)) 158 159 def isOneOf(key: UInt, seq: Seq[UInt]): Bool = 160 if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR() 161 162 def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f 163 164 // sbuffer entry count 165 166 val plru = new PseudoLRU(StoreBufferSize) 167 val accessIdx = Wire(Vec(StorePipelineWidth + 1, Valid(UInt(SbufferIndexWidth.W)))) 168 169 val replaceIdx = plru.way 170 plru.access(accessIdx) 171 172 //-------------------------cohCount----------------------------- 173 // insert and merge: cohCount=0 174 // every cycle cohCount+=1 175 // if cohCount(EvictCountBits-1)==1, evict 176 val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive())) 177 val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask) 178 val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout)) 179 val (missqReplayTimeOutIdx, missqReplayMayHasTimeOut) = PriorityEncoderWithFlag(missqReplayTimeOutMask) 180 val missqReplayHasTimeOut = RegNext(missqReplayMayHasTimeOut) && !RegNext(willSendDcacheReq) 181 val missqReplayTimeOutIdxReg = RegEnable(missqReplayTimeOutIdx, missqReplayMayHasTimeOut) 182 183 val activeMask = VecInit(stateVec.map(s => s.isActive())) 184 val drainIdx = PriorityEncoder(activeMask) 185 186 val inflightMask = VecInit(stateVec.map(s => s.isInflight())) 187 188 val inptags = io.in.map(in => getPTag(in.bits.addr)) 189 val invtags = io.in.map(in => getVTag(in.bits.vaddr)) 190 val sameTag = inptags(0) === inptags(1) 191 val firstWord = getWord(io.in(0).bits.addr) 192 val secondWord = getWord(io.in(1).bits.addr) 193 val sameWord = firstWord === secondWord 194 195 // merge condition 196 val mergeMask = Wire(Vec(StorePipelineWidth, Vec(StoreBufferSize, Bool()))) 197 val mergeIdx = mergeMask.map(PriorityEncoder(_)) 198 val canMerge = mergeMask.map(ParallelOR(_)) 199 200 for(i <- 0 until StorePipelineWidth){ 201 mergeMask(i) := widthMap(j => 202 inptags(i) === ptag(j) && activeMask(j) 203 ) 204 } 205 206 // insert condition 207 // firstInsert: the first invalid entry 208 // if first entry canMerge or second entry has the same ptag with the first entry, 209 // secondInsert equal the first invalid entry, otherwise, the second invalid entry 210 val invalidMask = VecInit(stateVec.map(s => s.isInvalid())) 211 val evenInvalidMask = GetEvenBits(invalidMask.asUInt) 212 val oddInvalidMask = GetOddBits(invalidMask.asUInt) 213 214 val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask) 215 val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask) 216 val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) 217 val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) 218 219 val enbufferSelReg = RegInit(false.B) 220 when(io.in(0).valid) { 221 enbufferSelReg := ~enbufferSelReg 222 } 223 224 val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) 225 val secondInsertIdx = Mux(sameTag, 226 firstInsertIdx, 227 Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx) 228 ) 229 val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert) 230 val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag, 231 firstCanInsert, 232 Mux(~enbufferSelReg, evenCanInsert, oddCanInsert) 233 ) 234 val forward_need_uarch_drain = WireInit(false.B) 235 val merge_need_uarch_drain = WireInit(false.B) 236 val do_uarch_drain = RegNext(forward_need_uarch_drain) || RegNext(RegNext(merge_need_uarch_drain)) 237 XSPerfAccumulate("do_uarch_drain", do_uarch_drain) 238 239 io.in(0).ready := firstCanInsert 240 io.in(1).ready := secondCanInsert && !sameWord && io.in(0).ready 241 242 def wordReqToBufLine(req: DCacheWordReq, reqptag: UInt, reqvtag: UInt, insertIdx: UInt, wordOffset: UInt, flushMask: Bool): Unit = { 243 val sameBlockInflightMask = genSameBlockInflightMask(reqptag) 244 stateVec(insertIdx).state_valid := true.B 245 stateVec(insertIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated 246 when(sameBlockInflightMask.orR){ 247 waitInflightMask(insertIdx) := sameBlockInflightMask 248 } 249 cohCount(insertIdx) := 0.U 250 // missqReplayCount(insertIdx) := 0.U 251 ptag(insertIdx) := reqptag 252 vtag(insertIdx) := reqvtag // update vtag iff a new sbuffer line is allocated 253 when(flushMask){ 254 for(j <- 0 until CacheLineWords){ 255 for(i <- 0 until DataBytes){ 256 mask(insertIdx)(j)(i) := false.B 257 } 258 } 259 } 260 for(i <- 0 until DataBytes){ 261 when(req.mask(i)){ 262 mask(insertIdx)(wordOffset)(i) := true.B 263// data(insertIdx)(wordOffset)(i) := req.data(i*8+7, i*8) 264 } 265 } 266 } 267 268 def mergeWordReq(req: DCacheWordReq, reqptag: UInt, reqvtag: UInt, mergeIdx: UInt, wordOffset: UInt): Unit = { 269 cohCount(mergeIdx) := 0.U 270 // missqReplayCount(mergeIdx) := 0.U 271 for(i <- 0 until DataBytes){ 272 when(req.mask(i)){ 273 mask(mergeIdx)(wordOffset)(i) := true.B 274// data(mergeIdx)(wordOffset)(i) := req.data(i*8+7, i*8) 275 } 276 } 277 // check if vtag is the same, if not, trigger sbuffer flush 278 when(reqvtag =/= vtag(mergeIdx)) { 279 XSDebug("reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n", 280 reqvtag << OffsetWidth, 281 reqptag << OffsetWidth, 282 vtag(mergeIdx) << OffsetWidth, 283 ptag(mergeIdx) << OffsetWidth 284 ) 285 merge_need_uarch_drain := true.B 286 } 287 } 288 289 for(((in, wordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){ 290 writeReq(i).valid := in.fire() 291 writeReq(i).bits.wordOffset := wordOffset 292 writeReq(i).bits.mask := in.bits.mask 293 writeReq(i).bits.data := in.bits.data 294 writeReq(i).bits.wline := in.bits.wline 295 val insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx 296 val flushMask = if(i == 0) true.B else !sameTag 297 accessIdx(i).valid := RegNext(in.fire()) 298 accessIdx(i).bits := RegNext(Mux(canMerge(i), mergeIdx(i), insertIdx)) 299 when(in.fire()){ 300 when(canMerge(i)){ 301 writeReq(i).bits.idx := mergeIdx(i) 302 mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), wordOffset) 303 XSDebug(p"merge req $i to line [${mergeIdx(i)}]\n") 304 }.otherwise({ 305 writeReq(i).bits.idx := insertIdx 306 wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, wordOffset, flushMask) 307 XSDebug(p"insert req $i to line[$insertIdx]\n") 308 }) 309 } 310 } 311 312 313 for(i <- 0 until StoreBufferSize){ 314 XSDebug(stateVec(i).isValid(), 315 p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n" 316 ) 317 } 318 319 for((req, i) <- io.in.zipWithIndex){ 320 XSDebug(req.fire(), 321 p"accept req [$i]: " + 322 p"addr:${Hexadecimal(req.bits.addr)} " + 323 p"mask:${Binary(req.bits.mask)} " + 324 p"data:${Hexadecimal(req.bits.data)}\n" 325 ) 326 XSDebug(req.valid && !req.ready, 327 p"req [$i] blocked by sbuffer\n" 328 ) 329 } 330 331 // ---------------------- Send Dcache Req --------------------- 332 333 val sbuffer_empty = Cat(invalidMask).andR() 334 val sq_empty = !Cat(io.in.map(_.valid)).orR() 335 val empty = sbuffer_empty && sq_empty 336 val threshold = RegNext(io.csrCtrl.sbuffer_threshold +& 1.U) 337 val validCount = PopCount(activeMask) 338 val do_eviction = RegNext(validCount >= threshold || validCount === (StoreBufferSize-1).U, init = false.B) 339 require((StoreBufferThreshold + 1) <= StoreBufferSize) 340 341 XSDebug(p"validCount[$validCount]\n") 342 343 io.flush.empty := RegNext(empty && io.sqempty) 344 // lru.io.flush := sbuffer_state === x_drain_all && empty 345 switch(sbuffer_state){ 346 is(x_idle){ 347 when(io.flush.valid){ 348 sbuffer_state := x_drain_all 349 }.elsewhen(do_uarch_drain){ 350 sbuffer_state := x_drain_sbuffer 351 }.elsewhen(do_eviction){ 352 sbuffer_state := x_replace 353 } 354 } 355 is(x_drain_all){ 356 when(empty){ 357 sbuffer_state := x_idle 358 } 359 } 360 is(x_drain_sbuffer){ 361 when(io.flush.valid){ 362 sbuffer_state := x_drain_all 363 }.elsewhen(sbuffer_empty){ 364 sbuffer_state := x_idle 365 } 366 } 367 is(x_replace){ 368 when(io.flush.valid){ 369 sbuffer_state := x_drain_all 370 }.elsewhen(do_uarch_drain){ 371 sbuffer_state := x_drain_sbuffer 372 }.elsewhen(!do_eviction){ 373 sbuffer_state := x_idle 374 } 375 } 376 } 377 XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n") 378 379 def noSameBlockInflight(idx: UInt): Bool = { 380 // stateVec(idx) itself must not be s_inflight 381 !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR() 382 } 383 384 def genSameBlockInflightMask(ptag_in: UInt): UInt = { 385 val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care 386 assert(!(PopCount(mask) > 1.U)) 387 mask 388 } 389 390 def haveSameBlockInflight(ptag_in: UInt): Bool = { 391 genSameBlockInflightMask(ptag_in).orR 392 } 393 394 val need_drain = needDrain(sbuffer_state) 395 val need_replace = do_eviction || (sbuffer_state === x_replace) 396 val evictionIdx = Mux(missqReplayHasTimeOut, 397 missqReplayTimeOutIdxReg, 398 Mux(need_drain, 399 drainIdx, 400 Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx) 401 ) 402 ) 403 404 /* 405 If there is a inflight dcache req which has same ptag with evictionIdx's ptag, 406 current eviction should be blocked. 407 */ 408 val prepareValid = missqReplayHasTimeOut || 409 stateVec(evictionIdx).isDcacheReqCandidate() && (need_drain || cohHasTimeOut || need_replace) 410 assert(!(stateVec(evictionIdx).isDcacheReqCandidate && !noSameBlockInflight(evictionIdx))) 411 val prepareValidReg = RegInit(false.B) 412 // when canSendDcacheReq, send dcache req stored in pipeline reg to dcache 413 val canSendDcacheReq = io.dcache.req.ready || !prepareValidReg 414 // when willSendDcacheReq, read dcache req data and store them in a pipeline reg 415 willSendDcacheReq := prepareValid && canSendDcacheReq 416 when(io.dcache.req.fire()){ 417 prepareValidReg := false.B 418 } 419 when(canSendDcacheReq){ 420 prepareValidReg := prepareValid 421 } 422 when(willSendDcacheReq){ 423 stateVec(evictionIdx).state_inflight := true.B 424 stateVec(evictionIdx).w_timeout := false.B 425 // stateVec(evictionIdx).s_pipe_req := true.B 426 XSDebug(p"$evictionIdx will be sent to Dcache\n") 427 } 428 XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n") 429 XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " + 430 p"blocked:${!noSameBlockInflight(evictionIdx)} v:${activeMask(evictionIdx)}\n") 431 XSDebug(p"prepareValid:$prepareValid evictIdx:$evictionIdx dcache ready:${io.dcache.req.ready}\n") 432 // Note: if other dcache req in the same block are inflight, 433 // the lru update may not accurate 434 accessIdx(StorePipelineWidth).valid := invalidMask(replaceIdx) || ( 435 need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && canSendDcacheReq && activeMask(replaceIdx)) 436 accessIdx(StorePipelineWidth).bits := replaceIdx 437 val evictionIdxReg = RegEnable(evictionIdx, enable = willSendDcacheReq) 438 val evictionPTag = RegEnable(ptag(evictionIdx), enable = willSendDcacheReq) 439 val evictionVTag = RegEnable(vtag(evictionIdx), enable = willSendDcacheReq) 440 441 io.dcache.req.valid := prepareValidReg 442 io.dcache.req.bits := DontCare 443 io.dcache.req.bits.cmd := MemoryOpConstants.M_XWR 444 io.dcache.req.bits.addr := getAddr(evictionPTag) 445 io.dcache.req.bits.vaddr := getAddr(evictionVTag) 446 io.dcache.req.bits.data := data(evictionIdxReg).asUInt 447 io.dcache.req.bits.mask := mask(evictionIdxReg).asUInt 448 io.dcache.req.bits.id := evictionIdxReg 449 450 when (io.dcache.req.fire()) { 451 assert(!(io.dcache.req.bits.vaddr === 0.U)) 452 assert(!(io.dcache.req.bits.addr === 0.U)) 453 } 454 455 XSDebug(io.dcache.req.fire(), 456 p"send buf [$evictionIdxReg] to Dcache, req fire\n" 457 ) 458 459 // update sbuffer status according to dcache resp source 460 461 def id_to_sbuffer_id(id: UInt): UInt = { 462 require(id.getWidth >= log2Up(StoreBufferSize)) 463 id(log2Up(StoreBufferSize)-1, 0) 464 } 465 466 // hit resp 467 io.dcache.hit_resps.map(resp => { 468 val dcache_resp_id = resp.bits.id 469 when (resp.fire()) { 470 stateVec(dcache_resp_id).state_inflight := false.B 471 stateVec(dcache_resp_id).state_valid := false.B 472 assert(!resp.bits.replay) 473 assert(!resp.bits.miss) // not need to resp if miss, to be opted 474 assert(stateVec(dcache_resp_id).state_inflight === true.B) 475 } 476 477 // Update w_sameblock_inflight flag is delayed for 1 cycle 478 // 479 // When a new req allocate a new line in sbuffer, sameblock_inflight check will ignore 480 // current dcache.hit_resps. Then, in the next cycle, we have plenty of time to check 481 // if the same block is still inflight 482 (0 until StoreBufferSize).map(i => { 483 when( 484 stateVec(i).w_sameblock_inflight && 485 stateVec(i).state_valid && 486 RegNext(resp.fire()) && 487 waitInflightMask(i) === UIntToOH(RegNext(id_to_sbuffer_id(dcache_resp_id))) 488 ){ 489 stateVec(i).w_sameblock_inflight := false.B 490 } 491 }) 492 }) 493 494 495 // replay resp 496 val replay_resp_id = io.dcache.replay_resp.bits.id 497 when (io.dcache.replay_resp.fire()) { 498 missqReplayCount(replay_resp_id) := 0.U 499 stateVec(replay_resp_id).w_timeout := true.B 500 // waiting for timeout 501 assert(io.dcache.replay_resp.bits.replay) 502 assert(stateVec(replay_resp_id).state_inflight === true.B) 503 } 504 505 // TODO: reuse cohCount 506 (0 until StoreBufferSize).map(i => { 507 when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) { 508 missqReplayCount(i) := missqReplayCount(i) + 1.U 509 } 510 when(activeMask(i) && !cohTimeOutMask(i)){ 511 cohCount(i) := cohCount(i)+1.U 512 } 513 }) 514 515 if (env.EnableDifftest) { 516 // hit resp 517 io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => { 518 val difftest = Module(new DifftestSbufferEvent) 519 val dcache_resp_id = resp.bits.id 520 difftest.io.clock := clock 521 difftest.io.coreid := io.hartId 522 difftest.io.index := index.U 523 difftest.io.sbufferResp := RegNext(resp.fire()) 524 difftest.io.sbufferAddr := RegNext(getAddr(ptag(dcache_resp_id))) 525 difftest.io.sbufferData := RegNext(data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W)))) 526 difftest.io.sbufferMask := RegNext(mask(dcache_resp_id).asUInt) 527 }} 528 } 529 530 // ---------------------- Load Data Forward --------------------- 531 val mismatch = Wire(Vec(LoadPipelineWidth, Bool())) 532 XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1)) 533 for ((forward, i) <- io.forward.zipWithIndex) { 534 val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr))) 535 val ptag_matches = VecInit(widthMap(w => ptag(w) === getPTag(forward.paddr))) 536 val tag_matches = vtag_matches 537 val tag_mismatch = RegNext(forward.valid) && VecInit(widthMap(w => 538 RegNext(vtag_matches(w)) =/= RegNext(ptag_matches(w)) && RegNext((activeMask(w) || inflightMask(w))) 539 )).asUInt.orR 540 mismatch(i) := tag_mismatch 541 when (tag_mismatch) { 542 XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n", 543 RegNext(ptag_matches.asUInt), 544 RegNext(vtag_matches.asUInt), 545 RegNext(forward.vaddr), 546 RegNext(forward.paddr) 547 ) 548 forward_need_uarch_drain := true.B 549 } 550 val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w)) 551 val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w)) 552 val line_offset_mask = UIntToOH(getWordOffset(forward.paddr)) 553 554 val valid_tag_match_reg = valid_tag_matches.map(RegNext(_)) 555 val inflight_tag_match_reg = inflight_tag_matches.map(RegNext(_)) 556 val line_offset_reg = RegNext(line_offset_mask) 557 val forward_mask_candidate_reg = RegEnable( 558 VecInit(mask.map(entry => entry(getWordOffset(forward.paddr)))), 559 forward.valid 560 ) 561 val forward_data_candidate_reg = RegEnable( 562 VecInit(data.map(entry => entry(getWordOffset(forward.paddr)))), 563 forward.valid 564 ) 565 566 val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg) 567 val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg) 568 selectedValidMask.suggestName("selectedValidMask_"+i) 569 selectedValidData.suggestName("selectedValidData_"+i) 570 571 val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg) 572 val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg) 573 selectedInflightMask.suggestName("selectedInflightMask_"+i) 574 selectedInflightData.suggestName("selectedInflightData_"+i) 575 576 // currently not being used 577 val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineWords, Vec(DataBytes, Bool())))) 578 val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineWords, Vec(DataBytes, Bool())))) 579 580 forward.dataInvalid := false.B // data in store line merge buffer is always ready 581 forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match 582 for (j <- 0 until DataBytes) { 583 forward.forwardMask(j) := false.B 584 forward.forwardData(j) := DontCare 585 586 // valid entries have higher priority than inflight entries 587 when(selectedInflightMask(j)) { 588 forward.forwardMask(j) := true.B 589 forward.forwardData(j) := selectedInflightData(j) 590 } 591 when(selectedValidMask(j)) { 592 forward.forwardMask(j) := true.B 593 forward.forwardData(j) := selectedValidData(j) 594 } 595 596 forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j) 597 } 598 } 599 600 for (i <- 0 until StoreBufferSize) { 601 XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n", 602 ptag(i) << OffsetWidth, 603 vtag(i) << OffsetWidth, 604 stateVec(i).isValid(), 605 activeMask(i), 606 inflightMask(i), 607 stateVec(i).w_timeout 608 ) 609 } 610 611 val perf_valid_entry_count = PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt) 612 XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1) 613 XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt)) 614 XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire())).asUInt)) 615 XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt)) 616 XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt)) 617 XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid) 618 XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire()) 619 XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle) 620 XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer) 621 XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace) 622 XSPerfAccumulate("evenCanInsert", evenCanInsert) 623 XSPerfAccumulate("oddCanInsert", oddCanInsert) 624 XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire()) 625 XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire()) 626 XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire()) 627 XSPerfAccumulate("coh_timeout", cohHasTimeOut) 628 629 // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire(), io.lsu.resp.fire()) 630 // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10) 631 // XSPerfAccumulate("store_req", io.lsu.req.fire()) 632 633 val perfinfo = IO(new Bundle(){ 634 val perfEvents = Output(new PerfEventsBundle(10)) 635 }) 636 val perfEvents = Seq( 637 ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt) ), 638 ("sbuffer_req_fire ", PopCount(VecInit(io.in.map(_.fire())).asUInt) ), 639 ("sbuffer_merge ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt) ), 640 ("sbuffer_newline ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt) ), 641 ("dcache_req_valid ", io.dcache.req.valid ), 642 ("dcache_req_fire ", io.dcache.req.fire() ), 643 ("sbuffer_idle ", sbuffer_state === x_idle ), 644 ("sbuffer_flush ", sbuffer_state === x_drain_sbuffer ), 645 ("sbuffer_replace ", sbuffer_state === x_replace ), 646 ("mpipe_resp_valid ", io.dcache.main_pipe_hit_resp.fire() ), 647 ("refill_resp_valid ", io.dcache.refill_hit_resp.fire() ), 648 ("replay_resp_valid ", io.dcache.replay_resp.fire() ), 649 ("coh_timeout ", cohHasTimeOut ), 650 ("sbuffer_1/4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U)) ), 651 ("sbuffer_2/4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U)) ), 652 ("sbuffer_3/4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))), 653 ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U))) 654 ) 655 656 for (((perf_out,(perf_name,perf)),i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) { 657 perf_out.incr_step := RegNext(perf) 658 } 659} 660