1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.mem 18 19import chipsalliance.rocketchip.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import xiangshan._ 23import utils._ 24import utility._ 25import xiangshan.cache._ 26import difftest._ 27import freechips.rocketchip.util._ 28 29class SbufferFlushBundle extends Bundle { 30 val valid = Output(Bool()) 31 val empty = Input(Bool()) 32} 33 34trait HasSbufferConst extends HasXSParameter { 35 val EvictCycles = 1 << 20 36 val SbufferReplayDelayCycles = 16 37 require(isPow2(EvictCycles)) 38 val EvictCountBits = log2Up(EvictCycles+1) 39 val MissqReplayCountBits = log2Up(SbufferReplayDelayCycles) + 1 40 41 // dcache write hit resp has 2 sources 42 // refill pipe resp and main pipe resp 43 val NumDcacheWriteResp = 2 // hardcoded 44 45 val SbufferIndexWidth: Int = log2Up(StoreBufferSize) 46 // paddr = ptag + offset 47 val CacheLineBytes: Int = CacheLineSize / 8 48 val CacheLineWords: Int = CacheLineBytes / DataBytes 49 val OffsetWidth: Int = log2Up(CacheLineBytes) 50 val WordsWidth: Int = log2Up(CacheLineWords) 51 val PTagWidth: Int = PAddrBits - OffsetWidth 52 val VTagWidth: Int = VAddrBits - OffsetWidth 53 val WordOffsetWidth: Int = PAddrBits - WordsWidth 54} 55 56class SbufferEntryState (implicit p: Parameters) extends SbufferBundle { 57 val state_valid = Bool() // this entry is active 58 val state_inflight = Bool() // sbuffer is trying to write this entry to dcache 59 val w_timeout = Bool() // with timeout resp, waiting for resend store pipeline req timeout 60 val w_sameblock_inflight = Bool() // same cache block dcache req is inflight 61 62 def isInvalid(): Bool = !state_valid 63 def isValid(): Bool = state_valid 64 def isActive(): Bool = state_valid && !state_inflight 65 def isInflight(): Bool = state_inflight 66 def isDcacheReqCandidate(): Bool = state_valid && !state_inflight && !w_sameblock_inflight 67} 68 69class SbufferBundle(implicit p: Parameters) extends XSBundle with HasSbufferConst 70 71class DataWriteReq(implicit p: Parameters) extends SbufferBundle { 72 // univerisal writemask 73 val wvec = UInt(StoreBufferSize.W) 74 // 2 cycle update 75 val mask = UInt((DataBits/8).W) 76 val data = UInt(DataBits.W) 77 val wordOffset = UInt(WordOffsetWidth.W) 78 val wline = Bool() // write full cacheline 79} 80 81class MaskFlushReq(implicit p: Parameters) extends SbufferBundle { 82 // univerisal writemask 83 val wvec = UInt(StoreBufferSize.W) 84} 85 86class SbufferData(implicit p: Parameters) extends XSModule with HasSbufferConst { 87 val io = IO(new Bundle(){ 88 // update data and mask when alloc or merge 89 val writeReq = Vec(EnsbufferWidth, Flipped(ValidIO(new DataWriteReq))) 90 // clean mask when deq 91 val maskFlushReq = Vec(NumDcacheWriteResp, Flipped(ValidIO(new MaskFlushReq))) 92 val dataOut = Output(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, UInt(8.W))))) 93 val maskOut = Output(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool())))) 94 }) 95 96 val data = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, UInt(8.W))))) 97 // val mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool())))) 98 val mask = RegInit( 99 VecInit(Seq.fill(StoreBufferSize)( 100 VecInit(Seq.fill(CacheLineWords)( 101 VecInit(Seq.fill(DataBytes)(false.B)) 102 )) 103 )) 104 ) 105 106 // 2 cycle line mask clean 107 for(line <- 0 until StoreBufferSize){ 108 val line_mask_clean_flag = RegNext( 109 io.maskFlushReq.map(a => a.valid && a.bits.wvec(line)).reduce(_ || _) 110 ) 111 line_mask_clean_flag.suggestName("line_mask_clean_flag_"+line) 112 when(line_mask_clean_flag){ 113 for(word <- 0 until CacheLineWords){ 114 for(byte <- 0 until DataBytes){ 115 mask(line)(word)(byte) := false.B 116 } 117 } 118 } 119 } 120 121 // 2 cycle data / mask update 122 for(i <- 0 until EnsbufferWidth) { 123 val req = io.writeReq(i) 124 for(line <- 0 until StoreBufferSize){ 125 val sbuffer_in_s1_line_wen = req.valid && req.bits.wvec(line) 126 val sbuffer_in_s2_line_wen = RegNext(sbuffer_in_s1_line_wen) 127 val line_write_buffer_data = RegEnable(req.bits.data, sbuffer_in_s1_line_wen) 128 val line_write_buffer_wline = RegEnable(req.bits.wline, sbuffer_in_s1_line_wen) 129 val line_write_buffer_mask = RegEnable(req.bits.mask, sbuffer_in_s1_line_wen) 130 val line_write_buffer_offset = RegEnable(req.bits.wordOffset(WordsWidth-1, 0), sbuffer_in_s1_line_wen) 131 sbuffer_in_s1_line_wen.suggestName("sbuffer_in_s1_line_wen_"+line) 132 sbuffer_in_s2_line_wen.suggestName("sbuffer_in_s2_line_wen_"+line) 133 line_write_buffer_data.suggestName("line_write_buffer_data_"+line) 134 line_write_buffer_wline.suggestName("line_write_buffer_wline_"+line) 135 line_write_buffer_mask.suggestName("line_write_buffer_mask_"+line) 136 line_write_buffer_offset.suggestName("line_write_buffer_offset_"+line) 137 for(word <- 0 until CacheLineWords){ 138 for(byte <- 0 until DataBytes){ 139 val write_byte = sbuffer_in_s2_line_wen && ( 140 line_write_buffer_mask(byte) && (line_write_buffer_offset === word.U) || 141 line_write_buffer_wline 142 ) 143 when(write_byte){ 144 data(line)(word)(byte) := line_write_buffer_data(byte*8+7, byte*8) 145 mask(line)(word)(byte) := true.B 146 } 147 } 148 } 149 } 150 } 151 152 // 1 cycle line mask clean 153 // for(i <- 0 until EnsbufferWidth) { 154 // val req = io.writeReq(i) 155 // when(req.valid){ 156 // for(line <- 0 until StoreBufferSize){ 157 // when( 158 // req.bits.wvec(line) && 159 // req.bits.cleanMask 160 // ){ 161 // for(word <- 0 until CacheLineWords){ 162 // for(byte <- 0 until DataBytes){ 163 // mask(line)(word)(byte) := false.B 164 // val debug_last_cycle_write_byte = RegNext(req.valid && req.bits.wvec(line) && ( 165 // req.bits.mask(byte) && (req.bits.wordOffset(WordsWidth-1, 0) === word.U) || 166 // req.bits.wline 167 // )) 168 // assert(!debug_last_cycle_write_byte) 169 // } 170 // } 171 // } 172 // } 173 // } 174 // } 175 176 io.dataOut := data 177 io.maskOut := mask 178} 179 180class Sbuffer(implicit p: Parameters) extends DCacheModule with HasSbufferConst with HasPerfEvents { 181 val io = IO(new Bundle() { 182 val hartId = Input(UInt(8.W)) 183 val in = Vec(EnsbufferWidth, Flipped(Decoupled(new DCacheWordReqWithVaddr))) //Todo: store logic only support Width == 2 now 184 val dcache = Flipped(new DCacheToSbufferIO) 185 val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO)) 186 val sqempty = Input(Bool()) 187 val flush = Flipped(new SbufferFlushBundle) 188 val csrCtrl = Flipped(new CustomCSRCtrlIO) 189 }) 190 191 val dataModule = Module(new SbufferData) 192 dataModule.io.writeReq <> DontCare 193 val writeReq = dataModule.io.writeReq 194 195 val ptag = Reg(Vec(StoreBufferSize, UInt(PTagWidth.W))) 196 val vtag = Reg(Vec(StoreBufferSize, UInt(VTagWidth.W))) 197 val debug_mask = Reg(Vec(StoreBufferSize, Vec(CacheLineWords, Vec(DataBytes, Bool())))) 198 val waitInflightMask = Reg(Vec(StoreBufferSize, UInt(StoreBufferSize.W))) 199 val data = dataModule.io.dataOut 200 val mask = dataModule.io.maskOut 201 val stateVec = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SbufferEntryState)))) 202 val cohCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(EvictCountBits.W)))) 203 val missqReplayCount = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U(MissqReplayCountBits.W)))) 204 205 val sbuffer_out_s0_fire = Wire(Bool()) 206 207 /* 208 idle --[flush] --> drain --[buf empty]--> idle 209 --[buf full]--> replace --[dcache resp]--> idle 210 */ 211 // x_drain_all: drain store queue and sbuffer 212 // x_drain_sbuffer: drain sbuffer only, block store queue to sbuffer write 213 val x_idle :: x_replace :: x_drain_all :: x_drain_sbuffer :: Nil = Enum(4) 214 def needDrain(state: UInt): Bool = 215 state(1) 216 val sbuffer_state = RegInit(x_idle) 217 218 // ---------------------- Store Enq Sbuffer --------------------- 219 220 def getPTag(pa: UInt): UInt = 221 pa(PAddrBits - 1, PAddrBits - PTagWidth) 222 223 def getVTag(va: UInt): UInt = 224 va(VAddrBits - 1, VAddrBits - VTagWidth) 225 226 def getWord(pa: UInt): UInt = 227 pa(PAddrBits-1, 3) 228 229 def getWordOffset(pa: UInt): UInt = 230 pa(OffsetWidth-1, 3) 231 232 def getAddr(ptag: UInt): UInt = 233 Cat(ptag, 0.U((PAddrBits - PTagWidth).W)) 234 235 def getByteOffset(offect: UInt): UInt = 236 Cat(offect(OffsetWidth - 1, 3), 0.U(3.W)) 237 238 def isOneOf(key: UInt, seq: Seq[UInt]): Bool = 239 if(seq.isEmpty) false.B else Cat(seq.map(_===key)).orR() 240 241 def widthMap[T <: Data](f: Int => T) = (0 until StoreBufferSize) map f 242 243 // sbuffer entry count 244 245 val plru = new PseudoLRU(StoreBufferSize) 246 val accessIdx = Wire(Vec(EnsbufferWidth + 1, Valid(UInt(SbufferIndexWidth.W)))) 247 248 val replaceIdx = plru.way 249 val replaceIdxOH = UIntToOH(plru.way) 250 plru.access(accessIdx) 251 252 //-------------------------cohCount----------------------------- 253 // insert and merge: cohCount=0 254 // every cycle cohCount+=1 255 // if cohCount(EvictCountBits-1)==1, evict 256 val cohTimeOutMask = VecInit(widthMap(i => cohCount(i)(EvictCountBits - 1) && stateVec(i).isActive())) 257 val (cohTimeOutIdx, cohHasTimeOut) = PriorityEncoderWithFlag(cohTimeOutMask) 258 val cohTimeOutOH = PriorityEncoderOH(cohTimeOutMask) 259 val missqReplayTimeOutMask = VecInit(widthMap(i => missqReplayCount(i)(MissqReplayCountBits - 1) && stateVec(i).w_timeout)) 260 val (missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) = PriorityEncoderWithFlag(missqReplayTimeOutMask) 261 val missqReplayHasTimeOut = RegNext(missqReplayHasTimeOutGen) && !RegNext(sbuffer_out_s0_fire) 262 val missqReplayTimeOutIdx = RegEnable(missqReplayTimeOutIdxGen, missqReplayHasTimeOutGen) 263 264 //-------------------------sbuffer enqueue----------------------------- 265 266 // Now sbuffer enq logic is divided into 3 stages: 267 268 // sbuffer_in_s0: 269 // * read data and meta from store queue 270 // * store them in 2 entry fifo queue 271 272 // sbuffer_in_s1: 273 // * read data and meta from fifo queue 274 // * update sbuffer meta (vtag, ptag, flag) 275 // * prevert that line from being sent to dcache (add a block condition) 276 // * prepare cacheline level write enable signal, RegNext() data and mask 277 278 // sbuffer_in_s2: 279 // * use cacheline level buffer to update sbuffer data and mask 280 // * remove dcache write block (if there is) 281 282 val activeMask = VecInit(stateVec.map(s => s.isActive())) 283 val drainIdx = PriorityEncoder(activeMask) 284 285 val inflightMask = VecInit(stateVec.map(s => s.isInflight())) 286 287 val inptags = io.in.map(in => getPTag(in.bits.addr)) 288 val invtags = io.in.map(in => getVTag(in.bits.vaddr)) 289 val sameTag = inptags(0) === inptags(1) 290 val firstWord = getWord(io.in(0).bits.addr) 291 val secondWord = getWord(io.in(1).bits.addr) 292 val sameWord = firstWord === secondWord 293 294 // merge condition 295 val mergeMask = Wire(Vec(EnsbufferWidth, Vec(StoreBufferSize, Bool()))) 296 val mergeIdx = mergeMask.map(PriorityEncoder(_)) // avoid using mergeIdx for better timing 297 val canMerge = mergeMask.map(ParallelOR(_)) 298 val mergeVec = mergeMask.map(_.asUInt) 299 300 for(i <- 0 until EnsbufferWidth){ 301 mergeMask(i) := widthMap(j => 302 inptags(i) === ptag(j) && activeMask(j) 303 ) 304 assert(!(PopCount(mergeMask(i).asUInt) > 1.U && io.in(i).fire())) 305 } 306 307 // insert condition 308 // firstInsert: the first invalid entry 309 // if first entry canMerge or second entry has the same ptag with the first entry, 310 // secondInsert equal the first invalid entry, otherwise, the second invalid entry 311 val invalidMask = VecInit(stateVec.map(s => s.isInvalid())) 312 val evenInvalidMask = GetEvenBits(invalidMask.asUInt) 313 val oddInvalidMask = GetOddBits(invalidMask.asUInt) 314 315 def getFirstOneOH(input: UInt): UInt = { 316 assert(input.getWidth > 1) 317 val output = WireInit(VecInit(input.asBools)) 318 (1 until input.getWidth).map(i => { 319 output(i) := !input(i - 1, 0).orR && input(i) 320 }) 321 output.asUInt 322 } 323 324 val evenRawInsertVec = getFirstOneOH(evenInvalidMask) 325 val oddRawInsertVec = getFirstOneOH(oddInvalidMask) 326 val (evenRawInsertIdx, evenCanInsert) = PriorityEncoderWithFlag(evenInvalidMask) 327 val (oddRawInsertIdx, oddCanInsert) = PriorityEncoderWithFlag(oddInvalidMask) 328 val evenInsertIdx = Cat(evenRawInsertIdx, 0.U(1.W)) // slow to generate, for debug only 329 val oddInsertIdx = Cat(oddRawInsertIdx, 1.U(1.W)) // slow to generate, for debug only 330 val evenInsertVec = GetEvenBits.reverse(evenRawInsertVec) 331 val oddInsertVec = GetOddBits.reverse(oddRawInsertVec) 332 333 val enbufferSelReg = RegInit(false.B) 334 when(io.in(0).valid) { 335 enbufferSelReg := ~enbufferSelReg 336 } 337 338 val firstInsertIdx = Mux(enbufferSelReg, evenInsertIdx, oddInsertIdx) // slow to generate, for debug only 339 val secondInsertIdx = Mux(sameTag, 340 firstInsertIdx, 341 Mux(~enbufferSelReg, evenInsertIdx, oddInsertIdx) 342 ) // slow to generate, for debug only 343 val firstInsertVec = Mux(enbufferSelReg, evenInsertVec, oddInsertVec) 344 val secondInsertVec = Mux(sameTag, 345 firstInsertVec, 346 Mux(~enbufferSelReg, evenInsertVec, oddInsertVec) 347 ) // slow to generate, for debug only 348 val firstCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(enbufferSelReg, evenCanInsert, oddCanInsert) 349 val secondCanInsert = sbuffer_state =/= x_drain_sbuffer && Mux(sameTag, 350 firstCanInsert, 351 Mux(~enbufferSelReg, evenCanInsert, oddCanInsert) 352 ) && (EnsbufferWidth >= 1).B 353 val forward_need_uarch_drain = WireInit(false.B) 354 val merge_need_uarch_drain = WireInit(false.B) 355 val do_uarch_drain = RegNext(forward_need_uarch_drain) || RegNext(RegNext(merge_need_uarch_drain)) 356 XSPerfAccumulate("do_uarch_drain", do_uarch_drain) 357 358 io.in(0).ready := firstCanInsert 359 io.in(1).ready := secondCanInsert && !sameWord && io.in(0).ready 360 361 def wordReqToBufLine( // allocate a new line in sbuffer 362 req: DCacheWordReq, 363 reqptag: UInt, 364 reqvtag: UInt, 365 insertIdx: UInt, 366 insertVec: UInt, 367 wordOffset: UInt 368 ): Unit = { 369 assert(UIntToOH(insertIdx) === insertVec) 370 val sameBlockInflightMask = genSameBlockInflightMask(reqptag) 371 (0 until StoreBufferSize).map(entryIdx => { 372 when(insertVec(entryIdx)){ 373 stateVec(entryIdx).state_valid := true.B 374 stateVec(entryIdx).w_sameblock_inflight := sameBlockInflightMask.orR // set w_sameblock_inflight when a line is first allocated 375 when(sameBlockInflightMask.orR){ 376 waitInflightMask(entryIdx) := sameBlockInflightMask 377 } 378 cohCount(entryIdx) := 0.U 379 // missqReplayCount(insertIdx) := 0.U 380 ptag(entryIdx) := reqptag 381 vtag(entryIdx) := reqvtag // update vtag iff a new sbuffer line is allocated 382 } 383 }) 384 } 385 386 def mergeWordReq( // merge write req into an existing line 387 req: DCacheWordReq, 388 reqptag: UInt, 389 reqvtag: UInt, 390 mergeIdx: UInt, 391 mergeVec: UInt, 392 wordOffset: UInt 393 ): Unit = { 394 assert(UIntToOH(mergeIdx) === mergeVec) 395 (0 until StoreBufferSize).map(entryIdx => { 396 when(mergeVec(entryIdx)) { 397 cohCount(entryIdx) := 0.U 398 // missqReplayCount(entryIdx) := 0.U 399 // check if vtag is the same, if not, trigger sbuffer flush 400 when(reqvtag =/= vtag(entryIdx)) { 401 XSDebug("reqvtag =/= sbufvtag req(vtag %x ptag %x) sbuffer(vtag %x ptag %x)\n", 402 reqvtag << OffsetWidth, 403 reqptag << OffsetWidth, 404 vtag(entryIdx) << OffsetWidth, 405 ptag(entryIdx) << OffsetWidth 406 ) 407 merge_need_uarch_drain := true.B 408 } 409 } 410 }) 411 } 412 413 for(((in, wordOffset), i) <- io.in.zip(Seq(firstWord, secondWord)).zipWithIndex){ 414 writeReq(i).valid := in.fire() 415 writeReq(i).bits.wordOffset := wordOffset 416 writeReq(i).bits.mask := in.bits.mask 417 writeReq(i).bits.data := in.bits.data 418 writeReq(i).bits.wline := in.bits.wline 419 val debug_insertIdx = if(i == 0) firstInsertIdx else secondInsertIdx 420 val insertVec = if(i == 0) firstInsertVec else secondInsertVec 421 assert(!((PopCount(insertVec) > 1.U) && in.fire())) 422 val insertIdx = OHToUInt(insertVec) 423 accessIdx(i).valid := RegNext(in.fire()) 424 accessIdx(i).bits := RegNext(Mux(canMerge(i), mergeIdx(i), insertIdx)) 425 when(in.fire()){ 426 when(canMerge(i)){ 427 writeReq(i).bits.wvec := mergeVec(i) 428 mergeWordReq(in.bits, inptags(i), invtags(i), mergeIdx(i), mergeVec(i), wordOffset) 429 XSDebug(p"merge req $i to line [${mergeIdx(i)}]\n") 430 }.otherwise({ 431 writeReq(i).bits.wvec := insertVec 432 wordReqToBufLine(in.bits, inptags(i), invtags(i), insertIdx, insertVec, wordOffset) 433 XSDebug(p"insert req $i to line[$insertIdx]\n") 434 assert(debug_insertIdx === insertIdx) 435 }) 436 } 437 } 438 439 440 for(i <- 0 until StoreBufferSize){ 441 XSDebug(stateVec(i).isValid(), 442 p"[$i] timeout:${cohCount(i)(EvictCountBits-1)} state:${stateVec(i)}\n" 443 ) 444 } 445 446 for((req, i) <- io.in.zipWithIndex){ 447 XSDebug(req.fire(), 448 p"accept req [$i]: " + 449 p"addr:${Hexadecimal(req.bits.addr)} " + 450 p"mask:${Binary(req.bits.mask)} " + 451 p"data:${Hexadecimal(req.bits.data)}\n" 452 ) 453 XSDebug(req.valid && !req.ready, 454 p"req [$i] blocked by sbuffer\n" 455 ) 456 } 457 458 // ---------------------- Send Dcache Req --------------------- 459 460 val sbuffer_empty = Cat(invalidMask).andR() 461 val sq_empty = !Cat(io.in.map(_.valid)).orR() 462 val empty = sbuffer_empty && sq_empty 463 val threshold = RegNext(io.csrCtrl.sbuffer_threshold +& 1.U) 464 val validCount = PopCount(activeMask) 465 val do_eviction = RegNext(validCount >= threshold || validCount === (StoreBufferSize-1).U, init = false.B) 466 require((StoreBufferThreshold + 1) <= StoreBufferSize) 467 468 XSDebug(p"validCount[$validCount]\n") 469 470 io.flush.empty := RegNext(empty && io.sqempty) 471 // lru.io.flush := sbuffer_state === x_drain_all && empty 472 switch(sbuffer_state){ 473 is(x_idle){ 474 when(io.flush.valid){ 475 sbuffer_state := x_drain_all 476 }.elsewhen(do_uarch_drain){ 477 sbuffer_state := x_drain_sbuffer 478 }.elsewhen(do_eviction){ 479 sbuffer_state := x_replace 480 } 481 } 482 is(x_drain_all){ 483 when(empty){ 484 sbuffer_state := x_idle 485 } 486 } 487 is(x_drain_sbuffer){ 488 when(io.flush.valid){ 489 sbuffer_state := x_drain_all 490 }.elsewhen(sbuffer_empty){ 491 sbuffer_state := x_idle 492 } 493 } 494 is(x_replace){ 495 when(io.flush.valid){ 496 sbuffer_state := x_drain_all 497 }.elsewhen(do_uarch_drain){ 498 sbuffer_state := x_drain_sbuffer 499 }.elsewhen(!do_eviction){ 500 sbuffer_state := x_idle 501 } 502 } 503 } 504 XSDebug(p"sbuffer state:${sbuffer_state} do eviction:${do_eviction} empty:${empty}\n") 505 506 def noSameBlockInflight(idx: UInt): Bool = { 507 // stateVec(idx) itself must not be s_inflight 508 !Cat(widthMap(i => inflightMask(i) && ptag(idx) === ptag(i))).orR() 509 } 510 511 def genSameBlockInflightMask(ptag_in: UInt): UInt = { 512 val mask = VecInit(widthMap(i => inflightMask(i) && ptag_in === ptag(i))).asUInt // quite slow, use it with care 513 assert(!(PopCount(mask) > 1.U)) 514 mask 515 } 516 517 def haveSameBlockInflight(ptag_in: UInt): Bool = { 518 genSameBlockInflightMask(ptag_in).orR 519 } 520 521 // --------------------------------------------------------------------------- 522 // sbuffer to dcache pipeline 523 // --------------------------------------------------------------------------- 524 525 // Now sbuffer deq logic is divided into 2 stages: 526 527 // sbuffer_out_s0: 528 // * read data and meta from sbuffer 529 // * RegNext() them 530 // * set line state to inflight 531 532 // sbuffer_out_s1: 533 // * send write req to dcache 534 535 // sbuffer_out_extra: 536 // * receive write result from dcache 537 // * update line state 538 539 val sbuffer_out_s1_ready = Wire(Bool()) 540 541 // --------------------------------------------------------------------------- 542 // sbuffer_out_s0 543 // --------------------------------------------------------------------------- 544 545 val need_drain = needDrain(sbuffer_state) 546 val need_replace = do_eviction || (sbuffer_state === x_replace) 547 val sbuffer_out_s0_evictionIdx = Mux(missqReplayHasTimeOut, 548 missqReplayTimeOutIdx, 549 Mux(need_drain, 550 drainIdx, 551 Mux(cohHasTimeOut, cohTimeOutIdx, replaceIdx) 552 ) 553 ) 554 555 // If there is a inflight dcache req which has same ptag with sbuffer_out_s0_evictionIdx's ptag, 556 // current eviction should be blocked. 557 val sbuffer_out_s0_valid = missqReplayHasTimeOut || 558 stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate() && 559 (need_drain || cohHasTimeOut || need_replace) 560 assert(!( 561 stateVec(sbuffer_out_s0_evictionIdx).isDcacheReqCandidate && 562 !noSameBlockInflight(sbuffer_out_s0_evictionIdx) 563 )) 564 val sbuffer_out_s0_cango = sbuffer_out_s1_ready 565 sbuffer_out_s0_fire := sbuffer_out_s0_valid && sbuffer_out_s0_cango 566 567 // --------------------------------------------------------------------------- 568 // sbuffer_out_s1 569 // --------------------------------------------------------------------------- 570 571 // TODO: use EnsbufferWidth 572 val shouldWaitWriteFinish = RegNext(VecInit((0 until EnsbufferWidth).map{i => 573 (writeReq(i).bits.wvec.asUInt & UIntToOH(sbuffer_out_s0_evictionIdx).asUInt).orR && 574 writeReq(i).valid 575 }).asUInt.orR) 576 // block dcache write if read / write hazard 577 val blockDcacheWrite = shouldWaitWriteFinish 578 579 val sbuffer_out_s1_valid = RegInit(false.B) 580 sbuffer_out_s1_ready := io.dcache.req.ready && !blockDcacheWrite || !sbuffer_out_s1_valid 581 val sbuffer_out_s1_fire = io.dcache.req.fire() 582 583 // when sbuffer_out_s1_fire, send dcache req stored in pipeline reg to dcache 584 when(sbuffer_out_s1_fire){ 585 sbuffer_out_s1_valid := false.B 586 } 587 // when sbuffer_out_s0_fire, read dcache req data and store them in a pipeline reg 588 when(sbuffer_out_s0_cango){ 589 sbuffer_out_s1_valid := sbuffer_out_s0_valid 590 } 591 when(sbuffer_out_s0_fire){ 592 stateVec(sbuffer_out_s0_evictionIdx).state_inflight := true.B 593 stateVec(sbuffer_out_s0_evictionIdx).w_timeout := false.B 594 // stateVec(sbuffer_out_s0_evictionIdx).s_pipe_req := true.B 595 XSDebug(p"$sbuffer_out_s0_evictionIdx will be sent to Dcache\n") 596 } 597 598 XSDebug(p"need drain:$need_drain cohHasTimeOut: $cohHasTimeOut need replace:$need_replace\n") 599 XSDebug(p"drainIdx:$drainIdx tIdx:$cohTimeOutIdx replIdx:$replaceIdx " + 600 p"blocked:${!noSameBlockInflight(sbuffer_out_s0_evictionIdx)} v:${activeMask(sbuffer_out_s0_evictionIdx)}\n") 601 XSDebug(p"sbuffer_out_s0_valid:$sbuffer_out_s0_valid evictIdx:$sbuffer_out_s0_evictionIdx dcache ready:${io.dcache.req.ready}\n") 602 // Note: if other dcache req in the same block are inflight, 603 // the lru update may not accurate 604 accessIdx(EnsbufferWidth).valid := invalidMask(replaceIdx) || ( 605 need_replace && !need_drain && !cohHasTimeOut && !missqReplayHasTimeOut && sbuffer_out_s0_cango && activeMask(replaceIdx)) 606 accessIdx(EnsbufferWidth).bits := replaceIdx 607 val sbuffer_out_s1_evictionIdx = RegEnable(sbuffer_out_s0_evictionIdx, enable = sbuffer_out_s0_fire) 608 val sbuffer_out_s1_evictionPTag = RegEnable(ptag(sbuffer_out_s0_evictionIdx), enable = sbuffer_out_s0_fire) 609 val sbuffer_out_s1_evictionVTag = RegEnable(vtag(sbuffer_out_s0_evictionIdx), enable = sbuffer_out_s0_fire) 610 611 io.dcache.req.valid := sbuffer_out_s1_valid && !blockDcacheWrite 612 io.dcache.req.bits := DontCare 613 io.dcache.req.bits.cmd := MemoryOpConstants.M_XWR 614 io.dcache.req.bits.addr := getAddr(sbuffer_out_s1_evictionPTag) 615 io.dcache.req.bits.vaddr := getAddr(sbuffer_out_s1_evictionVTag) 616 io.dcache.req.bits.data := data(sbuffer_out_s1_evictionIdx).asUInt 617 io.dcache.req.bits.mask := mask(sbuffer_out_s1_evictionIdx).asUInt 618 io.dcache.req.bits.id := sbuffer_out_s1_evictionIdx 619 620 when (sbuffer_out_s1_fire) { 621 assert(!(io.dcache.req.bits.vaddr === 0.U)) 622 assert(!(io.dcache.req.bits.addr === 0.U)) 623 } 624 625 XSDebug(sbuffer_out_s1_fire, 626 p"send buf [$sbuffer_out_s1_evictionIdx] to Dcache, req fire\n" 627 ) 628 629 // update sbuffer status according to dcache resp source 630 631 def id_to_sbuffer_id(id: UInt): UInt = { 632 require(id.getWidth >= log2Up(StoreBufferSize)) 633 id(log2Up(StoreBufferSize)-1, 0) 634 } 635 636 // hit resp 637 io.dcache.hit_resps.map(resp => { 638 val dcache_resp_id = resp.bits.id 639 when (resp.fire()) { 640 stateVec(dcache_resp_id).state_inflight := false.B 641 stateVec(dcache_resp_id).state_valid := false.B 642 assert(!resp.bits.replay) 643 assert(!resp.bits.miss) // not need to resp if miss, to be opted 644 assert(stateVec(dcache_resp_id).state_inflight === true.B) 645 } 646 647 // Update w_sameblock_inflight flag is delayed for 1 cycle 648 // 649 // When a new req allocate a new line in sbuffer, sameblock_inflight check will ignore 650 // current dcache.hit_resps. Then, in the next cycle, we have plenty of time to check 651 // if the same block is still inflight 652 (0 until StoreBufferSize).map(i => { 653 when( 654 stateVec(i).w_sameblock_inflight && 655 stateVec(i).state_valid && 656 RegNext(resp.fire()) && 657 waitInflightMask(i) === UIntToOH(RegNext(id_to_sbuffer_id(dcache_resp_id))) 658 ){ 659 stateVec(i).w_sameblock_inflight := false.B 660 } 661 }) 662 }) 663 664 io.dcache.hit_resps.zip(dataModule.io.maskFlushReq).map{case (resp, maskFlush) => { 665 maskFlush.valid := resp.fire() 666 maskFlush.bits.wvec := UIntToOH(resp.bits.id) 667 }} 668 669 // replay resp 670 val replay_resp_id = io.dcache.replay_resp.bits.id 671 when (io.dcache.replay_resp.fire()) { 672 missqReplayCount(replay_resp_id) := 0.U 673 stateVec(replay_resp_id).w_timeout := true.B 674 // waiting for timeout 675 assert(io.dcache.replay_resp.bits.replay) 676 assert(stateVec(replay_resp_id).state_inflight === true.B) 677 } 678 679 // TODO: reuse cohCount 680 (0 until StoreBufferSize).map(i => { 681 when(stateVec(i).w_timeout && stateVec(i).state_inflight && !missqReplayCount(i)(MissqReplayCountBits-1)) { 682 missqReplayCount(i) := missqReplayCount(i) + 1.U 683 } 684 when(activeMask(i) && !cohTimeOutMask(i)){ 685 cohCount(i) := cohCount(i)+1.U 686 } 687 }) 688 689 if (env.EnableDifftest) { 690 // hit resp 691 io.dcache.hit_resps.zipWithIndex.map{case (resp, index) => { 692 val difftest = Module(new DifftestSbufferEvent) 693 val dcache_resp_id = resp.bits.id 694 difftest.io.clock := clock 695 difftest.io.coreid := io.hartId 696 difftest.io.index := index.U 697 difftest.io.sbufferResp := RegNext(resp.fire()) 698 difftest.io.sbufferAddr := RegNext(getAddr(ptag(dcache_resp_id))) 699 difftest.io.sbufferData := RegNext(data(dcache_resp_id).asTypeOf(Vec(CacheLineBytes, UInt(8.W)))) 700 difftest.io.sbufferMask := RegNext(mask(dcache_resp_id).asUInt) 701 }} 702 } 703 704 // ---------------------- Load Data Forward --------------------- 705 val mismatch = Wire(Vec(LoadPipelineWidth, Bool())) 706 XSPerfAccumulate("vaddr_match_failed", mismatch(0) || mismatch(1)) 707 for ((forward, i) <- io.forward.zipWithIndex) { 708 val vtag_matches = VecInit(widthMap(w => vtag(w) === getVTag(forward.vaddr))) 709 // ptag_matches uses paddr from dtlb, which is far from sbuffer 710 val ptag_matches = VecInit(widthMap(w => RegEnable(ptag(w), forward.valid) === RegEnable(getPTag(forward.paddr), forward.valid))) 711 val tag_matches = vtag_matches 712 val tag_mismatch = RegNext(forward.valid) && VecInit(widthMap(w => 713 RegNext(vtag_matches(w)) =/= ptag_matches(w) && RegNext((activeMask(w) || inflightMask(w))) 714 )).asUInt.orR 715 mismatch(i) := tag_mismatch 716 when (tag_mismatch) { 717 XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n", 718 RegNext(ptag_matches.asUInt), 719 RegNext(vtag_matches.asUInt), 720 RegNext(forward.vaddr), 721 RegNext(forward.paddr) 722 ) 723 forward_need_uarch_drain := true.B 724 } 725 val valid_tag_matches = widthMap(w => tag_matches(w) && activeMask(w)) 726 val inflight_tag_matches = widthMap(w => tag_matches(w) && inflightMask(w)) 727 val line_offset_mask = UIntToOH(getWordOffset(forward.paddr)) 728 729 val valid_tag_match_reg = valid_tag_matches.map(RegNext(_)) 730 val inflight_tag_match_reg = inflight_tag_matches.map(RegNext(_)) 731 val line_offset_reg = RegNext(line_offset_mask) 732 val forward_mask_candidate_reg = RegEnable( 733 VecInit(mask.map(entry => entry(getWordOffset(forward.paddr)))), 734 forward.valid 735 ) 736 val forward_data_candidate_reg = RegEnable( 737 VecInit(data.map(entry => entry(getWordOffset(forward.paddr)))), 738 forward.valid 739 ) 740 741 val selectedValidMask = Mux1H(valid_tag_match_reg, forward_mask_candidate_reg) 742 val selectedValidData = Mux1H(valid_tag_match_reg, forward_data_candidate_reg) 743 selectedValidMask.suggestName("selectedValidMask_"+i) 744 selectedValidData.suggestName("selectedValidData_"+i) 745 746 val selectedInflightMask = Mux1H(inflight_tag_match_reg, forward_mask_candidate_reg) 747 val selectedInflightData = Mux1H(inflight_tag_match_reg, forward_data_candidate_reg) 748 selectedInflightMask.suggestName("selectedInflightMask_"+i) 749 selectedInflightData.suggestName("selectedInflightData_"+i) 750 751 // currently not being used 752 val selectedInflightMaskFast = Mux1H(line_offset_mask, Mux1H(inflight_tag_matches, mask).asTypeOf(Vec(CacheLineWords, Vec(DataBytes, Bool())))) 753 val selectedValidMaskFast = Mux1H(line_offset_mask, Mux1H(valid_tag_matches, mask).asTypeOf(Vec(CacheLineWords, Vec(DataBytes, Bool())))) 754 755 forward.dataInvalid := false.B // data in store line merge buffer is always ready 756 forward.matchInvalid := tag_mismatch // paddr / vaddr cam result does not match 757 for (j <- 0 until DataBytes) { 758 forward.forwardMask(j) := false.B 759 forward.forwardData(j) := DontCare 760 761 // valid entries have higher priority than inflight entries 762 when(selectedInflightMask(j)) { 763 forward.forwardMask(j) := true.B 764 forward.forwardData(j) := selectedInflightData(j) 765 } 766 when(selectedValidMask(j)) { 767 forward.forwardMask(j) := true.B 768 forward.forwardData(j) := selectedValidData(j) 769 } 770 771 forward.forwardMaskFast(j) := selectedInflightMaskFast(j) || selectedValidMaskFast(j) 772 } 773 } 774 775 for (i <- 0 until StoreBufferSize) { 776 XSDebug("sbf entry " + i + " : ptag %x vtag %x valid %x active %x inflight %x w_timeout %x\n", 777 ptag(i) << OffsetWidth, 778 vtag(i) << OffsetWidth, 779 stateVec(i).isValid(), 780 activeMask(i), 781 inflightMask(i), 782 stateVec(i).w_timeout 783 ) 784 } 785 786 val perf_valid_entry_count = RegNext(PopCount(VecInit(stateVec.map(s => !s.isInvalid())).asUInt)) 787 XSPerfHistogram("util", perf_valid_entry_count, true.B, 0, StoreBufferSize, 1) 788 XSPerfAccumulate("sbuffer_req_valid", PopCount(VecInit(io.in.map(_.valid)).asUInt)) 789 XSPerfAccumulate("sbuffer_req_fire", PopCount(VecInit(io.in.map(_.fire())).asUInt)) 790 XSPerfAccumulate("sbuffer_merge", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt)) 791 XSPerfAccumulate("sbuffer_newline", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt)) 792 XSPerfAccumulate("dcache_req_valid", io.dcache.req.valid) 793 XSPerfAccumulate("dcache_req_fire", io.dcache.req.fire()) 794 XSPerfAccumulate("sbuffer_idle", sbuffer_state === x_idle) 795 XSPerfAccumulate("sbuffer_flush", sbuffer_state === x_drain_sbuffer) 796 XSPerfAccumulate("sbuffer_replace", sbuffer_state === x_replace) 797 XSPerfAccumulate("evenCanInsert", evenCanInsert) 798 XSPerfAccumulate("oddCanInsert", oddCanInsert) 799 XSPerfAccumulate("mainpipe_resp_valid", io.dcache.main_pipe_hit_resp.fire()) 800 XSPerfAccumulate("refill_resp_valid", io.dcache.refill_hit_resp.fire()) 801 XSPerfAccumulate("replay_resp_valid", io.dcache.replay_resp.fire()) 802 XSPerfAccumulate("coh_timeout", cohHasTimeOut) 803 804 // val (store_latency_sample, store_latency) = TransactionLatencyCounter(io.lsu.req.fire(), io.lsu.resp.fire()) 805 // XSPerfHistogram("store_latency", store_latency, store_latency_sample, 0, 100, 10) 806 // XSPerfAccumulate("store_req", io.lsu.req.fire()) 807 808 val perfEvents = Seq( 809 ("sbuffer_req_valid ", PopCount(VecInit(io.in.map(_.valid)).asUInt) ), 810 ("sbuffer_req_fire ", PopCount(VecInit(io.in.map(_.fire())).asUInt) ), 811 ("sbuffer_merge ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && canMerge(i)})).asUInt) ), 812 ("sbuffer_newline ", PopCount(VecInit(io.in.zipWithIndex.map({case (in, i) => in.fire() && !canMerge(i)})).asUInt) ), 813 ("dcache_req_valid ", io.dcache.req.valid ), 814 ("dcache_req_fire ", io.dcache.req.fire() ), 815 ("sbuffer_idle ", sbuffer_state === x_idle ), 816 ("sbuffer_flush ", sbuffer_state === x_drain_sbuffer ), 817 ("sbuffer_replace ", sbuffer_state === x_replace ), 818 ("mpipe_resp_valid ", io.dcache.main_pipe_hit_resp.fire() ), 819 ("refill_resp_valid ", io.dcache.refill_hit_resp.fire() ), 820 ("replay_resp_valid ", io.dcache.replay_resp.fire() ), 821 ("coh_timeout ", cohHasTimeOut ), 822 ("sbuffer_1_4_valid ", (perf_valid_entry_count < (StoreBufferSize.U/4.U)) ), 823 ("sbuffer_2_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/4.U)) & (perf_valid_entry_count <= (StoreBufferSize.U/2.U)) ), 824 ("sbuffer_3_4_valid ", (perf_valid_entry_count > (StoreBufferSize.U/2.U)) & (perf_valid_entry_count <= (StoreBufferSize.U*3.U/4.U))), 825 ("sbuffer_full_valid", (perf_valid_entry_count > (StoreBufferSize.U*3.U/4.U))) 826 ) 827 generatePerfEvent() 828 829}