1/*************************************************************************************** 2* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences 3* Copyright (c) 2020-2021 Peng Cheng Laboratory 4* 5* XiangShan is licensed under Mulan PSL v2. 6* You can use this software according to the terms and conditions of the Mulan PSL v2. 7* You may obtain a copy of Mulan PSL v2 at: 8* http://license.coscl.org.cn/MulanPSL2 9* 10* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, 11* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, 12* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 13* 14* See the Mulan PSL v2 for more details. 15***************************************************************************************/ 16 17package xiangshan.mem 18 19import chipsalliance.rocketchip.config.Parameters 20import chisel3._ 21import chisel3.util._ 22import utils._ 23import xiangshan._ 24import xiangshan.cache._ 25import xiangshan.cache.{DCacheWordIO, DCacheLineIO, MemoryOpConstants} 26import xiangshan.backend.rob.{RobLsqIO, RobPtr} 27import difftest._ 28import device.RAMHelper 29 30class SqPtr(implicit p: Parameters) extends CircularQueuePtr[SqPtr]( 31 p => p(XSCoreParamsKey).StoreQueueSize 32){ 33 override def cloneType = (new SqPtr).asInstanceOf[this.type] 34} 35 36object SqPtr { 37 def apply(f: Bool, v: UInt)(implicit p: Parameters): SqPtr = { 38 val ptr = Wire(new SqPtr) 39 ptr.flag := f 40 ptr.value := v 41 ptr 42 } 43} 44 45class SqEnqIO(implicit p: Parameters) extends XSBundle { 46 val canAccept = Output(Bool()) 47 val lqCanAccept = Input(Bool()) 48 val needAlloc = Vec(exuParameters.LsExuCnt, Input(Bool())) 49 val req = Vec(exuParameters.LsExuCnt, Flipped(ValidIO(new MicroOp))) 50 val resp = Vec(exuParameters.LsExuCnt, Output(new SqPtr)) 51} 52 53class DataBufferEntry (implicit p: Parameters) extends DCacheBundle { 54 val addr = UInt(PAddrBits.W) 55 val vaddr = UInt(VAddrBits.W) 56 val data = UInt(DataBits.W) 57 val mask = UInt((DataBits/8).W) 58 val wline = Bool() 59 val sqPtr = new SqPtr 60} 61 62// Store Queue 63class StoreQueue(implicit p: Parameters) extends XSModule with HasDCacheParameters with HasCircularQueuePtrHelper { 64 val io = IO(new Bundle() { 65 val hartId = Input(UInt(8.W)) 66 val enq = new SqEnqIO 67 val brqRedirect = Flipped(ValidIO(new Redirect)) 68 val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle))) // store addr, data is not included 69 val storeInRe = Vec(StorePipelineWidth, Input(new LsPipelineBundle())) // store more mmio and exception 70 val storeDataIn = Vec(StorePipelineWidth, Flipped(Valid(new StoreDataBundle))) // store data, send to sq from rs 71 val sbuffer = Vec(StorePipelineWidth, Decoupled(new DCacheWordReqWithVaddr)) // write commited store to sbuffer 72 val mmioStout = DecoupledIO(new ExuOutput) // writeback uncached store 73 val forward = Vec(LoadPipelineWidth, Flipped(new PipeLoadForwardQueryIO)) 74 val rob = Flipped(new RobLsqIO) 75 val uncache = new DCacheWordIO 76 // val refill = Flipped(Valid(new DCacheLineReq )) 77 val exceptionAddr = new ExceptionAddrIO 78 val sqempty = Output(Bool()) 79 val issuePtrExt = Output(new SqPtr) // used to wake up delayed load/store 80 val sqFull = Output(Bool()) 81 }) 82 83 println("StoreQueue: size:" + StoreQueueSize) 84 85 // data modules 86 val uop = Reg(Vec(StoreQueueSize, new MicroOp)) 87 // val data = Reg(Vec(StoreQueueSize, new LsqEntry)) 88 val dataModule = Module(new SQDataModule( 89 numEntries = StoreQueueSize, 90 numRead = StorePipelineWidth, 91 numWrite = StorePipelineWidth, 92 numForward = StorePipelineWidth 93 )) 94 dataModule.io := DontCare 95 val paddrModule = Module(new SQAddrModule( 96 dataWidth = PAddrBits, 97 numEntries = StoreQueueSize, 98 numRead = StorePipelineWidth, 99 numWrite = StorePipelineWidth, 100 numForward = StorePipelineWidth 101 )) 102 paddrModule.io := DontCare 103 val vaddrModule = Module(new SQAddrModule( 104 dataWidth = VAddrBits, 105 numEntries = StoreQueueSize, 106 numRead = StorePipelineWidth + 1, // sbuffer 2 + badvaddr 1 (TODO) 107 numWrite = StorePipelineWidth, 108 numForward = StorePipelineWidth 109 )) 110 vaddrModule.io := DontCare 111 val dataBuffer = Module(new DatamoduleResultBuffer(new DataBufferEntry)) 112 val debug_paddr = Reg(Vec(StoreQueueSize, UInt((PAddrBits).W))) 113 val debug_vaddr = Reg(Vec(StoreQueueSize, UInt((VAddrBits).W))) 114 val debug_data = Reg(Vec(StoreQueueSize, UInt((XLEN).W))) 115 116 // state & misc 117 val allocated = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // sq entry has been allocated 118 val addrvalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio addr is valid 119 val datavalid = RegInit(VecInit(List.fill(StoreQueueSize)(false.B))) // non-mmio data is valid 120 val allvalid = VecInit((0 until StoreQueueSize).map(i => addrvalid(i) && datavalid(i))) // non-mmio data & addr is valid 121 val commited = Reg(Vec(StoreQueueSize, Bool())) // inst has been commited by rob 122 val pending = Reg(Vec(StoreQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reachs the end of rob 123 val mmio = Reg(Vec(StoreQueueSize, Bool())) // mmio: inst is an mmio inst 124 125 // ptr 126 val enqPtrExt = RegInit(VecInit((0 until io.enq.req.length).map(_.U.asTypeOf(new SqPtr)))) 127 val rdataPtrExt = RegInit(VecInit((0 until StorePipelineWidth).map(_.U.asTypeOf(new SqPtr)))) 128 val deqPtrExt = RegInit(VecInit((0 until StorePipelineWidth).map(_.U.asTypeOf(new SqPtr)))) 129 val cmtPtrExt = RegInit(VecInit((0 until CommitWidth).map(_.U.asTypeOf(new SqPtr)))) 130 val issuePtrExt = RegInit(0.U.asTypeOf(new SqPtr)) 131 val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W)) 132 val allowEnqueue = RegInit(true.B) 133 134 val enqPtr = enqPtrExt(0).value 135 val deqPtr = deqPtrExt(0).value 136 val cmtPtr = cmtPtrExt(0).value 137 138 val deqMask = UIntToMask(deqPtr, StoreQueueSize) 139 val enqMask = UIntToMask(enqPtr, StoreQueueSize) 140 141 val commitCount = RegNext(io.rob.scommit) 142 143 // Read dataModule 144 // rdataPtrExtNext and rdataPtrExtNext+1 entry will be read from dataModule 145 val rdataPtrExtNext = WireInit(Mux(dataBuffer.io.enq(1).fire(), 146 VecInit(rdataPtrExt.map(_ + 2.U)), 147 Mux(dataBuffer.io.enq(0).fire() || io.mmioStout.fire(), 148 VecInit(rdataPtrExt.map(_ + 1.U)), 149 rdataPtrExt 150 ) 151 )) 152 // deqPtrExtNext traces which inst is about to leave store queue 153 val deqPtrExtNext = WireInit(Mux(io.sbuffer(1).fire(), 154 VecInit(deqPtrExt.map(_ + 2.U)), 155 Mux(io.sbuffer(0).fire() || io.mmioStout.fire(), 156 VecInit(deqPtrExt.map(_ + 1.U)), 157 deqPtrExt 158 ) 159 )) 160 for (i <- 0 until StorePipelineWidth) { 161 dataModule.io.raddr(i) := rdataPtrExtNext(i).value 162 paddrModule.io.raddr(i) := rdataPtrExtNext(i).value 163 vaddrModule.io.raddr(i) := rdataPtrExtNext(i).value 164 } 165 166 // no inst will be commited 1 cycle before tval update 167 vaddrModule.io.raddr(StorePipelineWidth) := (cmtPtrExt(0) + commitCount).value 168 169 /** 170 * Enqueue at dispatch 171 * 172 * Currently, StoreQueue only allows enqueue when #emptyEntries > EnqWidth 173 */ 174 io.enq.canAccept := allowEnqueue 175 for (i <- 0 until io.enq.req.length) { 176 val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i)) 177 val sqIdx = enqPtrExt(offset) 178 val index = sqIdx.value 179 when (io.enq.req(i).valid && io.enq.canAccept && io.enq.lqCanAccept && !io.brqRedirect.valid) { 180 uop(index) := io.enq.req(i).bits 181 allocated(index) := true.B 182 datavalid(index) := false.B 183 addrvalid(index) := false.B 184 commited(index) := false.B 185 pending(index) := false.B 186 } 187 io.enq.resp(i) := sqIdx 188 } 189 XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n") 190 191 /** 192 * Update issuePtr when issue from rs 193 */ 194 // update issuePtr 195 val IssuePtrMoveStride = 4 196 require(IssuePtrMoveStride >= 2) 197 198 val issueLookupVec = (0 until IssuePtrMoveStride).map(issuePtrExt + _.U) 199 val issueLookup = issueLookupVec.map(ptr => allocated(ptr.value) && addrvalid(ptr.value) && datavalid(ptr.value) && ptr =/= enqPtrExt(0)) 200 val nextIssuePtr = issuePtrExt + PriorityEncoder(VecInit(issueLookup.map(!_) :+ true.B)) 201 issuePtrExt := nextIssuePtr 202 203 when (io.brqRedirect.valid) { 204 issuePtrExt := Mux( 205 isAfter(cmtPtrExt(0), deqPtrExt(0)), 206 cmtPtrExt(0), 207 deqPtrExtNext(0) // for mmio insts, deqPtr may be ahead of cmtPtr 208 ) 209 } 210 // send issuePtrExt to rs 211 // io.issuePtrExt := cmtPtrExt(0) 212 io.issuePtrExt := issuePtrExt 213 214 /** 215 * Writeback store from store units 216 * 217 * Most store instructions writeback to regfile in the previous cycle. 218 * However, 219 * (1) For an mmio instruction with exceptions, we need to mark it as addrvalid 220 * (in this way it will trigger an exception when it reaches ROB's head) 221 * instead of pending to avoid sending them to lower level. 222 * (2) For an mmio instruction without exceptions, we mark it as pending. 223 * When the instruction reaches ROB's head, StoreQueue sends it to uncache channel. 224 * Upon receiving the response, StoreQueue writes back the instruction 225 * through arbiter with store units. It will later commit as normal. 226 */ 227 228 // Write addr to sq 229 for (i <- 0 until StorePipelineWidth) { 230 paddrModule.io.wen(i) := false.B 231 vaddrModule.io.wen(i) := false.B 232 dataModule.io.mask.wen(i) := false.B 233 val stWbIndex = io.storeIn(i).bits.uop.sqIdx.value 234 when (io.storeIn(i).fire()) { 235 val addr_valid = !io.storeIn(i).bits.miss 236 addrvalid(stWbIndex) := addr_valid //!io.storeIn(i).bits.mmio 237 // pending(stWbIndex) := io.storeIn(i).bits.mmio 238 239 dataModule.io.mask.waddr(i) := stWbIndex 240 dataModule.io.mask.wdata(i) := io.storeIn(i).bits.mask 241 dataModule.io.mask.wen(i) := addr_valid 242 243 paddrModule.io.waddr(i) := stWbIndex 244 paddrModule.io.wdata(i) := io.storeIn(i).bits.paddr 245 paddrModule.io.wlineflag(i) := io.storeIn(i).bits.wlineflag 246 paddrModule.io.wen(i) := addr_valid 247 248 vaddrModule.io.waddr(i) := stWbIndex 249 vaddrModule.io.wdata(i) := io.storeIn(i).bits.vaddr 250 vaddrModule.io.wlineflag(i) := io.storeIn(i).bits.wlineflag 251 vaddrModule.io.wen(i) := addr_valid 252 253 debug_paddr(paddrModule.io.waddr(i)) := paddrModule.io.wdata(i) 254 255 // mmio(stWbIndex) := io.storeIn(i).bits.mmio 256 257 uop(stWbIndex).debugInfo := io.storeIn(i).bits.uop.debugInfo 258 XSInfo("store addr write to sq idx %d pc 0x%x miss:%d vaddr %x paddr %x mmio %x\n", 259 io.storeIn(i).bits.uop.sqIdx.value, 260 io.storeIn(i).bits.uop.cf.pc, 261 io.storeIn(i).bits.miss, 262 io.storeIn(i).bits.vaddr, 263 io.storeIn(i).bits.paddr, 264 io.storeIn(i).bits.mmio 265 ) 266 } 267 268 // re-replinish mmio, for pma/pmp will get mmio one cycle later 269 val storeInFireReg = RegNext(io.storeIn(i).fire() && !io.storeIn(i).bits.miss) 270 val stWbIndexReg = RegNext(stWbIndex) 271 when (storeInFireReg) { 272 pending(stWbIndexReg) := io.storeInRe(i).mmio 273 mmio(stWbIndexReg) := io.storeInRe(i).mmio 274 } 275 276 when(vaddrModule.io.wen(i)){ 277 debug_vaddr(vaddrModule.io.waddr(i)) := vaddrModule.io.wdata(i) 278 } 279 } 280 281 // Write data to sq 282 for (i <- 0 until StorePipelineWidth) { 283 dataModule.io.data.wen(i) := false.B 284 val stWbIndex = io.storeDataIn(i).bits.uop.sqIdx.value 285 when (io.storeDataIn(i).fire()) { 286 datavalid(stWbIndex) := true.B 287 288 dataModule.io.data.waddr(i) := stWbIndex 289 dataModule.io.data.wdata(i) := Mux(io.storeDataIn(i).bits.uop.ctrl.fuOpType === LSUOpType.cbo_zero, 290 0.U, 291 genWdata(io.storeDataIn(i).bits.data, io.storeDataIn(i).bits.uop.ctrl.fuOpType(1,0)) 292 ) 293 dataModule.io.data.wen(i) := true.B 294 295 debug_data(dataModule.io.data.waddr(i)) := dataModule.io.data.wdata(i) 296 297 XSInfo("store data write to sq idx %d pc 0x%x data %x -> %x\n", 298 io.storeDataIn(i).bits.uop.sqIdx.value, 299 io.storeDataIn(i).bits.uop.cf.pc, 300 io.storeDataIn(i).bits.data, 301 dataModule.io.data.wdata(i) 302 ) 303 } 304 io.rob.storeDataRobWb(i).valid := RegNext(io.storeDataIn(i).fire()) 305 io.rob.storeDataRobWb(i).bits := RegNext(io.storeDataIn(i).bits.uop.robIdx) 306 } 307 308 /** 309 * load forward query 310 * 311 * Check store queue for instructions that is older than the load. 312 * The response will be valid at the next cycle after req. 313 */ 314 // check over all lq entries and forward data from the first matched store 315 for (i <- 0 until LoadPipelineWidth) { 316 // Compare deqPtr (deqPtr) and forward.sqIdx, we have two cases: 317 // (1) if they have the same flag, we need to check range(tail, sqIdx) 318 // (2) if they have different flags, we need to check range(tail, LoadQueueSize) and range(0, sqIdx) 319 // Forward1: Mux(same_flag, range(tail, sqIdx), range(tail, LoadQueueSize)) 320 // Forward2: Mux(same_flag, 0.U, range(0, sqIdx) ) 321 // i.e. forward1 is the target entries with the same flag bits and forward2 otherwise 322 val differentFlag = deqPtrExt(0).flag =/= io.forward(i).sqIdx.flag 323 val forwardMask = io.forward(i).sqIdxMask 324 // all addrvalid terms need to be checked 325 val addrValidVec = WireInit(VecInit((0 until StoreQueueSize).map(i => addrvalid(i) && allocated(i)))) 326 val dataValidVec = WireInit(VecInit((0 until StoreQueueSize).map(i => datavalid(i)))) 327 val allValidVec = WireInit(VecInit((0 until StoreQueueSize).map(i => addrvalid(i) && datavalid(i) && allocated(i)))) 328 val canForward1 = Mux(differentFlag, ~deqMask, deqMask ^ forwardMask) & allValidVec.asUInt 329 val canForward2 = Mux(differentFlag, forwardMask, 0.U(StoreQueueSize.W)) & allValidVec.asUInt 330 val needForward = Mux(differentFlag, ~deqMask | forwardMask, deqMask ^ forwardMask) 331 332 XSDebug(p"$i f1 ${Binary(canForward1)} f2 ${Binary(canForward2)} " + 333 p"sqIdx ${io.forward(i).sqIdx} pa ${Hexadecimal(io.forward(i).paddr)}\n" 334 ) 335 336 // do real fwd query (cam lookup in load_s1) 337 dataModule.io.needForward(i)(0) := canForward1 & vaddrModule.io.forwardMmask(i).asUInt 338 dataModule.io.needForward(i)(1) := canForward2 & vaddrModule.io.forwardMmask(i).asUInt 339 340 vaddrModule.io.forwardMdata(i) := io.forward(i).vaddr 341 paddrModule.io.forwardMdata(i) := io.forward(i).paddr 342 343 // vaddr cam result does not equal to paddr cam result 344 // replay needed 345 // val vpmaskNotEqual = ((paddrModule.io.forwardMmask(i).asUInt ^ vaddrModule.io.forwardMmask(i).asUInt) & needForward) =/= 0.U 346 // val vaddrMatchFailed = vpmaskNotEqual && io.forward(i).valid 347 val vpmaskNotEqual = ( 348 (RegNext(paddrModule.io.forwardMmask(i).asUInt) ^ RegNext(vaddrModule.io.forwardMmask(i).asUInt)) & 349 RegNext(needForward) & 350 RegNext(addrValidVec.asUInt) 351 ) =/= 0.U 352 val vaddrMatchFailed = vpmaskNotEqual && RegNext(io.forward(i).valid) 353 when (vaddrMatchFailed) { 354 XSInfo("vaddrMatchFailed: pc %x pmask %x vmask %x\n", 355 RegNext(io.forward(i).uop.cf.pc), 356 RegNext(needForward & paddrModule.io.forwardMmask(i).asUInt), 357 RegNext(needForward & vaddrModule.io.forwardMmask(i).asUInt) 358 ); 359 } 360 XSPerfAccumulate("vaddr_match_failed", vpmaskNotEqual) 361 XSPerfAccumulate("vaddr_match_really_failed", vaddrMatchFailed) 362 363 // Fast forward mask will be generated immediately (load_s1) 364 io.forward(i).forwardMaskFast := dataModule.io.forwardMaskFast(i) 365 366 // Forward result will be generated 1 cycle later (load_s2) 367 io.forward(i).forwardMask := dataModule.io.forwardMask(i) 368 io.forward(i).forwardData := dataModule.io.forwardData(i) 369 370 // If addr match, data not ready, mark it as dataInvalid 371 // load_s1: generate dataInvalid in load_s1 to set fastUop 372 io.forward(i).dataInvalidFast := (addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & needForward).orR 373 val dataInvalidSqIdxReg = RegNext(OHToUInt(addrValidVec.asUInt & ~dataValidVec.asUInt & vaddrModule.io.forwardMmask(i).asUInt & needForward)) 374 // load_s2 375 io.forward(i).dataInvalid := RegNext(io.forward(i).dataInvalidFast) 376 377 // load_s2 378 // check if vaddr forward mismatched 379 io.forward(i).matchInvalid := vaddrMatchFailed 380 io.forward(i).dataInvalidSqIdx := dataInvalidSqIdxReg 381 } 382 383 /** 384 * Memory mapped IO / other uncached operations 385 * 386 * States: 387 * (1) writeback from store units: mark as pending 388 * (2) when they reach ROB's head, they can be sent to uncache channel 389 * (3) response from uncache channel: mark as datavalidmask.wen 390 * (4) writeback to ROB (and other units): mark as writebacked 391 * (5) ROB commits the instruction: same as normal instructions 392 */ 393 //(2) when they reach ROB's head, they can be sent to uncache channel 394 val s_idle :: s_req :: s_resp :: s_wb :: s_wait :: Nil = Enum(5) 395 val uncacheState = RegInit(s_idle) 396 switch(uncacheState) { 397 is(s_idle) { 398 when(RegNext(io.rob.pendingst) && pending(deqPtr) && allocated(deqPtr) && datavalid(deqPtr) && addrvalid(deqPtr)) { 399 uncacheState := s_req 400 } 401 } 402 is(s_req) { 403 when(io.uncache.req.fire()) { 404 uncacheState := s_resp 405 } 406 } 407 is(s_resp) { 408 when(io.uncache.resp.fire()) { 409 uncacheState := s_wb 410 } 411 } 412 is(s_wb) { 413 when (io.mmioStout.fire()) { 414 uncacheState := s_wait 415 } 416 } 417 is(s_wait) { 418 when(commitCount > 0.U) { 419 uncacheState := s_idle // ready for next mmio 420 } 421 } 422 } 423 io.uncache.req.valid := uncacheState === s_req 424 425 io.uncache.req.bits.cmd := MemoryOpConstants.M_XWR 426 io.uncache.req.bits.addr := paddrModule.io.rdata(0) // data(deqPtr) -> rdata(0) 427 io.uncache.req.bits.data := dataModule.io.rdata(0).data 428 io.uncache.req.bits.mask := dataModule.io.rdata(0).mask 429 430 // CBO op type check can be delayed for 1 cycle, 431 // as uncache op will not start in s_idle 432 val cbo_mmio_addr = paddrModule.io.rdata(0) >> 2 << 2 // clear lowest 2 bits for op 433 val cbo_mmio_op = 0.U //TODO 434 val cbo_mmio_data = cbo_mmio_addr | cbo_mmio_op 435 when(RegNext(LSUOpType.isCbo(uop(deqPtr).ctrl.fuOpType))){ 436 io.uncache.req.bits.addr := DontCare // TODO 437 io.uncache.req.bits.data := paddrModule.io.rdata(0) 438 io.uncache.req.bits.mask := DontCare // TODO 439 } 440 441 io.uncache.req.bits.id := DontCare 442 io.uncache.req.bits.instrtype := DontCare 443 444 when(io.uncache.req.fire()){ 445 // mmio store should not be committed until uncache req is sent 446 pending(deqPtr) := false.B 447 448 XSDebug( 449 p"uncache req: pc ${Hexadecimal(uop(deqPtr).cf.pc)} " + 450 p"addr ${Hexadecimal(io.uncache.req.bits.addr)} " + 451 p"data ${Hexadecimal(io.uncache.req.bits.data)} " + 452 p"op ${Hexadecimal(io.uncache.req.bits.cmd)} " + 453 p"mask ${Hexadecimal(io.uncache.req.bits.mask)}\n" 454 ) 455 } 456 457 // (3) response from uncache channel: mark as datavalid 458 io.uncache.resp.ready := true.B 459 460 // (4) writeback to ROB (and other units): mark as writebacked 461 io.mmioStout.valid := uncacheState === s_wb 462 io.mmioStout.bits.uop := uop(deqPtr) 463 io.mmioStout.bits.uop.sqIdx := deqPtrExt(0) 464 io.mmioStout.bits.data := dataModule.io.rdata(0).data // dataModule.io.rdata.read(deqPtr) 465 io.mmioStout.bits.redirectValid := false.B 466 io.mmioStout.bits.redirect := DontCare 467 io.mmioStout.bits.debug.isMMIO := true.B 468 io.mmioStout.bits.debug.paddr := DontCare 469 io.mmioStout.bits.debug.isPerfCnt := false.B 470 io.mmioStout.bits.fflags := DontCare 471 io.mmioStout.bits.debug.vaddr := DontCare 472 // Remove MMIO inst from store queue after MMIO request is being sent 473 // That inst will be traced by uncache state machine 474 when (io.mmioStout.fire()) { 475 allocated(deqPtr) := false.B 476 } 477 478 /** 479 * ROB commits store instructions (mark them as commited) 480 * 481 * (1) When store commits, mark it as commited. 482 * (2) They will not be cancelled and can be sent to lower level. 483 */ 484 XSError(uncacheState =/= s_idle && uncacheState =/= s_wait && commitCount > 0.U, 485 "should not commit instruction when MMIO has not been finished\n") 486 for (i <- 0 until CommitWidth) { 487 when (commitCount > i.U) { // MMIO inst is not in progress 488 if(i == 0){ 489 // MMIO inst should not update commited flag 490 // Note that commit count has been delayed for 1 cycle 491 when(uncacheState === s_idle){ 492 commited(cmtPtrExt(0).value) := true.B 493 } 494 } else { 495 commited(cmtPtrExt(i).value) := true.B 496 } 497 } 498 } 499 cmtPtrExt := cmtPtrExt.map(_ + commitCount) 500 501 // Commited stores will not be cancelled and can be sent to lower level. 502 // remove retired insts from sq, add retired store to sbuffer 503 504 // Read data from data module 505 // As store queue grows larger and larger, time needed to read data from data 506 // module keeps growing higher. Now we give data read a whole cycle. 507 508 // For now, data read logic width is hardcoded to 2 509 require(StorePipelineWidth == 2) // TODO: add EnsbufferWidth parameter 510 val mmioStall = mmio(rdataPtrExt(0).value) 511 for (i <- 0 until StorePipelineWidth) { 512 val ptr = rdataPtrExt(i).value 513 dataBuffer.io.enq(i).valid := allocated(ptr) && commited(ptr) && !mmioStall 514 // Note that store data/addr should both be valid after store's commit 515 assert(!dataBuffer.io.enq(i).valid || allvalid(ptr)) 516 dataBuffer.io.enq(i).bits.addr := paddrModule.io.rdata(i) 517 dataBuffer.io.enq(i).bits.vaddr := vaddrModule.io.rdata(i) 518 dataBuffer.io.enq(i).bits.data := dataModule.io.rdata(i).data 519 dataBuffer.io.enq(i).bits.mask := dataModule.io.rdata(i).mask 520 dataBuffer.io.enq(i).bits.wline := paddrModule.io.rlineflag(i) 521 dataBuffer.io.enq(i).bits.sqPtr := rdataPtrExt(i) 522 } 523 524 // Send data stored in sbufferReqBitsReg to sbuffer 525 for (i <- 0 until StorePipelineWidth) { 526 io.sbuffer(i).valid := dataBuffer.io.deq(i).valid 527 dataBuffer.io.deq(i).ready := io.sbuffer(i).ready 528 // Write line request should have all 1 mask 529 assert(!(io.sbuffer(i).valid && io.sbuffer(i).bits.wline && !io.sbuffer(i).bits.mask.andR)) 530 io.sbuffer(i).bits.cmd := MemoryOpConstants.M_XWR 531 io.sbuffer(i).bits.addr := dataBuffer.io.deq(i).bits.addr 532 io.sbuffer(i).bits.vaddr := dataBuffer.io.deq(i).bits.vaddr 533 io.sbuffer(i).bits.data := dataBuffer.io.deq(i).bits.data 534 io.sbuffer(i).bits.mask := dataBuffer.io.deq(i).bits.mask 535 io.sbuffer(i).bits.wline := dataBuffer.io.deq(i).bits.wline 536 io.sbuffer(i).bits.id := DontCare 537 io.sbuffer(i).bits.instrtype := DontCare 538 539 val ptr = dataBuffer.io.deq(i).bits.sqPtr.value 540 when (io.sbuffer(i).fire()) { 541 allocated(ptr) := false.B 542 XSDebug("sbuffer "+i+" fire: ptr %d\n", ptr) 543 } 544 } 545 when (io.sbuffer(1).fire()) { 546 assert(io.sbuffer(0).fire()) 547 } 548 if (coreParams.dcacheParametersOpt.isEmpty) { 549 for (i <- 0 until StorePipelineWidth) { 550 val ptr = deqPtrExt(i).value 551 val fakeRAM = Module(new RAMHelper(64L * 1024 * 1024 * 1024)) 552 fakeRAM.clk := clock 553 fakeRAM.en := allocated(ptr) && commited(ptr) && !mmio(ptr) 554 fakeRAM.rIdx := 0.U 555 fakeRAM.wIdx := (paddrModule.io.rdata(i) - "h80000000".U) >> 3 556 fakeRAM.wdata := dataModule.io.rdata(i).data 557 fakeRAM.wmask := MaskExpand(dataModule.io.rdata(i).mask) 558 fakeRAM.wen := allocated(ptr) && commited(ptr) && !mmio(ptr) 559 } 560 } 561 562 if (env.EnableDifftest) { 563 for (i <- 0 until StorePipelineWidth) { 564 val storeCommit = io.sbuffer(i).fire() 565 val waddr = SignExt(io.sbuffer(i).bits.addr, 64) 566 val wdata = io.sbuffer(i).bits.data & MaskExpand(io.sbuffer(i).bits.mask) 567 val wmask = io.sbuffer(i).bits.mask 568 569 val difftest = Module(new DifftestStoreEvent) 570 difftest.io.clock := clock 571 difftest.io.coreid := io.hartId 572 difftest.io.index := i.U 573 difftest.io.valid := storeCommit 574 difftest.io.storeAddr := waddr 575 difftest.io.storeData := wdata 576 difftest.io.storeMask := wmask 577 } 578 } 579 580 // Read vaddr for mem exception 581 io.exceptionAddr.vaddr := vaddrModule.io.rdata(StorePipelineWidth) 582 583 // misprediction recovery / exception redirect 584 // invalidate sq term using robIdx 585 val needCancel = Wire(Vec(StoreQueueSize, Bool())) 586 for (i <- 0 until StoreQueueSize) { 587 needCancel(i) := uop(i).robIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i) 588 when (needCancel(i)) { 589 allocated(i) := false.B 590 } 591 } 592 593 /** 594 * update pointers 595 */ 596 val lastCycleRedirect = RegNext(io.brqRedirect.valid) 597 val lastCycleCancelCount = PopCount(RegNext(needCancel)) 598 // when io.brqRedirect.valid, we don't allow eneuque even though it may fire. 599 val enqNumber = Mux(io.enq.canAccept && io.enq.lqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U) 600 when (lastCycleRedirect) { 601 // we recover the pointers in the next cycle after redirect 602 enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount)) 603 }.otherwise { 604 enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber)) 605 } 606 607 deqPtrExt := deqPtrExtNext 608 rdataPtrExt := rdataPtrExtNext 609 610 val dequeueCount = Mux(io.sbuffer(1).fire(), 2.U, Mux(io.sbuffer(0).fire() || io.mmioStout.fire(), 1.U, 0.U)) 611 val validCount = distanceBetween(enqPtrExt(0), deqPtrExt(0)) 612 613 allowEnqueue := validCount + enqNumber <= (StoreQueueSize - io.enq.req.length).U 614 615 // io.sqempty will be used by sbuffer 616 // We delay it for 1 cycle for better timing 617 // When sbuffer need to check if it is empty, the pipeline is blocked, which means delay io.sqempty 618 // for 1 cycle will also promise that sq is empty in that cycle 619 io.sqempty := RegNext( 620 enqPtrExt(0).value === deqPtrExt(0).value && 621 enqPtrExt(0).flag === deqPtrExt(0).flag 622 ) 623 624 // perf counter 625 QueuePerf(StoreQueueSize, validCount, !allowEnqueue) 626 io.sqFull := !allowEnqueue 627 XSPerfAccumulate("mmioCycle", uncacheState =/= s_idle) // lq is busy dealing with uncache req 628 XSPerfAccumulate("mmioCnt", io.uncache.req.fire()) 629 XSPerfAccumulate("mmio_wb_success", io.mmioStout.fire()) 630 XSPerfAccumulate("mmio_wb_blocked", io.mmioStout.valid && !io.mmioStout.ready) 631 XSPerfAccumulate("validEntryCnt", distanceBetween(enqPtrExt(0), deqPtrExt(0))) 632 XSPerfAccumulate("cmtEntryCnt", distanceBetween(cmtPtrExt(0), deqPtrExt(0))) 633 XSPerfAccumulate("nCmtEntryCnt", distanceBetween(enqPtrExt(0), cmtPtrExt(0))) 634 635 val perfinfo = IO(new Bundle(){ 636 val perfEvents = Output(new PerfEventsBundle(8)) 637 }) 638 val perfEvents = Seq( 639 ("mmioCycle ", uncacheState =/= s_idle ), 640 ("mmioCnt ", io.uncache.req.fire() ), 641 ("mmio_wb_success ", io.mmioStout.fire() ), 642 ("mmio_wb_blocked ", io.mmioStout.valid && !io.mmioStout.ready ), 643 ("stq_1/4_valid ", (distanceBetween(enqPtrExt(0), deqPtrExt(0)) < (StoreQueueSize.U/4.U)) ), 644 ("stq_2/4_valid ", (distanceBetween(enqPtrExt(0), deqPtrExt(0)) > (StoreQueueSize.U/4.U)) & (distanceBetween(enqPtrExt(0), deqPtrExt(0)) <= (StoreQueueSize.U/2.U)) ), 645 ("stq_3/4_valid ", (distanceBetween(enqPtrExt(0), deqPtrExt(0)) > (StoreQueueSize.U/2.U)) & (distanceBetween(enqPtrExt(0), deqPtrExt(0)) <= (StoreQueueSize.U*3.U/4.U))), 646 ("stq_4/4_valid ", (distanceBetween(enqPtrExt(0), deqPtrExt(0)) > (StoreQueueSize.U*3.U/4.U)) ), 647 ) 648 649 for (((perf_out,(perf_name,perf)),i) <- perfinfo.perfEvents.perf_events.zip(perfEvents).zipWithIndex) { 650 perf_out.incr_step := RegNext(perf) 651 } 652 // debug info 653 XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt(0).flag, deqPtr) 654 655 def PrintFlag(flag: Bool, name: String): Unit = { 656 when(flag) { 657 XSDebug(false, true.B, name) 658 }.otherwise { 659 XSDebug(false, true.B, " ") 660 } 661 } 662 663 for (i <- 0 until StoreQueueSize) { 664 XSDebug(i + ": pc %x va %x pa %x data %x ", 665 uop(i).cf.pc, 666 debug_vaddr(i), 667 debug_paddr(i), 668 debug_data(i) 669 ) 670 PrintFlag(allocated(i), "a") 671 PrintFlag(allocated(i) && addrvalid(i), "a") 672 PrintFlag(allocated(i) && datavalid(i), "d") 673 PrintFlag(allocated(i) && commited(i), "c") 674 PrintFlag(allocated(i) && pending(i), "p") 675 PrintFlag(allocated(i) && mmio(i), "m") 676 XSDebug(false, true.B, "\n") 677 } 678 679} 680