package xiangshan.mem

import chisel3._
import chisel3.util._
import freechips.rocketchip.tile.HasFPUParameters
import utils._
import xiangshan._
import xiangshan.cache._
import xiangshan.cache.{DCacheLineIO, DCacheWordIO, MemoryOpConstants, TlbRequestIO}
import xiangshan.backend.LSUOpType
import xiangshan.mem._
import xiangshan.backend.roq.RoqPtr


class LqPtr extends CircularQueuePtr(LqPtr.LoadQueueSize) { }

object LqPtr extends HasXSParameter {
  def apply(f: Bool, v: UInt): LqPtr = {
    val ptr = Wire(new LqPtr)
    ptr.flag := f
    ptr.value := v
    ptr
  }
}

trait HasLoadHelper { this: XSModule =>
  def rdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    val fpWen = uop.ctrl.fpWen
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lb  -> SignExt(rdata(7, 0),  XLEN),
      LSUOpType.lh  -> SignExt(rdata(15, 0), XLEN),
      LSUOpType.lw  -> Mux(fpWen, rdata, SignExt(rdata(31, 0), XLEN)),
      LSUOpType.ld  -> Mux(fpWen, rdata, SignExt(rdata(63, 0), XLEN)),
      LSUOpType.lbu -> ZeroExt(rdata(7, 0),  XLEN),
      LSUOpType.lhu -> ZeroExt(rdata(15, 0), XLEN),
      LSUOpType.lwu -> ZeroExt(rdata(31, 0), XLEN)
    ))
  }

  def fpRdataHelper(uop: MicroOp, rdata: UInt): UInt = {
    LookupTree(uop.ctrl.fuOpType, List(
      LSUOpType.lw -> recode(rdata(31, 0), S),
      LSUOpType.ld -> recode(rdata(63, 0), D)
    ))
  }
}

class LqEnqIO extends XSBundle {
  val canAccept = Output(Bool())
  val sqCanAccept = Input(Bool())
  val needAlloc = Vec(RenameWidth, Input(Bool()))
  val req = Vec(RenameWidth, Flipped(ValidIO(new MicroOp)))
  val resp = Vec(RenameWidth, Output(new LqPtr))
}

// Load Queue
class LoadQueue extends XSModule
  with HasDCacheParameters
  with HasCircularQueuePtrHelper
  with HasLoadHelper
{
  val io = IO(new Bundle() {
    val enq = new LqEnqIO
    val brqRedirect = Input(Valid(new Redirect))
    val loadIn = Vec(LoadPipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val storeIn = Vec(StorePipelineWidth, Flipped(Valid(new LsPipelineBundle)))
    val ldout = Vec(2, DecoupledIO(new ExuOutput)) // writeback int load
    val load_s1 = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val commits = Flipped(new RoqCommitIO)
    val rollback = Output(Valid(new Redirect)) // replay now starts from load instead of store
    val dcache = Flipped(ValidIO(new Refill))
    val uncache = new DCacheWordIO
    val roqDeqPtr = Input(new RoqPtr)
    val exceptionAddr = new ExceptionAddrIO
  })

  val uop = Reg(Vec(LoadQueueSize, new MicroOp))
  // val data = Reg(Vec(LoadQueueSize, new LsRoqEntry))
  val dataModule = Module(new LSQueueData(LoadQueueSize, LoadPipelineWidth))
  dataModule.io := DontCare
  val allocated = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // lq entry has been allocated
  val datavalid = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // data is valid
  val writebacked = RegInit(VecInit(List.fill(LoadQueueSize)(false.B))) // inst has been written back to CDB
  val commited = Reg(Vec(LoadQueueSize, Bool())) // inst has been committed by roq
  val miss = Reg(Vec(LoadQueueSize, Bool())) // load inst missed, waiting for miss queue to accept miss request
  // val listening = Reg(Vec(LoadQueueSize, Bool())) // waiting for refill result
  val pending = Reg(Vec(LoadQueueSize, Bool())) // mmio pending: inst is an mmio inst, it will not be executed until it reaches the end of roq

  val debug_mmio = Reg(Vec(LoadQueueSize, Bool())) // mmio: inst is an mmio inst
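  // Per-entry lifecycle (descriptive summary of the flags above, no new hardware):
  // an entry becomes allocated at dispatch; a cache hit sets datavalid and writebacked
  // in the same cycle; a cache miss sets miss until a refill matching its block address
  // arrives; an mmio load sets pending until the uncache access completes; commit
  // finally clears allocated and frees the entry.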
  val enqPtrExt = RegInit(VecInit((0 until RenameWidth).map(_.U.asTypeOf(new LqPtr))))
  val deqPtrExt = RegInit(0.U.asTypeOf(new LqPtr))
  val validCounter = RegInit(0.U(log2Ceil(LoadQueueSize + 1).W))
  val allowEnqueue = RegInit(true.B)

  val enqPtr = enqPtrExt(0).value
  val deqPtr = deqPtrExt.value
  val sameFlag = enqPtrExt(0).flag === deqPtrExt.flag
  val isEmpty = enqPtr === deqPtr && sameFlag
  val isFull = enqPtr === deqPtr && !sameFlag
  val allowIn = !isFull

  val loadCommit = (0 until CommitWidth).map(i => io.commits.valid(i) && !io.commits.isWalk && io.commits.info(i).commitType === CommitType.LOAD)
  val mcommitIdx = (0 until CommitWidth).map(i => io.commits.info(i).lqIdx.value)

  val deqMask = UIntToMask(deqPtr, LoadQueueSize)
  val enqMask = UIntToMask(enqPtr, LoadQueueSize)
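  // Worked example of the flag-based occupancy check (illustrative values, assuming
  // LoadQueueSize = 8): each pointer toggles its flag when it wraps, so
  // enqPtrExt(0) = (flag 1, value 2) with deqPtrExt = (flag 0, value 2) means the enqueue
  // pointer has lapped the dequeue pointer once: equal values with different flags give
  // isFull, equal values with equal flags give isEmpty. UIntToMask(p, n) sets the p
  // low-order bits, e.g. UIntToMask(2, 8) = 0b00000011.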
  /**
    * Enqueue at dispatch
    *
    * Currently, LoadQueue only allows enqueue when there are more than RenameWidth (i.e. EnqWidth)
    * empty entries.
    */
  io.enq.canAccept := allowEnqueue

  for (i <- 0 until RenameWidth) {
    val offset = if (i == 0) 0.U else PopCount(io.enq.needAlloc.take(i))
    val lqIdx = enqPtrExt(offset)
    val index = lqIdx.value
    when (io.enq.req(i).valid && io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid) {
      uop(index) := io.enq.req(i).bits
      allocated(index) := true.B
      datavalid(index) := false.B
      writebacked(index) := false.B
      commited(index) := false.B
      miss(index) := false.B
      // listening(index) := false.B
      pending(index) := false.B
    }
    io.enq.resp(i) := lqIdx
  }
  XSDebug(p"(ready, valid): ${io.enq.canAccept}, ${Binary(Cat(io.enq.req.map(_.valid)))}\n")

  /**
    * Writeback load from load units
    *
    * Most load instructions write back to the register file in the same cycle they leave
    * the load pipeline. However,
    *   (1) For an mmio instruction with exceptions, it writes back to ROB immediately.
    *   (2) For an mmio instruction without exceptions, it does not write back.
    *       The mmio instruction will be sent to lower level when it reaches ROB's head.
    *       After uncache response, it will write back through arbiter with loadUnit.
    *   (3) For cache misses, it is marked miss and sent to dcache later.
    *       After cache refills, it will write back through arbiter with loadUnit.
    */
  for (i <- 0 until LoadPipelineWidth) {
    dataModule.io.wb(i).wen := false.B
    when(io.loadIn(i).fire()) {
      when(io.loadIn(i).bits.miss) {
        XSInfo(io.loadIn(i).valid, "load miss write to lq idx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }.otherwise {
        XSInfo(io.loadIn(i).valid, "load hit write to cdb lqidx %d pc 0x%x vaddr %x paddr %x data %x mask %x forwardData %x forwardMask: %x mmio %x roll %x exc %x\n",
          io.loadIn(i).bits.uop.lqIdx.asUInt,
          io.loadIn(i).bits.uop.cf.pc,
          io.loadIn(i).bits.vaddr,
          io.loadIn(i).bits.paddr,
          io.loadIn(i).bits.data,
          io.loadIn(i).bits.mask,
          io.loadIn(i).bits.forwardData.asUInt,
          io.loadIn(i).bits.forwardMask.asUInt,
          io.loadIn(i).bits.mmio,
          io.loadIn(i).bits.rollback,
          io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
        )
      }
      val loadWbIndex = io.loadIn(i).bits.uop.lqIdx.value
      datavalid(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      writebacked(loadWbIndex) := !io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio

      val loadWbData = Wire(new LsqEntry)
      loadWbData.paddr := io.loadIn(i).bits.paddr
      loadWbData.vaddr := io.loadIn(i).bits.vaddr
      loadWbData.mask := io.loadIn(i).bits.mask
      loadWbData.data := io.loadIn(i).bits.data // for mmio / misc / debug
      loadWbData.fwdMask := io.loadIn(i).bits.forwardMask
      loadWbData.fwdData := io.loadIn(i).bits.forwardData
      loadWbData.exception := io.loadIn(i).bits.uop.cf.exceptionVec.asUInt
      dataModule.io.wbWrite(i, loadWbIndex, loadWbData)
      dataModule.io.wb(i).wen := true.B

      debug_mmio(loadWbIndex) := io.loadIn(i).bits.mmio

      val dcacheMissed = io.loadIn(i).bits.miss && !io.loadIn(i).bits.mmio
      miss(loadWbIndex) := dcacheMissed && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
      // listening(loadWbIndex) := dcacheMissed
      pending(loadWbIndex) := io.loadIn(i).bits.mmio && !io.loadIn(i).bits.uop.cf.exceptionVec.asUInt.orR
    }
  }
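  // Summary of the flag assignments above (descriptive only):
  //   !miss && !mmio           -> datavalid and writebacked (normal hit, entry is done)
  //   miss && !mmio && no excp -> miss (wait for a refill to set datavalid)
  //   mmio && no excp          -> pending (uncache access issued when the load reaches roq head)
  //   exceptions               -> neither miss nor pending; the exception is reported to
  //                               the roq with the load unit's own writeback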
  /**
    * Cache miss request
    *
    * (1) writeback: miss
    * (2) send to dcache: listening
    * (3) dcache response: datavalid
    * (4) writeback to ROB: writeback
    */
  // val inflightReqs = RegInit(VecInit(Seq.fill(cfg.nLoadMissEntries)(0.U.asTypeOf(new InflightBlockInfo))))
  // val inflightReqFull = inflightReqs.map(req => req.valid).reduce(_&&_)
  // val reqBlockIndex = PriorityEncoder(~VecInit(inflightReqs.map(req => req.valid)).asUInt)

  // val missRefillSelVec = VecInit(
  //   (0 until LoadQueueSize).map{ i =>
  //     val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(dataModule.io.rdata(i).paddr)).reduce(_||_)
  //     allocated(i) && miss(i) && !inflight
  //   })

  // val missRefillSel = getFirstOne(missRefillSelVec, deqMask)
  // val missRefillBlockAddr = get_block_addr(dataModule.io.rdata(missRefillSel).paddr)
  // io.dcache.req.valid := missRefillSelVec.asUInt.orR
  // io.dcache.req.bits.cmd := MemoryOpConstants.M_XRD
  // io.dcache.req.bits.addr := missRefillBlockAddr
  // io.dcache.req.bits.data := DontCare
  // io.dcache.req.bits.mask := DontCare

  // io.dcache.req.bits.meta.id := DontCare
  // io.dcache.req.bits.meta.vaddr := DontCare // dataModule.io.rdata(missRefillSel).vaddr
  // io.dcache.req.bits.meta.paddr := missRefillBlockAddr
  // io.dcache.req.bits.meta.uop := uop(missRefillSel)
  // io.dcache.req.bits.meta.mmio := false.B // dataModule.io.rdata(missRefillSel).mmio
  // io.dcache.req.bits.meta.tlb_miss := false.B
  // io.dcache.req.bits.meta.mask := DontCare
  // io.dcache.req.bits.meta.replay := false.B

  // assert(!(dataModule.io.rdata(missRefillSel).mmio && io.dcache.req.valid))

  // when(io.dcache.req.fire()) {
  //   miss(missRefillSel) := false.B
  //   listening(missRefillSel) := true.B

  //   // mark this block as inflight
  //   inflightReqs(reqBlockIndex).valid := true.B
  //   inflightReqs(reqBlockIndex).block_addr := missRefillBlockAddr
  //   assert(!inflightReqs(reqBlockIndex).valid)
  // }

  // when(io.dcache.resp.fire()) {
  //   val inflight = inflightReqs.map(req => req.valid && req.block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)).reduce(_||_)
  //   assert(inflight)
  //   for (i <- 0 until cfg.nLoadMissEntries) {
  //     when (inflightReqs(i).valid && inflightReqs(i).block_addr === get_block_addr(io.dcache.resp.bits.meta.paddr)) {
  //       inflightReqs(i).valid := false.B
  //     }
  //   }
  // }

  // when(io.dcache.req.fire()){
  //   XSDebug("miss req: pc:0x%x roqIdx:%d lqIdx:%d (p)addr:0x%x vaddr:0x%x\n",
  //     io.dcache.req.bits.meta.uop.cf.pc, io.dcache.req.bits.meta.uop.roqIdx.asUInt, io.dcache.req.bits.meta.uop.lqIdx.asUInt,
  //     io.dcache.req.bits.addr, io.dcache.req.bits.meta.vaddr
  //   )
  // }

  when(io.dcache.valid) {
    XSDebug("miss resp: paddr:0x%x data %x\n", io.dcache.bits.addr, io.dcache.bits.data)
  }

  // Refill 64 bit in a cycle
  // Refill data comes back from io.dcache.resp
  dataModule.io.refill.data := io.dcache.bits.data

  (0 until LoadQueueSize).map(i => {
    val blockMatch = get_block_addr(dataModule.io.rdata(i).paddr) === get_block_addr(io.dcache.bits.addr)
    dataModule.io.refill.wen(i) := false.B
    when(allocated(i) && miss(i) && blockMatch && io.dcache.valid) {
      dataModule.io.refill.wen(i) := true.B
      datavalid(i) := true.B
      miss(i) := false.B
    }
  })

  // writeback up to 2 missed load insts to CDB:
  // pick one refilled load from the even-indexed entries and one from the odd-indexed
  // entries, oldest first, and write them back to the CDB
  val loadWbSelVec = VecInit((0 until LoadQueueSize).map(i => {
    allocated(i) && datavalid(i) && !writebacked(i)
  })).asUInt() // use UInt instead of Vec to reduce generated verilog lines
  val loadWbSel = Wire(Vec(StorePipelineWidth, UInt(log2Up(LoadQueueSize).W)))
  val loadWbSelV = Wire(Vec(StorePipelineWidth, Bool()))
  val loadEvenSelVec = VecInit((0 until LoadQueueSize/2).map(i => {loadWbSelVec(2*i)}))
  val loadOddSelVec = VecInit((0 until LoadQueueSize/2).map(i => {loadWbSelVec(2*i+1)}))
  val evenDeqMask = VecInit((0 until LoadQueueSize/2).map(i => {deqMask(2*i)})).asUInt
  val oddDeqMask = VecInit((0 until LoadQueueSize/2).map(i => {deqMask(2*i+1)})).asUInt
  loadWbSel(0) := Cat(getFirstOne(loadEvenSelVec, evenDeqMask), 0.U(1.W))
  loadWbSelV(0) := loadEvenSelVec.asUInt.orR
  loadWbSel(1) := Cat(getFirstOne(loadOddSelVec, oddDeqMask), 1.U(1.W))
  loadWbSelV(1) := loadOddSelVec.asUInt.orR
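  // Worked selection example (illustrative, LoadQueueSize = 8, deqPtr = 3): if entries 2
  // and 5 are refilled but not yet written back, loadWbSelVec = 0b00100100. The even half
  // sees bit 1 set (entry 2) with evenDeqMask = 0b0011, so getFirstOne wraps and returns 1,
  // and Cat(1, 0) reconstructs index 2 for port 0. The odd half sees bit 2 set (entry 5)
  // with oddDeqMask = 0b0001, returns 2, and Cat(2, 1) reconstructs index 5 for port 1.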
  (0 until StorePipelineWidth).map(i => {
    // data select
    val rdata = dataModule.io.rdata(loadWbSel(i)).data
    val seluop = uop(loadWbSel(i))
    val func = seluop.ctrl.fuOpType
    val raddr = dataModule.io.rdata(loadWbSel(i)).paddr
    val rdataSel = LookupTree(raddr(2, 0), List(
      "b000".U -> rdata(63, 0),
      "b001".U -> rdata(63, 8),
      "b010".U -> rdata(63, 16),
      "b011".U -> rdata(63, 24),
      "b100".U -> rdata(63, 32),
      "b101".U -> rdata(63, 40),
      "b110".U -> rdata(63, 48),
      "b111".U -> rdata(63, 56)
    ))
    val rdataPartialLoad = rdataHelper(seluop, rdataSel)

    val validWb = loadWbSelVec(loadWbSel(i)) && loadWbSelV(i)

    // writeback missed int/fp load
    //
    // Int load writeback will finish (if not blocked) in one cycle
    io.ldout(i).bits.uop := seluop
    io.ldout(i).bits.uop.cf.exceptionVec := dataModule.io.rdata(loadWbSel(i)).exception.asBools
    io.ldout(i).bits.uop.lqIdx := loadWbSel(i).asTypeOf(new LqPtr)
    io.ldout(i).bits.data := rdataPartialLoad
    io.ldout(i).bits.redirectValid := false.B
    io.ldout(i).bits.redirect := DontCare
    io.ldout(i).bits.brUpdate := DontCare
    io.ldout(i).bits.debug.isMMIO := debug_mmio(loadWbSel(i))
    io.ldout(i).bits.fflags := DontCare
    io.ldout(i).valid := validWb

    when(io.ldout(i).fire()) {
      writebacked(loadWbSel(i)) := true.B
    }

    when(io.ldout(i).fire()) {
      XSInfo("int load miss write to cdb roqidx %d lqidx %d pc 0x%x paddr %x data %x mmio %x\n",
        io.ldout(i).bits.uop.roqIdx.asUInt,
        io.ldout(i).bits.uop.lqIdx.asUInt,
        io.ldout(i).bits.uop.cf.pc,
        dataModule.io.rdata(loadWbSel(i)).paddr,
        dataModule.io.rdata(loadWbSel(i)).data,
        debug_mmio(loadWbSel(i))
      )
    }

  })

  /**
    * Load commits
    *
    * When a load commits, mark it as !allocated and move deqPtrExt forward.
    */
  (0 until CommitWidth).map(i => {
    when(loadCommit(i)) {
      allocated(mcommitIdx(i)) := false.B
      XSDebug("load commit %d: idx %d %x\n", i.U, mcommitIdx(i), uop(mcommitIdx(i)).cf.pc)
    }
  })

  def getFirstOne(mask: Vec[Bool], startMask: UInt) = {
    val length = mask.length
    val highBits = (0 until length).map(i => mask(i) & ~startMask(i))
    val highBitsUint = Cat(highBits.reverse)
    PriorityEncoder(Mux(highBitsUint.orR(), highBitsUint, mask.asUInt))
  }

  def getOldestInTwo(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    assert(valid.length == 2)
    Mux(valid(0) && valid(1),
      Mux(isAfter(uop(0).roqIdx, uop(1).roqIdx), uop(1), uop(0)),
      Mux(valid(0) && !valid(1), uop(0), uop(1)))
  }

  def getAfterMask(valid: Seq[Bool], uop: Seq[MicroOp]) = {
    assert(valid.length == uop.length)
    val length = valid.length
    (0 until length).map(i => {
      (0 until length).map(j => {
        Mux(valid(i) && valid(j),
          isAfter(uop(i).roqIdx, uop(j).roqIdx),
          Mux(!valid(i), true.B, false.B))
      })
    })
  }

  /**
    * Memory violation detection
    *
    * When a store writes back, it searches the LoadQueue for younger load instructions
    * with the same physical address. They loaded wrong data and need re-execution.
    *
    * Cycle 0: Store Writeback
    *   Generate a match vector for the store address with rangeMask(stPtr, enqPtr).
    *   Besides, load instructions in LoadUnit_S1 and S2 are also checked.
    * Cycle 1: Redirect Generation
    *   There are three possible types of violations. Choose the oldest load.
    *   Set io.rollback according to the detected violation.
    */
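  // Illustrative range-mask example for detectRollback below (LoadQueueSize = 8): for a
  // store with lqIdx.value = 6 while enqPtr = 2 with a different flag,
  // lqIdxMask = 0b00111111 and enqMask = 0b00000011, so xorMask = 0b00111100. Since the
  // flags differ, toEnqPtrMask = ~xorMask = 0b11000011, selecting entries 6, 7, 0, 1,
  // exactly the loads allocated after the store.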
  io.load_s1 := DontCare
  def detectRollback(i: Int) = {
    val startIndex = io.storeIn(i).bits.uop.lqIdx.value
    val lqIdxMask = UIntToMask(startIndex, LoadQueueSize)
    val xorMask = lqIdxMask ^ enqMask
    val sameFlag = io.storeIn(i).bits.uop.lqIdx.flag === enqPtrExt(0).flag
    val toEnqPtrMask = Mux(sameFlag, xorMask, ~xorMask)

    // check if a load already in lq needs to be rolled back
    val lqViolationVec = RegNext(VecInit((0 until LoadQueueSize).map(j => {
      val addrMatch = allocated(j) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === dataModule.io.rdata(j).paddr(PAddrBits - 1, 3)
      val entryNeedCheck = toEnqPtrMask(j) && addrMatch && (datavalid(j) || miss(j))
      // TODO: update refilled data
      val violationVec = (0 until 8).map(k => dataModule.io.rdata(j).mask(k) && io.storeIn(i).bits.mask(k))
      Cat(violationVec).orR() && entryNeedCheck
    })))
    val lqViolation = lqViolationVec.asUInt().orR()
    val lqViolationIndex = getFirstOne(lqViolationVec, RegNext(lqIdxMask))
    val lqViolationUop = uop(lqViolationIndex)
    // lqViolationUop.lqIdx.flag := deqMask(lqViolationIndex) ^ deqPtrExt.flag
    // lqViolationUop.lqIdx.value := lqViolationIndex
    XSDebug(lqViolation, p"${Binary(Cat(lqViolationVec))}, $startIndex, $lqViolationIndex\n")

    // when a load and a store write back to roq in the same cycle, check if rollback is needed
    val wbViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.loadIn(j).valid &&
        isAfter(io.loadIn(j).bits.uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.loadIn(j).bits.paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.loadIn(j).bits.mask).orR
    })))
    val wbViolation = wbViolationVec.asUInt().orR()
    val wbViolationUop = getOldestInTwo(wbViolationVec, RegNext(VecInit(io.loadIn.map(_.bits.uop))))
    XSDebug(wbViolation, p"${Binary(Cat(wbViolationVec))}, $wbViolationUop\n")

    // check if rollback is needed for a load still in LoadUnit_S1
    val l1ViolationVec = RegNext(VecInit((0 until LoadPipelineWidth).map(j => {
      io.load_s1(j).valid && // L1 valid
        isAfter(io.load_s1(j).uop.roqIdx, io.storeIn(i).bits.uop.roqIdx) &&
        io.storeIn(i).bits.paddr(PAddrBits - 1, 3) === io.load_s1(j).paddr(PAddrBits - 1, 3) &&
        (io.storeIn(i).bits.mask & io.load_s1(j).mask).orR
    })))
    val l1Violation = l1ViolationVec.asUInt().orR()
    val l1ViolationUop = getOldestInTwo(l1ViolationVec, RegNext(VecInit(io.load_s1.map(_.uop))))
    XSDebug(l1Violation, p"${Binary(Cat(l1ViolationVec))}, $l1ViolationUop\n")

    val rollbackValidVec = Seq(lqViolation, wbViolation, l1Violation)
    val rollbackUopVec = Seq(lqViolationUop, wbViolationUop, l1ViolationUop)

    val mask = getAfterMask(rollbackValidVec, rollbackUopVec)
    val oneAfterZero = mask(1)(0)
    val rollbackUop = Mux(oneAfterZero && mask(2)(0),
      rollbackUopVec(0),
      Mux(!oneAfterZero && mask(2)(1), rollbackUopVec(1), rollbackUopVec(2)))

    XSDebug(
      l1Violation,
      "need rollback (l1 load) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, l1ViolationUop.roqIdx.asUInt
    )
    XSDebug(
      lqViolation,
      "need rollback (ld wb before store) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, lqViolationUop.roqIdx.asUInt
    )
    XSDebug(
      wbViolation,
      "need rollback (ld/st wb together) pc %x roqidx %d target %x\n",
      io.storeIn(i).bits.uop.cf.pc, io.storeIn(i).bits.uop.roqIdx.asUInt, wbViolationUop.roqIdx.asUInt
    )

    (RegNext(io.storeIn(i).valid) && Cat(rollbackValidVec).orR, rollbackUop)
  }
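  // Oldest-violation selection walkthrough (descriptive): getAfterMask yields mask(i)(j) =
  // "source i is younger than source j, or source i is invalid", so an invalid source
  // always loses the age comparison. For example, if only the writeback check (source 1)
  // fired, mask(1)(0) is false (source 1 is valid, source 0 is not) and mask(2)(1) is true
  // (source 2 is invalid), so the Mux chain in detectRollback selects rollbackUopVec(1).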
  // rollback check
  val rollback = Wire(Vec(StorePipelineWidth, Valid(new MicroOp)))
  for (i <- 0 until StorePipelineWidth) {
    val detectedRollback = detectRollback(i)
    rollback(i).valid := detectedRollback._1
    rollback(i).bits := detectedRollback._2
  }

  def rollbackSel(a: Valid[MicroOp], b: Valid[MicroOp]): ValidIO[MicroOp] = {
    Mux(
      a.valid,
      Mux(
        b.valid,
        Mux(isAfter(a.bits.roqIdx, b.bits.roqIdx), b, a), // a,b both valid, sel oldest
        a // sel a
      ),
      b // sel b
    )
  }

  val rollbackSelected = ParallelOperation(rollback, rollbackSel)
  val lastCycleRedirect = RegNext(io.brqRedirect)

  // Note that we use roqIdx - 1.U to flush the load instruction itself.
  // Thus, if last cycle's roqIdx equals this cycle's roqIdx, it still triggers the redirect.
  io.rollback.valid := rollbackSelected.valid &&
    (!lastCycleRedirect.valid || !isAfter(rollbackSelected.bits.roqIdx, lastCycleRedirect.bits.roqIdx)) &&
    !(lastCycleRedirect.valid && lastCycleRedirect.bits.isUnconditional())

  io.rollback.bits.roqIdx := rollbackSelected.bits.roqIdx
  io.rollback.bits.level := RedirectLevel.flush
  io.rollback.bits.interrupt := DontCare
  io.rollback.bits.pc := DontCare
  io.rollback.bits.target := rollbackSelected.bits.cf.pc
  io.rollback.bits.brTag := rollbackSelected.bits.brTag

  when(io.rollback.valid) {
    XSDebug("Mem rollback: pc %x roqidx %d\n", io.rollback.bits.pc, io.rollback.bits.roqIdx.asUInt)
  }

  /**
    * Memory mapped IO / other uncached operations
    */
  io.uncache.req.valid := pending(deqPtr) && allocated(deqPtr) &&
    io.commits.info(0).commitType === CommitType.LOAD &&
    io.roqDeqPtr === uop(deqPtr).roqIdx &&
    !io.commits.isWalk

  io.uncache.req.bits.cmd := MemoryOpConstants.M_XRD
  io.uncache.req.bits.addr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.data := dataModule.io.rdata(deqPtr).data
  io.uncache.req.bits.mask := dataModule.io.rdata(deqPtr).mask

  io.uncache.req.bits.meta.id := DontCare
  io.uncache.req.bits.meta.vaddr := DontCare
  io.uncache.req.bits.meta.paddr := dataModule.io.rdata(deqPtr).paddr
  io.uncache.req.bits.meta.uop := uop(deqPtr)
  io.uncache.req.bits.meta.mmio := true.B
  io.uncache.req.bits.meta.tlb_miss := false.B
  io.uncache.req.bits.meta.mask := dataModule.io.rdata(deqPtr).mask
  io.uncache.req.bits.meta.replay := false.B

  io.uncache.resp.ready := true.B

  when (io.uncache.req.fire()) {
    pending(deqPtr) := false.B

    XSDebug("uncache req: pc %x addr %x data %x op %x mask %x\n",
      uop(deqPtr).cf.pc,
      io.uncache.req.bits.addr,
      io.uncache.req.bits.data,
      io.uncache.req.bits.cmd,
      io.uncache.req.bits.mask
    )
  }

  dataModule.io.uncache.wen := false.B
  when(io.uncache.resp.fire()){
    datavalid(deqPtr) := true.B
    dataModule.io.uncacheWrite(deqPtr, io.uncache.resp.bits.data(XLEN-1, 0))
    dataModule.io.uncache.wen := true.B

    XSDebug("uncache resp: data %x\n", io.uncache.resp.bits.data)
  }
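  // Uncache access walkthrough (descriptive): a pending mmio load waits until it is the
  // oldest instruction (io.roqDeqPtr matches its roqIdx and a LOAD commit is at the head),
  // then issues a single M_XRD uncache request. The response data is written into the data
  // module and datavalid is set, so the ldout arbitration above writes it back like a
  // refilled load.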
  // Read vaddr for mem exception
  io.exceptionAddr.vaddr := dataModule.io.rdata(io.exceptionAddr.lsIdx.lqIdx.value).vaddr

  // misprediction recovery / exception redirect
  // invalidate lq entries using roqIdx
  val needCancel = Wire(Vec(LoadQueueSize, Bool()))
  for (i <- 0 until LoadQueueSize) {
    needCancel(i) := uop(i).roqIdx.needFlush(io.brqRedirect) && allocated(i) && !commited(i)
    when (needCancel(i)) {
      allocated(i) := false.B
    }
  }

  /**
    * update pointers
    */
  val lastCycleCancelCount = PopCount(RegNext(needCancel))
  // when io.brqRedirect.valid, we don't allow enqueue even though io.enq.req may fire.
  val enqNumber = Mux(io.enq.canAccept && io.enq.sqCanAccept && !io.brqRedirect.valid, PopCount(io.enq.req.map(_.valid)), 0.U)
  when (lastCycleRedirect.valid) {
    // we recover the pointers in the next cycle after redirect
    enqPtrExt := VecInit(enqPtrExt.map(_ - lastCycleCancelCount))
  }.otherwise {
    enqPtrExt := VecInit(enqPtrExt.map(_ + enqNumber))
  }

  val commitCount = PopCount(loadCommit)
  deqPtrExt := deqPtrExt + commitCount

  val lastLastCycleRedirect = RegNext(lastCycleRedirect.valid)
  val trueValidCounter = distanceBetween(enqPtrExt(0), deqPtrExt)
  validCounter := Mux(lastLastCycleRedirect,
    trueValidCounter,
    validCounter + enqNumber - commitCount
  )

  allowEnqueue := Mux(io.brqRedirect.valid,
    false.B,
    Mux(lastLastCycleRedirect,
      trueValidCounter <= (LoadQueueSize - RenameWidth).U,
      validCounter + enqNumber <= (LoadQueueSize - RenameWidth).U
    )
  )

  // debug info
  XSDebug("enqPtrExt %d:%d deqPtrExt %d:%d\n", enqPtrExt(0).flag, enqPtr, deqPtrExt.flag, deqPtr)

  def PrintFlag(flag: Bool, name: String): Unit = {
    when(flag) {
      XSDebug(false, true.B, name)
    }.otherwise {
      XSDebug(false, true.B, " ")
    }
  }

  for (i <- 0 until LoadQueueSize) {
    if (i % 4 == 0) XSDebug("")
    XSDebug(false, true.B, "%x [%x] ", uop(i).cf.pc, dataModule.io.rdata(i).paddr)
    PrintFlag(allocated(i), "a")
    PrintFlag(allocated(i) && datavalid(i), "v")
    PrintFlag(allocated(i) && writebacked(i), "w")
    PrintFlag(allocated(i) && commited(i), "c")
    PrintFlag(allocated(i) && miss(i), "m")
    // PrintFlag(allocated(i) && listening(i), "l")
    PrintFlag(allocated(i) && pending(i), "p")
    XSDebug(false, true.B, " ")
    if (i % 4 == 3 || i == LoadQueueSize - 1) XSDebug(false, true.B, "\n")
  }

}
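// A minimal software sketch of the age-ordered selection used throughout this file
// (illustrative only; AgeOrderedSelect is a hypothetical helper for unit tests, not part
// of the elaborated design). It mirrors getFirstOne above: prefer the first candidate at
// or above the start position (the older entries of the circular queue), otherwise wrap
// around to the lowest-indexed candidate.
object AgeOrderedSelect {
  def firstOneFrom(mask: Seq[Boolean], start: Int): Option[Int] = {
    val indices = mask.indices
    // candidates at or above `start` come first, then the wrapped-around ones
    val wrapped = indices.filter(_ >= start) ++ indices.filter(_ < start)
    wrapped.find(mask(_))
  }
}
// Example: firstOneFrom(Seq(false, true, false, true), start = 2) returns Some(3);
// with start = 0 it would return Some(1), matching PriorityEncoder's low-first order.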