/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.rename

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utility._
import utils._
import xiangshan._
import xiangshan.backend.Bundles.{DecodedInst, DynInst}
import xiangshan.backend.decode.{FusionDecodeInfo, ImmUnion, Imm_I, Imm_LUI_LOAD, Imm_U}
import xiangshan.backend.fu.FuType
import xiangshan.backend.rename.freelist._
import xiangshan.backend.rob.{RobEnqIO, RobPtr}
import xiangshan.mem.mdp._
import xiangshan.ExceptionNO._

class Rename(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper with HasPerfEvents {

  // params alias
  private val numRegSrc = backendParams.numRegSrc
  private val numVecRegSrc = backendParams.numVecRegSrc
  private val numVecRatPorts = numVecRegSrc

  println(s"[Rename] numRegSrc: $numRegSrc")

  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val rabCommits = Input(new RabCommitIO)
    // from decode
    val in = Vec(RenameWidth, Flipped(DecoupledIO(new DecodedInst)))
    val fusionInfo = Vec(DecodeWidth - 1, Flipped(new FusionDecodeInfo))
    // ssit read result
    val ssit = Flipped(Vec(RenameWidth, Output(new SSITEntry)))
    // waittable read result
    val waittable = Flipped(Vec(RenameWidth, Output(Bool())))
    // to rename table
    val intReadPorts = Vec(RenameWidth, Vec(2, Input(UInt(PhyRegIdxWidth.W))))
    val fpReadPorts = Vec(RenameWidth, Vec(3, Input(UInt(PhyRegIdxWidth.W))))
    val vecReadPorts = Vec(RenameWidth, Vec(numVecRatPorts, Input(UInt(PhyRegIdxWidth.W))))
    val intRenamePorts = Vec(RenameWidth, Output(new RatWritePort))
    val fpRenamePorts = Vec(RenameWidth, Output(new RatWritePort))
    val vecRenamePorts = Vec(RenameWidth, Output(new RatWritePort))
    // from rename table
    val int_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val fp_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val vec_old_pdest = Vec(RabCommitWidth, Input(UInt(PhyRegIdxWidth.W)))
    val int_need_free = Vec(RabCommitWidth, Input(Bool()))
    // to dispatch1
    val out = Vec(RenameWidth, DecoupledIO(new DynInst))
    // for snapshots
    val snpt = Input(new SnapshotPort)
    val snptLastEnq = Flipped(ValidIO(new RobPtr))
    val snptIsFull = Input(Bool())
    // debug arch ports
    val debug_int_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_vconfig_rat = if (backendParams.debugEn) Some(Input(UInt(PhyRegIdxWidth.W))) else None
    val debug_fp_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
    val debug_vec_rat = if (backendParams.debugEn) Some(Vec(32, Input(UInt(PhyRegIdxWidth.W)))) else None
    // perf only
    val stallReason = new Bundle {
      val in = Flipped(new StallReasonIO(RenameWidth))
      val out = new StallReasonIO(RenameWidth)
    }
  })

  // io alias
  private val dispatchCanAcc = io.out.head.ready
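  // Note: only out(0).ready is checked; the dispatch stage is expected to back-pressure
  // all RenameWidth output ports together, so a single ready bit is sufficient here.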

  val compressUnit = Module(new CompressUnit())
  // create free list and rat
  val intFreeList = Module(new MEFreeList(IntPhyRegs))
  val fpFreeList = Module(new StdFreeList(FpPhyRegs - FpLogicRegs, FpLogicRegs, Reg_F))
  val vecFreeList = Module(new StdFreeList(VfPhyRegs - VecLogicRegs, VecLogicRegs, Reg_V))

  intFreeList.io.commit <> io.rabCommits
  intFreeList.io.debug_rat.foreach(_ <> io.debug_int_rat.get)
  fpFreeList.io.commit <> io.rabCommits
  fpFreeList.io.debug_rat.foreach(_ <> io.debug_fp_rat.get)
  vecFreeList.io.commit <> io.rabCommits
  vecFreeList.io.debug_rat.foreach(_ <> io.debug_vec_rat.get)

  // decide if given instruction needs allocating a new physical register (CfCtrl: from decode; RobCommitInfo: from rob)
  def needDestReg[T <: DecodedInst](reg_t: RegType, x: T): Bool = reg_t match {
    case Reg_I => x.rfWen && x.ldest =/= 0.U
    case Reg_F => x.fpWen
    case Reg_V => x.vecWen
  }
  def needDestRegCommit[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
    reg_t match {
      case Reg_I => x.rfWen
      case Reg_F => x.fpWen
      case Reg_V => x.vecWen
    }
  }
  def needDestRegWalk[T <: RabCommitInfo](reg_t: RegType, x: T): Bool = {
    reg_t match {
      case Reg_I => x.rfWen && x.ldest =/= 0.U
      case Reg_F => x.fpWen
      case Reg_V => x.vecWen
    }
  }

  // connect [redirect + walk] ports for fp & vec & int free list
  Seq(fpFreeList, vecFreeList, intFreeList).foreach { case fl =>
    fl.io.redirect := io.redirect.valid
    fl.io.walk := io.rabCommits.isWalk
  }
  // only when all free lists and dispatch1 have enough space can we do allocation
  // when isWalk, freelist can definitely allocate
  intFreeList.io.doAllocate := fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  fpFreeList.io.doAllocate := intFreeList.io.canAllocate && vecFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk
  vecFreeList.io.doAllocate := intFreeList.io.canAllocate && fpFreeList.io.canAllocate && dispatchCanAcc || io.rabCommits.isWalk

  // dispatch1 ready ++ floating-point free list ready ++ int free list ready ++ vec free list ready ++ not walk
  val canOut = dispatchCanAcc && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && !io.rabCommits.isWalk

  compressUnit.io.in.zip(io.in).foreach{ case(sink, source) =>
    sink.valid := source.valid
    sink.bits := source.bits
  }
  val needRobFlags = compressUnit.io.out.needRobFlags
  val instrSizesVec = compressUnit.io.out.instrSizes
  val compressMasksVec = compressUnit.io.out.masks

  // speculatively assign the instruction with a robIdx
  val validCount = PopCount(io.in.zip(needRobFlags).map{ case(in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag}) // number of instructions waiting to enter rob (from decode)
  val robIdxHead = RegInit(0.U.asTypeOf(new RobPtr))
  val lastCycleMisprediction = GatedValidRegNext(io.redirect.valid && !io.redirect.bits.flushItself())
  val robIdxHeadNext = Mux(io.redirect.valid, io.redirect.bits.robIdx, // redirect: move ptr to given rob index
    Mux(lastCycleMisprediction, robIdxHead + 1.U, // mis-predict: not flush robIdx itself
      Mux(canOut, robIdxHead + validCount, // instructions successfully entered next stage: increase robIdx
        /* default */ robIdxHead))) // no instructions passed by this cycle: stick to old value
  robIdxHead := robIdxHeadNext
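  // The ROB head only advances past slots that close a compressed ROB entry
  // (valid && lastUop && needRobFlag); e.g. six incoming uops folded into two
  // entries give validCount = 2, so robIdxHead steps by 2 when canOut is true.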

  /**
   * Rename: allocate free physical register and update rename table
   */
  val uops = Wire(Vec(RenameWidth, new DynInst))
  uops.foreach( uop => {
    uop.srcState := DontCare
    uop.debugInfo := DontCare
    uop.lqIdx := DontCare
    uop.sqIdx := DontCare
    uop.waitForRobIdx := DontCare
    uop.singleStep := DontCare
    uop.snapshot := DontCare
    uop.srcLoadDependency := DontCare
    uop.numLsElem := DontCare
    uop.hasException := DontCare
  })

  val needVecDest = Wire(Vec(RenameWidth, Bool()))
  val needFpDest = Wire(Vec(RenameWidth, Bool()))
  val needIntDest = Wire(Vec(RenameWidth, Bool()))
  val hasValid = Cat(io.in.map(_.valid)).orR
  private val inHeadValid = io.in.head.valid

  val isMove = Wire(Vec(RenameWidth, Bool()))
  isMove zip io.in.map(_.bits) foreach {
    case (move, in) => move := Mux(in.exceptionVec.asUInt.orR, false.B, in.isMove)
  }

  val walkNeedIntDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedFpDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkNeedVecDest = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))
  val walkIsMove = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))

  val intSpecWen = Wire(Vec(RenameWidth, Bool()))
  val fpSpecWen = Wire(Vec(RenameWidth, Bool()))
  val vecSpecWen = Wire(Vec(RenameWidth, Bool()))

  val walkIntSpecWen = WireDefault(VecInit(Seq.fill(RenameWidth)(false.B)))

  val walkPdest = Wire(Vec(RenameWidth, UInt(PhyRegIdxWidth.W)))

  // uop calculation
  for (i <- 0 until RenameWidth) {
    (uops(i): Data).waiveAll :<= (io.in(i).bits: Data).waiveAll

    // update cf according to ssit result
    uops(i).storeSetHit := io.ssit(i).valid
    uops(i).loadWaitStrict := io.ssit(i).strict && io.ssit(i).valid
    uops(i).ssid := io.ssit(i).ssid

    // update cf according to waittable result
    uops(i).loadWaitBit := io.waittable(i)

    uops(i).replayInst := false.B // set by IQ or MemQ
    // alloc a new phy reg
    needVecDest(i) := io.in(i).valid && needDestReg(Reg_V, io.in(i).bits)
    needFpDest(i) := io.in(i).valid && needDestReg(Reg_F, io.in(i).bits)
    needIntDest(i) := io.in(i).valid && needDestReg(Reg_I, io.in(i).bits)
    if (i < RabCommitWidth) {
      walkNeedIntDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_I, io.rabCommits.info(i))
      walkNeedFpDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_F, io.rabCommits.info(i))
      walkNeedVecDest(i) := io.rabCommits.walkValid(i) && needDestRegWalk(Reg_V, io.rabCommits.info(i))
      walkIsMove(i) := io.rabCommits.info(i).isMove
    }
    fpFreeList.io.allocateReq(i) := needFpDest(i)
    fpFreeList.io.walkReq(i) := walkNeedFpDest(i)
    vecFreeList.io.allocateReq(i) := needVecDest(i)
    vecFreeList.io.walkReq(i) := walkNeedVecDest(i)
    intFreeList.io.allocateReq(i) := needIntDest(i) && !isMove(i)
    intFreeList.io.walkReq(i) := walkNeedIntDest(i) && !walkIsMove(i)

    // no valid instruction from decode stage || all resources (dispatch1 + all free lists) ready
    io.in(i).ready := !hasValid || canOut

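    // Per-slot robIdx: offset robIdxHead by the number of older slots that closed a ROB
    // entry, so uops belonging to the same compressed entry share the same robIdx.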
    uops(i).robIdx := robIdxHead + PopCount(io.in.zip(needRobFlags).take(i).map{ case(in, needRobFlag) => in.valid && in.bits.lastUop && needRobFlag})
    uops(i).instrSize := instrSizesVec(i)
    when(isMove(i)) {
      uops(i).numUops := 0.U
      uops(i).numWB := 0.U
    }
    if (i > 0) {
      when(!needRobFlags(i - 1)) {
        uops(i).firstUop := false.B
        uops(i).ftqPtr := uops(i - 1).ftqPtr
        uops(i).ftqOffset := uops(i - 1).ftqOffset
        uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
        uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
      }
    }
    when(!needRobFlags(i)) {
      uops(i).lastUop := false.B
      uops(i).numUops := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
      uops(i).numWB := instrSizesVec(i) - PopCount(compressMasksVec(i) & Cat(isMove.reverse))
    }
    uops(i).wfflags := (compressMasksVec(i) & Cat(io.in.map(_.bits.wfflags).reverse)).orR
    uops(i).dirtyFs := (compressMasksVec(i) & Cat(io.in.map(_.bits.fpWen).reverse)).orR
    // vector instructions' uopSplitType cannot be UopSplitType.SCA_SIM
    uops(i).dirtyVs := (compressMasksVec(i) & Cat(io.in.map(_.bits.uopSplitType =/= UopSplitType.SCA_SIM).reverse)).orR

    uops(i).psrc(0) := Mux1H(uops(i).srcType(0), Seq(io.intReadPorts(i)(0), io.fpReadPorts(i)(0), io.vecReadPorts(i)(0)))
    uops(i).psrc(1) := Mux1H(uops(i).srcType(1), Seq(io.intReadPorts(i)(1), io.fpReadPorts(i)(1), io.vecReadPorts(i)(1)))
    uops(i).psrc(2) := Mux1H(uops(i).srcType(2)(2, 1), Seq(io.fpReadPorts(i)(2), io.vecReadPorts(i)(2)))
    uops(i).psrc(3) := io.vecReadPorts(i)(3)
    uops(i).psrc(4) := io.vecReadPorts(i)(4) // Todo: vl read port

    // int psrc2 should be bypassed from next instruction if it is fused
    if (i < RenameWidth - 1) {
      when (io.fusionInfo(i).rs2FromRs2 || io.fusionInfo(i).rs2FromRs1) {
        uops(i).psrc(1) := Mux(io.fusionInfo(i).rs2FromRs2, io.intReadPorts(i + 1)(1), io.intReadPorts(i + 1)(0))
      }.elsewhen(io.fusionInfo(i).rs2FromZero) {
        uops(i).psrc(1) := 0.U
      }
    }
    uops(i).eliminatedMove := isMove(i)

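    // At most one of needIntDest/needFpDest/needVecDest should be set for a given
    // instruction, so the MuxCase below just selects the matching free list's newly
    // allocated register; instructions without a destination keep pdest = 0.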
    // update pdest
    uops(i).pdest := MuxCase(0.U, Seq(
      needIntDest(i) -> intFreeList.io.allocatePhyReg(i),
      needFpDest(i) -> fpFreeList.io.allocatePhyReg(i),
      needVecDest(i) -> vecFreeList.io.allocatePhyReg(i),
    ))

    // Assign performance counters
    uops(i).debugInfo.renameTime := GTimer()

    io.out(i).valid := io.in(i).valid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && vecFreeList.io.canAllocate && !io.rabCommits.isWalk
    io.out(i).bits := uops(i)
    // Todo: move this dirty code into the decode stage
    // dirty code for fence. The lsrc is passed by imm.
    when (io.out(i).bits.fuType === FuType.fence.U) {
      io.out(i).bits.imm := Cat(io.in(i).bits.lsrc(1), io.in(i).bits.lsrc(0))
    }

    // dirty code for SoftPrefetch (prefetch.r/prefetch.w)
//    when (io.in(i).bits.isSoftPrefetch) {
//      io.out(i).bits.fuType := FuType.ldu.U
//      io.out(i).bits.fuOpType := Mux(io.in(i).bits.lsrc(1) === 1.U, LSUOpType.prefetch_r, LSUOpType.prefetch_w)
//      io.out(i).bits.selImm := SelImm.IMM_S
//      io.out(i).bits.imm := Cat(io.in(i).bits.imm(io.in(i).bits.imm.getWidth - 1, 5), 0.U(5.W))
//    }

    // dirty code for lui+addi(w) fusion
    if (i < RenameWidth - 1) {
      val fused_lui32 = io.in(i).bits.selImm === SelImm.IMM_LUI32 && io.in(i).bits.fuType === FuType.alu.U
      when (fused_lui32) {
        val lui_imm = io.in(i).bits.imm(19, 0)
        val add_imm = io.in(i + 1).bits.imm(11, 0)
        require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + add_imm.getWidth)
        io.out(i).bits.imm := Cat(lui_imm, add_imm)
      }
    }

    // write speculative rename table
    // we update rat later inside commit code
    intSpecWen(i) := needIntDest(i) && intFreeList.io.canAllocate && intFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    fpSpecWen(i) := needFpDest(i) && fpFreeList.io.canAllocate && fpFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid
    vecSpecWen(i) := needVecDest(i) && vecFreeList.io.canAllocate && vecFreeList.io.doAllocate && !io.rabCommits.isWalk && !io.redirect.valid


    if (i < RabCommitWidth) {
      walkIntSpecWen(i) := walkNeedIntDest(i) && !io.redirect.valid
      walkPdest(i) := io.rabCommits.info(i).pdest
    } else {
      walkPdest(i) := io.out(i).bits.pdest
    }
  }

  /**
   * How to set psrc:
   * - bypass the pdest to psrc if previous instructions write to the same ldest as lsrc
   * - default: psrc from RAT
   * How to set pdest:
   * - Mux(isMove, psrc, pdest_from_freelist).
   *
   * The critical path of rename lies here:
   * When move elimination is enabled, we need to update the rat with psrc.
   * However, psrc may come from a previous instruction's pdest, which in turn comes from the freelist.
   *
   * If we expand this logic for pdest(N):
   * pdest(N) = Mux(isMove(N), psrc(N), freelist_out(N))
   *          = Mux(isMove(N), Mux(bypass(N, N - 1), pdest(N - 1),
   *                           Mux(bypass(N, N - 2), pdest(N - 2),
   *                           ...
   *                           Mux(bypass(N, 0),     pdest(0),
   *                                                 rat_out(N))...)),
   *            freelist_out(N))
   */
  // a simple functional model for now
  io.out(0).bits.pdest := Mux(isMove(0), uops(0).psrc.head, uops(0).pdest)

  // psrc(n) + pdest(1)
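  // bypassCond(j)(i - 1) is an i-bit vector for slot i; entry j covers the j-th source and
  // the last entry (pdestLoc) covers the destination. Bit k is set when older slot k writes,
  // with the matching register type, the logical register that operand j of slot i refers to,
  // in which case slot k's newly allocated pdest is forwarded instead of the RAT read value
  // (see the foldLeft chains below).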
  val bypassCond: Vec[MixedVec[UInt]] = Wire(Vec(numRegSrc + 1, MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W)))))
  require(io.in(0).bits.srcType.size == io.in(0).bits.numSrc)
  private val pdestLoc = io.in.head.bits.srcType.size // 2 vector src: v0, vl&vtype
  println(s"[Rename] idx of pdest in bypassCond $pdestLoc")
  for (i <- 1 until RenameWidth) {
    val vecCond = io.in(i).bits.srcType.map(_ === SrcType.vp) :+ needVecDest(i)
    val fpCond = io.in(i).bits.srcType.map(_ === SrcType.fp) :+ needFpDest(i)
    val intCond = io.in(i).bits.srcType.map(_ === SrcType.xp) :+ needIntDest(i)
    val target = io.in(i).bits.lsrc :+ io.in(i).bits.ldest
    for (((((cond1, cond2), cond3), t), j) <- vecCond.zip(fpCond).zip(intCond).zip(target).zipWithIndex) {
      val destToSrc = io.in.take(i).zipWithIndex.map { case (in, j) =>
        val indexMatch = in.bits.ldest === t
        val writeMatch = cond3 && needIntDest(j) || cond2 && needFpDest(j) || cond1 && needVecDest(j)
        indexMatch && writeMatch
      }
      bypassCond(j)(i - 1) := VecInit(destToSrc).asUInt
    }
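    // The foldLeft starts from the RAT value and applies older slots in order 0 .. i-1, so
    // if several older slots write the same logical register, the nearest one (slot i - 1,
    // applied last) wins the bypass.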
    io.out(i).bits.psrc(0) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(0)(i-1).asBools).foldLeft(uops(i).psrc(0)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(1) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(1)(i-1).asBools).foldLeft(uops(i).psrc(1)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(2) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(2)(i-1).asBools).foldLeft(uops(i).psrc(2)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(3) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(3)(i-1).asBools).foldLeft(uops(i).psrc(3)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(4) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(4)(i-1).asBools).foldLeft(uops(i).psrc(4)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.pdest := Mux(isMove(i), io.out(i).bits.psrc(0), uops(i).pdest)

    // Todo: better implementation for fields reuse
    // For fused-lui-load, load.src(0) is replaced by the imm.
    val last_is_lui = io.in(i - 1).bits.selImm === SelImm.IMM_U && io.in(i - 1).bits.srcType(0) =/= SrcType.pc
    val this_is_load = io.in(i).bits.fuType === FuType.ldu.U
    val lui_to_load = io.in(i - 1).valid && io.in(i - 1).bits.ldest === io.in(i).bits.lsrc(0)
    val fused_lui_load = last_is_lui && this_is_load && lui_to_load
    when (fused_lui_load) {
      // The first LOAD operand (base address) is replaced by LUI-imm and stored in imm
      val lui_imm = io.in(i - 1).bits.imm(ImmUnion.U.len - 1, 0)
      val ld_imm = io.in(i).bits.imm(ImmUnion.I.len - 1, 0)
      require(io.out(i).bits.imm.getWidth >= lui_imm.getWidth + ld_imm.getWidth)
      io.out(i).bits.srcType(0) := SrcType.imm
      io.out(i).bits.imm := Cat(lui_imm, ld_imm)
    }

  }

  val genSnapshot = Cat(io.out.map(out => out.fire && out.bits.snapshot)).orR
  val lastCycleCreateSnpt = RegInit(false.B)
  lastCycleCreateSnpt := genSnapshot && !io.snptIsFull
  val sameSnptDistance = (RobCommitWidth * 4).U
  // notInSameSnpt: 1. robIdxHead - snptLastEnq >= sameSnptDistance; 2. no snapshot enqueued
  val notInSameSnpt = GatedValidRegNext(distanceBetween(robIdxHeadNext, io.snptLastEnq.bits) >= sameSnptDistance || !io.snptLastEnq.valid)
  val allowSnpt = if (EnableRenameSnapshot) notInSameSnpt && !lastCycleCreateSnpt && io.in.head.bits.firstUop else false.B
  io.out.zip(io.in).foreach{ case (out, in) => out.bits.snapshot := allowSnpt && (!in.bits.preDecodeInfo.notCFI || FuType.isJump(in.bits.fuType)) && in.fire }
  io.out.map{ x =>
    x.bits.hasException := Cat(selectFrontend(x.bits.exceptionVec) :+ x.bits.exceptionVec(illegalInstr) :+ x.bits.exceptionVec(virtualInstr)).orR || x.bits.trigger.getFrontendCanFire
  }
  if (backendParams.debugEn) {
    dontTouch(robIdxHeadNext)
    dontTouch(notInSameSnpt)
    dontTouch(genSnapshot)
  }
  intFreeList.io.snpt := io.snpt
  fpFreeList.io.snpt := io.snpt
  vecFreeList.io.snpt := io.snpt
  intFreeList.io.snpt.snptEnq := genSnapshot
  fpFreeList.io.snpt.snptEnq := genSnapshot
  vecFreeList.io.snpt.snptEnq := genSnapshot

  /**
   * Instructions commit: update freelist and rename table
   */
  for (i <- 0 until RabCommitWidth) {
    val commitValid = io.rabCommits.isCommit && io.rabCommits.commitValid(i)
    val walkValid = io.rabCommits.isWalk && io.rabCommits.walkValid(i)

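    // Speculative RAT update: the int port writes io.out(i).bits.pdest, which for an
    // eliminated move is the (possibly bypassed) source preg, while the fp/vec ports
    // always take the register just allocated from their free lists.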
    // I. RAT Update
    // When redirect happens (mis-prediction), don't update the rename table
    io.intRenamePorts(i).wen := intSpecWen(i)
    io.intRenamePorts(i).addr := uops(i).ldest
    io.intRenamePorts(i).data := io.out(i).bits.pdest

    io.fpRenamePorts(i).wen := fpSpecWen(i)
    io.fpRenamePorts(i).addr := uops(i).ldest
    io.fpRenamePorts(i).data := fpFreeList.io.allocatePhyReg(i)

    io.vecRenamePorts(i).wen := vecSpecWen(i)
    io.vecRenamePorts(i).addr := uops(i).ldest
    io.vecRenamePorts(i).data := vecFreeList.io.allocatePhyReg(i)

    // II. Free List Update
    intFreeList.io.freeReq(i) := io.int_need_free(i)
    intFreeList.io.freePhyReg(i) := RegNext(io.int_old_pdest(i))
    fpFreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_F, io.rabCommits.info(i)))
    fpFreeList.io.freePhyReg(i) := io.fp_old_pdest(i)
    vecFreeList.io.freeReq(i) := GatedValidRegNext(commitValid && needDestRegCommit(Reg_V, io.rabCommits.info(i)))
    vecFreeList.io.freePhyReg(i) := io.vec_old_pdest(i)
  }

  /*
  Debug and performance counters
   */
  def printRenameInfo(in: DecoupledIO[DecodedInst], out: DecoupledIO[DynInst]) = {
    XSInfo(out.fire, p"pc:${Hexadecimal(in.bits.pc)} in(${in.valid},${in.ready}) " +
      p"lsrc(0):${in.bits.lsrc(0)} -> psrc(0):${out.bits.psrc(0)} " +
      p"lsrc(1):${in.bits.lsrc(1)} -> psrc(1):${out.bits.psrc(1)} " +
      p"lsrc(2):${in.bits.lsrc(2)} -> psrc(2):${out.bits.psrc(2)} " +
      p"ldest:${in.bits.ldest} -> pdest:${out.bits.pdest}\n"
    )
  }

  for ((x,y) <- io.in.zip(io.out)) {
    printRenameInfo(x, y)
  }

  io.out.map { case x =>
    when(x.valid && x.bits.rfWen){
      assert(x.bits.ldest =/= 0.U, "rfWen cannot be 1 when Int regfile ldest is 0")
    }
  }
  val debugRedirect = RegEnable(io.redirect.bits, io.redirect.valid)
  // bad speculation
  val recStall = io.redirect.valid || io.rabCommits.isWalk
  val ctrlRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsCtrl, io.rabCommits.isWalk && debugRedirect.debugIsCtrl)
  val mvioRecStall = Mux(io.redirect.valid, io.redirect.bits.debugIsMemVio, io.rabCommits.isWalk && debugRedirect.debugIsMemVio)
  val otherRecStall = recStall && !(ctrlRecStall || mvioRecStall)
  XSPerfAccumulate("recovery_stall", recStall)
  XSPerfAccumulate("control_recovery_stall", ctrlRecStall)
  XSPerfAccumulate("mem_violation_recovery_stall", mvioRecStall)
  XSPerfAccumulate("other_recovery_stall", otherRecStall)
  // freelist stall
  val notRecStall = !io.out.head.valid && !recStall
  val intFlStall = notRecStall && inHeadValid && !intFreeList.io.canAllocate
  val fpFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && !fpFreeList.io.canAllocate
  val vecFlStall = notRecStall && inHeadValid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && !vecFreeList.io.canAllocate
  // other stall
  val otherStall = notRecStall && !intFlStall && !fpFlStall && !vecFlStall

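  // Report a stall reason to the frontend: a reason already raised by the backend takes
  // precedence; otherwise classify locally with priority
  // recovery (ctrl > mem violation > other) > int FL > fp FL > vec FL, default OtherCoreStall.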
  io.stallReason.in.backReason.valid := io.stallReason.out.backReason.valid || !io.in.head.ready
  io.stallReason.in.backReason.bits := Mux(io.stallReason.out.backReason.valid, io.stallReason.out.backReason.bits,
    MuxCase(TopDownCounters.OtherCoreStall.id.U, Seq(
      ctrlRecStall -> TopDownCounters.ControlRecoveryStall.id.U,
      mvioRecStall -> TopDownCounters.MemVioRecoveryStall.id.U,
      otherRecStall -> TopDownCounters.OtherRecoveryStall.id.U,
      intFlStall -> TopDownCounters.IntFlStall.id.U,
      fpFlStall -> TopDownCounters.FpFlStall.id.U,
      vecFlStall -> TopDownCounters.VecFlStall.id.U,
    )
  ))
  io.stallReason.out.reason.zip(io.stallReason.in.reason).zip(io.in.map(_.valid)).foreach { case ((out, in), valid) =>
    out := Mux(io.stallReason.in.backReason.valid, io.stallReason.in.backReason.bits, in)
  }

  XSDebug(io.rabCommits.isWalk, p"Walk Recovery Enabled\n")
  XSDebug(io.rabCommits.isWalk, p"validVec:${Binary(io.rabCommits.walkValid.asUInt)}\n")
  for (i <- 0 until RabCommitWidth) {
    val info = io.rabCommits.info(i)
    XSDebug(io.rabCommits.isWalk && io.rabCommits.walkValid(i), p"[#$i walk info] " +
      p"ldest:${info.ldest} rfWen:${info.rfWen} fpWen:${info.fpWen} vecWen:${info.vecWen}")
  }

  XSDebug(p"inValidVec: ${Binary(Cat(io.in.map(_.valid)))}\n")

  XSPerfAccumulate("in_valid_count", PopCount(io.in.map(_.valid)))
  XSPerfAccumulate("in_fire_count", PopCount(io.in.map(_.fire)))
  XSPerfAccumulate("in_valid_not_ready_count", PopCount(io.in.map(x => x.valid && !x.ready)))
  XSPerfAccumulate("wait_cycle", !io.in.head.valid && dispatchCanAcc)

  // These stall reasons could overlap each other, but we configure the priority as follows:
  // walk stall > dispatch stall > int freelist stall > fp freelist stall > vec freelist stall
  private val inHeadStall = io.in.head match { case x => x.valid && !x.ready }
  private val stallForWalk = inHeadValid && io.rabCommits.isWalk
  private val stallForDispatch = inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc
  private val stallForIntFL = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && !intFreeList.io.canAllocate
  private val stallForFpFL = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && !fpFreeList.io.canAllocate
  private val stallForVecFL = inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && !vecFreeList.io.canAllocate
  XSPerfAccumulate("stall_cycle", inHeadStall)
  XSPerfAccumulate("stall_cycle_walk", stallForWalk)
  XSPerfAccumulate("stall_cycle_dispatch", stallForDispatch)
  XSPerfAccumulate("stall_cycle_int", stallForIntFL)
  XSPerfAccumulate("stall_cycle_fp", stallForFpFL)
  XSPerfAccumulate("stall_cycle_vec", stallForVecFL)

  XSPerfHistogram("in_valid_range", PopCount(io.in.map(_.valid)), true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("in_fire_range", PopCount(io.in.map(_.fire)), true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("out_valid_range", PopCount(io.out.map(_.valid)), true.B, 0, DecodeWidth + 1, 1)
  XSPerfHistogram("out_fire_range", PopCount(io.out.map(_.fire)), true.B, 0, DecodeWidth + 1, 1)

  XSPerfAccumulate("move_instr_count", PopCount(io.out.map(out => out.fire && out.bits.isMove)))
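  // A fused lui+load is recognised here as a load whose first operand was rewritten to
  // SrcType.imm by the fused-lui-load handling above.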
  val is_fused_lui_load = io.out.map(o => o.fire && o.bits.fuType === FuType.ldu.U && o.bits.srcType(0) === SrcType.imm)
  XSPerfAccumulate("fused_lui_load_instr_count", PopCount(is_fused_lui_load))

  val renamePerf = Seq(
    ("rename_in                  ", PopCount(io.in.map(_.valid & io.in(0).ready))),
    ("rename_waitinstr           ", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready))),
    ("rename_stall               ", inHeadStall),
    ("rename_stall_cycle_walk    ", inHeadValid && io.rabCommits.isWalk),
    ("rename_stall_cycle_dispatch", inHeadValid && !io.rabCommits.isWalk && !dispatchCanAcc),
    ("rename_stall_cycle_int     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && !intFreeList.io.canAllocate),
    ("rename_stall_cycle_fp      ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && vecFreeList.io.canAllocate && !fpFreeList.io.canAllocate),
    ("rename_stall_cycle_vec     ", inHeadValid && !io.rabCommits.isWalk && dispatchCanAcc && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && !vecFreeList.io.canAllocate),
  )
  val intFlPerf = intFreeList.getPerfEvents
  val fpFlPerf = fpFreeList.getPerfEvents
  val vecFlPerf = vecFreeList.getPerfEvents
  val perfEvents = renamePerf ++ intFlPerf ++ fpFlPerf ++ vecFlPerf
  generatePerfEvent()
}