/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.backend.rename

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.rename.freelist._
import xiangshan.mem.mdp._

class Rename(implicit p: Parameters) extends XSModule {
  val io = IO(new Bundle() {
    val redirect = Flipped(ValidIO(new Redirect))
    val robCommits = Flipped(new RobCommitIO)
    // from decode
    val in = Vec(RenameWidth, Flipped(DecoupledIO(new CfCtrl)))
    // ssit read result
    val ssit = Flipped(Vec(RenameWidth, Output(new SSITEntry)))
    // waittable read result
    val waittable = Flipped(Vec(RenameWidth, Output(Bool())))
    // to rename table
    val intReadPorts = Vec(RenameWidth, Vec(3, Input(UInt(PhyRegIdxWidth.W))))
    val fpReadPorts = Vec(RenameWidth, Vec(4, Input(UInt(PhyRegIdxWidth.W))))
    val intRenamePorts = Vec(RenameWidth, Output(new RatWritePort))
    val fpRenamePorts = Vec(RenameWidth, Output(new RatWritePort))
    // to dispatch1
    val out = Vec(RenameWidth, DecoupledIO(new MicroOp))
  })

  // create the free lists and the integer reference counter
  val intFreeList = Module(new MEFreeList(NRPhyRegs))
  val intRefCounter = Module(new RefCounter(NRPhyRegs))
  val fpFreeList = Module(new StdFreeList(NRPhyRegs - 32))

  // decide whether a given instruction needs to allocate a new physical register
  // (CfCtrl: from decode; RobCommitInfo: from rob)
  def needDestReg[T <: CfCtrl](fp: Boolean, x: T): Bool = {
    if (fp) x.ctrl.fpWen else x.ctrl.rfWen && (x.ctrl.ldest =/= 0.U)
  }
  def needDestRegCommit[T <: RobCommitInfo](fp: Boolean, x: T): Bool = {
    if (fp) x.fpWen else x.rfWen
  }

  // connect [redirect + walk] ports for the floating-point and integer free lists
  Seq((fpFreeList, true), (intFreeList, false)).foreach { case (fl, isFp) =>
    fl.io.redirect := io.redirect.valid
    fl.io.walk := io.robCommits.isWalk
    // when isWalk, use stepBack to restore the head pointer of the free list
    // (if ME is enabled, stepBack of intFreeList should be useless and thus optimized out)
    fl.io.stepBack := PopCount(io.robCommits.valid.zip(io.robCommits.info).map { case (v, i) => v && needDestRegCommit(isFp, i) })
  }
  // walk has higher priority than allocation and thus we don't use isWalk here
  // allocation is performed only when both free lists and dispatch1 have enough space
  intFreeList.io.doAllocate := fpFreeList.io.canAllocate && io.out(0).ready
  fpFreeList.io.doAllocate := intFreeList.io.canAllocate && io.out(0).ready

  // dispatch1 ready ++ floating-point free list ready ++ integer free list ready ++ not walk
  val canOut = io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk
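
  // Worked example (hypothetical commit pattern, assuming CommitWidth = 6; added as an
  // illustrative sketch, not part of the original code). During a walk cycle:
  //   io.robCommits.valid           = b011011
  //   walked entries with int dest  = b001010  -> intFreeList.io.stepBack = 2
  //   walked entries with fp dest   = b010001  -> fpFreeList.io.stepBack  = 2
  // i.e. stepBack restores the free-list head by that many entries (for the fp list;
  // as noted above, the integer stepBack is expected to be optimized out when ME is enabled).
  // The cross-coupled doAllocate signals make allocation all-or-nothing: the integer free
  // list consumes entries only when the fp free list can also allocate and dispatch1
  // (io.out(0).ready) can accept the group, and vice versa.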

  // speculatively assign a robIdx to each incoming instruction
  val validCount = PopCount(io.in.map(_.valid)) // number of instructions waiting to enter rob (from decode)
  val robIdxHead = RegInit(0.U.asTypeOf(new RobPtr))
  val lastCycleMisprediction = RegNext(io.redirect.valid && !io.redirect.bits.flushItself())
  val robIdxHeadNext = Mux(io.redirect.valid, io.redirect.bits.robIdx, // redirect: move ptr to given rob index
              Mux(lastCycleMisprediction, robIdxHead + 1.U, // mis-predict: robIdx itself is not flushed, so step over it
              Mux(canOut, robIdxHead + validCount, // instructions successfully entered next stage: increase robIdx
              /* default */ robIdxHead))) // no instructions passed by this cycle: stick to old value
  robIdxHead := robIdxHeadNext
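
  // Illustrative example (hypothetical values, not from the original source):
  // with robIdxHead = 10 and four valid instructions from decode,
  //   - canOut and no redirect  -> robIdx 10..13 are assigned and the head becomes 14
  //   - redirect to robIdx 7    -> the head becomes 7 this cycle; if the redirecting
  //     instruction is not flushed itself, lastCycleMisprediction advances it to 8 next cycle
  //   - otherwise               -> the head keeps its old value
  // Debug sketch (added for illustration, not in the original source): trace movements of
  // the speculative ROB head pointer using the XSDebug helper used elsewhere in this file.
  XSDebug(robIdxHeadNext.asUInt =/= robIdxHead.asUInt,
    p"robIdxHead: ${robIdxHead.asUInt} -> ${robIdxHeadNext.asUInt}\n")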

  /**
   * Rename: allocate free physical register and update rename table
   */
  val uops = Wire(Vec(RenameWidth, new MicroOp))
  uops.foreach( uop => {
    uop.srcState(0) := DontCare
    uop.srcState(1) := DontCare
    uop.srcState(2) := DontCare
    uop.robIdx := DontCare
    uop.diffTestDebugLrScValid := DontCare
    uop.debugInfo := DontCare
    uop.lqIdx := DontCare
    uop.sqIdx := DontCare
  })

  val needFpDest = Wire(Vec(RenameWidth, Bool()))
  val needIntDest = Wire(Vec(RenameWidth, Bool()))
  val hasValid = Cat(io.in.map(_.valid)).orR

  val isMove = io.in.map(_.bits.ctrl.isMove)
  val intPsrc = Wire(Vec(RenameWidth, UInt()))

  val intSpecWen = Wire(Vec(RenameWidth, Bool()))
  val fpSpecWen = Wire(Vec(RenameWidth, Bool()))

  // uop calculation
  for (i <- 0 until RenameWidth) {
    uops(i).cf := io.in(i).bits.cf
    uops(i).ctrl := io.in(i).bits.ctrl

    // update cf according to ssit result
    uops(i).cf.storeSetHit := io.ssit(i).valid
    uops(i).cf.loadWaitStrict := io.ssit(i).strict && io.ssit(i).valid
    uops(i).cf.ssid := io.ssit(i).ssid

    // update cf according to waittable result
    uops(i).cf.loadWaitBit := io.waittable(i)

    val inValid = io.in(i).valid

    // allocate a new physical register
    needFpDest(i) := inValid && needDestReg(fp = true, io.in(i).bits)
    needIntDest(i) := inValid && needDestReg(fp = false, io.in(i).bits)
    fpFreeList.io.allocateReq(i) := needFpDest(i)
    intFreeList.io.allocateReq(i) := needIntDest(i) && !isMove(i)

    // no valid instruction from decode stage || all resources (dispatch1 + both free lists) ready
    io.in(i).ready := !hasValid || canOut

    uops(i).robIdx := robIdxHead + PopCount(io.in.take(i).map(_.valid))

    val intPhySrcVec = io.intReadPorts(i).take(2)
    val intOldPdest = io.intReadPorts(i).last
    intPsrc(i) := intPhySrcVec(0)
    val fpPhySrcVec = io.fpReadPorts(i).take(3)
    val fpOldPdest = io.fpReadPorts(i).last
    uops(i).psrc(0) := Mux(uops(i).ctrl.srcType(0) === SrcType.reg, intPhySrcVec(0), fpPhySrcVec(0))
    uops(i).psrc(1) := Mux(uops(i).ctrl.srcType(1) === SrcType.reg, intPhySrcVec(1), fpPhySrcVec(1))
    uops(i).psrc(2) := fpPhySrcVec(2)
    uops(i).old_pdest := Mux(uops(i).ctrl.rfWen, intOldPdest, fpOldPdest)
    uops(i).eliminatedMove := isMove(i)

    // update pdest
    uops(i).pdest := Mux(needIntDest(i), intFreeList.io.allocatePhyReg(i), // normal int inst
                     Mux(needFpDest(i), fpFreeList.io.allocatePhyReg(i),   // normal fp inst
                     /* default */ 0.U))

    // Assign performance counters
    uops(i).debugInfo.renameTime := GTimer()

    io.out(i).valid := io.in(i).valid && intFreeList.io.canAllocate && fpFreeList.io.canAllocate && !io.robCommits.isWalk
    io.out(i).bits := uops(i)
    when (io.out(i).bits.ctrl.fuType === FuType.fence) {
      io.out(i).bits.ctrl.imm := Cat(io.in(i).bits.ctrl.lsrc(1), io.in(i).bits.ctrl.lsrc(0))
    }

    // write speculative rename table
    // we update the rat later, inside the commit code
    intSpecWen(i) := needIntDest(i) && intFreeList.io.canAllocate && intFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid
    fpSpecWen(i) := needFpDest(i) && fpFreeList.io.canAllocate && fpFreeList.io.doAllocate && !io.robCommits.isWalk && !io.redirect.valid

    intRefCounter.io.allocate(i).valid := intSpecWen(i)
    intRefCounter.io.allocate(i).bits := io.out(i).bits.pdest
  }

  /**
   * How to set psrc:
   * - bypass pdest to psrc if a previous instruction in the group writes the same ldest as this lsrc
   * - default: psrc from RAT
   * How to set pdest:
   * - Mux(isMove, psrc, pdest_from_freelist).
   *
   * The critical path of rename lies here:
   * When move elimination is enabled, we need to update the rat with psrc.
   * However, psrc may come from a previous instruction's pdest, which in turn comes from the freelist.
   *
   * If we expand this logic for pdest(N):
   * pdest(N) = Mux(isMove(N), psrc(N), freelist_out(N))
   *          = Mux(isMove(N), Mux(bypass(N, N - 1), pdest(N - 1),
   *                           Mux(bypass(N, N - 2), pdest(N - 2),
   *                           ...
   *                           Mux(bypass(N, 0),     pdest(0),
   *                                                 rat_out(N))...)),
   *                           freelist_out(N))
   */
  // a simple functional model for now
  io.out(0).bits.pdest := Mux(isMove(0), uops(0).psrc.head, uops(0).pdest)
  val bypassCond = Wire(Vec(4, MixedVec(List.tabulate(RenameWidth-1)(i => UInt((i+1).W)))))
  for (i <- 1 until RenameWidth) {
    val fpCond = io.in(i).bits.ctrl.srcType.map(_ === SrcType.fp) :+ needFpDest(i)
    val intCond = io.in(i).bits.ctrl.srcType.map(_ === SrcType.reg) :+ needIntDest(i)
    val target = io.in(i).bits.ctrl.lsrc :+ io.in(i).bits.ctrl.ldest
    for ((((cond1, cond2), t), j) <- fpCond.zip(intCond).zip(target).zipWithIndex) {
      // for every older instruction k (k < i): does it write the register this operand reads?
      val destToSrc = io.in.take(i).zipWithIndex.map { case (in, k) =>
        val indexMatch = in.bits.ctrl.ldest === t
        val writeMatch = cond2 && needIntDest(k) || cond1 && needFpDest(k)
        indexMatch && writeMatch
      }
      bypassCond(j)(i - 1) := VecInit(destToSrc).asUInt
    }
    io.out(i).bits.psrc(0) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(0)(i-1).asBools).foldLeft(uops(i).psrc(0)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(1) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(1)(i-1).asBools).foldLeft(uops(i).psrc(1)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.psrc(2) := io.out.take(i).map(_.bits.pdest).zip(bypassCond(2)(i-1).asBools).foldLeft(uops(i).psrc(2)) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.old_pdest := io.out.take(i).map(_.bits.pdest).zip(bypassCond(3)(i-1).asBools).foldLeft(uops(i).old_pdest) {
      (z, next) => Mux(next._2, next._1, z)
    }
    io.out(i).bits.pdest := Mux(isMove(i), io.out(i).bits.psrc(0), uops(i).pdest)
  }
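
  // Illustrative example (hypothetical instruction group, RenameWidth >= 3; not from the
  // original source) of how the bypass network above resolves in-group dependences and
  // how move elimination reuses an older pdest instead of a free-list entry:
  //   i0: add x5, x1, x2  -> pdest(0) comes from the free list
  //   i1: mv  x6, x5      -> lsrc(0) matches i0's ldest, so bypassCond selects pdest(0);
  //                          with isMove(1), io.out(1).bits.pdest = io.out(1).bits.psrc(0)
  //                          = pdest(0) and no integer free-list entry is allocated
  //   i2: sub x7, x6, x3  -> lsrc(0) matches i1's ldest, so psrc(0) is bypassed from
  //                          pdest(1), i.e. the same physical register as pdest(0)
  // Debug sketch (added for illustration, not in the original source): report eliminated
  // moves and the physical register they reuse, via the XSDebug helper used elsewhere here.
  for (i <- 0 until RenameWidth) {
    XSDebug(io.out(i).fire() && io.out(i).bits.eliminatedMove,
      p"move eliminated at slot $i: ldest ${io.out(i).bits.ctrl.ldest} reuses pdest ${io.out(i).bits.pdest}\n")
  }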

  /**
   * Instructions commit: update freelist and rename table
   */
  for (i <- 0 until CommitWidth) {

    Seq((io.intRenamePorts, false), (io.fpRenamePorts, true)) foreach { case (rat, fp) =>
      // valid commit request whose instruction has a destination register
      val commitDestValid = io.robCommits.valid(i) && needDestRegCommit(fp, io.robCommits.info(i))
      XSDebug(p"isFp[${fp}]index[$i]-commitDestValid:$commitDestValid,isWalk:${io.robCommits.isWalk}\n")

      /*
      I. RAT Update
      */

      // speculative RAT write: map ldest to the newly allocated physical register
      // (when a redirect happens (mis-prediction), the rename table is not updated;
      //  this is enforced by the !io.redirect.valid term in fpSpecWen/intSpecWen)
      if (fp && i < RenameWidth) {
        rat(i).wen := fpSpecWen(i)
        rat(i).addr := uops(i).ctrl.ldest
        rat(i).data := fpFreeList.io.allocatePhyReg(i)
      } else if (!fp && i < RenameWidth) {
        rat(i).wen := intSpecWen(i)
        rat(i).addr := uops(i).ctrl.ldest
        rat(i).data := io.out(i).bits.pdest
      }

      /*
      II. Free List Update
      */
      if (fp) { // floating-point free list
        fpFreeList.io.freeReq(i) := commitDestValid && !io.robCommits.isWalk
        fpFreeList.io.freePhyReg(i) := io.robCommits.info(i).old_pdest
      } else { // integer free list
        intFreeList.io.freeReq(i) := intRefCounter.io.freeRegs(i).valid
        intFreeList.io.freePhyReg(i) := intRefCounter.io.freeRegs(i).bits
      }
    }
    intRefCounter.io.deallocate(i).valid := io.robCommits.valid(i) && needDestRegCommit(false, io.robCommits.info(i))
    intRefCounter.io.deallocate(i).bits := Mux(io.robCommits.isWalk, io.robCommits.info(i).pdest, io.robCommits.info(i).old_pdest)
  }
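
  // Note (explanatory comment added for clarity, not from the original source): because
  // move elimination can map several logical registers onto one physical register, the
  // integer free list is not freed directly at commit. intRefCounter tracks references
  // per physical register: allocate() increments on each speculative RAT write at rename,
  // deallocate() decrements when a mapping is overwritten at commit (old_pdest) or undone
  // during a walk (pdest), and freeRegs returns an entry to intFreeList only once it is
  // no longer referenced by any logical register.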

  /*
  Debug and performance counters
  */
  def printRenameInfo(in: DecoupledIO[CfCtrl], out: DecoupledIO[MicroOp]) = {
    XSInfo(out.fire, p"pc:${Hexadecimal(in.bits.cf.pc)} in(${in.valid},${in.ready}) " +
      p"lsrc(0):${in.bits.ctrl.lsrc(0)} -> psrc(0):${out.bits.psrc(0)} " +
      p"lsrc(1):${in.bits.ctrl.lsrc(1)} -> psrc(1):${out.bits.psrc(1)} " +
      p"lsrc(2):${in.bits.ctrl.lsrc(2)} -> psrc(2):${out.bits.psrc(2)} " +
      p"ldest:${in.bits.ctrl.ldest} -> pdest:${out.bits.pdest} " +
      p"old_pdest:${out.bits.old_pdest}\n"
    )
  }

  for ((x, y) <- io.in.zip(io.out)) {
    printRenameInfo(x, y)
  }

  XSDebug(io.robCommits.isWalk, p"Walk Recovery Enabled\n")
  XSDebug(io.robCommits.isWalk, p"validVec:${Binary(io.robCommits.valid.asUInt)}\n")
  for (i <- 0 until CommitWidth) {
    val info = io.robCommits.info(i)
    XSDebug(io.robCommits.isWalk && io.robCommits.valid(i), p"[#$i walk info] pc:${Hexadecimal(info.pc)} " +
      p"ldest:${info.ldest} rfWen:${info.rfWen} fpWen:${info.fpWen} " +
      p"pdest:${info.pdest} old_pdest:${info.old_pdest}\n")
  }

  XSDebug(p"inValidVec: ${Binary(Cat(io.in.map(_.valid)))}\n")

  XSPerfAccumulate("in", Mux(RegNext(io.in(0).ready), PopCount(io.in.map(_.valid)), 0.U))
  XSPerfAccumulate("utilization", PopCount(io.in.map(_.valid)))
  XSPerfAccumulate("waitInstr", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready)))
  XSPerfAccumulate("stall_cycle_dispatch", hasValid && !io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk)
  XSPerfAccumulate("stall_cycle_fp", hasValid && io.out(0).ready && !fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk)
  XSPerfAccumulate("stall_cycle_int", hasValid && io.out(0).ready && fpFreeList.io.canAllocate && !intFreeList.io.canAllocate && !io.robCommits.isWalk)
  XSPerfAccumulate("stall_cycle_walk", hasValid && io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && io.robCommits.isWalk)

  XSPerfAccumulate("move_instr_count", PopCount(io.out.map(out => out.fire() && out.bits.ctrl.isMove)))

  val intfl_perf = intFreeList.perfEvents.map(_._1).zip(intFreeList.perfinfo.perfEvents.perf_events)
  val fpfl_perf = fpFreeList.perfEvents.map(_._1).zip(fpFreeList.perfinfo.perfEvents.perf_events)
  val perf_list = Wire(new PerfEventsBundle(6))
  val perf_seq = Seq(
    ("rename_in                  ", PopCount(io.in.map(_.valid & io.in(0).ready))),
    ("rename_waitinstr           ", PopCount((0 until RenameWidth).map(i => io.in(i).valid && !io.in(i).ready))),
    ("rename_stall_cycle_dispatch", hasValid && !io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk),
    ("rename_stall_cycle_fp      ", hasValid && io.out(0).ready && !fpFreeList.io.canAllocate && intFreeList.io.canAllocate && !io.robCommits.isWalk),
    ("rename_stall_cycle_int     ", hasValid && io.out(0).ready && fpFreeList.io.canAllocate && !intFreeList.io.canAllocate && !io.robCommits.isWalk),
    ("rename_stall_cycle_walk    ", hasValid && io.out(0).ready && fpFreeList.io.canAllocate && intFreeList.io.canAllocate && io.robCommits.isWalk)
  )
  for (((perf_out, (perf_name, perf)), i) <- perf_list.perf_events.zip(perf_seq).zipWithIndex) {
    perf_out.incr_step := RegNext(perf)
  }

  val perfEvents_list = perf_list.perf_events ++
    intFreeList.asInstanceOf[freelist.MEFreeList].perfinfo.perfEvents.perf_events ++
    fpFreeList.perfinfo.perfEvents.perf_events

  val perfEvents = perf_seq ++ intfl_perf ++ fpfl_perf
  val perfinfo = IO(new Bundle() {
    val perfEvents = Output(new PerfEventsBundle(perfEvents_list.length))
  })
  perfinfo.perfEvents.perf_events := perfEvents_list
}