/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.experimental.chiselName
import chisel3.util._
import xiangshan._
import utils._

import scala.math.min

trait HasBPUConst extends HasXSParameter {
  val MaxMetaLength = 512 // TODO: Reduce meta length
  val MaxBasicBlockSize = 32
  val LHistoryLength = 32
  // val numBr = 2
  val useBPD = true
  val useLHist = true
  val numBrSlot = numBr - 1
  val totalSlot = numBrSlot + 1

  def BP_STAGES = (0 until 3).map(_.U(2.W))
  def BP_S1 = BP_STAGES(0)
  def BP_S2 = BP_STAGES(1)
  def BP_S3 = BP_STAGES(2)
  val numBpStages = BP_STAGES.length

  val debug = true
  val resetVector = 0x10000000L
  // TODO: Replace log2Up by log2Ceil
}

trait HasBPUParameter extends HasXSParameter with HasBPUConst {
  val BPUDebug = true && !env.FPGAPlatform && env.EnablePerfDebug
  val EnableCFICommitLog = true
  val EnbaleCFIPredLog = true
  val EnableBPUTimeRecord = (EnableCFICommitLog || EnbaleCFIPredLog) && !env.FPGAPlatform
  val EnableCommit = false
}

class BPUCtrl(implicit p: Parameters) extends XSBundle {
  val ubtb_enable = Bool()
  val btb_enable  = Bool()
  val bim_enable  = Bool()
  val tage_enable = Bool()
  val sc_enable   = Bool()
  val ras_enable  = Bool()
  val loop_enable = Bool()
}

trait BPUUtils extends HasXSParameter {
  // circular shifting
  def circularShiftLeft(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << shamt
    val lower = source >> (len.U - shamt)
    res := higher | lower
    res
  }

  def circularShiftRight(source: UInt, len: Int, shamt: UInt): UInt = {
    val res = Wire(UInt(len.W))
    val higher = source << (len.U - shamt)
    val lower = source >> shamt
    res := higher | lower
    res
  }

  // Unsigned saturating counter update: stick at 0 / 2^len-1, otherwise inc/dec.
  // To be verified
  def satUpdate(old: UInt, len: Int, taken: Bool): UInt = {
    val oldSatTaken = old === ((1 << len)-1).U
    val oldSatNotTaken = old === 0.U
    Mux(oldSatTaken && taken, ((1 << len)-1).U,
      Mux(oldSatNotTaken && !taken, 0.U,
        Mux(taken, old + 1.U, old - 1.U)))
  }

  // Signed variant: saturates at 2^(len-1)-1 and -2^(len-1).
  def signedSatUpdate(old: SInt, len: Int, taken: Bool): SInt = {
    val oldSatTaken = old === ((1 << (len-1))-1).S
    val oldSatNotTaken = old === (-(1 << (len-1))).S
    Mux(oldSatTaken && taken, ((1 << (len-1))-1).S,
      Mux(oldSatNotTaken && !taken, (-(1 << (len-1))).S,
        Mux(taken, old + 1.S, old - 1.S)))
  }

  // Reconstruct the fall-through address from the upper PC bits (with an
  // optional carry) and the partial fall-through offset bits.
  def getFallThroughAddr(start: UInt, carry: Bool, pft: UInt) = {
    val higher = start.head(VAddrBits-log2Ceil(PredictWidth)-instOffsetBits-1)
    Cat(Mux(carry, higher+1.U, higher), pft, 0.U(instOffsetBits.W))
  }

  // XOR-fold a tag into l-bit chunks (the last chunk may be narrower).
  def foldTag(tag: UInt, l: Int): UInt = {
    val nChunks = (tag.getWidth + l - 1) / l
    val chunks = (0 until nChunks).map { i =>
      tag(min((i+1)*l, tag.getWidth)-1, i*l)
    }
    ParallelXOR(chunks)
  }
}
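// A minimal usage sketch (not part of the BPU proper; the module name and
// ports are illustrative assumptions) showing how the BPUUtils saturating
// counter is typically employed: a small table of 2-bit counters updated at
// commit time, with the counter MSB giving the direction prediction.
class SatCounterExample(nEntries: Int)(implicit p: Parameters) extends XSModule with BPUUtils {
  val io = IO(new Bundle {
    val idx    = Input(UInt(log2Ceil(nEntries).W))
    val update = Flipped(Valid(Bool())) // bits: branch actually taken
    val taken  = Output(Bool())
  })
  // start all counters at "weakly taken" (2 out of 0..3)
  val ctrs = RegInit(VecInit(Seq.fill(nEntries)(2.U(2.W))))
  when (io.update.valid) {
    ctrs(io.idx) := satUpdate(ctrs(io.idx), 2, io.update.bits)
  }
  io.taken := ctrs(io.idx)(1) // MSB is the prediction
}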
// class BranchPredictionUpdate(implicit p: Parameters) extends XSBundle with HasBPUConst {
//   val pc = UInt(VAddrBits.W)
//   val br_offset = Vec(num_br, UInt(log2Up(MaxBasicBlockSize).W))
//   val br_mask = Vec(MaxBasicBlockSize, Bool())
//
//   val jmp_valid = Bool()
//   val jmp_type = UInt(3.W)
//
//   val is_NextMask = Vec(FetchWidth*2, Bool())
//
//   val cfi_idx = Valid(UInt(log2Ceil(MaxBasicBlockSize).W))
//   val cfi_mispredict = Bool()
//   val cfi_is_br = Bool()
//   val cfi_is_jal = Bool()
//   val cfi_is_jalr = Bool()
//
//   val ghist = new ShiftingGlobalHistory()
//
//   val target = UInt(VAddrBits.W)
//
//   val meta = UInt(MaxMetaLength.W)
//   val spec_meta = UInt(MaxMetaLength.W)
//
//   def taken = cfi_idx.valid
// }

class AllFoldedHistories(val gen: Seq[Tuple2[Int, Int]])(implicit p: Parameters) extends XSBundle with HasBPUConst {
  val hist = MixedVec(gen.map{case (l, cl) => new FoldedHistory(l, cl, numBr)})
  // println(gen.mkString)
  require(gen.toSet.toList.equals(gen))
  def getHistWithInfo(info: Tuple2[Int, Int]) = {
    val selected = hist.filter(_.info.equals(info))
    require(selected.length == 1)
    selected(0)
  }
  def autoConnectFrom(that: AllFoldedHistories) = {
    require(this.hist.length <= that.hist.length)
    for (h <- this.hist) {
      h := that.getHistWithInfo(h.info)
    }
  }
  def update(ghv: Vec[Bool], ptr: CGHPtr, shift: Int, taken: Bool): AllFoldedHistories = {
    val res = WireInit(this)
    for (i <- 0 until this.hist.length) {
      res.hist(i) := this.hist(i).update(ghv, ptr, shift, taken)
    }
    res
  }

  def display(cond: Bool) = {
    for (h <- hist) {
      XSDebug(cond, p"hist len ${h.len}, folded len ${h.compLen}, value ${Binary(h.folded_hist)}\n")
    }
  }
}
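// A pure-Scala sketch (illustrative only) of the XOR-folding that
// FoldedHistory maintains incrementally in hardware: a len-bit history is
// compressed to compLen bits by XOR-ing compLen-sized chunks, the same
// chunking scheme foldTag uses for tags. The object and method names are
// assumptions for illustration.
object FoldedHistorySWModel {
  def fold(hist: BigInt, len: Int, compLen: Int): BigInt = {
    val mask = (BigInt(1) << compLen) - 1
    (0 until len by compLen).map(i => (hist >> i) & mask).reduce(_ ^ _)
  }
}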
class BasePredictorInput (implicit p: Parameters) extends XSBundle with HasBPUConst {
  def nInputs = 1

  val s0_pc = UInt(VAddrBits.W)

  val folded_hist = new AllFoldedHistories(foldedGHistInfos)
  val ghist = UInt(HistoryLength.W)

  val resp_in = Vec(nInputs, new BranchPredictionResp)

  // val final_preds = Vec(numBpStages, new)
  // val toFtq_fire = Bool()

  // val s0_all_ready = Bool()
}

class BasePredictorOutput (implicit p: Parameters) extends XSBundle with HasBPUConst {
  val last_stage_meta = UInt(MaxMetaLength.W) // This is used by the composer
  val resp = new BranchPredictionResp

  // These are stored in meta and extracted in the composer
  // val rasSp = UInt(log2Ceil(RasSize).W)
  // val rasTop = new RASEntry
  // val specCnt = Vec(PredictWidth, UInt(10.W))
}

class BasePredictorIO (implicit p: Parameters) extends XSBundle with HasBPUConst {
  val in = Flipped(DecoupledIO(new BasePredictorInput)) // TODO: Remove DecoupledIO
  // val out = DecoupledIO(new BasePredictorOutput)
  val out = Output(new BasePredictorOutput)
  // val flush_out = Valid(UInt(VAddrBits.W))

  // val ctrl = Input(new BPUCtrl())

  val s0_fire = Input(Bool())
  val s1_fire = Input(Bool())
  val s2_fire = Input(Bool())
  val s3_fire = Input(Bool())

  val s2_redirect = Input(Bool())
  val s3_redirect = Input(Bool())

  val s1_ready = Output(Bool())
  val s2_ready = Output(Bool())
  val s3_ready = Output(Bool())

  val update = Flipped(Valid(new BranchPredictionUpdate))
  val redirect = Flipped(Valid(new BranchPredictionRedirect))
}

abstract class BasePredictor(implicit p: Parameters) extends XSModule
  with HasBPUConst with BPUUtils with HasPerfEvents {
  val meta_size = 0
  val spec_meta_size = 0
  val io = IO(new BasePredictorIO())

  io.out.resp := io.in.bits.resp_in(0)

  io.out.last_stage_meta := 0.U

  io.in.ready := !io.redirect.valid

  io.s1_ready := true.B
  io.s2_ready := true.B
  io.s3_ready := true.B

  val s0_pc = WireInit(io.in.bits.s0_pc) // fetchIdx(io.f0_pc)
  val s1_pc = RegEnable(s0_pc, resetVector.U, io.s0_fire)
  val s2_pc = RegEnable(s1_pc, io.s1_fire)
  val s3_pc = RegEnable(s2_pc, io.s2_fire)

  io.out.resp.s1.pc := s1_pc
  io.out.resp.s2.pc := s2_pc
  io.out.resp.s3.pc := s3_pc

  val perfEvents: Seq[(String, UInt)] = Seq()


  def getFoldedHistoryInfo: Option[Set[FoldedHistoryInfo]] = None
}

class FakePredictor(implicit p: Parameters) extends BasePredictor {
  io.in.ready            := true.B
  io.out.last_stage_meta := 0.U
  io.out.resp            := io.in.bits.resp_in(0)
}

class BpuToFtqIO(implicit p: Parameters) extends XSBundle {
  val resp = DecoupledIO(new BpuToFtqBundle())
}

class PredictorIO(implicit p: Parameters) extends XSBundle {
  val bpu_to_ftq = new BpuToFtqIO()
  val ftq_to_bpu = Flipped(new FtqToBpuIO())
}

@chiselName
class Predictor(implicit p: Parameters) extends XSModule with HasBPUConst with HasPerfEvents {
  val io = IO(new PredictorIO)

  val predictors = Module(if (useBPD) new Composer else new FakePredictor)

  val folded_hist_infos = predictors.getFoldedHistoryInfo.getOrElse(Set()).toList
  for ((len, compLen) <- folded_hist_infos) {
    println(f"folded hist info: len $len, compLen $compLen")
  }

  val s0_fire, s1_fire, s2_fire, s3_fire = Wire(Bool())
  val s1_valid, s2_valid, s3_valid = RegInit(false.B)
  val s1_ready, s2_ready, s3_ready = Wire(Bool())
  val s1_components_ready, s2_components_ready, s3_components_ready = Wire(Bool())

  val s0_pc = WireInit(resetVector.U)
  val s0_pc_reg = RegNext(s0_pc, init=resetVector.U)
  val s1_pc = RegEnable(s0_pc, s0_fire)
  val s2_pc = RegEnable(s1_pc, s1_fire)
  val s3_pc = RegEnable(s2_pc, s2_fire)

  val s0_folded_gh = Wire(new AllFoldedHistories(foldedGHistInfos))
  val s0_folded_gh_reg = RegNext(s0_folded_gh, init=0.U.asTypeOf(s0_folded_gh))
  val s1_folded_gh = RegEnable(s0_folded_gh, 0.U.asTypeOf(s0_folded_gh), s0_fire)
  val s2_folded_gh = RegEnable(s1_folded_gh, 0.U.asTypeOf(s0_folded_gh), s1_fire)
  val s3_folded_gh = RegEnable(s2_folded_gh, 0.U.asTypeOf(s0_folded_gh), s2_fire)

  val npcGen = new PhyPriorityMuxGenerator[UInt]
  val foldedGhGen = new PhyPriorityMuxGenerator[AllFoldedHistories]
  val ghistPtrGen = new PhyPriorityMuxGenerator[CGHPtr]
  val ghvBitWriteGens = Seq.tabulate(HistoryLength)(n => new PhyPriorityMuxGenerator[Bool])
  // val ghistGen = new PhyPriorityMuxGenerator[UInt]

  val ghv = RegInit(0.U.asTypeOf(Vec(HistoryLength, Bool())))
  val ghv_wire = WireInit(ghv)

  val s0_ghist = WireInit(0.U.asTypeOf(UInt(HistoryLength.W)))


  val ghv_write_datas = Wire(Vec(HistoryLength, Bool()))
  val ghv_wens = Wire(Vec(HistoryLength, Bool()))

  val s0_ghist_ptr = Wire(new CGHPtr)
  val s0_ghist_ptr_reg = RegNext(s0_ghist_ptr, init=0.U.asTypeOf(new CGHPtr))
  val s1_ghist_ptr = RegEnable(s0_ghist_ptr, 0.U.asTypeOf(new CGHPtr), s0_fire)
  val s2_ghist_ptr = RegEnable(s1_ghist_ptr, 0.U.asTypeOf(new CGHPtr), s1_fire)
  val s3_ghist_ptr = RegEnable(s2_ghist_ptr, 0.U.asTypeOf(new CGHPtr), s2_fire)

  def getHist(ptr: CGHPtr): UInt = (Cat(ghv_wire.asUInt, ghv_wire.asUInt) >> (ptr.value+1.U))(HistoryLength-1, 0)
  s0_ghist := getHist(s0_ghist_ptr)
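  // Pure-Scala sketch (illustrative only, not used by the hardware) of
  // getHist's wrap-around read above: duplicating the buffer and shifting
  // extracts a HistoryLength-bit window starting just past the pointer, so
  // the circular wrap needs no explicit mux.
  // e.g. histLen = 8, ghv = b1100_0101, ptrValue = 2 => window = b1011_1000
  private def getHistModel(ghv: BigInt, histLen: Int, ptrValue: Int): BigInt = {
    val doubled = (ghv << histLen) | ghv
    (doubled >> (ptrValue + 1)) & ((BigInt(1) << histLen) - 1)
  }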
  val resp = predictors.io.out.resp


  val toFtq_fire = io.bpu_to_ftq.resp.valid && io.bpu_to_ftq.resp.ready

  val s1_flush, s2_flush, s3_flush = Wire(Bool())
  val s2_redirect, s3_redirect = Wire(Bool())

  // predictors.io := DontCare
  predictors.io.in.valid := s0_fire
  predictors.io.in.bits.s0_pc := s0_pc
  predictors.io.in.bits.ghist := s0_ghist
  predictors.io.in.bits.folded_hist := s0_folded_gh
  predictors.io.in.bits.resp_in(0) := (0.U).asTypeOf(new BranchPredictionResp)
  // predictors.io.in.bits.resp_in(0).s1.pc := s0_pc
  // predictors.io.in.bits.toFtq_fire := toFtq_fire

  // predictors.io.out.ready := io.bpu_to_ftq.resp.ready

  // Pipeline logic
  s2_redirect := false.B
  s3_redirect := false.B

  s3_flush := io.ftq_to_bpu.redirect.valid
  s2_flush := s3_flush || s3_redirect
  s1_flush := s2_flush || s2_redirect

  s1_components_ready := predictors.io.s1_ready
  s1_ready := s1_fire || !s1_valid
  s0_fire := !reset.asBool && s1_components_ready && s1_ready
  predictors.io.s0_fire := s0_fire

  s2_components_ready := predictors.io.s2_ready
  s2_ready := s2_fire || !s2_valid
  s1_fire := s1_valid && s2_components_ready && s2_ready && io.bpu_to_ftq.resp.ready

  s3_components_ready := predictors.io.s3_ready
  s3_ready := s3_fire || !s3_valid
  s2_fire := s2_valid && s3_components_ready && s3_ready

  when(s0_fire)         { s1_valid := true.B  }
    .elsewhen(s1_flush) { s1_valid := false.B }
    .elsewhen(s1_fire)  { s1_valid := false.B }

  predictors.io.s1_fire := s1_fire

  // NOTE: last connection wins in Chisel, so this overrides the handshake
  // assignment above; s2_fire is effectively s2_valid.
  s2_fire := s2_valid

  when(s2_flush)        { s2_valid := false.B }
    .elsewhen(s1_fire)  { s2_valid := !s1_flush }
    .elsewhen(s2_fire)  { s2_valid := false.B }

  predictors.io.s2_fire := s2_fire
  predictors.io.s2_redirect := s2_redirect

  s3_fire := s3_valid

  when(s3_flush)        { s3_valid := false.B }
    .elsewhen(s2_fire)  { s3_valid := !s2_flush }
    .elsewhen(s3_fire)  { s3_valid := false.B }

  predictors.io.s3_fire := s3_fire
  predictors.io.s3_redirect := s3_redirect


  io.bpu_to_ftq.resp.valid :=
    s1_valid && s2_components_ready && s2_ready ||
    s2_fire && s2_redirect ||
    s3_fire && s3_redirect
  io.bpu_to_ftq.resp.bits := BpuToFtqBundle(predictors.io.out.resp)
  io.bpu_to_ftq.resp.bits.meta := predictors.io.out.last_stage_meta // TODO: change to lastStageMeta
  io.bpu_to_ftq.resp.bits.s3.folded_hist := s3_folded_gh
  io.bpu_to_ftq.resp.bits.s3.histPtr := s3_ghist_ptr

  npcGen.register(true.B, s0_pc_reg, Some("stallPC"), 0)
  foldedGhGen.register(true.B, s0_folded_gh_reg, Some("stallFGH"), 0)
  ghistPtrGen.register(true.B, s0_ghist_ptr_reg, Some("stallGHPtr"), 0)
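  // The generators above collect every writer of the s0 state: the
  // always-valid "stall" sources registered here act as the fallback, while
  // the reset, ftq-redirect and s1/s2/s3 entries registered below each carry
  // their own priority index. The selected values are consumed near the
  // bottom of the module (s0_pc := npcGen(), etc.).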
  // History management
  // s1
  val s1_possible_predicted_ghist_ptrs = (0 to numBr).map(s1_ghist_ptr - _.U)
  val s1_predicted_ghist_ptr = Mux1H(resp.s1.lastBrPosOH, s1_possible_predicted_ghist_ptrs)

  val s1_possible_predicted_fhs = (0 to numBr).map(i =>
    s1_folded_gh.update(ghv, s1_ghist_ptr, i, resp.s1.brTaken && resp.s1.lastBrPosOH(i)))
  val s1_predicted_fh = Mux1H(resp.s1.lastBrPosOH, s1_possible_predicted_fhs)

  if (EnableGHistDiff) {
    val s1_predicted_ghist = WireInit(getHist(s1_predicted_ghist_ptr).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s1.shouldShiftVec(i)) {
        s1_predicted_ghist(i) := resp.s1.brTaken && (i==0).B
      }
    }
    when (s1_valid) {
      s0_ghist := s1_predicted_ghist.asUInt
    }
  }

  require(isPow2(HistoryLength))
  val s1_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s1_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s1.shouldShiftVec(b) && s1_valid))
  val s1_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s1_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s1.shouldShiftVec(b),
        resp.s1.brTaken && resp.s1.lastBrPosOH(b+1)
      ))
    )
  )

  XSError(!resp.s1.is_minimal, "s1 should be minimal!\n")

  npcGen.register(s1_valid, resp.s1.getTarget, Some("s1_target"), 4)
  foldedGhGen.register(s1_valid, s1_predicted_fh, Some("s1_FGH"), 4)
  ghistPtrGen.register(s1_valid, s1_predicted_ghist_ptr, Some("s1_GHPtr"), 4)
  ghvBitWriteGens.zip(s1_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s1_ghv_wdatas(i), Some(s"s1_new_bit_$i"), 4)
  }

  def preds_needs_redirect_vec(x: BranchPredictionBundle, y: BranchPredictionBundle) = {
    VecInit(
      x.getTarget =/= y.getTarget,
      x.lastBrPosOH.asUInt =/= y.lastBrPosOH.asUInt,
      x.taken =/= y.taken,
      (x.taken && y.taken) && x.cfiIndex.bits =/= y.cfiIndex.bits,
      (!x.taken && !y.taken) && x.oversize =/= y.oversize
      // x.shouldShiftVec.asUInt =/= y.shouldShiftVec.asUInt,
      // x.brTaken =/= y.brTaken
    )
  }
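  // Element mapping of the vector above (cf. the XSPerfAccumulate names in
  // the s2 section): (0) target differs, (1) last-branch position differs,
  // (2) taken/not-taken differs, (3) both taken but at different cfi
  // indices, (4) both not taken but oversize differs.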
  // s2
  val s2_possible_predicted_ghist_ptrs = (0 to numBr).map(s2_ghist_ptr - _.U)
  val s2_predicted_ghist_ptr = Mux1H(resp.s2.lastBrPosOH, s2_possible_predicted_ghist_ptrs)

  val s2_possible_predicted_fhs = (0 to numBr).map(i =>
    s2_folded_gh.update(ghv, s2_ghist_ptr, i, if (i > 0) resp.s2.full_pred.br_taken_mask(i-1) else false.B))
  val s2_predicted_fh = Mux1H(resp.s2.lastBrPosOH, s2_possible_predicted_fhs)

  if (EnableGHistDiff) {
    val s2_predicted_ghist = WireInit(getHist(s2_predicted_ghist_ptr).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s2.shouldShiftVec(i)) {
        s2_predicted_ghist(i) := resp.s2.brTaken && (i==0).B
      }
    }
    when(s2_redirect) {
      s0_ghist := s2_predicted_ghist.asUInt
    }
  }

  val s2_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s2_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s2.shouldShiftVec(b) && s2_redirect))
  val s2_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s2_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s2.shouldShiftVec(b),
        resp.s2.full_pred.real_br_taken_mask()(b)
      ))
    )
  )

  val previous_s1_pred = RegEnable(resp.s1, init=0.U.asTypeOf(resp.s1), s1_fire)

  val s2_redirect_s1_last_pred_vec = preds_needs_redirect_vec(previous_s1_pred, resp.s2)

  s2_redirect := s2_fire && (s2_redirect_s1_last_pred_vec.reduce(_||_) || resp.s2.fallThruError)

  XSError(resp.s2.is_minimal, "s2 should not be minimal!\n")

  npcGen.register(s2_redirect, resp.s2.getTarget, Some("s2_target"), 5)
  foldedGhGen.register(s2_redirect, s2_predicted_fh, Some("s2_FGH"), 5)
  ghistPtrGen.register(s2_redirect, s2_predicted_ghist_ptr, Some("s2_GHPtr"), 5)
  ghvBitWriteGens.zip(s2_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s2_ghv_wdatas(i), Some(s"s2_new_bit_$i"), 5)
  }

  XSPerfAccumulate("s2_redirect_because_target_diff", s2_fire && s2_redirect_s1_last_pred_vec(0))
  XSPerfAccumulate("s2_redirect_because_branch_num_diff", s2_fire && s2_redirect_s1_last_pred_vec(1))
  XSPerfAccumulate("s2_redirect_because_direction_diff", s2_fire && s2_redirect_s1_last_pred_vec(2))
  XSPerfAccumulate("s2_redirect_because_cfi_idx_diff", s2_fire && s2_redirect_s1_last_pred_vec(3))
  // XSPerfAccumulate("s2_redirect_because_shouldShiftVec_diff", s2_fire && s2_redirect_s1_last_pred_vec(4))
  // XSPerfAccumulate("s2_redirect_because_brTaken_diff", s2_fire && s2_redirect_s1_last_pred_vec(5))
  XSPerfAccumulate("s2_redirect_because_fallThroughError", s2_fire && resp.s2.fallThruError)

  XSPerfAccumulate("s2_redirect_when_taken", s2_redirect && resp.s2.taken && resp.s2.full_pred.hit)
  XSPerfAccumulate("s2_redirect_when_not_taken", s2_redirect && !resp.s2.taken && resp.s2.full_pred.hit)
  XSPerfAccumulate("s2_redirect_when_not_hit", s2_redirect && !resp.s2.full_pred.hit)


  // s3
  val s3_possible_predicted_ghist_ptrs = (0 to numBr).map(s3_ghist_ptr - _.U)
  val s3_predicted_ghist_ptr = Mux1H(resp.s3.lastBrPosOH, s3_possible_predicted_ghist_ptrs)

  val s3_possible_predicted_fhs = (0 to numBr).map(i =>
    s3_folded_gh.update(ghv, s3_ghist_ptr, i, if (i > 0) resp.s3.full_pred.br_taken_mask(i-1) else false.B))
  val s3_predicted_fh = Mux1H(resp.s3.lastBrPosOH, s3_possible_predicted_fhs)

  if (EnableGHistDiff) {
    val s3_predicted_ghist = WireInit(getHist(s3_predicted_ghist_ptr).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (resp.s3.shouldShiftVec(i)) {
        s3_predicted_ghist(i) := resp.s3.brTaken && (i==0).B
      }
    }
    when(s3_redirect) {
      s0_ghist := s3_predicted_ghist.asUInt
    }
  }

  val s3_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => (s3_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s3.shouldShiftVec(b) && s3_redirect))
  val s3_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => (
        (s3_ghist_ptr).value === n.U(log2Ceil(HistoryLength).W) + b.U && resp.s3.shouldShiftVec(b),
        resp.s3.full_pred.real_br_taken_mask()(b)
      ))
    )
  )

  val previous_s2_pred = RegEnable(resp.s2, init=0.U.asTypeOf(resp.s2), s2_fire)

  val s3_redirect_s2_last_pred_vec = preds_needs_redirect_vec(previous_s1_pred, resp.s2)
  // TODO: this presumably should compare previous_s2_pred with resp.s3; the vector is currently unused

  s3_redirect := s3_fire && !previous_s2_pred.fallThruError && (
    resp.s3.full_pred.real_br_taken_mask().asUInt =/= previous_s2_pred.full_pred.real_br_taken_mask().asUInt ||
    resp.s3.getTarget =/= previous_s2_pred.getTarget
  )

  npcGen.register(s3_redirect, resp.s3.getTarget, Some("s3_target"), 3)
  foldedGhGen.register(s3_redirect, s3_predicted_fh, Some("s3_FGH"), 3)
  ghistPtrGen.register(s3_redirect, s3_predicted_ghist_ptr, Some("s3_GHPtr"), 3)
  ghvBitWriteGens.zip(s3_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), s3_ghv_wdatas(i), Some(s"s3_new_bit_$i"), 3)
  }

  // Send signals to tell FTQ to override its entry
  val s2_ftq_idx = RegEnable(io.ftq_to_bpu.enq_ptr, s1_fire)
  val s3_ftq_idx = RegEnable(s2_ftq_idx, s2_fire)
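  // Each per-stage response below carries a valid bit, a hasRedirect flag
  // and the FTQ entry index latched when the request entered the pipeline,
  // so FTQ knows which entry an overriding stage is correcting; s1 never
  // overrides, hence its ftq_idx is DontCare.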
  io.bpu_to_ftq.resp.bits.s1.valid := s1_fire && !s1_flush
  io.bpu_to_ftq.resp.bits.s1.hasRedirect := false.B
  io.bpu_to_ftq.resp.bits.s1.ftq_idx := DontCare
  io.bpu_to_ftq.resp.bits.s2.valid := s2_fire && !s2_flush
  io.bpu_to_ftq.resp.bits.s2.hasRedirect := s2_redirect
  io.bpu_to_ftq.resp.bits.s2.ftq_idx := s2_ftq_idx
  io.bpu_to_ftq.resp.bits.s3.valid := s3_fire && !s3_flush
  io.bpu_to_ftq.resp.bits.s3.hasRedirect := s3_redirect
  io.bpu_to_ftq.resp.bits.s3.ftq_idx := s3_ftq_idx

  val redirect = io.ftq_to_bpu.redirect.bits

  predictors.io.update := io.ftq_to_bpu.update
  predictors.io.update.bits.ghist := getHist(io.ftq_to_bpu.update.bits.histPtr)
  predictors.io.redirect := io.ftq_to_bpu.redirect

  // Redirect logic
  val shift = redirect.cfiUpdate.shift
  val addIntoHist = redirect.cfiUpdate.addIntoHist
  // TODO: remove these below
  val shouldShiftVec = Mux(shift === 0.U, VecInit(0.U((1 << (log2Ceil(numBr) + 1)).W).asBools), VecInit((LowerMask(1.U << (shift-1.U))).asBools()))
  // TODO end

  val isBr = redirect.cfiUpdate.pd.isBr
  val taken = redirect.cfiUpdate.taken
  val real_br_taken_mask = (0 until numBr).map(i => shift === (i+1).U && taken && addIntoHist)

  val oldPtr = redirect.cfiUpdate.histPtr
  val oldFh = redirect.cfiUpdate.folded_hist
  val updated_ptr = oldPtr - shift
  val updated_fh = VecInit((0 to numBr).map(i => oldFh.update(ghv, oldPtr, i, taken && addIntoHist)))(shift)
  val redirect_ghv_wens = (0 until HistoryLength).map(n =>
    (0 until numBr).map(b => oldPtr.value === (n.U(log2Ceil(HistoryLength).W) + b.U) && shouldShiftVec(b) && io.ftq_to_bpu.redirect.valid))
  val redirect_ghv_wdatas = (0 until HistoryLength).map(n =>
    Mux1H(
      (0 until numBr).map(b => oldPtr.value === (n.U(log2Ceil(HistoryLength).W) + b.U) && shouldShiftVec(b)),
      real_br_taken_mask
    )
  )

  if (EnableGHistDiff) {
    val updated_ghist = WireInit(getHist(updated_ptr).asTypeOf(Vec(HistoryLength, Bool())))
    for (i <- 0 until numBr) {
      when (shift >= (i+1).U) {
        updated_ghist(i) := taken && addIntoHist && (i==0).B
      }
    }
    when(io.ftq_to_bpu.redirect.valid) {
      s0_ghist := updated_ghist.asUInt
    }
  }


  // val updatedGh = oldGh.update(shift, taken && addIntoHist)

  npcGen.register(io.ftq_to_bpu.redirect.valid, redirect.cfiUpdate.target, Some("redirect_target"), 2)
  foldedGhGen.register(io.ftq_to_bpu.redirect.valid, updated_fh, Some("redirect_FGHT"), 2)
  ghistPtrGen.register(io.ftq_to_bpu.redirect.valid, updated_ptr, Some("redirect_GHPtr"), 2)
  ghvBitWriteGens.zip(redirect_ghv_wens).zipWithIndex.map{case ((b, w), i) =>
    b.register(w.reduce(_||_), redirect_ghv_wdatas(i), Some(s"redirect_new_bit_$i"), 2)
  }
  // no need to assign s0_last_pred

  val need_reset = RegNext(reset.asBool) && !reset.asBool

  // Reset
  npcGen.register(need_reset, resetVector.U, Some("reset_pc"), 1)
  foldedGhGen.register(need_reset, 0.U.asTypeOf(s0_folded_gh), Some("reset_FGH"), 1)
  ghistPtrGen.register(need_reset, 0.U.asTypeOf(new CGHPtr), Some("reset_GHPtr"), 1)

  s0_pc := npcGen()
  s0_pc_reg := s0_pc
  s0_folded_gh := foldedGhGen()
  s0_ghist_ptr := ghistPtrGen()
  (ghv_write_datas zip ghvBitWriteGens).map{case (wd, d) => wd := d()}
  for (i <- 0 until HistoryLength) {
    ghv_wens(i) := Seq(s1_ghv_wens, s2_ghv_wens, s3_ghv_wens, redirect_ghv_wens).map(_(i).reduce(_||_)).reduce(_||_)
    when (ghv_wens(i)) {
      ghv(i) := ghv_write_datas(i)
    }
  }

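  // Pure-Scala sketch (illustrative only, not used by the hardware) of the
  // redirect-time history recovery above: the pointer rolls back by `shift`
  // and the `shift` youngest restored bits are rewritten, with only the
  // youngest of them possibly set (taken && addIntoHist), matching
  // real_br_taken_mask.
  private def redirectRecoveryModel(oldPtrValue: Int, shift: Int,
                                    taken: Boolean, addIntoHist: Boolean): (Int, Seq[Boolean]) = {
    val newPtrValue = oldPtrValue - shift
    // element b corresponds to ghv position (oldPtrValue - b)
    val restoredBits = Seq.tabulate(shift)(b => (b == shift - 1) && taken && addIntoHist)
    (newPtrValue, restoredBits)
  }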
  XSDebug(RegNext(reset.asBool) && !reset.asBool, "Resetting...\n")
  XSDebug(io.ftq_to_bpu.update.valid, p"Update from ftq\n")
  XSDebug(io.ftq_to_bpu.redirect.valid, p"Redirect from ftq\n")

  XSDebug("[BP0] fire=%d pc=%x\n", s0_fire, s0_pc)
  XSDebug("[BP1] v=%d r=%d cr=%d fire=%d flush=%d pc=%x\n",
    s1_valid, s1_ready, s1_components_ready, s1_fire, s1_flush, s1_pc)
  XSDebug("[BP2] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s2_valid, s2_ready, s2_components_ready, s2_fire, s2_redirect, s2_flush, s2_pc)
  XSDebug("[BP3] v=%d r=%d cr=%d fire=%d redirect=%d flush=%d pc=%x\n",
    s3_valid, s3_ready, s3_components_ready, s3_fire, s3_redirect, s3_flush, s3_pc)
  XSDebug("[FTQ] ready=%d\n", io.bpu_to_ftq.resp.ready)
  XSDebug("resp.s1.target=%x\n", resp.s1.getTarget)
  XSDebug("resp.s2.target=%x\n", resp.s2.getTarget)
  // XSDebug("s0_ghist: %b\n", s0_ghist.predHist)
  // XSDebug("s1_ghist: %b\n", s1_ghist.predHist)
  // XSDebug("s2_ghist: %b\n", s2_ghist.predHist)
  // XSDebug("s2_predicted_ghist: %b\n", s2_predicted_ghist.predHist)
  XSDebug(p"s0_ghist_ptr: $s0_ghist_ptr\n")
  XSDebug(p"s1_ghist_ptr: $s1_ghist_ptr\n")
  XSDebug(p"s2_ghist_ptr: $s2_ghist_ptr\n")
  XSDebug(p"s3_ghist_ptr: $s3_ghist_ptr\n")

  io.ftq_to_bpu.update.bits.display(io.ftq_to_bpu.update.valid)
  io.ftq_to_bpu.redirect.bits.display(io.ftq_to_bpu.redirect.valid)


  XSPerfAccumulate("s2_redirect", s2_redirect)
  XSPerfAccumulate("s3_redirect", s3_redirect)

  // assumes useBPD, i.e. predictors is a Composer
  val perfEvents = predictors.asInstanceOf[Composer].getPerfEvents
  generatePerfEvent()
}