/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.frontend

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import utility._
import xiangshan.ExceptionNO._

/** Circular pointer over the whole instruction buffer (flat view, IBufSize entries). */
class IBufPtr(implicit p: Parameters) extends CircularQueuePtr[IBufPtr](
  p => p(XSCoreParamsKey).IBufSize
) {
}

/** Circular pointer within a single bank (IBufSize / IBufNBank entries per bank). */
class IBufInBankPtr(implicit p: Parameters) extends CircularQueuePtr[IBufInBankPtr](
  p => p(XSCoreParamsKey).IBufSize / p(XSCoreParamsKey).IBufNBank
) {
}

/** Circular pointer selecting one of the IBufNBank banks. */
class IBufBankPtr(implicit p: Parameters) extends CircularQueuePtr[IBufBankPtr](
  p => p(XSCoreParamsKey).IBufNBank
) {
}

/**
  * IBuffer IO: a fetch packet in from the IFU, up to DecodeWidth instructions out to decode.
  * The *Bubble / *Redirect inputs are TopDown performance-analysis hints describing which
  * predictor (or memory violation) caused the flush; they only drive stall-reason counters.
  */
class IBufferIO(implicit p: Parameters) extends XSBundle {
  val flush = Input(Bool())
  val ControlRedirect = Input(Bool())
  val ControlBTBMissBubble = Input(Bool())
  val TAGEMissBubble = Input(Bool())
  val SCMissBubble = Input(Bool())
  val ITTAGEMissBubble = Input(Bool())
  val RASMissBubble = Input(Bool())
  val MemVioRedirect = Input(Bool())
  val in = Flipped(DecoupledIO(new FetchToIBuffer))
  val out = Vec(DecodeWidth, DecoupledIO(new CtrlFlow))
  val full = Output(Bool())
  // Backpressure hint from decode: when high, decode will take everything we present
  val decodeCanAccept = Input(Bool())
  val stallReason = new StallReasonIO(DecodeWidth)
}

/** One buffered instruction plus the metadata decode needs (PC, predecode, prediction, exceptions). */
class IBufEntry(implicit p: Parameters) extends XSBundle {
  val inst = UInt(32.W)
  val pc = UInt(VAddrBits.W)
  val foldpc = UInt(MemPredPCWidth.W)
  val pd = new PreDecodeInfo
  val pred_taken = Bool()
  val ftqPtr = new FtqPtr
  val ftqOffset = UInt(log2Ceil(PredictWidth).W)
  val exceptionType = UInt(ExceptionType.width.W)
  val crossPageIPFFix = Bool()
  val triggered = new TriggerCf

  /** Fill this entry from slot `i` of an incoming fetch packet; returns `this` for chaining. */
  def fromFetch(fetch: FetchToIBuffer, i: Int): IBufEntry = {
    inst   := fetch.instrs(i)
    pc     := fetch.pc(i)
    foldpc := fetch.foldpc(i)
    pd     := fetch.pd(i)
    // A valid ftqOffset for this slot means the branch predictor marked it taken
    pred_taken := fetch.ftqOffset(i).valid
    ftqPtr     := fetch.ftqPtr
    ftqOffset  := fetch.ftqOffset(i).bits
    exceptionType   := fetch.exceptionType(i)
    crossPageIPFFix := fetch.crossPageIPFFix(i)
    triggered       := fetch.triggered(i)
    this
  }

  /** Expand this entry into the CtrlFlow bundle consumed by decode.
    * The compact exceptionType is decoded back into a one-hot-style ExceptionVec here;
    * memory-dependence fields (storeSetHit, ssid, ...) are DontCare — they are filled later
    * in the backend.
    */
  def toCtrlFlow: CtrlFlow = {
    val cf = Wire(new CtrlFlow)
    cf.instr := inst
    cf.pc := pc
    cf.foldpc := foldpc
    cf.exceptionVec := 0.U.asTypeOf(ExceptionVec())
    cf.exceptionVec(instrPageFault)      := exceptionType === ExceptionType.pf
    cf.exceptionVec(instrGuestPageFault) := exceptionType === ExceptionType.gpf
    cf.exceptionVec(instrAccessFault)    := exceptionType === ExceptionType.af
    cf.trigger := triggered
    cf.pd := pd
    cf.pred_taken := pred_taken
    cf.crossPageIPFFix := crossPageIPFFix
    cf.storeSetHit := DontCare
    cf.waitForRobIdx := DontCare
    cf.loadWaitBit := DontCare
    cf.loadWaitStrict := DontCare
    cf.ssid := DontCare
    cf.ftqPtr := ftqPtr
    cf.ftqOffset := ftqOffset
    cf
  }
}

/**
  * Instruction buffer between IFU and decode.
  *
  * Organization: IBufSize raw registers viewed as IBufNBank interleaved banks
  * (entry physical index = bankID + inBankOffset * IBufNBank), so a dequeue of up to
  * DecodeWidth consecutive entries reads at most one entry per bank.
  *
  * Datapath: enqueue (banked register write) -> dequeue read (2-stage bank mux) ->
  * `outputEntries` output register -> io.out.  When the buffer is empty and decode can
  * accept, incoming instructions bypass the queue straight into `outputEntries`.
  */
class IBuffer(implicit p: Parameters) extends XSModule with HasCircularQueuePtrHelper with HasPerfEvents {
  val io = IO(new IBufferIO)

  // io alias
  private val decodeCanAccept = io.decodeCanAccept

  // Parameter Check
  private val bankSize = IBufSize / IBufNBank
  require(IBufSize % IBufNBank == 0, s"IBufNBank should divide IBufSize, IBufNBank: $IBufNBank, IBufSize: $IBufSize")
  require(IBufNBank >= DecodeWidth,
    s"IBufNBank should be equal or larger than DecodeWidth, IBufNBank: $IBufNBank, DecodeWidth: $DecodeWidth")

  // IBuffer is organized as raw registers
  // This is due to IBuffer is a huge queue, read & write port logic should be precisely controlled
  //                          . + + E E E - .
  //                          . + + E E E - .
  //                          . . + E E E - .
  //                          . . + E E E E -
  // As shown above, + means enqueue, - means dequeue, E is current content
  // When dequeue, read port is organized like a banked FIFO
  // Dequeue reads no more than 1 entry from each bank sequentially, this can be exploit to reduce area
  // Enqueue writes cannot benefit from this characteristic unless use a SRAM
  // For detail see Enqueue and Dequeue below
  private val ibuf: Vec[IBufEntry] = RegInit(VecInit.fill(IBufSize)(0.U.asTypeOf(new IBufEntry)))
  // Bank-major view of the same registers: bankedIBufView(bank)(offset) = ibuf(bank + offset * IBufNBank)
  private val bankedIBufView: Vec[Vec[IBufEntry]] = VecInit.tabulate(IBufNBank)(
    bankID => VecInit.tabulate(bankSize)(
      inBankOffset => ibuf(bankID + inBankOffset * IBufNBank)
    )
  )


  // Bypass wire
  private val bypassEntries = WireDefault(VecInit.fill(DecodeWidth)(0.U.asTypeOf(Valid(new IBufEntry))))
  // Normal read wire
  private val deqEntries = WireDefault(VecInit.fill(DecodeWidth)(0.U.asTypeOf(Valid(new IBufEntry))))
  // Output register
  private val outputEntries = RegInit(VecInit.fill(DecodeWidth)(0.U.asTypeOf(Valid(new IBufEntry))))
  // Count of valid output entries: priority-selects (index+1) of the highest valid slot,
  // assuming valids are contiguous from slot 0.
  // NOTE(review): Seq.range(1, DecodeWidth) has DecodeWidth-1 elements, so zip drops the last
  // slot's valid; this count is only consumed when the last slot is invalid
  // (outputEntriesIsNotFull) — confirm that gating covers every use.
  private val outputEntriesValidNum = PriorityMuxDefault(outputEntries.map(_.valid).zip(Seq.range(1, DecodeWidth).map(_.U)).reverse.toSeq, 0.U)

  // Between Bank
  // One bank pointer per output lane; lane i always reads bank deqBankPtrVec(i)
  private val deqBankPtrVec: Vec[IBufBankPtr] = RegInit(VecInit.tabulate(DecodeWidth)(_.U.asTypeOf(new IBufBankPtr)))
  private val deqBankPtr: IBufBankPtr = deqBankPtrVec(0)
  private val deqBankPtrVecNext = Wire(deqBankPtrVec.cloneType)
  // Inside Bank
  private val deqInBankPtr: Vec[IBufInBankPtr] = RegInit(VecInit.fill(IBufNBank)(0.U.asTypeOf(new IBufInBankPtr)))
  private val deqInBankPtrNext = Wire(deqInBankPtr.cloneType)

  val deqPtr = RegInit(0.U.asTypeOf(new IBufPtr))
  val deqPtrNext = Wire(deqPtr.cloneType)

  // One enqueue pointer per fetch slot so all PredictWidth writes can happen in parallel
  val enqPtrVec = RegInit(VecInit.tabulate(PredictWidth)(_.U.asTypeOf(new IBufPtr)))
  val enqPtr = enqPtrVec(0)

  val numTryEnq = WireDefault(0.U)
  val numEnq = Mux(io.in.fire, numTryEnq, 0.U)

  // empty and decode can accept insts
  val useBypass = enqPtr === deqPtr && decodeCanAccept

  // The number of decode accepted insts.
  // Since decode promises accepting insts in order, use priority encoder to simplify the accumulation.
  private val numOut = Wire(UInt(log2Ceil(DecodeWidth).W))
  private val numDeq = numOut

  // counter current number of valid
  val numValid = distanceBetween(enqPtr, deqPtr)
  val numValidAfterDeq = numValid - numDeq
  // counter next number of valid
  val numValidNext = numValid + numEnq - numDeq
  val allowEnq = RegInit(true.B)
  val numFromFetch = Mux(io.in.valid, PopCount(io.in.bits.enqEnable), 0.U)

  allowEnq := (IBufSize - PredictWidth).U >= numValidNext // Disable when almost full

  // enqOffset(i) = number of valid fetch slots before slot i = this slot's rank among enqueues
  val enqOffset = VecInit.tabulate(PredictWidth)(i => PopCount(io.in.bits.valid.asBools.take(i)))
  val enqData = VecInit.tabulate(PredictWidth)(i => Wire(new IBufEntry).fromFetch(io.in.bits, i))

  val outputEntriesIsNotFull = !outputEntries(DecodeWidth - 1).valid
  // numOut: how many entries leave the queue this cycle.
  // - decode accepts: drain up to DecodeWidth
  // - decode stalls but output register has room: top it up
  // - otherwise: nothing moves
  when(decodeCanAccept) {
    numOut := Mux(numValid >= DecodeWidth.U, DecodeWidth.U, numValid)
  }.elsewhen(outputEntriesIsNotFull) {
    numOut := Mux(numValid >= DecodeWidth.U - outputEntriesValidNum, DecodeWidth.U - outputEntriesValidNum, numValid)
  }.otherwise {
    numOut := 0.U
  }
  val numBypass = Wire(UInt(log2Ceil(DecodeWidth).W))
  // when using bypass, bypassed entries do not enqueue
  when(useBypass) {
    when(numFromFetch >= DecodeWidth.U) {
      // First DecodeWidth insts bypass to decode; the overflow enqueues normally
      numTryEnq := numFromFetch - DecodeWidth.U
      numBypass := DecodeWidth.U
    } .otherwise {
      numTryEnq := 0.U
      numBypass := numFromFetch
    }
  } .otherwise {
    numTryEnq := numFromFetch
    numBypass := 0.U
  }

  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Bypass
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  bypassEntries.zipWithIndex.foreach {
    case (entry, idx) =>
      // Select: fetch slot whose enqueue rank equals this output lane
      val validOH = Range(0, PredictWidth).map {
        i =>
          io.in.bits.valid(i) &&
          io.in.bits.enqEnable(i) &&
          enqOffset(i) === idx.asUInt
      } // Should be OneHot
      entry.valid := validOH.reduce(_ || _) && io.in.fire && !io.flush
      entry.bits := Mux1H(validOH, enqData)

      // Debug Assertion
      XSError(io.in.valid && PopCount(validOH) > 1.asUInt, "validOH is not OneHot")
  }

  // => Decode Output
  // clean register output
  io.out zip outputEntries foreach {
    case (io, reg) =>
      io.valid := reg.valid
      io.bits := reg.bits.toCtrlFlow
  }
  (outputEntries zip bypassEntries).zipWithIndex.foreach {
    case ((out, bypass), i) =>
      when(decodeCanAccept) {
        when(useBypass && io.in.valid) {
          out := bypass
        }.otherwise {
          out := deqEntries(i)
        }
      }.elsewhen(outputEntriesIsNotFull) {
        // Compact: keep already-held entries in place, append freshly dequeued ones behind them
        out.valid := deqEntries(i).valid
        out.bits := Mux(i.U < outputEntriesValidNum, out.bits, VecInit(deqEntries.take(i + 1).map(_.bits))(i.U - outputEntriesValidNum))
      }
  }

  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Enqueue
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  io.in.ready := allowEnq
  // Data
  ibuf.zipWithIndex.foreach {
    case (entry, idx) => {
      // Select: does any incoming slot target this physical entry?
      val validOH = Range(0, PredictWidth).map {
        i =>
          // Under bypass the first DecodeWidth slots skip the queue, so only
          // slots with rank >= DecodeWidth write, shifted down by DecodeWidth
          val useBypassMatch = enqOffset(i) >= DecodeWidth.U &&
            enqPtrVec(enqOffset(i) - DecodeWidth.U).value === idx.asUInt
          val normalMatch = enqPtrVec(enqOffset(i)).value === idx.asUInt
          val m = Mux(useBypass, useBypassMatch, normalMatch) // when using bypass, bypassed entries do not enqueue

          io.in.bits.valid(i) && io.in.bits.enqEnable(i) && m
      } // Should be OneHot
      val wen = validOH.reduce(_ || _) && io.in.fire && !io.flush

      // Write port
      // Each IBuffer entry has a PredictWidth -> 1 Mux
      val writeEntry = Mux1H(validOH, enqData)
      entry := Mux(wen, writeEntry, entry)

      // Debug Assertion
      XSError(io.in.valid && PopCount(validOH) > 1.asUInt, "validOH is not OneHot")
    }
  }
  // Pointer maintenance
  when (io.in.fire && !io.flush) {
    enqPtrVec := VecInit(enqPtrVec.map(_ + numTryEnq))
  }

  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Dequeue
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  val outputEntriesValidNumNext = Wire(UInt(log2Ceil(DecodeWidth).W))
  XSError(outputEntriesValidNumNext > DecodeWidth.U, "Ibuffer: outputEntriesValidNumNext > DecodeWidth.U")
  // Contiguous low-order mask of output slots that will hold valid entries next cycle
  val validVec = UIntToMask(outputEntriesValidNumNext(log2Ceil(DecodeWidth) - 1, 0), DecodeWidth)
  when(decodeCanAccept) {
    outputEntriesValidNumNext := Mux(useBypass, numBypass, numDeq)
  }.elsewhen(outputEntriesIsNotFull) {
    outputEntriesValidNumNext := outputEntriesValidNum + numDeq
  }.otherwise {
    outputEntriesValidNumNext := outputEntriesValidNum
  }
  // Data
  // Read port
  // 2-stage, IBufNBank * (bankSize -> 1) + IBufNBank -> 1
  // Should be better than IBufSize -> 1 in area, with no significant latency increase
  private val readStage1: Vec[IBufEntry] = VecInit.tabulate(IBufNBank)(
    bankID => Mux1H(UIntToOH(deqInBankPtr(bankID).value), bankedIBufView(bankID))
  )
  for (i <- 0 until DecodeWidth) {
    deqEntries(i).valid := validVec(i)
    deqEntries(i).bits := Mux1H(UIntToOH(deqBankPtrVec(i).value), readStage1)
  }
  // Pointer maintenance
  deqBankPtrVecNext := VecInit(deqBankPtrVec.map(_ + numDeq))
  deqPtrNext := deqPtr + numDeq
  deqInBankPtrNext.zip(deqInBankPtr).zipWithIndex.foreach {
    case ((ptrNext, ptr), idx) => {
      // validVec[k] == bankValid[deqBankPtr + k]
      // So bankValid[n] == validVec[n - deqBankPtr]
      val validIdx = Mux(idx.asUInt >= deqBankPtr.value,
        idx.asUInt - deqBankPtr.value,
        ((idx + IBufNBank).asUInt - deqBankPtr.value)(log2Ceil(IBufNBank) - 1, 0)
      )(log2Ceil(DecodeWidth) - 1, 0)
      // This bank's in-bank pointer advances only if its entry is among the numOut dequeued
      val bankAdvance = numOut > validIdx
      ptrNext := Mux(bankAdvance, ptr + 1.U, ptr)
    }
  }

  // Flush: reset all pointers to their initial skewed positions and drop held output
  when (io.flush) {
    allowEnq := true.B
    enqPtrVec := enqPtrVec.indices.map(_.U.asTypeOf(new IBufPtr))
    deqBankPtrVec := deqBankPtrVec.indices.map(_.U.asTypeOf(new IBufBankPtr))
    deqInBankPtr := VecInit.fill(IBufNBank)(0.U.asTypeOf(new IBufInBankPtr))
    deqPtr := 0.U.asTypeOf(new IBufPtr())
    outputEntries.foreach(_.valid := false.B)
  }.otherwise {
    deqPtr := deqPtrNext
    deqInBankPtr := deqInBankPtrNext
    deqBankPtrVec := deqBankPtrVecNext
  }
  io.full := !allowEnq

  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // TopDown
  /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
  // Record why the front end is bubbling: redirect causes are prioritized BTB > TAGE > SC > ITTAGE > RAS
  val topdown_stage = RegInit(0.U.asTypeOf(new FrontendTopDownBundle))
  topdown_stage := io.in.bits.topdown_info
  when(io.flush) {
    when(io.ControlRedirect) {
      when(io.ControlBTBMissBubble) {
        topdown_stage.reasons(TopDownCounters.BTBMissBubble.id) := true.B
      }.elsewhen(io.TAGEMissBubble) {
        topdown_stage.reasons(TopDownCounters.TAGEMissBubble.id) := true.B
      }.elsewhen(io.SCMissBubble) {
        topdown_stage.reasons(TopDownCounters.SCMissBubble.id) := true.B
      }.elsewhen(io.ITTAGEMissBubble) {
        topdown_stage.reasons(TopDownCounters.ITTAGEMissBubble.id) := true.B
      }.elsewhen(io.RASMissBubble) {
        topdown_stage.reasons(TopDownCounters.RASMissBubble.id) := true.B
      }
    }.elsewhen(io.MemVioRedirect) {
      topdown_stage.reasons(TopDownCounters.MemVioRedirectBubble.id) := true.B
    }.otherwise {
      topdown_stage.reasons(TopDownCounters.OtherRedirectBubble.id) := true.B
    }
  }


  val dequeueInsufficient = Wire(Bool())
  val matchBubble = Wire(UInt(log2Up(TopDownCounters.NumStallReasons.id).W))
  val deqValidCount = PopCount(validVec.asBools)
  val deqWasteCount = DecodeWidth.U - deqValidCount
  dequeueInsufficient := deqValidCount < DecodeWidth.U
  // Highest-priority recorded reason (reversed PriorityEncoder picks the largest set index)
  matchBubble := (TopDownCounters.NumStallReasons.id - 1).U - PriorityEncoder(topdown_stage.reasons.reverse)

  // Tag the wasted (invalid) trailing output lanes with the stall reason
  io.stallReason.reason.map(_ := 0.U)
  for (i <- 0 until DecodeWidth) {
    when(i.U < deqWasteCount) {
      io.stallReason.reason(DecodeWidth - i - 1) := matchBubble
    }
  }

  // Partial dequeue with no recorded frontend reason => fetch fragmentation
  when(!(deqWasteCount === DecodeWidth.U || topdown_stage.reasons.asUInt.orR)) {
    // should set reason for FetchFragmentationStall
    // topdown_stage.reasons(TopDownCounters.FetchFragmentationStall.id) := true.B
    for (i <- 0 until DecodeWidth) {
      when(i.U < deqWasteCount) {
        io.stallReason.reason(DecodeWidth - i - 1) := TopDownCounters.FetchFragBubble.id.U
      }
    }
  }

  // A backend-reported reason overrides anything computed here
  when(io.stallReason.backReason.valid) {
    io.stallReason.reason.map(_ := io.stallReason.backReason.bits)
  }

  // Debug info
  // Invariant: flat deqPtr must agree with the (bank, in-bank) pointer decomposition
  XSError(
    deqPtr.value =/= deqBankPtr.value + deqInBankPtr(deqBankPtr.value).value * IBufNBank.asUInt,
    "Dequeue PTR mismatch"
  )
  XSError(isBefore(enqPtr, deqPtr) && !isFull(enqPtr, deqPtr), "\ndeqPtr is older than enqPtr!\n")

  XSDebug(io.flush, "IBuffer Flushed\n")

  when(io.in.fire) {
    XSDebug("Enque:\n")
    XSDebug(p"MASK=${Binary(io.in.bits.valid)}\n")
    for(i <- 0 until PredictWidth){
      XSDebug(p"PC=${Hexadecimal(io.in.bits.pc(i))} ${Hexadecimal(io.in.bits.instrs(i))}\n")
    }
  }

  for (i <- 0 until DecodeWidth) {
    XSDebug(io.out(i).fire,
      p"deq: ${Hexadecimal(io.out(i).bits.instr)} PC=${Hexadecimal(io.out(i).bits.pc)}" +
      p"v=${io.out(i).valid} r=${io.out(i).ready} " +
      p"excpVec=${Binary(io.out(i).bits.exceptionVec.asUInt)} crossPageIPF=${io.out(i).bits.crossPageIPFFix}\n")
  }

  XSDebug(p"numValid: ${numValid}\n")
  XSDebug(p"EnqNum: ${numEnq}\n")
  XSDebug(p"DeqNum: ${numDeq}\n")

  // "Hungry": the buffer has been used at least once, is now empty, and the emptiness
  // was not caused by a just-seen flush (headBubble masks the first post-flush cycles)
  val afterInit = RegInit(false.B)
  val headBubble = RegInit(false.B)
  when (io.in.fire) { afterInit := true.B }
  when (io.flush) {
    headBubble := true.B
  } .elsewhen(numValid =/= 0.U) {
    headBubble := false.B
  }
  val instrHungry = afterInit && (numValid === 0.U) && !headBubble

  QueuePerf(IBufSize, numValid, !allowEnq)
  XSPerfAccumulate("flush", io.flush)
  XSPerfAccumulate("hungry", instrHungry)

  // Buffer holds something, but fewer than a full decode group
  val ibuffer_IDWidth_hvButNotFull = afterInit && (numValid =/= 0.U) && (numValid < DecodeWidth.U) && !headBubble
  XSPerfAccumulate("ibuffer_IDWidth_hvButNotFull", ibuffer_IDWidth_hvButNotFull)
  /*
  XSPerfAccumulate("ICacheMissBubble", Mux(matchBubbleVec(TopDownCounters.ICacheMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("ITLBMissBubble", Mux(matchBubbleVec(TopDownCounters.ITLBMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("ControlRedirectBubble", Mux(matchBubbleVec(TopDownCounters.ControlRedirectBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("MemVioRedirectBubble", Mux(matchBubbleVec(TopDownCounters.MemVioRedirectBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("OtherRedirectBubble", Mux(matchBubbleVec(TopDownCounters.OtherRedirectBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("BTBMissBubble", Mux(matchBubbleVec(TopDownCounters.BTBMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("OverrideBubble", Mux(matchBubbleVec(TopDownCounters.OverrideBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("FtqUpdateBubble", Mux(matchBubbleVec(TopDownCounters.FtqUpdateBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("FtqFullStall", Mux(matchBubbleVec(TopDownCounters.FtqFullStall.id), deqWasteCount, 0.U))
  XSPerfAccumulate("FetchFragmentBubble",
    Mux(deqWasteCount === DecodeWidth.U || topdown_stage.reasons.asUInt.orR, 0.U, deqWasteCount))
  XSPerfAccumulate("TAGEMissBubble", Mux(matchBubbleVec(TopDownCounters.TAGEMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("SCMissBubble", Mux(matchBubbleVec(TopDownCounters.SCMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("ITTAGEMissBubble", Mux(matchBubbleVec(TopDownCounters.ITTAGEMissBubble.id), deqWasteCount, 0.U))
  XSPerfAccumulate("RASMissBubble", Mux(matchBubbleVec(TopDownCounters.RASMissBubble.id), deqWasteCount, 0.U))
  */

  val perfEvents = Seq(
    ("IBuffer_Flushed  ", io.flush                                                                     ),
    ("IBuffer_hungry   ", instrHungry                                                                  ),
    ("IBuffer_1_4_valid", (numValid >  (0*(IBufSize/4)).U) & (numValid < (1*(IBufSize/4)).U)           ),
    ("IBuffer_2_4_valid", (numValid >= (1*(IBufSize/4)).U) & (numValid < (2*(IBufSize/4)).U)           ),
    ("IBuffer_3_4_valid", (numValid >= (2*(IBufSize/4)).U) & (numValid < (3*(IBufSize/4)).U)           ),
    ("IBuffer_4_4_valid", (numValid >= (3*(IBufSize/4)).U) & (numValid < (4*(IBufSize/4)).U)           ),
    ("IBuffer_full     ", numValid.andR                                                                ),
    ("Front_Bubble     ", PopCount((0 until DecodeWidth).map(i => io.out(i).ready && !io.out(i).valid)))
  )
  generatePerfEvent()
}