/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import freechips.rocketchip.diplomacy.{IdRange, LazyModule, LazyModuleImp, TransferSizes}
import freechips.rocketchip.tilelink.{TLArbiter, TLBundleA, TLBundleD, TLClientNode, TLEdgeOut, TLMasterParameters, TLMasterPortParameters}
import xiangshan._
import xiangshan.mem._
import xiangshan.mem.Bundles._
import coupledL2.{MemBackTypeMM, MemBackTypeMMField, MemPageTypeNC, MemPageTypeNCField}

trait HasUncacheBufferParameters extends HasXSParameter with HasDCacheParameters {

  // merge the new (data, mask) into the old (data, mask): new bytes override old bytes
  def doMerge(oldData: UInt, oldMask: UInt, newData: UInt, newMask: UInt): (UInt, UInt) = {
    val resData = VecInit((0 until DataBytes).map(j =>
      Mux(newMask(j), newData(8*(j+1)-1, 8*j), oldData(8*(j+1)-1, 8*j))
    )).asUInt
    val resMask = newMask | oldMask
    (resData, resMask)
  }

  def INDEX_WIDTH = log2Up(UncacheBufferSize)
  def BLOCK_OFFSET = log2Up(XLEN / 8)
  def getBlockAddr(x: UInt) = x >> BLOCK_OFFSET
}

abstract class UncacheBundle(implicit p: Parameters) extends XSBundle with HasUncacheBufferParameters

abstract class UncacheModule(implicit p: Parameters) extends XSModule with HasUncacheBufferParameters


class UncacheFlushBundle extends Bundle {
  val valid = Output(Bool())
  val empty = Input(Bool())
}

class UncacheEntry(implicit p: Parameters) extends UncacheBundle {
  val cmd = UInt(M_SZ.W)
  val addr = UInt(PAddrBits.W)
  val vaddr = UInt(VAddrBits.W)
  val data = UInt(XLEN.W)
  val mask = UInt(DataBytes.W)
  val nc = Bool()
  val atomic = Bool()
  val memBackTypeMM = Bool()

  val resp_nderr = Bool()

  /* NOTE: if internal forwarding is supported, these fields can be uncommented. */
  // val fwd_data = UInt(XLEN.W)
  // val fwd_mask = UInt(DataBytes.W)

  def set(x: UncacheWordReq): Unit = {
    cmd := x.cmd
    addr := x.addr
    vaddr := x.vaddr
    data := x.data
    mask := x.mask
    nc := x.nc
    memBackTypeMM := x.memBackTypeMM
    atomic := x.atomic
    resp_nderr := false.B
    // fwd_data := 0.U
    // fwd_mask := 0.U
  }

  def update(x: UncacheWordReq): Unit = {
    val (resData, resMask) = doMerge(data, mask, x.data, x.mask)
    // realign the address to the lowest set bit of the merged mask
    val (resOffset, resFlag) = PriorityEncoderWithFlag(resMask)
    data := resData
    mask := resMask
    when(resFlag){
      addr := (getBlockAddr(addr) << BLOCK_OFFSET) | resOffset
      vaddr := (getBlockAddr(vaddr) << BLOCK_OFFSET) | resOffset
    }
  }

  def update(x: TLBundleD): Unit = {
    when(cmd === MemoryOpConstants.M_XRD) {
      data := x.data
    }
    resp_nderr := x.denied || x.corrupt
  }
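
  // NOTE (informal example): after a merge, PriorityEncoderWithFlag picks the lowest set
  // bit of the merged mask as the new byte offset; e.g. with XLEN = 64, merging masks
  // 0b00110000 and 0b11000000 gives 0b11110000, so the entry address is realigned to the
  // block base plus 4. On a TileLink grant (update(x: TLBundleD) above), only loads
  // (M_XRD) latch x.data; stores keep the merged write data, and denied/corrupt are
  // recorded in resp_nderr.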

  // def update(forwardData: UInt, forwardMask: UInt): Unit = {
  //   fwd_data := forwardData
  //   fwd_mask := forwardMask
  // }

  def toUncacheWordResp(eid: UInt): UncacheWordResp = {
    // val resp_fwd_data = VecInit((0 until DataBytes).map(j =>
    //   Mux(fwd_mask(j), fwd_data(8*(j+1)-1, 8*j), data(8*(j+1)-1, 8*j))
    // )).asUInt
    val resp_fwd_data = data
    val r = Wire(new UncacheWordResp)
    r := DontCare
    r.data := resp_fwd_data
    r.id := eid
    r.nderr := resp_nderr
    r.nc := nc
    r.is2lq := cmd === MemoryOpConstants.M_XRD
    r.miss := false.B
    r.replay := false.B
    r.tag_error := false.B
    r.error := false.B
    r
  }
}

class UncacheEntryState(implicit p: Parameters) extends DCacheBundle {
  // valid (-> waitSame) -> inflight -> waitReturn
  val valid = Bool()
  val inflight = Bool() // uncache -> L2
  val waitSame = Bool()
  val waitReturn = Bool() // uncache -> LSQ

  def init: Unit = {
    valid := false.B
    inflight := false.B
    waitSame := false.B
    waitReturn := false.B
  }

  def isValid(): Bool = valid
  def isInflight(): Bool = valid && inflight
  def isWaitReturn(): Bool = valid && waitReturn
  def isWaitSame(): Bool = valid && waitSame
  def can2Bus(): Bool = valid && !inflight && !waitSame && !waitReturn
  def can2Lsq(): Bool = valid && waitReturn
  def canMerge(): Bool = valid && !inflight
  def isFwdOld(): Bool = valid && (inflight || waitReturn)
  def isFwdNew(): Bool = valid && !inflight && !waitReturn

  def setValid(x: Bool): Unit = { valid := x }
  def setInflight(x: Bool): Unit = { inflight := x }
  def setWaitReturn(x: Bool): Unit = { waitReturn := x }
  def setWaitSame(x: Bool): Unit = { waitSame := x }

  def updateUncacheResp(): Unit = {
    assert(inflight, "The request was not sent but a response was received")
    inflight := false.B
    waitReturn := true.B
  }
  def updateReturn(): Unit = {
    valid := false.B
    inflight := false.B
    waitSame := false.B
    waitReturn := false.B
  }
}

class UncacheIO(implicit p: Parameters) extends DCacheBundle {
  val hartId = Input(UInt())
  val enableOutstanding = Input(Bool())
  val flush = Flipped(new UncacheFlushBundle)
  val lsq = Flipped(new UncacheWordIO)
  val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
}

// convert uncache (LSQ) requests to TileLink
// for now, we only deal with TL-UL

class Uncache()(implicit p: Parameters) extends LazyModule with HasXSParameter {
  override def shouldBeInlined: Boolean = false
  def idRange: Int = UncacheBufferSize

  val clientParameters = TLMasterPortParameters.v1(
    clients = Seq(TLMasterParameters.v1(
      "uncache",
      sourceId = IdRange(0, idRange)
    )),
    requestFields = Seq(MemBackTypeMMField(), MemPageTypeNCField())
  )
  val clientNode = TLClientNode(Seq(clientParameters))

  lazy val module = new UncacheImp(this)
}

/* Uncache Buffer */
class UncacheImp(outer: Uncache) extends LazyModuleImp(outer)
  with HasTLDump
  with HasXSParameter
  with HasUncacheBufferParameters
  with HasPerfEvents
{
  println(s"Uncache Buffer Size: $UncacheBufferSize entries")
  val io = IO(new UncacheIO)

  val (bus, edge) = outer.clientNode.out.head

  val req = io.lsq.req
  val resp = io.lsq.resp
  val mem_acquire = bus.a
  val mem_grant = bus.d
  val req_ready = WireInit(false.B)

  // assign default values to output signals
  bus.b.ready := false.B
  bus.c.valid := false.B
  bus.c.bits := DontCare
  bus.d.ready := false.B
  bus.e.valid := false.B
  bus.e.bits := DontCare
  io.lsq.req.ready := req_ready
  io.lsq.resp.valid := false.B
  io.lsq.resp.bits := DontCare
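
  /******************************************************************
   * Overall flow (summary of the stages below)
   *  e0/e1: requests from the LSQ are merged into an existing entry or
   *         allocated a new one; idResp returns the entry id (sid)
   *  q0:    one sendable entry is chosen and issued on TileLink channel A
   *  grant: the channel D response updates the entry (data / error bits)
   *  r0:    finished entries are returned to the LSQ
   *  f0/f1: valid store entries can be forwarded to the load units
   ******************************************************************/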


  /******************************************************************
   * Data Structure
   ******************************************************************/

  val entries = Reg(Vec(UncacheBufferSize, new UncacheEntry))
  val states = RegInit(VecInit(Seq.fill(UncacheBufferSize)(0.U.asTypeOf(new UncacheEntryState))))
  val fence = RegInit(Bool(), false.B)
  val s_idle :: s_inflight :: s_wait_return :: Nil = Enum(3)
  val uState = RegInit(s_idle)

  // drain buffer
  val empty = Wire(Bool())
  val f1_needDrain = Wire(Bool())
  val do_uarch_drain = RegNext(f1_needDrain)

  val q0_entry = Wire(new UncacheEntry)
  val q0_canSentIdx = Wire(UInt(INDEX_WIDTH.W))
  val q0_canSent = Wire(Bool())


  /******************************************************************
   * Functions
   ******************************************************************/
  def sizeMap[T <: Data](f: Int => T) = VecInit((0 until UncacheBufferSize).map(f))
  def sizeForeach[T <: Data](f: Int => Unit) = (0 until UncacheBufferSize).map(f)
  def isStore(e: UncacheEntry): Bool = e.cmd === MemoryOpConstants.M_XWR
  def isStore(x: UInt): Bool = x === MemoryOpConstants.M_XWR
  def addrMatch(x: UncacheEntry, y: UncacheWordReq): Bool = getBlockAddr(x.addr) === getBlockAddr(y.addr)
  def addrMatch(x: UncacheWordReq, y: UncacheEntry): Bool = getBlockAddr(x.addr) === getBlockAddr(y.addr)
  def addrMatch(x: UncacheEntry, y: UncacheEntry): Bool = getBlockAddr(x.addr) === getBlockAddr(y.addr)
  def addrMatch(x: UInt, y: UInt): Bool = getBlockAddr(x) === getBlockAddr(y)

  // the merged mask must be contiguous and naturally aligned
  def continueAndAlign(mask: UInt): Bool = {
    val res =
      PopCount(mask) === 1.U ||
      mask === 0b00000011.U ||
      mask === 0b00001100.U ||
      mask === 0b00110000.U ||
      mask === 0b11000000.U ||
      mask === 0b00001111.U ||
      mask === 0b11110000.U ||
      mask === 0b11111111.U
    res
  }

  def canMergePrimary(x: UncacheWordReq, e: UncacheEntry, eid: UInt): Bool = {
    // same vaddr block, same properties
    getBlockAddr(x.vaddr) === getBlockAddr(e.vaddr) &&
    x.cmd === e.cmd && x.nc && e.nc &&
    x.memBackTypeMM === e.memBackTypeMM && !x.atomic && !e.atomic &&
    continueAndAlign(x.mask | e.mask) &&
    // the entry must not be receiving its uncache response and must not be waitReturn,
    // because no wake-up signal is generated in these cases
    !(mem_grant.fire && mem_grant.bits.source === eid || states(eid).isWaitReturn())
  }

  def canMergeSecondary(eid: UInt): Bool = {
    // the old entry is neither inflight nor being selected to send this cycle
    states(eid).canMerge() && !(q0_canSent && q0_canSentIdx === eid)
  }
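
  // NOTE (informal example): a new request merges into an existing entry only if both are
  // non-atomic NC accesses with the same cmd, memBackTypeMM and vaddr block, the combined
  // mask stays contiguous and naturally aligned (e.g. 0b00000011 | 0b00001100 = 0b00001111
  // is accepted, while 0b00000011 | 0b00110000 = 0b00110011 is not), and the target entry
  // is not inflight, not waiting to return, and not chosen to be sent in the same cycle.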

  /******************************************************************
   * uState for non-outstanding
   ******************************************************************/

  switch(uState){
    is(s_idle){
      when(mem_acquire.fire){
        uState := s_inflight
      }
    }
    is(s_inflight){
      when(mem_grant.fire){
        uState := s_wait_return
      }
    }
    is(s_wait_return){
      when(resp.fire){
        uState := s_idle
      }
    }
  }


  /******************************************************************
   * Enter Buffer
   *  Version 0 (better timing)
   *    e0: judge, generate the alloc/merge write vectors
   *    e1: alloc
   *
   *  Version 1 (better performance)
   *    e0: solved in one cycle to keep the original performance
   *    e1: return idResp to set sid for the handshake
   ******************************************************************/

  /* e0: merge/alloc */
  val e0_fire = req.fire
  val e0_req_valid = req.valid
  val e0_req = req.bits

  val e0_rejectVec = Wire(Vec(UncacheBufferSize, Bool()))
  val e0_mergeVec = Wire(Vec(UncacheBufferSize, Bool()))
  val e0_allocWaitSameVec = Wire(Vec(UncacheBufferSize, Bool()))
  sizeForeach(i => {
    val valid = e0_req_valid && states(i).isValid()
    val isAddrMatch = addrMatch(e0_req, entries(i))
    val canMerge1 = canMergePrimary(e0_req, entries(i), i.U)
    val canMerge2 = canMergeSecondary(i.U)
    e0_rejectVec(i) := valid && isAddrMatch && !canMerge1
    e0_mergeVec(i) := valid && isAddrMatch && canMerge1 && canMerge2
    e0_allocWaitSameVec(i) := valid && isAddrMatch && canMerge1 && !canMerge2
  })
  assert(PopCount(e0_mergeVec) <= 1.U, "Uncache buffer should not merge multiple entries")

  val e0_invalidVec = sizeMap(i => !states(i).isValid())
  val e0_reject = do_uarch_drain || !e0_invalidVec.asUInt.orR || e0_rejectVec.reduce(_ || _)
  val (e0_mergeIdx, e0_canMerge) = PriorityEncoderWithFlag(e0_mergeVec)
  val (e0_allocIdx, e0_canAlloc) = PriorityEncoderWithFlag(e0_invalidVec)
  val e0_allocWaitSame = e0_allocWaitSameVec.reduce(_ || _)
  val e0_sid = Mux(e0_canMerge, e0_mergeIdx, e0_allocIdx)

  // e0_fire guarantees that the request has not been rejected
  when(e0_canMerge && e0_fire){
    entries(e0_mergeIdx).update(e0_req)
  }.elsewhen(e0_canAlloc && e0_fire){
    entries(e0_allocIdx).set(e0_req)
    states(e0_allocIdx).setValid(true.B)
    when(e0_allocWaitSame){
      states(e0_allocIdx).setWaitSame(true.B)
    }
  }

  req_ready := !e0_reject

  /* e1: return accept */
  io.lsq.idResp.valid := RegNext(e0_fire)
  io.lsq.idResp.bits.mid := RegEnable(e0_req.id, e0_fire)
  io.lsq.idResp.bits.sid := RegEnable(e0_sid, e0_fire)
  io.lsq.idResp.bits.is2lq := RegEnable(!isStore(e0_req.cmd), e0_fire)
  io.lsq.idResp.bits.nc := RegEnable(e0_req.nc, e0_fire)

  /******************************************************************
   * Uncache Req
   *  Version 0 (better timing)
   *    q0: choose which entry to send
   *    q0: send it
   *
   *  Version 1 (better performance)
   *    solved in one cycle to keep the original performance
   *  NOTE: "Enter Buffer" and "Uncache Req" do not form a continuous pipeline,
   *  because there is no guarantee that mem_acquire will always be ready.
   ******************************************************************/
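
  // NOTE (summary): q0 picks the first entry whose state allows sending (can2Bus, and in
  // non-outstanding mode only when uState is idle). The TileLink size is derived from the
  // byte-mask population count (1/2/4/8 bytes -> lgSize 0/1/2/3); any other count fails
  // the assertion below. Once the channel A request fires, valid same-block entries that
  // are not waiting to return are marked waitSame, so requests to one block are never
  // outstanding at the same time.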

  val q0_canSentVec = sizeMap(i =>
    (io.enableOutstanding || uState === s_idle) &&
    states(i).can2Bus()
  )
  val q0_res = PriorityEncoderWithFlag(q0_canSentVec)
  q0_canSentIdx := q0_res._1
  q0_canSent := q0_res._2
  q0_entry := entries(q0_canSentIdx)

  val size = PopCount(q0_entry.mask)
  val (lgSize, legal) = PriorityMuxWithFlag(Seq(
    1.U -> 0.U,
    2.U -> 1.U,
    4.U -> 2.U,
    8.U -> 3.U
  ).map(m => (size === m._1) -> m._2))
  assert(!(q0_canSent && !legal))

  val q0_load = edge.Get(
    fromSource = q0_canSentIdx,
    toAddress = q0_entry.addr,
    lgSize = lgSize
  )._2

  val q0_store = edge.Put(
    fromSource = q0_canSentIdx,
    toAddress = q0_entry.addr,
    lgSize = lgSize,
    data = q0_entry.data,
    mask = q0_entry.mask
  )._2

  val q0_isStore = q0_entry.cmd === MemoryOpConstants.M_XWR

  mem_acquire.valid := q0_canSent
  mem_acquire.bits := Mux(q0_isStore, q0_store, q0_load)
  mem_acquire.bits.user.lift(MemBackTypeMM).foreach(_ := q0_entry.memBackTypeMM)
  mem_acquire.bits.user.lift(MemPageTypeNC).foreach(_ := q0_entry.nc)
  when(mem_acquire.fire){
    states(q0_canSentIdx).setInflight(true.B)

    // q0 should mark entries that have to wait for the same block
    (0 until UncacheBufferSize).map(j =>
      when(states(j).isValid() && !states(j).isWaitReturn() && addrMatch(q0_entry, entries(j))){
        states(j).setWaitSame(true.B)
      }
    )
  }


  /******************************************************************
   * Uncache Resp
   ******************************************************************/

  val (_, _, refill_done, _) = edge.addr_inc(mem_grant)

  mem_grant.ready := true.B
  when (mem_grant.fire) {
    val id = mem_grant.bits.source
    entries(id).update(mem_grant.bits)
    states(id).updateUncacheResp()
    assert(refill_done, "Uncache response should be one beat only!")

    // clear the wait-same-block state
    (0 until UncacheBufferSize).map(j =>
      when(states(j).isValid() && states(j).isWaitSame() && addrMatch(entries(id), entries(j))){
        states(j).setWaitSame(false.B)
      }
    )
  }


  /******************************************************************
   * Return to LSQ
   ******************************************************************/

  val r0_canSentVec = sizeMap(i => states(i).can2Lsq())
  val (r0_canSentIdx, r0_canSent) = PriorityEncoderWithFlag(r0_canSentVec)
  resp.valid := r0_canSent
  resp.bits := entries(r0_canSentIdx).toUncacheWordResp(r0_canSentIdx)
  when(resp.fire){
    states(r0_canSentIdx).updateReturn()
  }
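
  // NOTE (summary): mem_grant is always accepted and must be a single beat. The grant
  // updates the entry (load data, error bits) and moves it to waitReturn; entries waiting
  // on the same block are released. With io.enableOutstanding set, several entries may be
  // inflight at once; otherwise the uState FSM allows only one request between
  // mem_acquire and the response to the LSQ.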


  /******************************************************************
   * Buffer Flush
   *  1. when io.flush.valid is true: drain the store queue and the uncache buffer
   *  2. when io.lsq.req.bits.atomic is true: not supported for now
   ******************************************************************/
  empty := !VecInit(states.map(_.isValid())).asUInt.orR
  io.flush.empty := empty


  /******************************************************************
   * Load Data Forward to loadunit
   *  f0: vaddr match, fast resp
   *  f1: mask & data select, merge; paddr match; resp
   *  NOTE: forward.paddr comes from the dtlb, which is far from uncache f0
   ******************************************************************/

  val f0_validMask = sizeMap(i => isStore(entries(i)) && states(i).isValid())
  val f0_fwdMaskCandidates = VecInit(entries.map(e => e.mask))
  val f0_fwdDataCandidates = VecInit(entries.map(e => e.data))
  val f1_fwdMaskCandidates = sizeMap(i => RegEnable(entries(i).mask, f0_validMask(i)))
  val f1_fwdDataCandidates = sizeMap(i => RegEnable(entries(i).data, f0_validMask(i)))
  val f1_tagMismatchVec = Wire(Vec(LoadPipelineWidth, Bool()))
  f1_needDrain := f1_tagMismatchVec.asUInt.orR && !empty
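
  // NOTE (summary): for each load pipeline, f0 CAMs the load vaddr against valid store
  // entries and returns a fast forward mask; f1 re-checks with the paddr from the dtlb,
  // merges data from older (inflight/waitReturn) and newer (idle) matching entries, and
  // produces the final forward data. If the vaddr and paddr CAM results disagree, the
  // buffer is drained (do_uarch_drain) and the forward is reported as matchInvalid.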

  for ((forward, i) <- io.forward.zipWithIndex) {
    val f0_fwdValid = forward.valid
    val f1_fwdValid = RegNext(f0_fwdValid)

    /* f0 */
    // vaddr match
    val f0_vtagMatches = sizeMap(w => addrMatch(entries(w).vaddr, forward.vaddr))
    val f0_flyTagMatches = sizeMap(w => f0_vtagMatches(w) && f0_validMask(w) && f0_fwdValid && states(w).isFwdOld())
    val f0_idleTagMatches = sizeMap(w => f0_vtagMatches(w) && f0_validMask(w) && f0_fwdValid && states(w).isFwdNew())
    // ONLY for fast use to get better timing
    val f0_flyMaskFast = shiftMaskToHigh(
      forward.vaddr,
      Mux1H(f0_flyTagMatches, f0_fwdMaskCandidates)
    ).asTypeOf(Vec(VDataBytes, Bool()))
    val f0_idleMaskFast = shiftMaskToHigh(
      forward.vaddr,
      Mux1H(f0_idleTagMatches, f0_fwdMaskCandidates)
    ).asTypeOf(Vec(VDataBytes, Bool()))

    /* f1 */
    val f1_flyTagMatches = RegEnable(f0_flyTagMatches, f0_fwdValid)
    val f1_idleTagMatches = RegEnable(f0_idleTagMatches, f0_fwdValid)
    val f1_fwdPAddr = RegEnable(forward.paddr, f0_fwdValid)
    // select
    val f1_flyMask = Mux1H(f1_flyTagMatches, f1_fwdMaskCandidates)
    val f1_flyData = Mux1H(f1_flyTagMatches, f1_fwdDataCandidates)
    val f1_idleMask = Mux1H(f1_idleTagMatches, f1_fwdMaskCandidates)
    val f1_idleData = Mux1H(f1_idleTagMatches, f1_fwdDataCandidates)
    // merge old (inflight) and new (idle) data
    val (f1_fwdDataTmp, f1_fwdMaskTmp) = doMerge(f1_flyData, f1_flyMask, f1_idleData, f1_idleMask)
    val f1_fwdMask = shiftMaskToHigh(f1_fwdPAddr, f1_fwdMaskTmp).asTypeOf(Vec(VDataBytes, Bool()))
    val f1_fwdData = shiftDataToHigh(f1_fwdPAddr, f1_fwdDataTmp).asTypeOf(Vec(VDataBytes, UInt(8.W)))
    // paddr match and mismatch judgement
    val f1_ptagMatches = sizeMap(w => addrMatch(RegEnable(entries(w).addr, f0_fwdValid), f1_fwdPAddr))
    f1_tagMismatchVec(i) := sizeMap(w =>
      RegEnable(f0_vtagMatches(w), f0_fwdValid) =/= f1_ptagMatches(w) && RegEnable(f0_validMask(w), f0_fwdValid) && f1_fwdValid
    ).asUInt.orR
    when(f1_tagMismatchVec(i)) {
      XSDebug("forward tag mismatch: pmatch %x vmatch %x vaddr %x paddr %x\n",
        f1_ptagMatches.asUInt,
        RegEnable(f0_vtagMatches.asUInt, f0_fwdValid),
        RegEnable(forward.vaddr, f0_fwdValid),
        RegEnable(forward.paddr, f0_fwdValid)
      )
    }
    // response
    forward.addrInvalid := false.B // addr in ubuffer is always ready
    forward.dataInvalid := false.B // data in ubuffer is always ready
    forward.matchInvalid := f1_tagMismatchVec(i) // paddr / vaddr cam result does not match
    for (j <- 0 until VDataBytes) {
      forward.forwardMaskFast(j) := f0_flyMaskFast(j) || f0_idleMaskFast(j)

      forward.forwardData(j) := f1_fwdData(j)
      forward.forwardMask(j) := false.B
      when(f1_fwdMask(j) && f1_fwdValid) {
        forward.forwardMask(j) := true.B
      }
    }

  }


  /******************************************************************
   * Debug / Performance
   ******************************************************************/

  /* Debug Counters */
  // print all input/output requests for debug purpose
  // print req/resp
  XSDebug(req.fire, "req cmd: %x addr: %x data: %x mask: %x\n",
    req.bits.cmd, req.bits.addr, req.bits.data, req.bits.mask)
  XSDebug(resp.fire, "data: %x\n", resp.bits.data)
  // print tilelink messages
  XSDebug(mem_acquire.valid, "mem_acquire valid, ready=%d ", mem_acquire.ready)
  mem_acquire.bits.dump(mem_acquire.valid)

  XSDebug(mem_grant.fire, "mem_grant fire ")
  mem_grant.bits.dump(mem_grant.fire)

  /* Performance Counters */
  XSPerfAccumulate("e0_reject", e0_reject && e0_req_valid)
  XSPerfAccumulate("e0_total_enter", e0_fire)
  XSPerfAccumulate("e0_merge", e0_fire && e0_canMerge)
  XSPerfAccumulate("e0_alloc_simple", e0_fire && e0_canAlloc && !e0_allocWaitSame)
  XSPerfAccumulate("e0_alloc_wait_same", e0_fire && e0_canAlloc && e0_allocWaitSame)
  XSPerfAccumulate("q0_acquire", q0_canSent)
  XSPerfAccumulate("q0_acquire_store", q0_canSent && q0_isStore)
  XSPerfAccumulate("q0_acquire_load", q0_canSent && !q0_isStore)
  XSPerfAccumulate("uncache_memBackTypeMM", io.lsq.req.fire && io.lsq.req.bits.memBackTypeMM)
  XSPerfAccumulate("uncache_mmio_store", io.lsq.req.fire && isStore(io.lsq.req.bits.cmd) && !io.lsq.req.bits.nc)
  XSPerfAccumulate("uncache_mmio_load", io.lsq.req.fire && !isStore(io.lsq.req.bits.cmd) && !io.lsq.req.bits.nc)
  XSPerfAccumulate("uncache_nc_store", io.lsq.req.fire && isStore(io.lsq.req.bits.cmd) && io.lsq.req.bits.nc)
  XSPerfAccumulate("uncache_nc_load", io.lsq.req.fire && !isStore(io.lsq.req.bits.cmd) && io.lsq.req.bits.nc)
  XSPerfAccumulate("uncache_outstanding", uState =/= s_idle && mem_acquire.fire)
  XSPerfAccumulate("forward_count", PopCount(io.forward.map(_.forwardMask.asUInt.orR)))
  XSPerfAccumulate("forward_vaddr_match_failed", PopCount(f1_tagMismatchVec))

  val perfEvents = Seq(
    ("uncache_mmio_store", io.lsq.req.fire && isStore(io.lsq.req.bits.cmd) && !io.lsq.req.bits.nc),
    ("uncache_mmio_load", io.lsq.req.fire && !isStore(io.lsq.req.bits.cmd) && !io.lsq.req.bits.nc),
    ("uncache_nc_store", io.lsq.req.fire && isStore(io.lsq.req.bits.cmd) && io.lsq.req.bits.nc),
    ("uncache_nc_load", io.lsq.req.fire && !isStore(io.lsq.req.bits.cmd) && io.lsq.req.bits.nc),
    ("uncache_outstanding", uState =/= s_idle && mem_acquire.fire),
    ("forward_count", PopCount(io.forward.map(_.forwardMask.asUInt.orR))),
    ("forward_vaddr_match_failed", PopCount(f1_tagMismatchVec))
  )

  generatePerfEvent()
  // End
}