/***************************************************************************************
* Copyright (c) 2024 Beijing Institute of Open Source Chip (BOSC)
* Copyright (c) 2020-2024 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utility._
import utils._
import xiangshan._
import xiangshan.backend.Bundles.{DynInst, MemExuInput}
import xiangshan.backend.rob.RobPtr
import xiangshan.cache._
import xiangshan.backend.fu.FenceToSbuffer
import xiangshan.cache.wpu.ReplayCarry
import xiangshan.mem.prefetch.PrefetchReqBundle
import math._

object genWmask {
  def apply(addr: UInt, sizeEncode: UInt): UInt = {
    (LookupTree(sizeEncode, List(
      "b00".U -> 0x1.U,  // 0001 << addr(2:0)
      "b01".U -> 0x3.U,  // 0011
      "b10".U -> 0xf.U,  // 1111
      "b11".U -> 0xff.U  // 11111111
    )) << addr(2, 0)).asUInt
  }
}

object genVWmask {
  def apply(addr: UInt, sizeEncode: UInt): UInt = {
    (LookupTree(sizeEncode, List(
      "b00".U -> 0x1.U,  // 0001 << addr(3:0)
      "b01".U -> 0x3.U,  // 0011
      "b10".U -> 0xf.U,  // 1111
      "b11".U -> 0xff.U  // 11111111
    )) << addr(3, 0)).asUInt
  }
}
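
// Worked example (illustrative only, not referenced by the pipeline): for a
// double-word access with sizeEncode = "b11" to an address whose low bits are
// 4'b0011, genWmask returns 0xff << 3 inside an 8-byte lane, while genVWmask
// shifts the same base mask by addr(3,0) to place it inside a 16-byte (VLEN/8)
// lane. The tiny helper below only bundles the two calls together for clarity;
// its name and existence are assumptions made for this sketch.
object genWmaskExample {
  def apply(addr: UInt, sizeEncode: UInt): (UInt, UInt) =
    (genWmask(addr, sizeEncode), genVWmask(addr, sizeEncode))
}
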
object genBasemask {
  /**
   * Generate the base mask for an access, ignoring the address offset.
   *
   * @param addr       access address (not used to shift the mask)
   * @param sizeEncode encoded access size (b00: 1 byte, b01: 2 bytes, b10: 4 bytes, b11: 8 bytes)
   * @return the 16-byte aligned base mask (the address offset is not applied)
   *
   * Example:
   *   Address: 0x80000003, encoded size: b11
   *   Return: 0xff
   */
  def apply(addr: UInt, sizeEncode: UInt): UInt = {
    LookupTree(sizeEncode, List(
      "b00".U -> 0x1.U,
      "b01".U -> 0x3.U,
      "b10".U -> 0xf.U,
      "b11".U -> 0xff.U
    ))
  }
}

object shiftDataToLow {
  def apply(addr: UInt, data: UInt): UInt = {
    Mux(addr(3), (data >> 64).asUInt, data)
  }
}
object shiftMaskToLow {
  def apply(addr: UInt, mask: UInt): UInt = {
    Mux(addr(3), (mask >> 8).asUInt, mask)
  }
}
object shiftDataToHigh {
  def apply(addr: UInt, data: UInt): UInt = {
    Mux(addr(3), (data << 64).asUInt, data)
  }
}
object shiftMaskToHigh {
  def apply(addr: UInt, mask: UInt): UInt = {
    Mux(addr(3), (mask << 8).asUInt, mask)
  }
}
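
// Illustrative sketch (the helper below is an assumption for this example, not
// part of the design): dcache responses are VLEN bits wide (128 bits in the
// default configuration), so an 8-byte access that falls in the upper half
// (addr(3) === 1.U) has its data and mask moved to the low 64 data bits / low
// 8 mask bits before scalar data selection.
object shiftToLowExample {
  def apply(addr: UInt, data: UInt, mask: UInt): (UInt, UInt) =
    (shiftDataToLow(addr, data), shiftMaskToLow(addr, mask))
}
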
class LsPipelineBundle(implicit p: Parameters) extends XSBundle
  with HasDCacheParameters
  with HasVLSUParameters {
  val uop = new DynInst
  val vaddr = UInt(VAddrBits.W)
  // For exception vaddr generation
  val fullva = UInt(XLEN.W)
  val vaNeedExt = Bool()
  val isHyper = Bool()
  val paddr = UInt(PAddrBits.W)
  val gpaddr = UInt(XLEN.W)
  val isForVSnonLeafPTE = Bool()
  // val func = UInt(6.W)
  val mask = UInt((VLEN/8).W)
  val data = UInt((VLEN+1).W)
  val wlineflag = Bool() // store writes the whole cache line

  val miss = Bool()
  val tlbMiss = Bool()
  val ptwBack = Bool()
  val af = Bool()
  val nc = Bool()
  val mmio = Bool()
  val atomic = Bool()

  val forwardMask = Vec(VLEN/8, Bool())
  val forwardData = Vec(VLEN/8, UInt(8.W))

  // prefetch
  val isPrefetch = Bool()
  val isHWPrefetch = Bool()
  def isSWPrefetch = isPrefetch && !isHWPrefetch

  // misalignBuffer
  val isFrmMisAlignBuf = Bool()

  // vector
  val isvec = Bool()
  val isLastElem = Bool()
  val is128bit = Bool()
  val uop_unit_stride_fof = Bool()
  val usSecondInv = Bool()
  val elemIdx = UInt(elemIdxBits.W)
  val alignedType = UInt(alignTypeBits.W)
  val mbIndex = UInt(max(vlmBindexBits, vsmBindexBits).W)
  // val rob_idx_valid = Vec(2, Bool())
  // val inner_idx = Vec(2, UInt(3.W))
  // val rob_idx = Vec(2, new RobPtr)
  val reg_offset = UInt(vOffsetBits.W)
  val elemIdxInsideVd = UInt(elemIdxBits.W)
  // val offset = Vec(2, UInt(4.W))
  val vecActive = Bool() // 1: vector active element or scalar mem operation, 0: vector inactive element
  val is_first_ele = Bool()
  val vecBaseVaddr = UInt(VAddrBits.W)
  val vecVaddrOffset = UInt(VAddrBits.W)
  val vecTriggerMask = UInt((VLEN/8).W)
  // val flowPtr = new VlflowPtr() // VLFlowQueue ptr
  // val sflowPtr = new VsFlowPtr() // VSFlowQueue ptr

  // For debug usage
  val isFirstIssue = Bool()
  val hasROBEntry = Bool()

  // For load replay
  val isLoadReplay = Bool()
  val isFastPath = Bool()
  val isFastReplay = Bool()
  val replayCarry = new ReplayCarry(nWays)

  // For dcache miss load
  val mshrid = UInt(log2Up(cfg.nMissEntries).W)
  val handledByMSHR = Bool()
  val replacementUpdated = Bool()
  val missDbUpdated = Bool()

  val forward_tlDchannel = Bool()
  val dcacheRequireReplay = Bool()
  val delayedLoadError = Bool()
  val lateKill = Bool()
  val feedbacked = Bool()
  val ldCancel = ValidUndirectioned(UInt(log2Ceil(LoadPipelineWidth).W))
  // loadQueueReplay index
  val schedIndex = UInt(log2Up(LoadQueueReplaySize).W)
  // hardware prefetch and fast replay do not need to query the TLB
  val tlbNoQuery = Bool()

  // misalign
  val isMisalign = Bool()
  val isFinalSplit = Bool()
  val misalignWith16Byte = Bool()
  val misalignNeedWakeUp = Bool()
  val updateAddrValid = Bool()
}
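
// Construction sketch (illustrative assumption; the object below is not part of
// the design and is not referenced by the pipeline): a stage typically builds
// this bundle as a fully initialized Wire and then overrides only the fields it
// actually produces.
object LsPipelineBundleExample {
  def apply(vaddr: UInt, sizeEncode: UInt, uop: DynInst)(implicit p: Parameters): LsPipelineBundle = {
    val out = Wire(new LsPipelineBundle)
    out := DontCare                       // default everything first
    out.uop := uop
    out.vaddr := vaddr
    out.mask := genVWmask(vaddr, sizeEncode)
    out
  }
}
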
class LdPrefetchTrainBundle(implicit p: Parameters) extends LsPipelineBundle {
  val meta_prefetch = UInt(L1PfSourceBits.W)
  val meta_access = Bool()

  def fromLsPipelineBundle(input: LsPipelineBundle, latch: Boolean = false, enable: Bool = true.B) = {
    if (latch) vaddr := RegEnable(input.vaddr, enable) else vaddr := input.vaddr
    if (latch) fullva := RegEnable(input.fullva, enable) else fullva := input.fullva
    if (latch) vaNeedExt := RegEnable(input.vaNeedExt, enable) else vaNeedExt := input.vaNeedExt
    if (latch) isHyper := RegEnable(input.isHyper, enable) else isHyper := input.isHyper
    if (latch) paddr := RegEnable(input.paddr, enable) else paddr := input.paddr
    if (latch) gpaddr := RegEnable(input.gpaddr, enable) else gpaddr := input.gpaddr
    if (latch) isForVSnonLeafPTE := RegEnable(input.isForVSnonLeafPTE, enable) else isForVSnonLeafPTE := input.isForVSnonLeafPTE
    if (latch) mask := RegEnable(input.mask, enable) else mask := input.mask
    if (latch) data := RegEnable(input.data, enable) else data := input.data
    if (latch) uop := RegEnable(input.uop, enable) else uop := input.uop
    if (latch) wlineflag := RegEnable(input.wlineflag, enable) else wlineflag := input.wlineflag
    if (latch) miss := RegEnable(input.miss, enable) else miss := input.miss
    if (latch) tlbMiss := RegEnable(input.tlbMiss, enable) else tlbMiss := input.tlbMiss
    if (latch) ptwBack := RegEnable(input.ptwBack, enable) else ptwBack := input.ptwBack
    if (latch) af := RegEnable(input.af, enable) else af := input.af
    if (latch) nc := RegEnable(input.nc, enable) else nc := input.nc
    if (latch) mmio := RegEnable(input.mmio, enable) else mmio := input.mmio
    if (latch) forwardMask := RegEnable(input.forwardMask, enable) else forwardMask := input.forwardMask
    if (latch) forwardData := RegEnable(input.forwardData, enable) else forwardData := input.forwardData
    if (latch) isPrefetch := RegEnable(input.isPrefetch, enable) else isPrefetch := input.isPrefetch
    if (latch) isHWPrefetch := RegEnable(input.isHWPrefetch, enable) else isHWPrefetch := input.isHWPrefetch
    if (latch) isFrmMisAlignBuf := RegEnable(input.isFrmMisAlignBuf, enable) else isFrmMisAlignBuf := input.isFrmMisAlignBuf
    if (latch) isFirstIssue := RegEnable(input.isFirstIssue, enable) else isFirstIssue := input.isFirstIssue
    if (latch) hasROBEntry := RegEnable(input.hasROBEntry, enable) else hasROBEntry := input.hasROBEntry
    if (latch) dcacheRequireReplay := RegEnable(input.dcacheRequireReplay, enable) else dcacheRequireReplay := input.dcacheRequireReplay
    if (latch) schedIndex := RegEnable(input.schedIndex, enable) else schedIndex := input.schedIndex
    if (latch) tlbNoQuery := RegEnable(input.tlbNoQuery, enable) else tlbNoQuery := input.tlbNoQuery
    if (latch) isvec := RegEnable(input.isvec, enable) else isvec := input.isvec
    if (latch) isLastElem := RegEnable(input.isLastElem, enable) else isLastElem := input.isLastElem
    if (latch) is128bit := RegEnable(input.is128bit, enable) else is128bit := input.is128bit
    if (latch) vecActive := RegEnable(input.vecActive, enable) else vecActive := input.vecActive
    if (latch) is_first_ele := RegEnable(input.is_first_ele, enable) else is_first_ele := input.is_first_ele
    if (latch) uop_unit_stride_fof := RegEnable(input.uop_unit_stride_fof, enable) else uop_unit_stride_fof := input.uop_unit_stride_fof
    if (latch) usSecondInv := RegEnable(input.usSecondInv, enable) else usSecondInv := input.usSecondInv
    if (latch) reg_offset := RegEnable(input.reg_offset, enable) else reg_offset := input.reg_offset
    if (latch) elemIdx := RegEnable(input.elemIdx, enable) else elemIdx := input.elemIdx
    if (latch) alignedType := RegEnable(input.alignedType, enable) else alignedType := input.alignedType
    if (latch) mbIndex := RegEnable(input.mbIndex, enable) else mbIndex := input.mbIndex
    if (latch) elemIdxInsideVd := RegEnable(input.elemIdxInsideVd, enable) else elemIdxInsideVd := input.elemIdxInsideVd
    if (latch) vecBaseVaddr := RegEnable(input.vecBaseVaddr, enable) else vecBaseVaddr := input.vecBaseVaddr
    if (latch) vecVaddrOffset := RegEnable(input.vecVaddrOffset, enable) else vecVaddrOffset := input.vecVaddrOffset
    if (latch) vecTriggerMask := RegEnable(input.vecTriggerMask, enable) else vecTriggerMask := input.vecTriggerMask
    // if (latch) flowPtr := RegEnable(input.flowPtr, enable) else flowPtr := input.flowPtr
    // if (latch) sflowPtr := RegEnable(input.sflowPtr, enable) else sflowPtr := input.sflowPtr

    meta_prefetch := DontCare
    meta_access := DontCare
    forward_tlDchannel := DontCare
    mshrid := DontCare
    replayCarry := DontCare
    atomic := DontCare
    isLoadReplay := DontCare
    isFastPath := DontCare
    isFastReplay := DontCare
    handledByMSHR := DontCare
    replacementUpdated := DontCare
    missDbUpdated := DontCare
    delayedLoadError := DontCare
    lateKill := DontCare
    feedbacked := DontCare
    ldCancel := DontCare
  }

  def asPrefetchReqBundle(): PrefetchReqBundle = {
    val res = Wire(new PrefetchReqBundle)
    res.vaddr := this.vaddr
    res.paddr := this.paddr
    res.pc := this.uop.pc
    res.miss := this.miss
    res.pfHitStream := isFromStream(this.meta_prefetch)

    res
  }
}

class StPrefetchTrainBundle(implicit p: Parameters) extends LdPrefetchTrainBundle {}

class LqWriteBundle(implicit p: Parameters) extends LsPipelineBundle {
  // load inst replay information
  val rep_info = new LoadToLsqReplayIO
  // queue entry data, except flag bits, will be updated if writeQueue is true,
  // valid bit in LqWriteBundle will be ignored
  val data_wen_dup = Vec(6, Bool()) // dirty reg dup

  def fromLsPipelineBundle(input: LsPipelineBundle, latch: Boolean = false, enable: Bool = true.B) = {
    if(latch) vaddr := RegEnable(input.vaddr, enable) else vaddr := input.vaddr
    if(latch) fullva := RegEnable(input.fullva, enable) else fullva := input.fullva
    if(latch) vaNeedExt := RegEnable(input.vaNeedExt, enable) else vaNeedExt := input.vaNeedExt
    if(latch) isHyper := RegEnable(input.isHyper, enable) else isHyper := input.isHyper
    if(latch) paddr := RegEnable(input.paddr, enable) else paddr := input.paddr
    if(latch) gpaddr := RegEnable(input.gpaddr, enable) else gpaddr := input.gpaddr
    if(latch) isForVSnonLeafPTE := RegEnable(input.isForVSnonLeafPTE, enable) else isForVSnonLeafPTE := input.isForVSnonLeafPTE
    if(latch) mask := RegEnable(input.mask, enable) else mask := input.mask
    if(latch) data := RegEnable(input.data, enable) else data := input.data
    if(latch) uop := RegEnable(input.uop, enable) else uop := input.uop
    if(latch) wlineflag := RegEnable(input.wlineflag, enable) else wlineflag := input.wlineflag
    if(latch) miss := RegEnable(input.miss, enable) else miss := input.miss
    if(latch) tlbMiss := RegEnable(input.tlbMiss, enable) else tlbMiss := input.tlbMiss
    if(latch) ptwBack := RegEnable(input.ptwBack, enable) else ptwBack := input.ptwBack
    if(latch) mmio := RegEnable(input.mmio, enable) else mmio := input.mmio
    if(latch) atomic := RegEnable(input.atomic, enable) else atomic := input.atomic
    if(latch) forwardMask := RegEnable(input.forwardMask, enable) else forwardMask := input.forwardMask
    if(latch) forwardData := RegEnable(input.forwardData, enable) else forwardData := input.forwardData
    if(latch) isPrefetch := RegEnable(input.isPrefetch, enable) else isPrefetch := input.isPrefetch
    if(latch) isHWPrefetch := RegEnable(input.isHWPrefetch, enable) else isHWPrefetch := input.isHWPrefetch
    if(latch) isFrmMisAlignBuf := RegEnable(input.isFrmMisAlignBuf, enable) else isFrmMisAlignBuf := input.isFrmMisAlignBuf
    if(latch) isFirstIssue := RegEnable(input.isFirstIssue, enable) else isFirstIssue := input.isFirstIssue
    if(latch) hasROBEntry := RegEnable(input.hasROBEntry, enable) else hasROBEntry := input.hasROBEntry
    if(latch) isLoadReplay := RegEnable(input.isLoadReplay, enable) else isLoadReplay := input.isLoadReplay
    if(latch) isFastPath := RegEnable(input.isFastPath, enable) else isFastPath := input.isFastPath
    if(latch) isFastReplay := RegEnable(input.isFastReplay, enable) else isFastReplay := input.isFastReplay
    if(latch) mshrid := RegEnable(input.mshrid, enable) else mshrid := input.mshrid
    if(latch) forward_tlDchannel := RegEnable(input.forward_tlDchannel, enable) else forward_tlDchannel := input.forward_tlDchannel
    if(latch) replayCarry := RegEnable(input.replayCarry, enable) else replayCarry := input.replayCarry
    if(latch) dcacheRequireReplay := RegEnable(input.dcacheRequireReplay, enable) else dcacheRequireReplay := input.dcacheRequireReplay
    if(latch) schedIndex := RegEnable(input.schedIndex, enable) else schedIndex := input.schedIndex
    if(latch) handledByMSHR := RegEnable(input.handledByMSHR, enable) else handledByMSHR := input.handledByMSHR
    if(latch) replacementUpdated := RegEnable(input.replacementUpdated, enable) else replacementUpdated := input.replacementUpdated
    if(latch) missDbUpdated := RegEnable(input.missDbUpdated, enable) else missDbUpdated := input.missDbUpdated
    if(latch) delayedLoadError := RegEnable(input.delayedLoadError, enable) else delayedLoadError := input.delayedLoadError
    if(latch) lateKill := RegEnable(input.lateKill, enable) else lateKill := input.lateKill
    if(latch) feedbacked := RegEnable(input.feedbacked, enable) else feedbacked := input.feedbacked
    if(latch) isvec := RegEnable(input.isvec, enable) else isvec := input.isvec
    if(latch) is128bit := RegEnable(input.is128bit, enable) else is128bit := input.is128bit
    if(latch) vecActive := RegEnable(input.vecActive, enable) else vecActive := input.vecActive
    if(latch) uop_unit_stride_fof := RegEnable(input.uop_unit_stride_fof, enable) else uop_unit_stride_fof := input.uop_unit_stride_fof
    if(latch) reg_offset := RegEnable(input.reg_offset, enable) else reg_offset := input.reg_offset
    if(latch) mbIndex := RegEnable(input.mbIndex, enable) else mbIndex := input.mbIndex
    if(latch) elemIdxInsideVd := RegEnable(input.elemIdxInsideVd, enable) else elemIdxInsideVd := input.elemIdxInsideVd

    rep_info := DontCare
    data_wen_dup := DontCare
  }
}
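
// Usage sketch (the call-site shape is an assumption for illustration, not code
// copied from the pipeline): a load stage converts its LsPipelineBundle into the
// queue-write form, optionally latching every copied field for one cycle when
// latch = true. Fields not covered by fromLsPipelineBundle stay don't-care.
object LqWriteBundleExample {
  def apply(in: LsPipelineBundle, fire: Bool)(implicit p: Parameters): LqWriteBundle = {
    val out = Wire(new LqWriteBundle)
    out := DontCare                                    // default the uncopied fields
    out.fromLsPipelineBundle(in, latch = true, enable = fire)
    out
  }
}
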

class SqWriteBundle(implicit p: Parameters) extends LsPipelineBundle {
  val need_rep = Bool()
}

class LoadForwardQueryIO(implicit p: Parameters) extends XSBundle {
  val vaddr = Output(UInt(VAddrBits.W))
  val paddr = Output(UInt(PAddrBits.W))
  val mask = Output(UInt((VLEN/8).W))
  val uop = Output(new DynInst) // for replay
  val pc = Output(UInt(VAddrBits.W)) // for debug
  val valid = Output(Bool())

  val forwardMaskFast = Input(Vec((VLEN/8), Bool())) // resp to load_s1
  val forwardMask = Input(Vec((VLEN/8), Bool())) // resp to load_s2
  val forwardData = Input(Vec((VLEN/8), UInt(8.W))) // resp to load_s2

  // val lqIdx = Output(UInt(LoadQueueIdxWidth.W))
  val sqIdx = Output(new SqPtr)

  // dataInvalid suggests that store-to-load forwarding should happen, but the
  // data is not available yet. If dataInvalid, the load inst should be replayed
  // from RS. Feedback type should be RSFeedbackType.dataInvalid.
  val dataInvalid = Input(Bool()) // addr match, but data is not valid for now

  // matchInvalid suggests that, in the store-to-load forward logic, the paddr CAM
  // result does not equal the vaddr CAM result. If matchInvalid, a
  // microarchitectural exception should be raised to flush the SQ and the
  // committed sbuffer.
  val matchInvalid = Input(Bool()) // resp to load_s2

  // addrInvalid suggests that store-to-load forwarding should happen, but the
  // address (SSID) is not available yet. If addrInvalid, the load inst should be
  // replayed from RS. Feedback type should be RSFeedbackType.addrInvalid.
  val addrInvalid = Input(Bool())
}

// LoadForwardQueryIO used in the load pipeline
//
// Difference between PipeLoadForwardQueryIO and LoadForwardQueryIO:
// PipeIO uses a predecoded sqIdxMask for better forward timing
class PipeLoadForwardQueryIO(implicit p: Parameters) extends LoadForwardQueryIO {
  // val sqIdx = Output(new SqPtr) // for debug, should not be used in pipeline for timing reasons
  // sqIdxMask is calculated in an earlier stage for better timing
  val sqIdxMask = Output(UInt(StoreQueueSize.W))

  // dataInvalid: addr match, but data is not valid for now
  val dataInvalidFast = Input(Bool()) // resp to load_s1
  // val dataInvalid = Input(Bool()) // resp to load_s2
  val dataInvalidSqIdx = Input(new SqPtr) // resp to load_s2, sqIdx
  val addrInvalidSqIdx = Input(new SqPtr) // resp to load_s2, sqIdx
}
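
// Driving sketch (the object below and its exact mask encoding are assumptions
// made for illustration, not code taken from the pipeline): sqIdxMask is meant to
// be a predecoded form of the load's sqIdx, computed one stage earlier so the
// store-queue forward logic does not have to decode sqIdx on the critical path.
object SqIdxMaskExample {
  def apply(sqIdx: SqPtr, storeQueueSize: Int): UInt =
    ((1.U << sqIdx.value).asUInt - 1.U)(storeQueueSize - 1, 0) // bits below sqIdx.value set
}
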

// Query load queue for ld-ld violation
//
// Req should be sent in load_s1
// Resp will be generated 1 cycle later
//
// Note that query req may be !ready, as dcache is releasing a block
// If it happens, a replay from RS is needed.
class LoadNukeQueryReq(implicit p: Parameters) extends XSBundle { // provide lqIdx
  val uop = new DynInst
  // mask: load's data mask.
  val mask = UInt((VLEN/8).W)

  // paddr: load's paddr.
  val paddr = UInt(PAddrBits.W)
  // data_valid: load data is valid.
  val data_valid = Bool()
  // nc: is NC access
  val is_nc = Bool()
}

class LoadNukeQueryResp(implicit p: Parameters) extends XSBundle {
  // rep_frm_fetch: ld-ld violation check succeeded, replay from fetch.
  val rep_frm_fetch = Bool()
}

class LoadNukeQueryIO(implicit p: Parameters) extends XSBundle {
  val req = Decoupled(new LoadNukeQueryReq)
  val resp = Flipped(Valid(new LoadNukeQueryResp))
  val revoke = Output(Bool())
}

class StoreNukeQueryIO(implicit p: Parameters) extends XSBundle {
  // robIdx: requestor's (a store instruction) rob index for match logic.
  val robIdx = new RobPtr

  // paddr: requestor's (a store instruction) physical address for match logic.
  val paddr = UInt(PAddrBits.W)

  // mask: requestor's (a store instruction) data width mask for match logic.
  val mask = UInt((VLEN/8).W)

  // matchLine: if the store is a 128-bit vector store, the load unit needs to compare 128-bit vaddr.
  val matchLine = Bool()
}

class StoreMaBufToSqControlIO(implicit p: Parameters) extends XSBundle {
  // from storeMisalignBuffer to storeQueue, controls its sbuffer write
  val toStoreQueue = Output(new XSBundle {
    // this entry crosses a page boundary
    val crossPageWithHit = Bool()
    val crossPageCanDeq = Bool()
    // paddr of the high page
    val paddr = UInt(PAddrBits.W)

    val withSameUop = Bool()
  })
  // from storeQueue to storeMisalignBuffer, provides detailed info of this store
  val toStoreMisalignBuffer = Input(new XSBundle {
    val sqPtr = new SqPtr
    val doDeq = Bool()

    val uop = new DynInst()
  })
}

class StoreMaBufToVecStoreMergeBufferIO(implicit p: Parameters) extends VLSUBundle {
  val mbIndex = Output(UInt(vsmBindexBits.W))
  val flush = Output(Bool())
}

// Store byte valid mask write bundle
//
// Store byte valid mask write to SQ takes 2 cycles
class StoreMaskBundle(implicit p: Parameters) extends XSBundle {
  val sqIdx = new SqPtr
  val mask = UInt((VLEN/8).W)
}

class LoadDataFromDcacheBundle(implicit p: Parameters) extends DCacheBundle {
  // old dcache: optimize data sram read fanout
  // val bankedDcacheData = Vec(DCacheBanks, UInt(64.W))
  // val bank_oh = UInt(DCacheBanks.W)

  // new dcache
  val respDcacheData = UInt(VLEN.W)
  val forwardMask = Vec(VLEN/8, Bool())
  val forwardData = Vec(VLEN/8, UInt(8.W))
  val uop = new DynInst // for data selection, only fwen and fuOpType are used
  val addrOffset = UInt(4.W) // for data selection

  // forward tilelink D channel
  val forward_D = Bool()
  val forwardData_D = Vec(VLEN/8, UInt(8.W))

  // forward mshr data
  val forward_mshr = Bool()
  val forwardData_mshr = Vec(VLEN/8, UInt(8.W))

  val forward_result_valid = Bool()

  def mergeTLData(): UInt = {
    // merge TL D or MSHR data at load s2
    val dcache_data = respDcacheData
    val use_D = forward_D && forward_result_valid
    val use_mshr = forward_mshr && forward_result_valid
    Mux(
      use_D || use_mshr,
      Mux(
        use_D,
        forwardData_D.asUInt,
        forwardData_mshr.asUInt
      ),
      dcache_data
    )
  }

  def mergeLsqFwdData(dcacheData: UInt): UInt = {
    // merge dcache and lsq forward data at load s3
    val rdataVec = VecInit((0 until VLEN / 8).map(j =>
      Mux(forwardMask(j), forwardData(j), dcacheData(8*(j+1)-1, 8*j))
    ))
    rdataVec.asUInt
  }
}
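
// Usage sketch (the object below is an assumption for illustration, not pipeline
// code): load s2 first selects between the dcache response and the forwarded
// TL-D / MSHR data, then load s3 overlays the store-queue forwarded bytes on top
// of that result, byte by byte.
object LoadDataMergeExample {
  def apply(fromDcache: LoadDataFromDcacheBundle): UInt = {
    val s2_data = fromDcache.mergeTLData()   // dcache vs. TL-D / MSHR data
    fromDcache.mergeLsqFwdData(s2_data)      // overlay LSQ forwarded bytes
  }
}
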
// Load writeback data from load queue (refill)
class LoadDataFromLQBundle(implicit p: Parameters) extends XSBundle {
  val lqData = UInt(64.W) // load queue has merged data
  val uop = new DynInst // for data selection, only fwen and fuOpType are used
  val addrOffset = UInt(3.W) // for data selection

  def mergedData(): UInt = {
    lqData
  }
}

// Bundle for waking up loads / stores that are waiting
class MemWaitUpdateReq(implicit p: Parameters) extends XSBundle {
  val robIdx = Vec(backendParams.StaExuCnt, ValidIO(new RobPtr))
  val sqIdx = Vec(backendParams.StdCnt, ValidIO(new SqPtr))
}

object AddPipelineReg {
  class PipelineRegModule[T <: Data](gen: T) extends Module {
    val io = IO(new Bundle() {
      val in = Flipped(DecoupledIO(gen.cloneType))
      val out = DecoupledIO(gen.cloneType)
      val isFlush = Input(Bool())
    })

    val valid = RegInit(false.B)
    valid.suggestName("pipeline_reg_valid")
    when (io.out.fire) { valid := false.B }
    when (io.in.fire) { valid := true.B }
    when (io.isFlush) { valid := false.B }

    io.in.ready := !valid || io.out.ready
    io.out.bits := RegEnable(io.in.bits, io.in.fire)
    io.out.valid := valid //&& !isFlush
  }

  def apply[T <: Data]
  (left: DecoupledIO[T], right: DecoupledIO[T], isFlush: Bool,
   moduleName: Option[String] = None
  ): Unit = {
    val pipelineReg = Module(new PipelineRegModule[T](left.bits.cloneType))
    if (moduleName.nonEmpty) pipelineReg.suggestName(moduleName.get)
    pipelineReg.io.in <> left
    right <> pipelineReg.io.out
    pipelineReg.io.isFlush := isFlush
  }
}
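
// Connection sketch (the module below is an assumption for illustration and is
// not instantiated anywhere): AddPipelineReg inserts one register stage between
// two DecoupledIO endpoints and drops the buffered entry when isFlush is raised.
class AddPipelineRegExample[T <: Data](gen: T) extends Module {
  val io = IO(new Bundle {
    val in = Flipped(DecoupledIO(gen.cloneType))
    val out = DecoupledIO(gen.cloneType)
    val flush = Input(Bool())
  })
  AddPipelineReg(io.in, io.out, io.flush, Some("exampleStageReg"))
}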