/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan.ExceptionNO._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.mem._
import xiangshan.backend.fu.vector.Bundles._
import xiangshan.backend.fu.FuConfig._


class VSplitPipeline(isVStore: Boolean = false)(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VSplitPipelineIO(isVStore))
  // overridden by the load/store subclasses below
  def us_whole_reg(fuOpType: UInt): Bool = false.B
  def us_mask(fuOpType: UInt): Bool = false.B
  def us_fof(fuOpType: UInt): Bool = false.B
  // TODO: vdIdxReg should no longer be useful; don't delete it for now
  val vdIdxReg = RegInit(0.U(3.W))

  val s1_ready = WireInit(false.B)
  io.in.ready := s1_ready

  /**-----------------------------------------------------------
    * s0 stage
    * decode and generate AlignedType, uop mask, preIsSplit
    * ----------------------------------------------------------
    */
  val s0_uop = io.in.bits.uop
  val s0_vtype = s0_uop.vpu.vtype
  val s0_sew = s0_vtype.vsew
  val s0_eew = s0_uop.vpu.veew
  val s0_lmul = s0_vtype.vlmul
  // for whole-register and unit-stride mask accesses, emul is computed specially below
  val s0_fuOpType = s0_uop.fuOpType
  val s0_mop = s0_fuOpType(6, 5)
  val s0_nf = Mux(us_whole_reg(s0_fuOpType), 0.U, s0_uop.vpu.nf)
  val s0_vm = s0_uop.vpu.vm
  val s0_emul = Mux(us_whole_reg(s0_fuOpType), GenUSWholeEmul(s0_uop.vpu.nf), Mux(us_mask(s0_fuOpType), 0.U(mulBits.W), EewLog2(s0_eew) - s0_sew + s0_lmul))
  val s0_preIsSplit = !(isUnitStride(s0_mop) && !us_fof(s0_fuOpType))
  val s0_nfield = s0_nf +& 1.U

  val s0_valid = Wire(Bool())
  val s0_kill = io.in.bits.uop.robIdx.needFlush(io.redirect)
  val s0_can_go = s1_ready
  val s0_fire = s0_valid && s0_can_go
  val s0_out = Wire(new VLSBundle(isVStore))

  val isUsWholeReg = isUnitStride(s0_mop) && us_whole_reg(s0_fuOpType)
  val isMaskReg = isUnitStride(s0_mop) && us_mask(s0_fuOpType)
  val isSegment = s0_nf =/= 0.U && !us_whole_reg(s0_fuOpType)
  val instType = Cat(isSegment, s0_mop)
  val uopIdx = io.in.bits.uop.vpu.vuopIdx
  val uopIdxInField = GenUopIdxInField(instType, s0_emul, s0_lmul, uopIdx)
  val vdIdxInField = GenVdIdxInField(instType, s0_emul, s0_lmul, uopIdxInField)
  val lmulLog2 = Mux(s0_lmul.asSInt >= 0.S, 0.U, s0_lmul)
  val emulLog2 = Mux(s0_emul.asSInt >= 0.S, 0.U, s0_emul)
  val numEewLog2 = emulLog2 - EewLog2(s0_eew)
  val numSewLog2 = lmulLog2 - s0_sew
  val numFlowsSameVdLog2 = Mux(
    isIndexed(instType),
    log2Up(VLENB).U - s0_sew(1, 0),
    log2Up(VLENB).U - s0_eew
  )
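  // Illustrative example (assuming VLEN = 128, i.e. VLENB = 16): a unit-stride
  // access with eew = 32b (s0_eew = 2) gives numFlowsSameVdLog2 = 4 - 2 = 2,
  // i.e. 2^2 = 4 flows share one vd. Indexed accesses use sew instead, since
  // their data elements follow sew while only the index elements follow eew.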
  // numUops = nf * max(lmul, emul)
  val lmulLog2Pos = Mux(s0_lmul.asSInt < 0.S, 0.U, s0_lmul)
  val emulLog2Pos = Mux(s0_emul.asSInt < 0.S, 0.U, s0_emul)
  val numUops = Mux(
    isIndexed(s0_mop) && s0_lmul.asSInt > s0_emul.asSInt,
    (s0_nf +& 1.U) << lmulLog2Pos,
    (s0_nf +& 1.U) << emulLog2Pos
  )

  val vvl = io.in.bits.src_vl.asTypeOf(VConfig()).vl
  val evl = Mux(isUsWholeReg,
    GenUSWholeRegVL(io.in.bits.uop.vpu.nf +& 1.U, s0_eew),
    Mux(isMaskReg,
      GenUSMaskRegVL(vvl),
      vvl))
  val vvstart = io.in.bits.uop.vpu.vstart
  val alignedType = Mux(isIndexed(instType), s0_sew(1, 0), s0_eew)
  val broadenAlignedType = Mux(s0_preIsSplit, Cat("b0".U, alignedType), "b100".U) // unit-stride uses 128-bit memory accesses
  val flowsLog2 = GenRealFlowLog2(instType, s0_emul, s0_lmul, s0_eew, s0_sew)
  val flowsPrevThisUop = (uopIdxInField << flowsLog2).asUInt // # of flows before this uop in a field
  val flowsPrevThisVd = (vdIdxInField << numFlowsSameVdLog2).asUInt // # of flows before this vd in a field
  val flowsIncludeThisUop = ((uopIdxInField +& 1.U) << flowsLog2).asUInt // # of flows up to and including this uop
  val flowNum = io.in.bits.flowNum.get
  // max index in a vd; only used by indexed instructions to compute the index
  val maxIdxInVdIndex = GenVLMAX(Mux(s0_emul.asSInt > 0.S, 0.U, s0_emul), s0_eew)
  val indexVlMaxInVd = GenVlMaxMask(maxIdxInVdIndex, elemIdxBits)

  // For vector indexed instructions:
  // When emul is greater than lmul, multiple uops correspond to one vd, e.g.:
  //   vsetvli t1,t0,e8,m1,ta,ma    lmul = 1
  //   vluxei16.v v2,(a0),v8        emul = 2
  // In this case the flow mask must be right-shifted by flowsPrevThisUop, while
  // the mask passed to the merge buffer is right-shifted by flowsPrevThisVd, e.g.:
  //   vl = 9
  //   srcMask = 0x1FF
  //   uopIdxInField = 0 and vdIdxInField = 0, flowMask = 0x00FF, toMergeBuffMask = 0x01FF
  //   uopIdxInField = 1 and vdIdxInField = 0, flowMask = 0x0001, toMergeBuffMask = 0x01FF
  //   uopIdxInField = 2 and vdIdxInField = 1, flowMask = 0x0000, toMergeBuffMask = 0x0000
  //   uopIdxInField = 3 and vdIdxInField = 1, flowMask = 0x0000, toMergeBuffMask = 0x0000
  val isSpecialIndexed = isIndexed(instType) && s0_emul.asSInt > s0_lmul.asSInt

  val srcMask = GenFlowMask(Mux(s0_vm, Fill(VLEN, 1.U(1.W)), io.in.bits.src_mask), vvstart, evl, true)
  val srcMaskShiftBits = Mux(isSpecialIndexed, flowsPrevThisUop, flowsPrevThisVd)

  val flowMask = ((srcMask &
    UIntToMask(flowsIncludeThisUop.asUInt, VLEN + 1) &
    (~UIntToMask(flowsPrevThisUop.asUInt, VLEN)).asUInt
  ) >> srcMaskShiftBits)(VLENB - 1, 0)
  val indexedSrcMask = (srcMask >> flowsPrevThisVd).asUInt // only for indexed instructions
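  // Continuing the example above (vl = 9, eew = 16b, VLEN = 128, so 8 flows per
  // uop, i.e. flowsLog2 = 3): for uopIdxInField = 1, flowsPrevThisUop = 8 and
  // flowsIncludeThisUop = 16, so flowMask = ((0x1FF & 0xFFFF & ~0xFF) >> 8) = 0x1,
  // matching the table above.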

  // Used to calculate the element index.
  // See the split buffer for io.out.splitIdxOffset and the merge buffer for data merging.
  val indexedSplitOffset = Mux(isSpecialIndexed, flowsPrevThisUop - flowsPrevThisVd, 0.U) // only for indexed instructions with emul > lmul
  val vlmax = GenVLMAX(s0_lmul, s0_sew)

  // connect
  s0_out := DontCare
  s0_out match { case x =>
    x.uop := io.in.bits.uop
    x.uop.vpu.vl := evl
    x.uop.uopIdx := uopIdx
    x.uop.numUops := numUops
    x.uop.lastUop := (uopIdx +& 1.U) === numUops
    x.uop.vpu.nf := s0_nf
    x.flowMask := flowMask
    x.indexedSrcMask := indexedSrcMask // only vector indexed instructions use it
    x.indexedSplitOffset := indexedSplitOffset
    x.byteMask := GenUopByteMask(flowMask, Cat("b0".U, alignedType))(VLENB - 1, 0)
    x.fof := isUnitStride(s0_mop) && us_fof(s0_fuOpType)
    x.baseAddr := io.in.bits.src_rs1
    x.stride := io.in.bits.src_stride
    x.flowNum := flowNum
    x.nfields := s0_nfield
    x.vm := s0_vm
    x.usWholeReg := isUsWholeReg
    x.usMaskReg := isMaskReg
    x.eew := s0_eew
    x.sew := s0_sew
    x.emul := s0_emul
    x.lmul := s0_lmul
    x.vlmax := Mux(isUsWholeReg, evl, vlmax)
    x.instType := instType
    x.data := io.in.bits.src_vs3
    x.vdIdxInField := vdIdxInField
    x.preIsSplit := s0_preIsSplit
    x.alignedType := broadenAlignedType
    x.indexVlMaxInVd := indexVlMaxInVd
  }
  s0_valid := io.in.valid && !s0_kill
  /**-------------------------------------
    * s1 stage
    * ------------------------------------
    * generate UopOffset
    */
  val s1_valid = RegInit(false.B)
  val s1_kill = Wire(Bool())
  val s1_in = Wire(new VLSBundle(isVStore))
  val s1_can_go = io.out.ready && io.toMergeBuffer.req.ready
  val s1_fire = s1_valid && !s1_kill && s1_can_go

  s1_ready := s1_kill || !s1_valid || s1_can_go

  when(s0_fire){
    s1_valid := true.B
  }.elsewhen(s1_fire){
    s1_valid := false.B
  }.elsewhen(s1_kill){
    s1_valid := false.B
  }
  s1_in := RegEnable(s0_out, s0_fire)

  val s1_flowNum = s1_in.flowNum
  val s1_uop = s1_in.uop
  val s1_uopidx = s1_uop.vpu.vuopIdx
  val s1_nf = s1_uop.vpu.nf
  val s1_nfields = s1_in.nfields
  val s1_eew = s1_in.eew
  val s1_emul = s1_in.emul
  val s1_lmul = s1_in.lmul
  val s1_instType = s1_in.instType
  val s1_stride = s1_in.stride
  val s1_vmask = FillInterleaved(8, s1_in.byteMask)(VLEN-1, 0)
  val s1_alignedType = s1_in.alignedType
  val s1_isSpecialIndexed = isIndexed(s1_instType) && s1_emul.asSInt > s1_lmul.asSInt
  val s1_mask = Mux(s1_isSpecialIndexed, s1_in.indexedSrcMask, s1_in.flowMask)
  val s1_vdIdx = s1_in.vdIdxInField
  val s1_fof = s1_in.fof
  val s1_notIndexedStride = Mux( // stride for strided/unit-stride instructions
    isStrided(s1_instType),
    s1_stride(XLEN - 1, 0), // for strided load, stride = x[rs2]
    s1_nfields << s1_eew // for unit-stride load, stride = eew * NFIELDS
  )

  val stride = Mux(isIndexed(s1_instType), s1_stride, s1_notIndexedStride).asUInt // for indexed instructions, the index is read at split time
  val uopOffset = genVUopOffset(s1_instType, s1_fof, s1_uopidx, s1_nf, s1_eew, stride, s1_alignedType)
  val activeNum = Mux(s1_in.preIsSplit, PopCount(s1_in.flowMask), s1_flowNum)
  // for unit-stride, if the uop's address is 128-bit aligned, split it into one flow; otherwise split it into two
  val usLowBitsAddr = getCheckAddrLowBits(s1_in.baseAddr, maxMemByteNum) + getCheckAddrLowBits(uopOffset, maxMemByteNum)
  val usAligned128 = (getCheckAddrLowBits(usLowBitsAddr, maxMemByteNum) === 0.U) // addr is 128-bit aligned
  val usMask = Cat(0.U(VLENB.W), s1_in.byteMask) << getCheckAddrLowBits(usLowBitsAddr, maxMemByteNum)
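  // Illustrative example (assuming maxMemByteNum = 16, i.e. 128-bit accesses):
  // with baseAddr = 0x8008 and uopOffset = 0, usLowBitsAddr = 8, so the access
  // is not 128-bit aligned and is split into two flows; usMask shifts the
  // VLENB byte-mask bits up by 8 within a 2*VLENB-bit window so each flow
  // later extracts its own half.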

  s1_kill := s1_in.uop.robIdx.needFlush(io.redirect)

  // query mergeBuffer
  io.toMergeBuffer.req.valid := io.out.ready && s1_valid // only a uop that can go requests a MergeBuffer entry
  io.toMergeBuffer.req.bits.flowNum := activeNum
  io.toMergeBuffer.req.bits.data := s1_in.data
  io.toMergeBuffer.req.bits.uop := s1_in.uop
  io.toMergeBuffer.req.bits.mask := s1_mask
  io.toMergeBuffer.req.bits.vaddr := DontCare
  io.toMergeBuffer.req.bits.vdIdx := s1_vdIdx // TODO: vdIdxReg should no longer be useful; don't delete it for now
  io.toMergeBuffer.req.bits.fof := s1_in.fof
  io.toMergeBuffer.req.bits.vlmax := s1_in.vlmax
//  io.toMergeBuffer.req.bits.vdOffset :=

  // TODO: vdIdxReg should no longer be useful; don't delete it for now
//  when (s1_in.uop.lastUop && s1_fire || s1_kill) {
//    vdIdxReg := 0.U
//  }.elsewhen(s1_fire) {
//    vdIdxReg := vdIdxReg + 1.U
//    XSError(vdIdxReg + 1.U === 0.U, s"Overflow! The number of vd should be less than 8\n")
//  }
  // out connect
  io.out.valid := s1_valid && io.toMergeBuffer.resp.valid && (activeNum =/= 0.U) // if activeNum == 0, this uop does nothing and can be killed
  io.out.bits := s1_in
  io.out.bits.uopOffset := uopOffset
  io.out.bits.stride := stride
  io.out.bits.mBIndex := io.toMergeBuffer.resp.bits.mBIndex
  io.out.bits.usLowBitsAddr := usLowBitsAddr
  io.out.bits.usAligned128 := usAligned128
  io.out.bits.usMask := usMask

  XSPerfAccumulate("split_out", io.out.fire)
  XSPerfAccumulate("pipe_block", io.out.valid && !io.out.ready)
  XSPerfAccumulate("mbuffer_block", s1_valid && io.out.ready && !io.toMergeBuffer.resp.valid)
}
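
// VSplitBuffer holds a single entry from the split pipeline (uopq), splits it
// into flows indexed by splitIdx, and issues them one at a time; unit-stride
// entries issue at most two 128-bit flows.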
abstract class VSplitBuffer(isVStore: Boolean = false)(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VSplitBufferIO(isVStore))
  lazy val fuCfg = if(isVStore) VstuCfg else VlduCfg

  val uopq = Reg(new VLSBundle(isVStore))
  val allocated = RegInit(false.B)
  val needCancel = WireInit(false.B)
  val activeIssue = Wire(Bool())
  val inActiveIssue = Wire(Bool())
  val splitFinish = WireInit(false.B)

  // for split
  val splitIdx = RegInit(0.U(flowIdxBits.W))
  val strideOffsetReg = RegInit(0.U(VLEN.W))

  /**
    * Redirect
    */
  val cancelEnq = io.in.bits.uop.robIdx.needFlush(io.redirect)
  val canEnqueue = io.in.valid
  val needEnqueue = canEnqueue && !cancelEnq

  // enqueue
  val offset = PopCount(needEnqueue)
  val canAccept = !allocated || allocated && splitFinish && (activeIssue || inActiveIssue) // a valid entry can only be replaced after splitting finishes and its last flow issues
  io.in.ready := canAccept
  val doEnqueue = canAccept && needEnqueue

  when(doEnqueue){
    uopq := io.in.bits
  }

  // split uops
  val issueValid = allocated && !needCancel
  val issueEntry = uopq
  val issueMbIndex = issueEntry.mBIndex
  val issueFlowNum = issueEntry.flowNum
  val issueBaseAddr = issueEntry.baseAddr
  val issueUop = issueEntry.uop
  val issueUopIdx = issueUop.vpu.vuopIdx
  val issueInstType = issueEntry.instType
  val issueUopOffset = issueEntry.uopOffset
  val issueEew = issueEntry.eew
  val issueSew = issueEntry.sew
  val issueLmul = issueEntry.lmul
  val issueEmul = issueEntry.emul
  val issueAlignedType = issueEntry.alignedType
  val issuePreIsSplit = issueEntry.preIsSplit
  val issueByteMask = issueEntry.byteMask
  val issueUsMask = issueEntry.usMask
  val issueVLMAXMask = issueEntry.vlmax - 1.U
  val issueIsWholeReg = issueEntry.usWholeReg
  val issueVLMAXLog2 = GenVLMAXLog2(issueEntry.lmul, issueSew)
  val issueVlMaxInVd = issueEntry.indexVlMaxInVd
  val issueUsLowBitsAddr = issueEntry.usLowBitsAddr
  val issueUsAligned128 = issueEntry.usAligned128
  val elemIdx = GenElemIdx(
    instType = issueInstType,
    emul = issueEmul,
    lmul = issueLmul,
    eew = issueEew,
    sew = issueSew,
    uopIdx = issueUopIdx,
    flowIdx = splitIdx
  ) // element index within the instruction, used for exception reporting

  val splitIdxOffset = issueEntry.indexedSplitOffset + splitIdx

  val indexFlowInnerIdx = elemIdx & issueVlMaxInVd
  val nfIdx = Mux(issueIsWholeReg, 0.U, elemIdx >> issueVLMAXLog2)
  val fieldOffset = nfIdx << issueAlignedType // field offset inside a segment

  val indexedStride = IndexAddr( // index for indexed instructions
    index = issueEntry.stride,
    flow_inner_idx = indexFlowInnerIdx,
    eew = issueEew
  )
  val issueStride = Mux(isIndexed(issueInstType), indexedStride, strideOffsetReg)
  val vaddr = issueBaseAddr + issueUopOffset + issueStride
  val mask = genVWmask128(vaddr, issueAlignedType) // scalar mask for the flow
  val flowMask = issueEntry.flowMask
  val vecActive = (flowMask & UIntToOH(splitIdx)).orR
  /*
   * Unit-stride splits into one flow or two.
   * If the uop's address is 128-bit aligned, split it into one flow; otherwise split it into two.
   */
  val usSplitMask = genUSSplitMask(issueUsMask, splitIdx)
  val usMaskInSingleUop = (genUSSplitMask(issueUsMask, 1.U) === 0.U) // if the mask of the second split is zero, this uop doesn't need to be split
  val usNoSplit = (issueUsAligned128 || usMaskInSingleUop) &&
                  !issuePreIsSplit &&
                  (splitIdx === 0.U) // unit-stride uop that doesn't need to be split into two flows
  val usSplitVaddr = genUSSplitAddr(vaddr, splitIdx)
  val regOffset = getCheckAddrLowBits(issueUsLowBitsAddr, maxMemByteNum) // offset in the 256-bit vd
  XSError((splitIdx > 1.U && usNoSplit) || (splitIdx > 1.U && !issuePreIsSplit), "Unit-Stride addr split error!\n")

  val addrAligned = LookupTree(issueEew, List(
    "b00".U -> true.B,                        // b
    "b01".U -> (issueBaseAddr(0) === 0.U),    // h
    "b10".U -> (issueBaseAddr(1, 0) === 0.U), // w
    "b11".U -> (issueBaseAddr(2, 0) === 0.U)  // d
  ))

  // data
  io.out.bits match { case x =>
    x.uop := issueUop
    x.uop.exceptionVec := ExceptionNO.selectByFu(issueUop.exceptionVec, fuCfg)
    x.vaddr := Mux(!issuePreIsSplit, usSplitVaddr, vaddr)
    x.alignedType := issueAlignedType
    x.isvec := true.B
    x.mask := Mux(!issuePreIsSplit, usSplitMask, mask)
    x.reg_offset := regOffset // for merging unit-stride data
    x.vecActive := Mux(!issuePreIsSplit, true.B, vecActive) // currently, unit-stride flows are always sent to the pipeline
    x.is_first_ele := DontCare
    x.usSecondInv := usNoSplit
    x.elemIdx := elemIdx
    x.elemIdxInsideVd := splitIdxOffset // for unit-stride, this is the index of the two split memory requests (used for data merging)
    x.uop_unit_stride_fof := DontCare
    x.isFirstIssue := DontCare
    x.mBIndex := issueMbIndex
  }

  // redirect
  needCancel := uopq.uop.robIdx.needFlush(io.redirect) && allocated

  /* Execute logic */
  /** Issue to scalar pipeline **/
  val allowIssue = io.out.ready
  val issueCount = Mux(usNoSplit, 2.U, (PopCount(inActiveIssue) + PopCount(activeIssue))) // a unit-stride uop that needs no split still counts as two flows
  splitFinish := splitIdx >= (issueFlowNum - issueCount)
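  // Illustrative example (assuming the split pipeline records flowNum = 2 for
  // the unit-stride fast path): an aligned unit-stride uop asserts usNoSplit,
  // so issueCount = 2 and splitFinish holds on the very first issue
  // (splitIdx = 0 >= 2 - 2); a misaligned one issues two flows (splitIdx = 0,
  // then 1) before finishing.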

  // handshake
  activeIssue := issueValid && allowIssue && (vecActive || !issuePreIsSplit) // active issue; unit-stride flows count as active even when masked
  inActiveIssue := issueValid && !vecActive && issuePreIsSplit
  when (!issueEntry.uop.robIdx.needFlush(io.redirect)) {
    when (!splitFinish) {
      when (activeIssue || inActiveIssue) {
        // The uop has not been entirely split yet
        splitIdx := splitIdx + issueCount
        strideOffsetReg := Mux(!issuePreIsSplit, strideOffsetReg, strideOffsetReg + issueEntry.stride) // normal unit-stride doesn't use strideOffsetReg
      }
    }.otherwise {
      when (activeIssue || inActiveIssue) {
        // The uop is done splitting
        splitIdx := 0.U(flowIdxBits.W) // initialize flowIdx
        strideOffsetReg := 0.U
      }
    }
  }.otherwise {
    splitIdx := 0.U(flowIdxBits.W) // initialize flowIdx
    strideOffsetReg := 0.U
  }
  // allocated
  when(doEnqueue){ // if the enqueue must be cancelled, doEnqueue is false, so this branch has the highest priority
    allocated := true.B
  }.elsewhen(needCancel) { // redirect
    allocated := false.B
  }.elsewhen(splitFinish && (activeIssue || inActiveIssue)){ // dequeue
    allocated := false.B
  }

  // out connect
  io.out.valid := issueValid && (vecActive || !issuePreIsSplit) // TODO: inactive unit-stride uops should not be sent to the pipeline

  XSPerfAccumulate("out_valid", io.out.valid)
  XSPerfAccumulate("out_fire", io.out.fire)
  XSPerfAccumulate("out_fire_unitstride", io.out.fire && !issuePreIsSplit)
  XSPerfAccumulate("unitstride_vlenAlign", io.out.fire && !issuePreIsSplit && getCheckAddrLowBits(io.out.bits.vaddr, maxMemByteNum) === 0.U)
  XSPerfAccumulate("unitstride_invalid", io.out.ready && issueValid && !issuePreIsSplit && PopCount(io.out.bits.mask).orR)
}

class VSSplitBufferImp(implicit p: Parameters) extends VSplitBuffer(isVStore = true){
  // split data
  val splitData = genVSData(
    data = issueEntry.data.asUInt,
    elemIdx = splitIdxOffset,
    alignedType = issueAlignedType
  )
  val flowData = genVWdata(splitData, issueAlignedType)
  val usSplitData = genUSSplitData(issueEntry.data.asUInt, splitIdx, vaddr(3,0))

  val sqIdx = issueUop.sqIdx + splitIdx
  io.out.bits.uop.sqIdx := sqIdx
  io.out.bits.uop.exceptionVec(storeAddrMisaligned) := !addrAligned && !issuePreIsSplit && io.out.bits.mask.orR

  // send data to sq
  val vstd = io.vstd.get
  vstd.valid := issueValid && (vecActive || !issuePreIsSplit)
  vstd.bits.uop := issueUop
  vstd.bits.uop.sqIdx := sqIdx
  vstd.bits.data := Mux(!issuePreIsSplit, usSplitData, flowData)
  vstd.bits.debug := DontCare
  vstd.bits.vdIdx.get := DontCare
  vstd.bits.vdIdxInField.get := DontCare
  vstd.bits.mask.get := Mux(!issuePreIsSplit, usSplitMask, mask)

}

class VLSplitBufferImp(implicit p: Parameters) extends VSplitBuffer(isVStore = false){
  io.out.bits.uop.lqIdx := issueUop.lqIdx + splitIdx
  io.out.bits.uop.exceptionVec(loadAddrMisaligned) := !addrAligned && !issuePreIsSplit && io.out.bits.mask.orR
}

class VSSplitPipelineImp(implicit p: Parameters) extends VSplitPipeline(isVStore = true){
  override def us_whole_reg(fuOpType: UInt): Bool = fuOpType === VstuType.vsr
  override def us_mask(fuOpType: UInt): Bool = fuOpType === VstuType.vsm
  override def us_fof(fuOpType: UInt): Bool = false.B // there is no vector fof store
}

class VLSplitPipelineImp(implicit p: Parameters) extends VSplitPipeline(isVStore = false){

  override def us_whole_reg(fuOpType: UInt): Bool = fuOpType === VlduType.vlr
  override def us_mask(fuOpType: UInt): Bool = fuOpType === VlduType.vlm
  override def us_fof(fuOpType: UInt): Bool = fuOpType === VlduType.vleff
}
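
// Top level: the split pipeline allocates a merge-buffer entry, then hands the
// uop to the split buffer through a skid buffer; load (VL) and store (VS)
// variants follow.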
class VLSplitImp(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VSplitIO(isVStore=false))
  val splitPipeline = Module(new VLSplitPipelineImp())
  val splitBuffer = Module(new VLSplitBufferImp())
  // Split Pipeline
  splitPipeline.io.in <> io.in
  splitPipeline.io.redirect <> io.redirect
  io.toMergeBuffer <> splitPipeline.io.toMergeBuffer

  // skid buffer
  skidBuffer(splitPipeline.io.out, splitBuffer.io.in, splitBuffer.io.in.bits.uop.robIdx.needFlush(io.redirect), "VLSplitSkidBuffer")

  // Split Buffer
  splitBuffer.io.redirect <> io.redirect
  io.out <> splitBuffer.io.out
}

class VSSplitImp(implicit p: Parameters) extends VLSUModule{
  val io = IO(new VSplitIO(isVStore=true))
  val splitPipeline = Module(new VSSplitPipelineImp())
  val splitBuffer = Module(new VSSplitBufferImp())
  // Split Pipeline
  splitPipeline.io.in <> io.in
  splitPipeline.io.redirect <> io.redirect
  io.toMergeBuffer <> splitPipeline.io.toMergeBuffer

  // skid buffer
  skidBuffer(splitPipeline.io.out, splitBuffer.io.in, splitBuffer.io.in.bits.uop.robIdx.needFlush(io.redirect), "VSSplitSkidBuffer")

  // Split Buffer
  splitBuffer.io.redirect <> io.redirect
  io.out <> splitBuffer.io.out
  io.vstd.get <> splitBuffer.io.vstd.get
}