/***************************************************************************************
 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
 * Copyright (c) 2020-2021 Peng Cheng Laboratory
 *
 * XiangShan is licensed under Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *          http://license.coscl.org.cn/MulanPSL2
 *
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
 *
 * See the Mulan PSL v2 for more details.
 ***************************************************************************************/

package xiangshan.mem

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.backend.rob.RobPtr
import xiangshan.backend.Bundles._
import xiangshan.backend.fu.FuType
import xiangshan.backend.fu.vector.Bundles.VEew

/**
 * Commonly used parameters and functions in the VLSU
 */
trait VLSUConstants {
  val VLEN = 128
  // for packing unit-stride flows
  val AlignedNum = 4 // 1/2/4/8
  def VLENB = VLEN/8
  def vOffsetBits = log2Up(VLENB) // bit width to index an offset inside a vector reg
  lazy val vlmBindexBits = 8 // will be overridden later
  lazy val vsmBindexBits = 8 // will be overridden later

  def alignTypes = 5 // eew/sew = 1/2/4/8, the last indicates a 128-bit element
  def alignTypeBits = log2Up(alignTypes)
  def maxMUL = 8
  def maxFields = 8
  /**
   * In the most extreme case, e.g. a segment indexed instruction with eew=64, emul=8, sew=8, lmul=1
   * and nf=8, each data reg is mapped to 8 index regs and there are 8 data regs in total,
   * one for each field. Therefore an instruction can be divided into at most 64 uops.
   */
  def maxUopNum = maxMUL * maxFields // 64
  def maxFlowNum = 16
  def maxElemNum = maxMUL * maxFlowNum // 128
  // def uopIdxBits = log2Up(maxUopNum) // to index uop inside an robIdx
  def elemIdxBits = log2Up(maxElemNum) + 1 // to index which element in an instruction
  def flowIdxBits = log2Up(maxFlowNum) + 1 // to index which flow in a uop
  def fieldBits = log2Up(maxFields) + 1 // 4 bits to indicate 1~8

  def ewBits = 3 // bit width of EEW/SEW
  def mulBits = 3 // bit width of emul/lmul

  def getSlice(data: UInt, i: Int, alignBits: Int): UInt = {
    require(data.getWidth >= (i+1) * alignBits)
    data((i+1) * alignBits - 1, i * alignBits)
  }
  def getNoAlignedSlice(data: UInt, i: Int, alignBits: Int): UInt = {
    data(i * 8 + alignBits - 1, i * 8)
  }

  def getByte(data: UInt, i: Int = 0) = getSlice(data, i, 8)
  def getHalfWord(data: UInt, i: Int = 0) = getSlice(data, i, 16)
  def getWord(data: UInt, i: Int = 0) = getSlice(data, i, 32)
  def getDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 64)
  def getDoubleDoubleWord(data: UInt, i: Int = 0) = getSlice(data, i, 128)
}

trait HasVLSUParameters extends HasXSParameter with VLSUConstants {
  override val VLEN = coreParams.VLEN
  override lazy val vlmBindexBits = log2Up(coreParams.VlMergeBufferSize)
  override lazy val vsmBindexBits = log2Up(coreParams.VsMergeBufferSize)
  lazy val maxMemByteNum = 16 // maximum number of bytes for a single memory access
  /**
   * Get the low bits of an address for alignment checking.
   * @param addr Address to be checked
   * @param width Width for checking alignment
   */
  def getCheckAddrLowBits(addr: UInt, width: Int): UInt = addr(log2Up(width) - 1, 0)
  def getOverflowBit(in: UInt, width: Int): UInt = in(log2Up(width))
  def isUnitStride(instType: UInt) = instType(1, 0) === "b00".U
  def isStrided(instType: UInt) = instType(1, 0) === "b10".U
  def isIndexed(instType: UInt) = instType(0) === "b1".U
  def isNotIndexed(instType: UInt) = instType(0) === "b0".U
  def isSegment(instType: UInt) = instType(2) === "b1".U
  def is128Bit(alignedType: UInt) = alignedType(2) === "b1".U

  def mergeDataWithMask(oldData: UInt, newData: UInt, mask: UInt): Vec[UInt] = {
    require(oldData.getWidth == newData.getWidth)
    require(oldData.getWidth == mask.getWidth * 8)
    VecInit(mask.asBools.zipWithIndex.map { case (en, i) =>
      Mux(en, getByte(newData, i), getByte(oldData, i))
    })
  }

  // def asBytes(data: UInt) = {
  //   require(data.getWidth % 8 == 0)
  //   (0 until data.getWidth/8).map(i => getByte(data, i))
  // }

  def mergeDataWithElemIdx(
    oldData: UInt,
    newData: Seq[UInt],
    alignedType: UInt,
    elemIdx: Seq[UInt],
    valids: Seq[Bool]
  ): UInt = {
    require(newData.length == elemIdx.length)
    require(newData.length == valids.length)
    LookupTree(alignedType, List(
      "b00".U -> VecInit(elemIdx.map(e => UIntToOH(e(3, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getByte(oldData, i) +: newData.map(getByte(_))
        )}).asUInt,
      "b01".U -> VecInit(elemIdx.map(e => UIntToOH(e(2, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getHalfWord(oldData, i) +: newData.map(getHalfWord(_))
        )}).asUInt,
      "b10".U -> VecInit(elemIdx.map(e => UIntToOH(e(1, 0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getWord(oldData, i) +: newData.map(getWord(_))
        )}).asUInt,
      "b11".U -> VecInit(elemIdx.map(e => UIntToOH(e(0)).asBools).transpose.zipWithIndex.map { case (selVec, i) =>
        ParallelPosteriorityMux(
          true.B +: selVec.zip(valids).map(x => x._1 && x._2),
          getDoubleWord(oldData, i) +: newData.map(getDoubleWord(_))
        )}).asUInt
    ))
  }

  def mergeDataWithElemIdx(oldData: UInt, newData: UInt, alignedType: UInt, elemIdx: UInt): UInt = {
    mergeDataWithElemIdx(oldData, Seq(newData), alignedType, Seq(elemIdx), Seq(true.B))
  }
  /**
   * Merge 128-bit unit-stride data byte by byte.
   */
  object mergeDataByByte {
    def apply(oldData: UInt, newData: UInt, mask: UInt): UInt = {
      val selVec = Seq(mask).map(_.asBools).transpose
      VecInit(selVec.zipWithIndex.map{ case (selV, i) =>
        ParallelPosteriorityMux(
          true.B +: selV.map(x => x),
          getByte(oldData, i) +: Seq(getByte(newData, i))
        )}).asUInt
    }
  }

  /**
   * Merge unit-stride data into 256 bits (two 128-bit halves).
   * With 3 ports:
   *   port0 uses a 6-to-1 multiplexer -> (128'b0, data) or (data, 128'b0) or (data, port2data) or (port2data, data) or (data, port3data) or (port3data, data)
   *   port1 uses a 4-to-1 multiplexer -> (128'b0, data) or (data, 128'b0) or (data, port3data) or (port3data, data)
   *   port3 uses a 2-to-1 multiplexer -> (128'b0, data) or (data, 128'b0)
   */
  object mergeDataByIndex {
    def apply(data: Seq[UInt], mask: Seq[UInt], index: UInt, valids: Seq[Bool]): (UInt, UInt) = {
      require(data.length == valids.length)
      require(data.length == mask.length)
      val muxLength = data.length
      val selDataMatrix = Wire(Vec(muxLength, Vec(2, UInt((VLEN * 2).W)))) // 3 * 2 * 256
      val selMaskMatrix = Wire(Vec(muxLength, Vec(2, UInt((VLENB * 2).W)))) // 3 * 2 * 16

      if (backendParams.debugEn){
        dontTouch(selDataMatrix)
        dontTouch(selMaskMatrix)
      }

      for(i <- 0 until muxLength){
        if(i == 0){
          selDataMatrix(i)(0) := Cat(0.U(VLEN.W), data(i))
          selDataMatrix(i)(1) := Cat(data(i), 0.U(VLEN.W))
          selMaskMatrix(i)(0) := Cat(0.U(VLENB.W), mask(i))
          selMaskMatrix(i)(1) := Cat(mask(i), 0.U(VLENB.W))
        }
        else{
          selDataMatrix(i)(0) := Cat(data(i), data(0))
          selDataMatrix(i)(1) := Cat(data(0), data(i))
          selMaskMatrix(i)(0) := Cat(mask(i), mask(0))
          selMaskMatrix(i)(1) := Cat(mask(0), mask(i))
        }
      }
      val selIdxVec = (0 until muxLength).map(_.U)
      val selIdx = PriorityMux(valids.reverse, selIdxVec.reverse)

      val selData = Mux(index === 0.U,
                        selDataMatrix(selIdx)(0),
                        selDataMatrix(selIdx)(1))
      val selMask = Mux(index === 0.U,
                        selMaskMatrix(selIdx)(0),
                        selMaskMatrix(selIdx)(1))
      (selData, selMask)
    }
  }
  def mergeDataByIndex(data: UInt, mask: UInt, index: UInt): (UInt, UInt) = {
    mergeDataByIndex(Seq(data), Seq(mask), index, Seq(true.B))
  }
}
abstract class VLSUModule(implicit p: Parameters) extends XSModule
  with HasVLSUParameters
  with HasCircularQueuePtrHelper
abstract class VLSUBundle(implicit p: Parameters) extends XSBundle
  with HasVLSUParameters

class VLSUBundleWithMicroOp(implicit p: Parameters) extends VLSUBundle {
  val uop = new DynInst
}

class OnlyVecExuOutput(implicit p: Parameters) extends VLSUBundle {
  val isvec = Bool()
  val vecdata = UInt(VLEN.W)
  val mask = UInt(VLENB.W)
  // val rob_idx_valid = Vec(2, Bool())
  // val inner_idx = Vec(2, UInt(3.W))
  // val rob_idx = Vec(2, new RobPtr)
  // val offset = Vec(2, UInt(4.W))
  val reg_offset = UInt(vOffsetBits.W)
  val vecActive = Bool() // 1: active vector element, 0: inactive vector element
  val is_first_ele = Bool()
  val elemIdx = UInt(elemIdxBits.W) // element index
  val elemIdxInsideVd = UInt(elemIdxBits.W) // element index within the scope of vd
  val trigger = TriggerAction()
  val vstart = UInt(elemIdxBits.W)
  val vecTriggerMask = UInt((VLEN/8).W)
  // val uopQueuePtr = new VluopPtr
  // val flowPtr = new VlflowPtr
}

class VecExuOutput(implicit p: Parameters) extends MemExuOutput with HasVLSUParameters {
  val vec = new OnlyVecExuOutput
  val alignedType = UInt(alignTypeBits.W)
  // feedback
  val vecFeedback = Bool()
}

class VecUopBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp {
  val flowMask = UInt(VLENB.W) // each bit for a flow
  val byteMask = UInt(VLENB.W) // each bit for a byte
  val data = UInt(VLEN.W)
  // val fof = Bool() // fof is only used for vector loads
  val excp_eew_index = UInt(elemIdxBits.W)
  // val exceptionVec = ExceptionVec() // uop has exceptionVec
  val baseAddr = UInt(VAddrBits.W)
  val stride = UInt(VLEN.W)
  val flow_counter = UInt(flowIdxBits.W)

  // instruction decode result
  val flowNum = UInt(flowIdxBits.W) // # of flows in a uop
  // val flowNumLog2 = UInt(log2Up(flowIdxBits).W) // log2(flowNum), for better timing of multiplication
  val nfields = UInt(fieldBits.W) // NFIELDS
  val vm = Bool() // whether vector masking is enabled
  val usWholeReg = Bool() // unit-stride, whole register load
  val usMaskReg = Bool() // unit-stride, masked store/load
  val eew = VEew() // size of memory elements
  val sew = UInt(ewBits.W)
  val emul = UInt(mulBits.W)
  val lmul = UInt(mulBits.W)
  val vlmax = UInt(elemIdxBits.W)
  val instType = UInt(3.W)
  val vd_last_uop = Bool()
  val vd_first_uop = Bool()
}

class VecFlowBundle(implicit p: Parameters) extends VLSUBundleWithMicroOp {
  val vaddr = UInt(VAddrBits.W)
  val mask = UInt(VLENB.W)
  val alignedType = UInt(alignTypeBits.W)
  val vecActive = Bool()
  val elemIdx = UInt(elemIdxBits.W)
  val is_first_ele = Bool()

  // pack
  val isPackage = Bool()
  val packageNum = UInt((log2Up(VLENB) + 1).W)
  val originAlignedType = UInt(alignTypeBits.W)
}

class VecMemExuOutput(isVector: Boolean = false)(implicit p: Parameters) extends VLSUBundle {
  val output = new MemExuOutput(isVector)
  val vecFeedback = Bool()
  val nc = Bool()
  val mmio = Bool()
  val usSecondInv = Bool()
  val elemIdx = UInt(elemIdxBits.W)
  val alignedType = UInt(alignTypeBits.W)
  val mbIndex = UInt(vsmBindexBits.W)
  val mask = UInt(VLENB.W)
  val vaddr = UInt(XLEN.W)
  val vaNeedExt = Bool()
  val gpaddr = UInt(GPAddrBits.W)
  val isForVSnonLeafPTE = Bool()
  val vecTriggerMask = UInt((VLEN/8).W)
}

object MulNum {
  def apply (mul: UInt): UInt = { // mul means emul or lmul
    (LookupTree(mul, List(
      "b101".U -> 1.U , // 1/8
      "b110".U -> 1.U , // 1/4
      "b111".U -> 1.U , // 1/2
      "b000".U -> 1.U , // 1
      "b001".U -> 2.U , // 2
      "b010".U -> 4.U , // 4
      "b011".U -> 8.U   // 8
    )))}
}
/**
 * When emul is greater than or equal to 1, the entire register needs to be written;
 * otherwise, only write the specified number of bytes.
 */
object MulDataSize {
  def apply (mul: UInt): UInt = { // mul means emul or lmul
    (LookupTree(mul, List(
      "b101".U -> 2.U  , // 1/8
      "b110".U -> 4.U  , // 1/4
      "b111".U -> 8.U  , // 1/2
      "b000".U -> 16.U , // 1
      "b001".U -> 16.U , // 2
      "b010".U -> 16.U , // 4
      "b011".U -> 16.U   // 8
    )))}
}

object OneRegNum {
  def apply (eew: UInt): UInt = { // number of elements in one register for a given eew
    require(eew.getWidth == 2, "The eew width must be 2.")
    (LookupTree(eew, List(
      "b00".U -> 16.U , // 1
      "b01".U -> 8.U  , // 2
      "b10".U -> 4.U  , // 4
      "b11".U -> 2.U    // 8
    )))}
}

// number of data bytes read per element for indexed instructions
object SewDataSize {
  def apply (sew: UInt): UInt = {
    (LookupTree(sew, List(
      "b000".U -> 1.U , // 1
      "b001".U -> 2.U , // 2
      "b010".U -> 4.U , // 4
      "b011".U -> 8.U   // 8
    )))}
}

// number of data bytes read per element for strided instructions
object EewDataSize {
  def apply (eew: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    (LookupTree(eew, List(
      "b00".U -> 1.U , // 1
      "b01".U -> 2.U , // 2
      "b10".U -> 4.U , // 4
      "b11".U -> 8.U   // 8
    )))}
}

object loadDataSize {
  def apply (instType: UInt, emul: UInt, eew: UInt, sew: UInt): UInt = {
    (LookupTree(instType, List(
      "b000".U -> MulDataSize(emul), // unit-stride
      "b010".U -> EewDataSize(eew) , // strided
      "b001".U -> SewDataSize(sew) , // indexed-unordered
      "b011".U -> SewDataSize(sew) , // indexed-ordered
      "b100".U -> EewDataSize(eew) , // segment unit-stride
      "b110".U -> EewDataSize(eew) , // segment strided
      "b101".U -> SewDataSize(sew) , // segment indexed-unordered
      "b111".U -> SewDataSize(sew)   // segment indexed-ordered
    )))}
}

object storeDataSize {
  def apply (instType: UInt, eew: UInt, sew: UInt): UInt = {
    (LookupTree(instType, List(
      "b000".U -> EewDataSize(eew) , // unit-stride, do not use
      "b010".U -> EewDataSize(eew) , // strided
      "b001".U -> SewDataSize(sew) , // indexed-unordered
      "b011".U -> SewDataSize(sew) , // indexed-ordered
      "b100".U -> EewDataSize(eew) , // segment unit-stride
      "b110".U -> EewDataSize(eew) , // segment strided
      "b101".U -> SewDataSize(sew) , // segment indexed-unordered
      "b111".U -> SewDataSize(sew)   // segment indexed-ordered
    )))}
}

/**
 * These are used to obtain immediate address offsets for indexed instructions.
 */
object EewEq8 {
  def apply(index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U  -> index(7,   0),
      1.U  -> index(15,  8),
      2.U  -> index(23, 16),
      3.U  -> index(31, 24),
      4.U  -> index(39, 32),
      5.U  -> index(47, 40),
      6.U  -> index(55, 48),
      7.U  -> index(63, 56),
      8.U  -> index(71, 64),
      9.U  -> index(79, 72),
      10.U -> index(87, 80),
      11.U -> index(95, 88),
      12.U -> index(103, 96),
      13.U -> index(111, 104),
      14.U -> index(119, 112),
      15.U -> index(127, 120)
    )))}
}

object EewEq16 {
  def apply(index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U -> index(15, 0),
      1.U -> index(31, 16),
      2.U -> index(47, 32),
      3.U -> index(63, 48),
      4.U -> index(79, 64),
      5.U -> index(95, 80),
      6.U -> index(111, 96),
      7.U -> index(127, 112)
    )))}
}

object EewEq32 {
  def apply(index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U -> index(31, 0),
      1.U -> index(63, 32),
      2.U -> index(95, 64),
      3.U -> index(127, 96)
    )))}
}

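/**
 * Illustrative sketch only (kept as a comment, not instantiated anywhere; names such as `idxReg`
 * and `baseAddr` are assumptions): the EewEqN helpers each slice one element-sized offset out of
 * a 128-bit index register, and IndexAddr below selects among them by eew. A flow address for an
 * indexed access could then be formed roughly like:
 * {{{
 *   // eew = "b01" (16-bit indices): flow_inner_idx = 2.U picks idxReg(47, 32)
 *   val offset   = IndexAddr(index = idxReg, flow_inner_idx = 2.U, eew = "b01".U)
 *   val flowAddr = baseAddr + offset
 * }}}
 */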
object EewEq64 {
  def apply (index: UInt, flow_inner_idx: UInt): UInt = {
    (LookupTree(flow_inner_idx, List(
      0.U -> index(63, 0),
      1.U -> index(127, 64)
    )))}
}

object IndexAddr {
  def apply (index: UInt, flow_inner_idx: UInt, eew: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    (LookupTree(eew, List(
      "b00".U -> EewEq8 (index = index, flow_inner_idx = flow_inner_idx), // Imm is 1 Byte // TODO: index may cross registers
      "b01".U -> EewEq16(index = index, flow_inner_idx = flow_inner_idx), // Imm is 2 Bytes
      "b10".U -> EewEq32(index = index, flow_inner_idx = flow_inner_idx), // Imm is 4 Bytes
      "b11".U -> EewEq64(index = index, flow_inner_idx = flow_inner_idx)  // Imm is 8 Bytes
    )))}
}

object Log2Num {
  def apply (num: UInt): UInt = {
    (LookupTree(num, List(
      16.U -> 4.U,
      8.U  -> 3.U,
      4.U  -> 2.U,
      2.U  -> 1.U,
      1.U  -> 0.U
    )))}
}

object GenUopIdxInField {
  /**
   * Used for normal vector instructions.
   */
  def apply (instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = {
    val isIndexed = instType(0)
    val mulInField = Mux(
      isIndexed,
      Mux(lmul.asSInt > emul.asSInt, lmul, emul),
      emul
    )
    LookupTree(mulInField, List(
      "b101".U -> 0.U,
      "b110".U -> 0.U,
      "b111".U -> 0.U,
      "b000".U -> 0.U,
      "b001".U -> uopIdx(0),
      "b010".U -> uopIdx(1, 0),
      "b011".U -> uopIdx(2, 0)
    ))
  }
  /**
   * Only used for segment instructions.
   */
  def apply (select: UInt, uopIdx: UInt): UInt = {
    LookupTree(select, List(
      "b101".U -> 0.U,
      "b110".U -> 0.U,
      "b111".U -> 0.U,
      "b000".U -> 0.U,
      "b001".U -> uopIdx(0),
      "b010".U -> uopIdx(1, 0),
      "b011".U -> uopIdx(2, 0)
    ))
  }
}

// eew decode
object EewLog2 extends VLSUConstants {
  // def apply (eew: UInt): UInt = {
  //   (LookupTree(eew, List(
  //     "b000".U -> "b000".U , // 1
  //     "b101".U -> "b001".U , // 2
  //     "b110".U -> "b010".U , // 4
  //     "b111".U -> "b011".U   // 8
  //   )))}
  def apply(eew: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    ZeroExt(eew, ewBits)
  }
}

object GenRealFlowNum {
  /**
   * Unit-stride instructions do not use this method;
   * other instructions generate realFlowNum as EmulDataSize >> eew, where
   * EmulDataSize is the number of bytes that need to be written to the register and
   * eew is the number of bytes written at once.
   *
   * @param instType As the name implies.
   * @param emul As the name implies.
   * @param lmul As the name implies.
   * @param eew As the name implies.
   * @param sew As the name implies.
   * @param isSegment Only modules related to segment need to set this to true.
   * @return FlowNum of the instruction.
   */
  def apply (instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt, isSegment: Boolean = false): UInt = {
    require(instType.getWidth == 3, "The instType width must be 3, (isSegment, mop)")
    require(eew.getWidth == 2, "The eew width must be 2.")
    // The new SegmentUnit needs this, but the previous implementation is retained for the time being as a fallback.
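    // Worked example of the formula below, taken directly from MulDataSize above: for a strided
    // access with emul = "b001" (EMUL = 2) and eew = "b01" (2-byte elements),
    // MulDataSize(emul) = 16 bytes, so MulDataSize(emul) >> eew = 16 >> 1 = 8 flows.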
    val segmentIndexFlowNum = if (isSegment) (MulDataSize(lmul) >> sew(1,0)).asUInt
      else Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt)
    (LookupTree(instType, List(
      "b000".U -> (MulDataSize(emul) >> eew).asUInt, // unit-stride (store use, load do not use)
      "b010".U -> (MulDataSize(emul) >> eew).asUInt, // strided
      "b001".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt), // indexed-unordered
      "b011".U -> Mux(emul.asSInt > lmul.asSInt, (MulDataSize(emul) >> eew).asUInt, (MulDataSize(lmul) >> sew(1,0)).asUInt), // indexed-ordered
      "b100".U -> (MulDataSize(emul) >> eew).asUInt, // segment unit-stride
      "b110".U -> (MulDataSize(emul) >> eew).asUInt, // segment strided
      "b101".U -> segmentIndexFlowNum, // segment indexed-unordered
      "b111".U -> segmentIndexFlowNum  // segment indexed-ordered
    )))}
}

object GenRealFlowLog2 extends VLSUConstants {
  /**
   * GenRealFlowLog2 = Log2(GenRealFlowNum)
   *
   * @param instType As the name implies.
   * @param emul As the name implies.
   * @param lmul As the name implies.
   * @param eew As the name implies.
   * @param sew As the name implies.
   * @param isSegment Only modules related to segment need to set this to true.
   * @return FlowNumLog2 of the instruction.
   */
  def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt, isSegment: Boolean = false): UInt = {
    require(instType.getWidth == 3, "The instType width must be 3, (isSegment, mop)")
    require(eew.getWidth == 2, "The eew width must be 2.")
    val emulLog2 = Mux(emul.asSInt >= 0.S, 0.U, emul)
    val lmulLog2 = Mux(lmul.asSInt >= 0.S, 0.U, lmul)
    val eewRealFlowLog2 = emulLog2 + log2Up(VLENB).U - eew
    val sewRealFlowLog2 = lmulLog2 + log2Up(VLENB).U - sew(1, 0)
    // The new SegmentUnit needs this, but the previous implementation is retained for the time being as a fallback.
    val segmentIndexFlowLog2 = if (isSegment) sewRealFlowLog2 else Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2)
    (LookupTree(instType, List(
      "b000".U -> eewRealFlowLog2, // unit-stride
      "b010".U -> eewRealFlowLog2, // strided
      "b001".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-unordered
      "b011".U -> Mux(emul.asSInt > lmul.asSInt, eewRealFlowLog2, sewRealFlowLog2), // indexed-ordered
      "b100".U -> eewRealFlowLog2, // segment unit-stride
      "b110".U -> eewRealFlowLog2, // segment strided
      "b101".U -> segmentIndexFlowLog2, // segment indexed-unordered
      "b111".U -> segmentIndexFlowLog2  // segment indexed-ordered
    )))
  }
}

/**
 * GenElemIdx generates an element index within an instruction, given a certain uopIdx and a known flowIdx
 * inside the uop.
 */
object GenElemIdx extends VLSUConstants {
  def apply(instType: UInt, emul: UInt, lmul: UInt, eew: UInt, sew: UInt,
            uopIdx: UInt, flowIdx: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    val isIndexed = instType(0).asBool
    val eewUopFlowsLog2 = Mux(emul.asSInt > 0.S, 0.U, emul) + log2Up(VLENB).U - eew
    val sewUopFlowsLog2 = Mux(lmul.asSInt > 0.S, 0.U, lmul) + log2Up(VLENB).U - sew(1, 0)
    val uopFlowsLog2 = Mux(
      isIndexed,
      Mux(emul.asSInt > lmul.asSInt, eewUopFlowsLog2, sewUopFlowsLog2),
      eewUopFlowsLog2
    )
    LookupTree(uopFlowsLog2, List(
      0.U -> uopIdx,
      1.U -> uopIdx ## flowIdx(0),
      2.U -> uopIdx ## flowIdx(1, 0),
      3.U -> uopIdx ## flowIdx(2, 0),
      4.U -> uopIdx ## flowIdx(3, 0)
    ))
  }
}

/**
 * GenVLMAX calculates VLMAX, which equals LMUL * VLEN / SEW
 */
object GenVLMAXLog2 extends VLSUConstants {
  def apply(lmul: UInt, sew: UInt): UInt = lmul + log2Up(VLENB).U - sew
}
object GenVLMAX {
  def apply(lmul: UInt, sew: UInt): UInt = 1.U << GenVLMAXLog2(lmul, sew)
}
/**
 * Generate a mask based on vlmax.
 * Example: vlmax = b100, mask = b011
 */
object GenVlMaxMask {
  def apply(vlmax: UInt, length: Int): UInt = (vlmax - 1.U)(length - 1, 0)
}

object GenUSWholeRegVL extends VLSUConstants {
  def apply(nfields: UInt, eew: UInt): UInt = {
    require(eew.getWidth == 2, "The eew width must be 2.")
    LookupTree(eew, List(
      "b00".U -> (nfields << (log2Up(VLENB) - 0)),
      "b01".U -> (nfields << (log2Up(VLENB) - 1)),
      "b10".U -> (nfields << (log2Up(VLENB) - 2)),
      "b11".U -> (nfields << (log2Up(VLENB) - 3))
    ))
  }
}
object GenUSWholeEmul extends VLSUConstants {
  def apply(nf: UInt): UInt = {
    LookupTree(nf, List(
      "b000".U -> "b000".U(mulBits.W),
      "b001".U -> "b001".U(mulBits.W),
      "b011".U -> "b010".U(mulBits.W),
      "b111".U -> "b011".U(mulBits.W)
    ))
  }
}


object GenUSMaskRegVL extends VLSUConstants {
  def apply(vl: UInt): UInt = {
    Mux(vl(2,0) === 0.U, (vl >> 3.U), ((vl >> 3.U) + 1.U))
  }
}

object GenUopByteMask {
  def apply(flowMask: UInt, alignedType: UInt): UInt = {
    LookupTree(alignedType, List(
      "b000".U -> flowMask,
      "b001".U -> FillInterleaved(2, flowMask),
      "b010".U -> FillInterleaved(4, flowMask),
      "b011".U -> FillInterleaved(8, flowMask),
      "b100".U -> FillInterleaved(16, flowMask)
    ))
  }
}

object GenVdIdxInField extends VLSUConstants {
  def apply(instType: UInt, emul: UInt, lmul: UInt, uopIdx: UInt): UInt = {
    val vdIdx = Wire(UInt(log2Up(maxMUL).W))
    when (instType(1,0) === "b00".U || instType(1,0) === "b10".U || lmul.asSInt > emul.asSInt) {
      // unit-stride or strided, or indexed with lmul >= emul
      vdIdx := uopIdx
    }.otherwise {
      // indexed with lmul <= emul
      val multiple = emul - lmul
      val uopIdxWidth = uopIdx.getWidth
      vdIdx := LookupTree(multiple, List(
        0.U -> uopIdx,
        1.U -> (uopIdx >> 1),
        2.U -> (uopIdx >> 2),
        3.U -> (uopIdx >> 3)
      ))
    }
    vdIdx
  }
}
/**
 * Use start and vl to generate the flow active mask.
 * mod = true: fill with 0
 * mod = false: fill with 1
 */
object GenFlowMask extends VLSUConstants {
  def apply(elementMask: UInt, start: UInt, vl: UInt, mod: Boolean): UInt = {
    val startMask = ~UIntToMask(start, VLEN)
    val vlMask = UIntToMask(vl, VLEN)
    val maskVlStart = vlMask & startMask
    if (mod) {
      elementMask & maskVlStart
    } else {
      (~elementMask).asUInt & maskVlStart
    }
  }
}

object genVWmask128 {
  def apply(addr: UInt, sizeEncode: UInt): UInt = {
    (LookupTree(sizeEncode, List(
      "b000".U -> 0x1.U,    // 0001 << addr(2:0)
      "b001".U -> 0x3.U,    // 0011
      "b010".U -> 0xf.U,    // 1111
      "b011".U -> 0xff.U,   // 11111111
      "b100".U -> 0xffff.U  // 1111111111111111
    )) << addr(3, 0)).asUInt
  }
}
/*
 * Only used when the maximum access length is 128 bits.
 */
object genVWdata {
  def apply(data: UInt, sizeEncode: UInt): UInt = {
    LookupTree(sizeEncode, List(
      "b000".U -> Fill(16, data(7, 0)),
      "b001".U -> Fill(8, data(15, 0)),
      "b010".U -> Fill(4, data(31, 0)),
      "b011".U -> Fill(2, data(63, 0)),
      "b100".U -> data(127, 0)
    ))
  }
}

object genUSSplitAddr {
  def apply(addr: UInt, index: UInt, width: Int): UInt = {
    val tmpAddr = Cat(addr(width - 1, 4), 0.U(4.W))
    val nextCacheline = tmpAddr + 16.U
    LookupTree(index, List(
      0.U -> tmpAddr,
      1.U -> nextCacheline
    ))
  }
}

object genUSSplitMask {
  def apply(mask: UInt, index: UInt): UInt = {
    require(mask.getWidth == 32) // needs to be 32 bits
    LookupTree(index, List(
      0.U -> mask(15, 0),
      1.U -> mask(31, 16)
    ))
  }
}

object genUSSplitData {
  def apply(data: UInt, index: UInt, addrOffset: UInt): UInt = {
    val tmpData = WireInit(0.U(256.W))
    val lookupTable = (0 until 16).map{ case i =>
      if (i == 0) {
        i.U -> Cat(0.U(128.W), data)
      } else {
        i.U -> Cat(0.U(((16 - i) * 8).W), data, 0.U((i * 8).W))
      }
    }
    tmpData := LookupTree(addrOffset, lookupTable).asUInt

    LookupTree(index, List(
      0.U -> tmpData(127, 0),
      1.U -> tmpData(255, 128)
    ))
  }
}

object genVSData extends VLSUConstants {
  def apply(data: UInt, elemIdx: UInt, alignedType: UInt): UInt = {
    LookupTree(alignedType, List(
      "b000".U -> ZeroExt(LookupTree(elemIdx(3, 0), List.tabulate(VLEN/8)(i => i.U -> getByte(data, i))), VLEN),
      "b001".U -> ZeroExt(LookupTree(elemIdx(2, 0), List.tabulate(VLEN/16)(i => i.U -> getHalfWord(data, i))), VLEN),
      "b010".U -> ZeroExt(LookupTree(elemIdx(1, 0), List.tabulate(VLEN/32)(i => i.U -> getWord(data, i))), VLEN),
      "b011".U -> ZeroExt(LookupTree(elemIdx(0), List.tabulate(VLEN/64)(i => i.U -> getDoubleWord(data, i))), VLEN),
      "b100".U -> data // if there were a wider element, this would break
    ))
  }
}

// TODO: more elegant
object genVStride extends VLSUConstants {
  def apply(uopIdx: UInt, stride: UInt): UInt = {
    LookupTree(uopIdx, List(
      0.U -> 0.U,
      1.U -> stride,
      2.U -> (stride << 1),
      3.U -> ((stride << 1).asUInt + stride),
      4.U -> (stride << 2),
      5.U -> ((stride << 2).asUInt + stride),
      6.U -> ((stride << 2).asUInt + (stride << 1)),
      7.U -> ((stride << 2).asUInt + (stride << 1) + stride)
    ))
  }
}
/**
 * Generate uopOffset; not used for segment instructions.
 */
object genVUopOffset extends VLSUConstants {
  def apply(instType: UInt, isfof: Bool, uopidx: UInt, nf: UInt, eew: UInt, stride: UInt, alignedType: UInt): UInt = {
    val uopInsidefield = (uopidx >> nf).asUInt // when nf == 0, this is uopidx

//    val fofVUopOffset = (LookupTree(instType, List(
//      "b000".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew) ), // unit-stride fof
//      "b100".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew) ), // segment unit-stride fof
//    ))).asUInt

    val otherVUopOffset = (LookupTree(instType, List(
      "b000".U -> ( uopInsidefield << alignedType                                  ), // unit-stride
      "b010".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew)  ), // strided
      "b001".U -> ( 0.U                                                            ), // indexed-unordered
      "b011".U -> ( 0.U                                                            ), // indexed-ordered
      "b100".U -> ( uopInsidefield << alignedType                                  ), // segment unit-stride
      "b110".U -> ( genVStride(uopInsidefield, stride) << (log2Up(VLENB).U - eew)  ), // segment strided
      "b101".U -> ( 0.U                                                            ), // segment indexed-unordered
      "b111".U -> ( 0.U                                                            )  // segment indexed-ordered
    ))).asUInt

//  Mux(isfof, fofVUopOffset, otherVUopOffset)
    otherVUopOffset
  }
}


object genVFirstUnmask extends VLSUConstants {
  /**
   * Find the index of the lowest unmasked (set) bit.
   * Example:
   *   mask = 16'b1111_1111_1110_0000
   *   returns 5
   * @param mask 16 bits of mask.
   * @return index of the lowest unmasked bit.
   */
  def apply(mask: UInt): UInt = {
    require(mask.getWidth == 16, "The mask width must be 16")
    val select = (0 until 16).zip(mask.asBools).map{ case (i, v) =>
      (v, i.U)
    }
    PriorityMuxDefault(select, 0.U)
  }

  def apply(mask: UInt, regOffset: UInt): UInt = {
    require(mask.getWidth == 16, "The mask width must be 16")
    val realMask = (mask >> regOffset).asUInt
    val select = (0 until 16).zip(realMask.asBools).map{ case (i, v) =>
      (v, i.U)
    }
    PriorityMuxDefault(select, 0.U)
  }
}

class skidBufferConnect[T <: Data](gen: T) extends Module {
  val io = IO(new Bundle() {
    val in = Flipped(DecoupledIO(gen.cloneType))
    val flush = Input(Bool())
    val out = DecoupledIO(gen.cloneType)
  })

  skidBuffer.connect(io.in, io.out, io.flush)
}

object skidBuffer {
  /*
   * Skid buffer used to break the timing path of ready.
   */
  def connect[T <: Data](
    in: DecoupledIO[T],
    out: DecoupledIO[T],
    flush: Bool
  ): T = {
    val empty :: skid :: Nil = Enum(2)
    val state = RegInit(empty)
    val stateNext = WireInit(empty)
    val dataBuffer = RegEnable(in.bits, (!out.ready && in.fire))

    when(state === empty){
      stateNext := Mux(!out.ready && in.fire && !flush, skid, empty)
    }.elsewhen(state === skid){
      stateNext := Mux(out.ready || flush, empty, skid)
    }
    state := stateNext

    in.ready := state === empty
    out.bits := Mux(state === skid, dataBuffer, in.bits)
    out.valid := in.valid || (state === skid)

    dataBuffer
  }
  def apply[T <: Data](
    in: DecoupledIO[T],
    out: DecoupledIO[T],
    flush: Bool,
    moduleName: String
  ): Unit = {
    val buffer = Module(new skidBufferConnect(in.bits))
    buffer.suggestName(moduleName)
    buffer.io.in <> in
    buffer.io.flush := flush
    out <> buffer.io.out
  }
}
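
/**
 * Illustrative usage sketch for `skidBuffer` (names such as `producer`, `consumer` and `io.flush`
 * are assumptions, not part of this file): the buffer registers the in-flight beat when the
 * consumer stalls, so `in.ready` depends only on the internal state rather than combinationally
 * on `out.ready`.
 * {{{
 *   // inside some VLSUModule with two DecoupledIO[VecFlowBundle] interfaces
 *   skidBuffer(in = producer, out = consumer, flush = io.flush, moduleName = "vecFlowSkidBuffer")
 * }}}
 */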