xref: /XiangShan/src/main/scala/xiangshan/backend/fu/wrapper/VFALU.scala (revision 78a8cd257caa1ff2b977d80082b1b3a2fa98a1d3)
package xiangshan.backend.fu.wrapper

import org.chipsalliance.cde.config.Parameters
import chisel3._
import chisel3.util._
import utils.XSError
import xiangshan.backend.fu.FuConfig
import xiangshan.backend.fu.vector.Bundles.{VLmul, VSew, ma}
import xiangshan.backend.fu.vector.utils.VecDataSplitModule
import xiangshan.backend.fu.vector.{Mgu, Mgtu, VecInfo, VecPipedFuncUnit}
import xiangshan.ExceptionNO
import yunsuan.{VfaluType, VfpuType}
import yunsuan.vector.VectorFloatAdder
import xiangshan.backend.fu.vector.Bundles.VConfig

class VFAlu(cfg: FuConfig)(implicit p: Parameters) extends VecPipedFuncUnit(cfg) {
  XSError(io.in.valid && io.in.bits.ctrl.fuOpType === VfpuType.dummy, "Vfalu OpType not supported")

  // params alias
  private val dataWidth = cfg.destDataBits
  private val dataWidthOfDataModule = 64
  private val numVecModule = dataWidth / dataWidthOfDataModule

  // io alias
  private val opcode   = fuOpType(4, 0)
  private val resWiden = fuOpType(5)
  private val opbWiden = fuOpType(6)

  // modules
  private val vfalus = Seq.fill(numVecModule)(Module(new VectorFloatAdder))
  private val vs2Split = Module(new VecDataSplitModule(dataWidth, dataWidthOfDataModule))
  private val vs1Split = Module(new VecDataSplitModule(dataWidth, dataWidthOfDataModule))
  private val oldVdSplit = Module(new VecDataSplitModule(dataWidth, dataWidthOfDataModule))
  private val mgu = Module(new Mgu(dataWidth))
  private val mgtu = Module(new Mgtu(dataWidth))

  /**
    * Input connections of [[vs2Split]], [[vs1Split]] and [[oldVdSplit]]
    */
  vs2Split.io.inVecData := vs2
  vs1Split.io.inVecData := vs1
  oldVdSplit.io.inVecData := oldVd

  /**
    * Input connections of [[vfalus]]
    */
  // Vec(vs2(31,0), vs2(63,32), vs2(95,64), vs2(127,96)) ==>
  // Vec(
  //   Cat(vs2(95,64),  vs2(31,0)),
  //   Cat(vs2(127,96), vs2(63,32)),
  // )
  private val vs2GroupedVec: Vec[UInt] = VecInit(vs2Split.io.outVec32b.zipWithIndex.groupBy(_._2 % 2).map(x => x._1 -> x._2.map(_._1)).values.map(x => Cat(x.reverse)).toSeq)
  private val vs1GroupedVec: Vec[UInt] = VecInit(vs1Split.io.outVec32b.zipWithIndex.groupBy(_._2 % 2).map(x => x._1 -> x._2.map(_._1)).values.map(x => Cat(x.reverse)).toSeq)
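  // A clearer equivalent of the regrouping above (a sketch; it relies on the same
  // even/odd index split rather than on Map iteration order):
  //   val (even, odd) = vs2Split.io.outVec32b.zipWithIndex.partition(_._2 % 2 == 0)
  //   VecInit(Seq(even, odd).map(g => Cat(g.map(_._1).reverse)))
  // Each entry pairs the two 32-bit halves that feed one 64-bit adder lane,
  // matching the widen_a/widen_b layout used below.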
  private val resultData = Wire(Vec(numVecModule, UInt(dataWidthOfDataModule.W)))
  private val fflagsData = Wire(Vec(numVecModule, UInt(20.W)))
  private val srcMaskRShiftForReduction = Wire(UInt((8 * numVecModule).W))
  // for reduction
  val isFirstGroupUop = vuopIdx === 0.U ||
    (vuopIdx === 1.U && (vlmul === VLmul.m4 || vlmul === VLmul.m8)) ||
    ((vuopIdx === 2.U || vuopIdx === 3.U) && vlmul === VLmul.m8)
  val maskRshiftWidthForReduction = Wire(UInt(6.W))
  maskRshiftWidthForReduction := Mux(fuOpType === VfaluType.vfredosum || fuOpType === VfaluType.vfwredosum,
    vuopIdx,
    Mux1H(Seq(
      (vsew === VSew.e16) -> (vuopIdx(1, 0) << 4),
      (vsew === VSew.e32) -> (vuopIdx(1, 0) << 3),
      (vsew === VSew.e64) -> (vuopIdx(1, 0) << 2),
    ))
  )
  val vlMaskForReduction = (~(Fill(VLEN, 1.U) << vl)).asUInt
  srcMaskRShiftForReduction := ((srcMask & vlMaskForReduction) >> maskRshiftWidthForReduction)(8 * numVecModule - 1, 0)
  val existMask = (srcMask & vlMaskForReduction).orR
  val existMaskReg = RegEnable(existMask, io.in.fire)
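  // Mask-window sketch (an illustration, assuming VLEN = 128): ordered reductions
  // retire one element per uop, so the window advances one mask bit per vuopIdx.
  // Unordered reductions consume a whole 128-bit group per uop and keep two mask
  // bits per element (separate enables for the adder's fp_a and fp_b inputs),
  // hence the per-uop shift of 16/8/4 bits at e16/e32/e64.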


  def genMaskForReduction(inmask: UInt, sew: UInt, i: Int): UInt = {
    val f64MaskNum = dataWidth / 64 * 2
    val f32MaskNum = dataWidth / 32 * 2
    val f16MaskNum = dataWidth / 16 * 2
    val f64Mask = inmask(f64MaskNum - 1, 0)
    val f32Mask = inmask(f32MaskNum - 1, 0)
    val f16Mask = inmask(f16MaskNum - 1, 0)
    // vs2 is reordered, so the mask uses the high bits
    val f64FirstFoldMaskUnorder = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> Cat(0.U(3.W), f64Mask(0), 0.U(3.W), f64Mask(1)),
      )
    )
    val f64FirstFoldMaskOrder = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> Cat(0.U(3.W), f64Mask(1), 0.U(3.W), f64Mask(0))
      )
    )
    val f32FirstFoldMaskUnorder = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> Cat(0.U(2.W), f32Mask(1), f32Mask(0), 0.U(2.W), f32Mask(3), f32Mask(2)),
        vecCtrl.fpu.isFoldTo1_4 -> Cat(0.U(3.W), f32Mask(0), 0.U(3.W), f32Mask(1)),
      )
    )
    val f32FirstFoldMaskOrder = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> Cat(0.U(2.W), f32Mask(3), f32Mask(2), 0.U(2.W), f32Mask(1), f32Mask(0)),
        vecCtrl.fpu.isFoldTo1_4 -> Cat(0.U(3.W), f32Mask(1), 0.U(3.W), f32Mask(0)),
      )
    )
    val f16FirstFoldMaskUnorder = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> Cat(f16Mask(7, 4), f16Mask(3, 0)),
        vecCtrl.fpu.isFoldTo1_4 -> Cat(0.U(2.W), f16Mask(1), f16Mask(0), 0.U(2.W), f16Mask(3), f16Mask(2)),
        vecCtrl.fpu.isFoldTo1_8 -> Cat(0.U(3.W), f16Mask(0), 0.U(3.W), f16Mask(1)),
      )
    )
    val f16FirstFoldMaskOrder = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> Cat(f16Mask(7, 4), f16Mask(3, 0)),
        vecCtrl.fpu.isFoldTo1_4 -> Cat(0.U(2.W), f16Mask(3), f16Mask(2), 0.U(2.W), f16Mask(1), f16Mask(0)),
        vecCtrl.fpu.isFoldTo1_8 -> Cat(0.U(3.W), f16Mask(1), 0.U(3.W), f16Mask(0)),
      )
    )
    val f64FoldMask = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> "b00010001".U,
      )
    )
    val f32FoldMask = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> "b00110011".U,
        vecCtrl.fpu.isFoldTo1_4 -> "b00010001".U,
      )
    )
    val f16FoldMask = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> "b11111111".U,
        vecCtrl.fpu.isFoldTo1_4 -> "b00110011".U,
        vecCtrl.fpu.isFoldTo1_8 -> "b00010001".U,
      )
    )
    // low 4 bits enable vs2 (fp_a), high 4 bits enable vs1 (fp_b)
    val isFold = vecCtrl.fpu.isFoldTo1_2 || vecCtrl.fpu.isFoldTo1_4 || vecCtrl.fpu.isFoldTo1_8
    val f64FirstNotFoldMask = Cat(0.U(3.W), f64Mask(i + 2), 0.U(3.W), f64Mask(i))
    val f32FirstNotFoldMask = Cat(0.U(2.W), f32Mask(i * 2 + 5, i * 2 + 4), 0.U(2.W), f32Mask(i * 2 + 1, i * 2))
    val f16FirstNotFoldMask = Cat(f16Mask(i * 4 + 11, i * 4 + 8), f16Mask(i * 4 + 3, i * 4))
    val f64MaskI = Mux(fuOpType === VfaluType.vfredosum || fuOpType === VfaluType.vfwredosum,
      Mux(isFold, f64FirstFoldMaskOrder, f64FirstNotFoldMask),
      Mux(isFirstGroupUop,
        Mux(isFold, f64FirstFoldMaskUnorder, f64FirstNotFoldMask),
        Mux(isFold, f64FoldMask, Fill(8, 1.U))))
    val f32MaskI = Mux(fuOpType === VfaluType.vfredosum || fuOpType === VfaluType.vfwredosum,
      Mux(isFold, f32FirstFoldMaskOrder, f32FirstNotFoldMask),
      Mux(isFirstGroupUop,
        Mux(isFold, f32FirstFoldMaskUnorder, f32FirstNotFoldMask),
        Mux(isFold, f32FoldMask, Fill(8, 1.U))))
    val f16MaskI = Mux(fuOpType === VfaluType.vfredosum || fuOpType === VfaluType.vfwredosum,
      Mux(isFold, f16FirstFoldMaskOrder, f16FirstNotFoldMask),
      Mux(isFirstGroupUop,
        Mux(isFold, f16FirstFoldMaskUnorder, f16FirstNotFoldMask),
        Mux(isFold, f16FoldMask, Fill(8, 1.U))))
    val outMask = Mux1H(
      Seq(
        (sew === 3.U) -> f64MaskI,
        (sew === 2.U) -> f32MaskI,
        (sew === 1.U) -> f16MaskI,
      )
    )
    Mux(fuOpType === VfaluType.vfredosum || fuOpType === VfaluType.vfwredosum, outMask(0), outMask)
  }
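  // Worked example (a sketch): with sew = e64, no folding and i = 0, the function
  // returns Cat(0.U(3.W), f64Mask(2), 0.U(3.W), f64Mask(0)): one fp_b enable in the
  // high nibble and one fp_a enable in the low nibble. Ordered reductions keep
  // only bit 0, since they handle a single element per uop.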
  def genMaskForMerge(inmask: UInt, sew: UInt, i: Int): UInt = {
    val f64MaskNum = dataWidth / 64
    val f32MaskNum = dataWidth / 32
    val f16MaskNum = dataWidth / 16
    val f64Mask = inmask(f64MaskNum - 1, 0)
    val f32Mask = inmask(f32MaskNum - 1, 0)
    val f16Mask = inmask(f16MaskNum - 1, 0)
    val f64MaskI = Cat(0.U(3.W), f64Mask(i))
    val f32MaskI = Cat(0.U(2.W), f32Mask(2 * i + 1, 2 * i))
    val f16MaskI = f16Mask(4 * i + 3, 4 * i)
    val outMask = Mux1H(
      Seq(
        (sew === 3.U) -> f64MaskI,
        (sew === 2.U) -> f32MaskI,
        (sew === 1.U) -> f16MaskI,
      )
    )
    outMask
  }
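  // Per-lane slicing sketch: lane i always receives a 4-bit window. E.g. with
  // sew = e32 and i = 1 this returns Cat(0.U(2.W), f32Mask(3, 2)), i.e. the two
  // mask bits steering the two f32 elements handled by the second 64-bit lane.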
  def genMaskForRedFFlag(sew: UInt): UInt = {
    val f64FoldMask = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> "b00000001".U,
      )
    )
    val f32FoldMask = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> "b00000011".U,
        vecCtrl.fpu.isFoldTo1_4 -> "b00000001".U,
      )
    )
    val f16FoldMask = Mux1H(
      Seq(
        vecCtrl.fpu.isFoldTo1_2 -> "b00001111".U,
        vecCtrl.fpu.isFoldTo1_4 -> "b00000011".U,
        vecCtrl.fpu.isFoldTo1_8 -> "b00000001".U,
      )
    )
    Mux1H(
      Seq(
        (sew === 3.U) -> f64FoldMask,
        (sew === 2.U) -> f32FoldMask,
        (sew === 1.U) -> f16FoldMask,
      )
    )
  }
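  // Reading the tables (an interpretation): each set bit enables fflags collection
  // for one adder result that is still live after the current fold step; e.g. at
  // sew = e32, folding to 1/4 leaves a single live element, so only the lowest
  // lane ("b00000001") may still raise flags.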
  val isScalarMove = (fuOpType === VfaluType.vfmv_f_s) || (fuOpType === VfaluType.vfmv_s_f)
  val srcMaskRShift = Wire(UInt((4 * numVecModule).W))
  val maskRshiftWidth = Wire(UInt(6.W))
  maskRshiftWidth := Mux1H(
    Seq(
      (vsew === VSew.e16) -> (vuopIdx(2, 0) << 3),
      (vsew === VSew.e32) -> (vuopIdx(2, 0) << 2),
      (vsew === VSew.e64) -> (vuopIdx(2, 0) << 1),
    )
  )
  srcMaskRShift := (srcMask >> maskRshiftWidth)(4 * numVecModule - 1, 0)
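  // Shift-amount sketch (assuming VLEN = 128): each uop covers 8/4/2 elements at
  // e16/e32/e64, so uop k starts at mask bit 8k/4k/2k and keeps the low
  // 4 * numVecModule bits as its per-element window.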
  val fp_aIsFpCanonicalNAN = Wire(Vec(numVecModule, Bool()))
  val fp_bIsFpCanonicalNAN = Wire(Vec(numVecModule, Bool()))
  val inIsFold = Wire(UInt(3.W))
  inIsFold := Cat(vecCtrl.fpu.isFoldTo1_8, vecCtrl.fpu.isFoldTo1_4, vecCtrl.fpu.isFoldTo1_2)
  vfalus.zipWithIndex.foreach {
    case (mod, i) =>
      mod.io.fire             := io.in.valid
      mod.io.fp_a             := vs2Split.io.outVec64b(i)
      mod.io.fp_b             := vs1Split.io.outVec64b(i)
      mod.io.widen_a          := Cat(vs2Split.io.outVec32b(i + numVecModule), vs2Split.io.outVec32b(i))
      mod.io.widen_b          := Cat(vs1Split.io.outVec32b(i + numVecModule), vs1Split.io.outVec32b(i))
      mod.io.frs1             := 0.U     // vf has already been converted to vv upstream
      mod.io.is_frs1          := false.B // vf has already been converted to vv upstream
      mod.io.mask             := Mux(isScalarMove, !vuopIdx.orR, genMaskForMerge(inmask = srcMaskRShift, sew = vsew, i = i))
      mod.io.maskForReduction := genMaskForReduction(inmask = srcMaskRShiftForReduction, sew = vsew, i = i)
      mod.io.uop_idx          := vuopIdx(0)
      mod.io.is_vec           := true.B // Todo
      mod.io.round_mode       := rm
      mod.io.fp_format        := Mux(resWiden, vsew + 1.U, vsew)
      mod.io.opb_widening     := opbWiden
      mod.io.res_widening     := resWiden
      mod.io.op_code          := opcode
      mod.io.is_vfwredosum    := fuOpType === VfaluType.vfwredosum
      mod.io.is_fold          := inIsFold
      mod.io.vs2_fold         := vs2      // for better timing
      resultData(i)           := mod.io.fp_result
      fflagsData(i)           := mod.io.fflags
      // a narrower scalar operand must be NaN-boxed: all bits above the element
      // width must be ones, otherwise the operand counts as the canonical NaN
      fp_aIsFpCanonicalNAN(i) := vecCtrl.fpu.isFpToVecInst & (
          ((vsew === VSew.e32) & (!vs2Split.io.outVec64b(i).head(32).andR)) |
          ((vsew === VSew.e16) & (!vs2Split.io.outVec64b(i).head(48).andR))
        )
      fp_bIsFpCanonicalNAN(i) := vecCtrl.fpu.isFpToVecInst & (
          ((vsew === VSew.e32) & (!vs1Split.io.outVec64b(i).head(32).andR)) |
          ((vsew === VSew.e16) & (!vs1Split.io.outVec64b(i).head(48).andR))
        )
      mod.io.fp_aIsFpCanonicalNAN := fp_aIsFpCanonicalNAN(i)
      mod.io.fp_bIsFpCanonicalNAN := fp_bIsFpCanonicalNAN(i)
  }
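  // NaN-boxing sketch (per the RISC-V F/D convention): an f32 value held in a
  // 64-bit register must arrive as Cat(~0.U(32.W), value). If any upper bit is
  // zero, head(32).andR is false and the operand is flagged so the adder treats
  // it as the canonical NaN.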
  val outVuopidx = outVecCtrl.vuopIdx(2, 0)
  val numOfUopVFRED = Wire(UInt(4.W))
  val numofUopVFREDReg = RegEnable(numOfUopVFRED, io.in.fire)
  val vs1Reg = RegEnable(vs1, io.in.fire)
  val outIsVfRedUnordered = outCtrl.fuOpType === VfaluType.vfredusum ||
    outCtrl.fuOpType === VfaluType.vfredmax ||
    outCtrl.fuOpType === VfaluType.vfredmin
  val outIsVfRedOrdered = outCtrl.fuOpType === VfaluType.vfredosum ||
    outCtrl.fuOpType === VfaluType.vfwredosum

  val isLastUopRed = outIsVfRedUnordered && outLastUop
  val resultDataUInt = Mux(isLastUopRed && !existMaskReg, vs1Reg, resultData.asUInt)
  val cmpResultWidth = dataWidth / 16
  val cmpResult = Wire(Vec(cmpResultWidth, Bool()))
  for (i <- 0 until cmpResultWidth) {
    if (i == 0) {
      cmpResult(i) := resultDataUInt(0)
    }
    else if (i < dataWidth / 64) {
      cmpResult(i) := Mux1H(
        Seq(
          (outVecCtrl.vsew === 1.U) -> resultDataUInt(i * 16),
          (outVecCtrl.vsew === 2.U) -> resultDataUInt(i * 32),
          (outVecCtrl.vsew === 3.U) -> resultDataUInt(i * 64)
        )
      )
    }
    else if (i < dataWidth / 32) {
      cmpResult(i) := Mux1H(
        Seq(
          (outVecCtrl.vsew === 1.U) -> resultDataUInt(i * 16),
          (outVecCtrl.vsew === 2.U) -> resultDataUInt(i * 32),
          (outVecCtrl.vsew === 3.U) -> false.B
        )
      )
    }
    else if (i < dataWidth / 16) {
      cmpResult(i) := Mux(outVecCtrl.vsew === 1.U, resultDataUInt(i * 16), false.B)
    }
  }
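  // Layout sketch: each compare writes its boolean to the LSB of its element slot,
  // so element j's bit sits at resultDataUInt(j * SEW). E.g. at e32 with VLEN = 128
  // the four compare bits are bits 0, 32, 64 and 96; slots past vlmax read false.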
  val outCtrl_s0 = ctrlVec.head
  val outVecCtrl_s0 = ctrlVec.head.vpu.get
  val outEew_s0 = Mux(resWiden, outVecCtrl_s0.vsew + 1.U, outVecCtrl_s0.vsew)
  val outEew = Mux(RegEnable(resWiden, io.in.fire), outVecCtrl.vsew + 1.U, outVecCtrl.vsew)
  val vlMax_s0 = ((VLEN / 8).U >> outEew_s0).asUInt
  val vlMax = ((VLEN / 8).U >> outEew).asUInt
  val lmulAbs = Mux(outVecCtrl.vlmul(2), (~outVecCtrl.vlmul(1, 0)).asUInt + 1.U, outVecCtrl.vlmul(1, 0))
  // vfmv_f_s needs vl = 1, the last uop of a reduction needs vl = 1, other uops need vl = vlmax
  numOfUopVFRED := {
    // addTime also counts the extra add of frs1
    val addTime = MuxLookup(outVecCtrl_s0.vlmul, 1.U(4.W))(Array(
      VLmul.m2 -> 2.U,
      VLmul.m4 -> 4.U,
      VLmul.m8 -> 8.U,
    ))
    val foldLastVlmul = MuxLookup(outVecCtrl_s0.vsew, "b000".U)(Array(
      VSew.e16 -> VLmul.mf8,
      VSew.e32 -> VLmul.mf4,
      VSew.e64 -> VLmul.mf2,
    ))
    // lmul < 1:  foldTime = vlmul - foldLastVlmul
    // lmul >= 1: foldTime = 0.U - foldLastVlmul
    val foldTime = Mux(outVecCtrl_s0.vlmul(2), outVecCtrl_s0.vlmul, 0.U) - foldLastVlmul
    addTime + foldTime
  }
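  // Fold-count sketch (3-bit wrap-around arithmetic): with vsew = e32,
  // foldLastVlmul = VLmul.mf4 = "b110". For vlmul = m1, foldTime = "b000" - "b110"
  // = 2, i.e. 4 elements fold 4 -> 2 -> 1; for vlmul = mf2, foldTime = "b111" -
  // "b110" = 1, i.e. 2 elements fold once. numOfUopVFRED = addTime + foldTime.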
  val reductionVl = Mux((outVecCtrl_s0.vuopIdx === numOfUopVFRED - 1.U) || (outCtrl_s0.fuOpType === VfaluType.vfredosum || outCtrl_s0.fuOpType === VfaluType.vfwredosum), 1.U, vlMax_s0)
  val outIsReduction = outCtrl.fuOpType === VfaluType.vfredusum ||
    outCtrl.fuOpType === VfaluType.vfredmax ||
    outCtrl.fuOpType === VfaluType.vfredmin ||
    outCtrl.fuOpType === VfaluType.vfredosum ||
    outCtrl.fuOpType === VfaluType.vfwredosum
  val outIsReduction_s0 = outCtrl_s0.fuOpType === VfaluType.vfredusum ||
    outCtrl_s0.fuOpType === VfaluType.vfredmax ||
    outCtrl_s0.fuOpType === VfaluType.vfredmin ||
    outCtrl_s0.fuOpType === VfaluType.vfredosum ||
    outCtrl_s0.fuOpType === VfaluType.vfwredosum
  val outVConfig_s0 = if (!cfg.vconfigWakeUp) outVecCtrl_s0.vconfig else dataVec.head.getSrcVConfig.asTypeOf(new VConfig)
  val outVl_s0 = outVConfig_s0.vl
  val outVlFix_s0 = Mux(
    outVecCtrl_s0.fpu.isFpToVecInst || (outCtrl_s0.fuOpType === VfaluType.vfmv_f_s),
    1.U,
    Mux(
      outCtrl_s0.fuOpType === VfaluType.vfmv_s_f,
      outVl_s0.orR,
      Mux(outIsReduction_s0, reductionVl, outVl_s0)
    )
  )
  val outVlFix = RegEnable(outVlFix_s0, io.in.fire)

  val vlMaxAllUop = Wire(outVl.cloneType)
  vlMaxAllUop := Mux(outVecCtrl.vlmul(2), vlMax >> lmulAbs, vlMax << lmulAbs).asUInt
  val vlMaxThisUop = Mux(outVecCtrl.vlmul(2), vlMax >> lmulAbs, vlMax).asUInt
  val vlSetThisUop = Mux(outVlFix > outVuopidx * vlMaxThisUop, outVlFix - outVuopidx * vlMaxThisUop, 0.U)
  val vlThisUop = Wire(UInt(3.W))
  vlThisUop := Mux(vlSetThisUop < vlMaxThisUop, vlSetThisUop, vlMaxThisUop)
  val vlMaskRShift = Wire(UInt((4 * numVecModule).W))
  vlMaskRShift := Fill(4 * numVecModule, 1.U(1.W)) >> ((4 * numVecModule).U - vlThisUop)
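  // vl-mask sketch: the right shift keeps exactly vlThisUop low bits set. E.g. with
  // 4 * numVecModule = 8 and vlThisUop = 3: "b11111111" >> (8 - 3) = "b00000111".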

  val outIsFirstGroup = outVuopidx === 0.U ||
    (outVuopidx === 1.U && (outVlmul === VLmul.m4 || outVlmul === VLmul.m8)) ||
    ((outVuopidx === 2.U || outVuopidx === 3.U) && outVlmul === VLmul.m8)
  val needFFlags = (outIsFirstGroup || outVecCtrl.lastUop) && outIsVfRedUnordered
  private val needNoMask = outCtrl.fuOpType === VfaluType.vfmerge ||
    outCtrl.fuOpType === VfaluType.vfmv_s_f ||
    outIsReduction ||
    outVecCtrl.fpu.isFpToVecInst
  val maskToMgu = Mux(needNoMask, allMaskTrue, outSrcMask)
  val allFFlagsEn = Wire(Vec(4 * numVecModule, Bool()))
  val outSrcMaskRShift = Wire(UInt((4 * numVecModule).W))
  outSrcMaskRShift := (maskToMgu >> (outVecCtrl.vuopIdx(2, 0) * vlMax))(4 * numVecModule - 1, 0)
  val f16FFlagsEn = outSrcMaskRShift
  val f32FFlagsEn = Wire(Vec(numVecModule, UInt(4.W)))
  val f64FFlagsEn = Wire(Vec(numVecModule, UInt(4.W)))
  val f16VlMaskEn = vlMaskRShift
  val f32VlMaskEn = Wire(Vec(numVecModule, UInt(4.W)))
  val f64VlMaskEn = Wire(Vec(numVecModule, UInt(4.W)))
  for (i <- 0 until numVecModule) {
    f32FFlagsEn(i) := Cat(Fill(2, 0.U), outSrcMaskRShift(2 * i + 1, 2 * i))
    f64FFlagsEn(i) := Cat(Fill(3, 0.U), outSrcMaskRShift(i))
    f32VlMaskEn(i) := Cat(Fill(2, 0.U), vlMaskRShift(2 * i + 1, 2 * i))
    f64VlMaskEn(i) := Cat(Fill(3, 0.U), vlMaskRShift(i))
  }
  val fflagsEn = Mux1H(
    Seq(
      (outEew === 1.U) -> f16FFlagsEn.asUInt,
      (outEew === 2.U) -> f32FFlagsEn.asUInt,
      (outEew === 3.U) -> f64FFlagsEn.asUInt
    )
  )
  val vlMaskEn = Mux1H(
    Seq(
      (outEew === 1.U) -> f16VlMaskEn.asUInt,
      (outEew === 2.U) -> f32VlMaskEn.asUInt,
      (outEew === 3.U) -> f64VlMaskEn.asUInt
    )
  )
  if (backendParams.debugEn) {
    dontTouch(allFFlagsEn)
  }
  val fflagsRedMask = Cat(Fill(4, 0.U), genMaskForRedFFlag(outVecCtrl.vsew))
  allFFlagsEn := Mux(outIsReduction, Cat(Fill(4 * numVecModule - 1, needFFlags) & fflagsRedMask(4 * numVecModule - 1, 1),
    needFFlags && fflagsRedMask(0) || outIsVfRedOrdered), fflagsEn & vlMaskEn).asTypeOf(allFFlagsEn)

  val allFFlags = fflagsData.asTypeOf(Vec(4 * numVecModule, UInt(5.W)))
  val outFFlags = allFFlagsEn.zip(allFFlags).map {
    case (en, fflags) => Mux(en, fflags, 0.U(5.W))
  }.reduce(_ | _)
  io.out.bits.res.fflags.get := outFFlags
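  // fflags merge sketch: each 64-bit lane reports 4 groups of 5 exception flags
  // (fflagsData(i) is 20 bits wide). Enabled groups are OR-reduced into the single
  // scalar fflags result, so a masked-off or beyond-vl element never raises flags.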

  val cmpResultOldVd = Wire(UInt(cmpResultWidth.W))
  val cmpResultOldVdRshiftWidth = Wire(UInt(6.W))
  cmpResultOldVdRshiftWidth := Mux1H(
    Seq(
      (outVecCtrl.vsew === VSew.e16) -> (outVecCtrl.vuopIdx(2, 0) << 3),
      (outVecCtrl.vsew === VSew.e32) -> (outVecCtrl.vuopIdx(2, 0) << 2),
      (outVecCtrl.vsew === VSew.e64) -> (outVecCtrl.vuopIdx(2, 0) << 1),
    )
  )
  cmpResultOldVd := (outOldVd >> cmpResultOldVdRshiftWidth)(4 * numVecModule - 1, 0)
  val cmpResultForMgu = Wire(Vec(cmpResultWidth, Bool()))
  private val maxVdIdx = 8
  private val elementsInOneUop = Mux1H(
    Seq(
      (outEew === 1.U) -> (cmpResultWidth).U(4.W),
      (outEew === 2.U) -> (cmpResultWidth / 2).U(4.W),
      (outEew === 3.U) -> (cmpResultWidth / 4).U(4.W),
    )
  )
  private val vdIdx = outVecCtrl.vuopIdx(2, 0)
  private val elementsComputed = Mux1H(Seq.tabulate(maxVdIdx)(i => (vdIdx === i.U) -> (elementsInOneUop * i.U)))
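  // Tail handling sketch (assuming VLEN = 128): at outEew = e32, elementsInOneUop = 4,
  // so for vdIdx = 2 elementsComputed = 8. Any element with global index >= outVl is
  // forced to 1 below, consistent with writing all-ones to the tail of a mask result.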
  for (i <- 0 until cmpResultWidth) {
    val cmpResultWithVmask = Mux(outSrcMaskRShift(i), cmpResult(i), Mux(outVecCtrl.vma, true.B, cmpResultOldVd(i)))
    cmpResultForMgu(i) := Mux(elementsComputed +& i.U >= outVl, true.B, cmpResultWithVmask)
  }
  val outIsFold = outVecCtrl.fpu.isFoldTo1_2 || outVecCtrl.fpu.isFoldTo1_4 || outVecCtrl.fpu.isFoldTo1_8
  val outOldVdForREDO = Mux1H(Seq(
    (outVecCtrl.vsew === VSew.e16) -> (outOldVd >> 16),
    (outVecCtrl.vsew === VSew.e32) -> (outOldVd >> 32),
    (outVecCtrl.vsew === VSew.e64) -> (outOldVd >> 64),
  ))
  val outOldVdForWREDO = Mux(
    !outIsFold,
    Mux(outVecCtrl.vsew === VSew.e16, Cat(outOldVd(VLEN - 1 - 16, 16), 0.U(32.W)), Cat(outOldVd(VLEN - 1 - 32, 32), 0.U(64.W))),
    Mux(outVecCtrl.vsew === VSew.e16,
      // vuopIdx mod 8 == 1
      Mux(outVecCtrl.vuopIdx(2, 0) === 1.U, outOldVd, outOldVd >> 16),
      // vuopIdx mod 4 == 1
      Mux(outVecCtrl.vuopIdx(1, 0) === 1.U, outOldVd, outOldVd >> 32)
    ),
  )
  val outOldVdForRED = Mux(outCtrl.fuOpType === VfaluType.vfredosum, outOldVdForREDO, outOldVdForWREDO)
453  val outOldVdForRED = Mux(outCtrl.fuOpType === VfaluType.vfredosum, outOldVdForREDO, outOldVdForWREDO)
454  val numOfUopVFREDOSUM = {
455    val uvlMax = MuxLookup(outVecCtrl.vsew, 0.U)(Array(
456      VSew.e16 -> 8.U,
457      VSew.e32 -> 4.U,
458      VSew.e64 -> 2.U,
459    ))
460    val vlMax = Mux(outVecCtrl.vlmul(2), uvlMax >> (-outVecCtrl.vlmul)(1, 0), uvlMax << outVecCtrl.vlmul(1, 0)).asUInt
461    vlMax
462  }
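  // Uop-count sketch: an ordered reduction retires one element per uop, so the uop
  // count equals the element count. E.g. vsew = e32, vlmul = m2: uvlMax = 4 and
  // numOfUopVFREDOSUM = 4 << 1 = 8.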
  val isLastUopForREDO = outVecCtrl.lastUop
  val isOutOldVdForREDO = ((outCtrl.fuOpType === VfaluType.vfredosum && outIsFold) || outCtrl.fuOpType === VfaluType.vfwredosum) && !isLastUopForREDO
  val taIsFalseForVFREDO = ((outCtrl.fuOpType === VfaluType.vfredosum) || (outCtrl.fuOpType === VfaluType.vfwredosum)) && (outVecCtrl.vuopIdx =/= numOfUopVFREDOSUM - 1.U)
  // outVecCtrl.fpu.isFpToVecInst means this is a scalar float instruction, not a vector float instruction
  val notUseVl = outVecCtrl.fpu.isFpToVecInst || (outCtrl.fuOpType === VfaluType.vfmv_f_s)
  val notModifyVd = !notUseVl && (outVl === 0.U)
  mgu.io.in.vd := Mux(outVecCtrl.isDstMask, Cat(0.U((dataWidth / 16 * 15).W), cmpResultForMgu.asUInt), resultDataUInt)
  mgu.io.in.oldVd := Mux(isOutOldVdForREDO, outOldVdForRED, outOldVd)
  mgu.io.in.mask := maskToMgu
  mgu.io.in.info.ta := Mux(outCtrl.fuOpType === VfaluType.vfmv_f_s, true.B, Mux(taIsFalseForVFREDO, false.B, outVecCtrl.vta))
  mgu.io.in.info.ma := Mux(outCtrl.fuOpType === VfaluType.vfmv_s_f, true.B, outVecCtrl.vma)
  mgu.io.in.info.vl := outVlFix
  mgu.io.in.info.vlmul := outVecCtrl.vlmul
  mgu.io.in.info.valid := Mux(notModifyVd, false.B, io.in.valid)
  mgu.io.in.info.vstart := Mux(outVecCtrl.fpu.isFpToVecInst, 0.U, outVecCtrl.vstart)
  mgu.io.in.info.eew := RegEnable(outEew_s0, io.in.fire)
  mgu.io.in.info.vsew := outVecCtrl.vsew
  mgu.io.in.info.vdIdx := RegEnable(Mux(outIsReduction_s0, 0.U, outVecCtrl_s0.vuopIdx), io.in.fire)
  mgu.io.in.info.narrow := outVecCtrl.isNarrow
  mgu.io.in.info.dstMask := outVecCtrl.isDstMask
  mgu.io.in.isIndexedVls := false.B
  mgtu.io.in.vd := Mux(outVecCtrl.isDstMask, mgu.io.out.vd, resultDataUInt)
  mgtu.io.in.vl := outVl
  val resultFpMask = Wire(UInt(VLEN.W))
  val isFclass = outVecCtrl.fpu.isFpToVecInst && (outCtrl.fuOpType === VfaluType.vfclass)
  val fpCmpFuOpType = Seq(VfaluType.vfeq, VfaluType.vflt, VfaluType.vfle)
  val isCmp = outVecCtrl.fpu.isFpToVecInst && (fpCmpFuOpType.map(_ === outCtrl.fuOpType).reduce(_ | _))
  resultFpMask := Mux(isFclass || isCmp, Fill(16, 1.U(1.W)), Fill(VLEN, 1.U(1.W)))
  // when the destination is a mask, the result also needs to be tail-masked by mgtu
  io.out.bits.res.data := Mux(notModifyVd, outOldVd, Mux(outVecCtrl.isDstMask, mgtu.io.out.vd, mgu.io.out.vd) & resultFpMask)
  io.out.bits.ctrl.exceptionVec.get(ExceptionNO.illegalInstr) := mgu.io.out.illegal

}

class VFMgu(vlen: Int)(implicit p: Parameters) extends Module {
  val io = IO(new VFMguIO(vlen))

  val vd = io.in.vd
  val oldvd = io.in.oldVd
  val mask = io.in.mask
  val vsew = io.in.info.eew
  val num16bits = vlen / 16

  // placeholder drive so the module elaborates if instantiated; the merge logic
  // is not implemented here (this class is unused scaffolding, not the Mgu above)
  io.out.vd := vd
}

class VFMguIO(vlen: Int)(implicit p: Parameters) extends Bundle {
  val in = new Bundle {
    val vd = Input(UInt(vlen.W))
    val oldVd = Input(UInt(vlen.W))
    val mask = Input(UInt(vlen.W))
    val info = Input(new VecInfo)
  }
  val out = new Bundle {
    val vd = Output(UInt(vlen.W))
  }
}