/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.mem

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils._
import utility._
import xiangshan._
import xiangshan.cache.{AtomicWordIO, MemoryOpConstants, HasDCacheParameters}
import xiangshan.cache.mmu.{TlbCmd, TlbRequestIO}
import difftest._
import xiangshan.ExceptionNO._
import xiangshan.backend.fu.PMPRespBundle

class AtomicsUnit(implicit p: Parameters) extends XSModule with MemoryOpConstants with HasDCacheParameters {
  val io = IO(new Bundle() {
    val hartId = Input(UInt(8.W))
    val in = Flipped(Decoupled(new ExuInput))
    val storeDataIn = Flipped(Valid(new ExuOutput)) // src2 from rs
    val out = Decoupled(new ExuOutput)
    val dcache = new AtomicWordIO
    val dtlb = new TlbRequestIO(2)
    val pmpResp = Flipped(new PMPRespBundle())
    val rsIdx = Input(UInt(log2Up(IssQueSize).W))
    val flush_sbuffer = new SbufferFlushBundle
    val feedbackSlow = ValidIO(new RSFeedback)
    val redirect = Flipped(ValidIO(new Redirect))
    val exceptionAddr = ValidIO(UInt(VAddrBits.W))
    val csrCtrl = Flipped(new CustomCSRCtrlIO)
  })

  //-------------------------------------------------------
  // Atomics Memory Access FSM
  //-------------------------------------------------------
  val s_invalid :: s_tlb_and_flush_sbuffer_req :: s_pm :: s_wait_flush_sbuffer_resp :: s_cache_req :: s_cache_resp :: s_cache_resp_latch :: s_finish :: Nil = Enum(8)
  val state = RegInit(s_invalid)
  val out_valid = RegInit(false.B)
  val data_valid = RegInit(false.B)
  val in = Reg(new ExuInput())
  val exceptionVec = RegInit(0.U.asTypeOf(ExceptionVec()))
  val atom_override_xtval = RegInit(false.B)
  val isLr = in.uop.ctrl.fuOpType === LSUOpType.lr_w || in.uop.ctrl.fuOpType === LSUOpType.lr_d
  // paddr after translation
  val paddr = Reg(UInt())
  val vaddr = in.src(0)
  val is_mmio = Reg(Bool())
  // pmp check
  val static_pm = Reg(Valid(Bool())) // valid for static, bits for mmio
  // dcache response data
  val resp_data = Reg(UInt())
  val resp_data_wire = WireInit(0.U)
  val is_lrsc_valid = Reg(Bool())
  // sbuffer is empty or not
  val sbuffer_empty = io.flush_sbuffer.empty

  // Difftest signals
  val paddr_reg = Reg(UInt(64.W))
  val data_reg = Reg(UInt(64.W))
  val mask_reg = Reg(UInt(8.W))
  val fuop_reg = Reg(UInt(8.W))

  io.exceptionAddr.valid := atom_override_xtval
  io.exceptionAddr.bits := in.src(0)

  // assign default values to output signals
  io.in.ready := false.B

  io.dcache.req.valid := false.B
  io.dcache.req.bits := DontCare

  io.dtlb.req.valid := false.B
  io.dtlb.req.bits := DontCare
  io.dtlb.req_kill := false.B
  io.dtlb.resp.ready := true.B

  io.flush_sbuffer.valid := false.B
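
  // State walk-through (a summary of the transitions implemented below):
  //   s_invalid                   : wait for a new request from RS
  //   s_tlb_and_flush_sbuffer_req : poll the dtlb for translation and, in
  //                                 parallel, ask the sbuffer to flush
  //   s_pm                        : PMP / MMIO checks on the physical address
  //   s_wait_flush_sbuffer_resp   : wait until the sbuffer has drained
  //   s_cache_req                 : issue the LR/SC/AMO request to dcache
  //   s_cache_resp                : wait for dcache; re-issue on miss + replay
  //   s_cache_resp_latch          : select and sign-extend the response data
  //   s_finish                    : hold the result until io.out fires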

  XSDebug("state: %d\n", state)

  when (state === s_invalid) {
    io.in.ready := true.B
    when (io.in.fire) {
      in := io.in.bits
      in.src(1) := in.src(1) // leave src2 unchanged
      state := s_tlb_and_flush_sbuffer_req
    }
  }

  when (io.storeDataIn.fire) {
    in.src(1) := io.storeDataIn.bits.data
    data_valid := true.B
  }

  assert(!(io.storeDataIn.fire && data_valid), "atomic unit re-receives data")

  // Send TLB feedback to the store issue queue.
  // Feedback is sent right after the request is received, and an AMO is
  // always reported as a TLB hit, since this unit keeps polling the TLB
  // on its own until it hits.
  io.feedbackSlow.valid := RegNext(RegNext(io.in.valid))
  io.feedbackSlow.bits.hit := true.B
  io.feedbackSlow.bits.rsIdx := RegEnable(io.rsIdx, io.in.valid)
  io.feedbackSlow.bits.flushState := DontCare
  io.feedbackSlow.bits.sourceType := DontCare
  io.feedbackSlow.bits.dataInvalidSqIdx := DontCare

  // TLB translation: drive the request and handle exceptions;
  // at the same time, flush the sbuffer
  when (state === s_tlb_and_flush_sbuffer_req) {
    // send req to dtlb
    // keep firing until tlb hit
    io.dtlb.req.valid := true.B
    io.dtlb.req.bits.vaddr := in.src(0)
    io.dtlb.resp.ready := true.B
    io.dtlb.req.bits.cmd := Mux(isLr, TlbCmd.atom_read, TlbCmd.atom_write)
    io.dtlb.req.bits.debug.pc := in.uop.cf.pc
    io.dtlb.req.bits.debug.isFirstIssue := false.B

    // ask sbuffer to flush if it is not empty yet
    io.flush_sbuffer.valid := !sbuffer_empty

    when (io.dtlb.resp.fire) {
      paddr := io.dtlb.resp.bits.paddr(0)
      // exception handling
      val addrAligned = LookupTree(in.uop.ctrl.fuOpType(1, 0), List(
        "b00".U -> true.B,                    // b
        "b01".U -> (in.src(0)(0) === 0.U),    // h
        "b10".U -> (in.src(0)(1, 0) === 0.U), // w
        "b11".U -> (in.src(0)(2, 0) === 0.U)  // d
      ))
      exceptionVec(loadAddrMisaligned) := !addrAligned && isLr
      exceptionVec(storeAddrMisaligned) := !addrAligned && !isLr
      exceptionVec(storePageFault) := io.dtlb.resp.bits.excp(0).pf.st
      exceptionVec(loadPageFault) := io.dtlb.resp.bits.excp(0).pf.ld
      exceptionVec(storeAccessFault) := io.dtlb.resp.bits.excp(0).af.st
      exceptionVec(loadAccessFault) := io.dtlb.resp.bits.excp(0).af.ld
      static_pm := io.dtlb.resp.bits.static_pm

      when (!io.dtlb.resp.bits.miss) {
        when (!addrAligned) {
          // NOTE: the alignment check only needs the vaddr, so strictly we
          // would not have to wait for the TLB here. The misaligned-address
          // exception is raised now; TLB exceptions are checked in the next
          // cycle for timing. If there is any exception, the access is not
          // executed.
          state := s_finish
          out_valid := true.B
          atom_override_xtval := true.B
        } .otherwise {
          state := s_pm
        }
      }
    }
  }
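
  // Worked example of the alignment rule above: a word access (fuOpType(1,0)
  // === "b10") requires vaddr(1,0) === 0, so vaddr 0x1004 is aligned while
  // 0x1006 raises loadAddrMisaligned / storeAddrMisaligned (for LR vs.
  // AMO/SC). The same predicate as a pure-Scala sketch (illustrative only,
  // not elaborated into hardware):
  //   def isAligned(addr: BigInt, sizeEncode: Int): Boolean =
  //     (addr & ((BigInt(1) << sizeEncode) - 1)) == 0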

  when (state === s_pm) {
    val pmp = WireInit(io.pmpResp)
    when (static_pm.valid) {
      pmp.ld := false.B
      pmp.st := false.B
      pmp.instr := false.B
      pmp.mmio := static_pm.bits
    }
    is_mmio := pmp.mmio
    // NOTE: only load/store exceptions are handled here; requests carrying
    // other kinds of exceptions must not be sent to this unit
    val exception_va = exceptionVec(storePageFault) || exceptionVec(loadPageFault) ||
      exceptionVec(storeAccessFault) || exceptionVec(loadAccessFault)
    val exception_pa = pmp.st || pmp.ld
    when (exception_va || exception_pa) {
      state := s_finish
      out_valid := true.B
      atom_override_xtval := true.B
    }.otherwise {
      // if the sbuffer has already been flushed, query dcache directly;
      // otherwise wait for the flush to complete
      state := Mux(sbuffer_empty, s_cache_req, s_wait_flush_sbuffer_resp)
    }
    // update the access-fault bits with the PMP results
    exceptionVec(loadAccessFault) := exceptionVec(loadAccessFault) || pmp.ld && isLr
    exceptionVec(storeAccessFault) := exceptionVec(storeAccessFault) || pmp.st || pmp.ld && !isLr
  }

  when (state === s_wait_flush_sbuffer_resp) {
    when (sbuffer_empty) {
      state := s_cache_req
    }
  }

  when (state === s_cache_req) {
    val pipe_req = io.dcache.req.bits
    pipe_req := DontCare

    pipe_req.cmd := LookupTree(in.uop.ctrl.fuOpType, List(
      LSUOpType.lr_w      -> M_XLR,
      LSUOpType.sc_w      -> M_XSC,
      LSUOpType.amoswap_w -> M_XA_SWAP,
      LSUOpType.amoadd_w  -> M_XA_ADD,
      LSUOpType.amoxor_w  -> M_XA_XOR,
      LSUOpType.amoand_w  -> M_XA_AND,
      LSUOpType.amoor_w   -> M_XA_OR,
      LSUOpType.amomin_w  -> M_XA_MIN,
      LSUOpType.amomax_w  -> M_XA_MAX,
      LSUOpType.amominu_w -> M_XA_MINU,
      LSUOpType.amomaxu_w -> M_XA_MAXU,

      LSUOpType.lr_d      -> M_XLR,
      LSUOpType.sc_d      -> M_XSC,
      LSUOpType.amoswap_d -> M_XA_SWAP,
      LSUOpType.amoadd_d  -> M_XA_ADD,
      LSUOpType.amoxor_d  -> M_XA_XOR,
      LSUOpType.amoand_d  -> M_XA_AND,
      LSUOpType.amoor_d   -> M_XA_OR,
      LSUOpType.amomin_d  -> M_XA_MIN,
      LSUOpType.amomax_d  -> M_XA_MAX,
      LSUOpType.amominu_d -> M_XA_MINU,
      LSUOpType.amomaxu_d -> M_XA_MAXU
    ))
    pipe_req.miss := false.B
    pipe_req.probe := false.B
    pipe_req.probe_need_data := false.B
    pipe_req.source := AMO_SOURCE.U
    pipe_req.addr := get_block_addr(paddr)
    pipe_req.vaddr := get_block_addr(in.src(0)) // vaddr
    pipe_req.word_idx := get_word(paddr)
    pipe_req.amo_data := genWdata(in.src(1), in.uop.ctrl.fuOpType(1, 0))
    pipe_req.amo_mask := genWmask(paddr, in.uop.ctrl.fuOpType(1, 0))

    io.dcache.req.valid := Mux(
      io.dcache.req.bits.cmd === M_XLR,
      !io.dcache.block_lr, // block LR requests to survive an LR storm
      data_valid // wait until src(1) is ready
    )

    when (io.dcache.req.fire) {
      state := s_cache_resp
      paddr_reg := paddr
      data_reg := io.dcache.req.bits.amo_data
      mask_reg := io.dcache.req.bits.amo_mask
      fuop_reg := in.uop.ctrl.fuOpType
    }
  }
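
  // genWdata / genWmask come from the shared mem utilities. Assuming their
  // usual semantics (sub-word data replicated across the 64-bit lane, byte
  // mask shifted to the accessed bytes), a 32-bit AMO whose paddr(2,0) is 4
  // would be issued as (illustrative values only):
  //   amo_data = Fill(2, src2(31, 0)) // operand in both halves of the lane
  //   amo_mask = "b11110000".U        // upper four bytes enabled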

  val dcache_resp_data = Reg(UInt())
  val dcache_resp_id = Reg(UInt())
  val dcache_resp_error = Reg(Bool())

  when (state === s_cache_resp) {
    // when not missed:
    //   everything is OK, simply latch the response
    // when missed but not replayed:
    //   wait for the missQueue to handle the miss and replay our request
    // when missed and replayed:
    //   the req missed and failed to enter the missQueue; manually replay it later
    // TODO: add assertions:
    // 1. add a replay delay counter?
    // 2. once a req gets into the missQueue, it should not miss any more
    when (io.dcache.resp.fire) {
      when (io.dcache.resp.bits.miss) {
        when (io.dcache.resp.bits.replay) {
          state := s_cache_req
        }
      } .otherwise {
        dcache_resp_data := io.dcache.resp.bits.data
        dcache_resp_id := io.dcache.resp.bits.id
        dcache_resp_error := io.dcache.resp.bits.error
        state := s_cache_resp_latch
      }
    }
  }

  when (state === s_cache_resp_latch) {
    // the LR/SC reservation status is returned through resp.id
    is_lrsc_valid := dcache_resp_id
    // shift the 64-bit response so that the addressed byte sits at bit 0
    val rdataSel = LookupTree(paddr(2, 0), List(
      "b000".U -> dcache_resp_data(63, 0),
      "b001".U -> dcache_resp_data(63, 8),
      "b010".U -> dcache_resp_data(63, 16),
      "b011".U -> dcache_resp_data(63, 24),
      "b100".U -> dcache_resp_data(63, 32),
      "b101".U -> dcache_resp_data(63, 40),
      "b110".U -> dcache_resp_data(63, 48),
      "b111".U -> dcache_resp_data(63, 56)
    ))

    // SC writes back its success flag unmodified; every other op
    // sign-extends the selected old value
    resp_data_wire := LookupTree(in.uop.ctrl.fuOpType, List(
      LSUOpType.lr_w      -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.sc_w      -> dcache_resp_data,
      LSUOpType.amoswap_w -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoadd_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoxor_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoand_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amoor_w   -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amomin_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amomax_w  -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amominu_w -> SignExt(rdataSel(31, 0), XLEN),
      LSUOpType.amomaxu_w -> SignExt(rdataSel(31, 0), XLEN),

      LSUOpType.lr_d      -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.sc_d      -> dcache_resp_data,
      LSUOpType.amoswap_d -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoadd_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoxor_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoand_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amoor_d   -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amomin_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amomax_d  -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amominu_d -> SignExt(rdataSel(63, 0), XLEN),
      LSUOpType.amomaxu_d -> SignExt(rdataSel(63, 0), XLEN)
    ))

    when (dcache_resp_error && io.csrCtrl.cache_error_enable) {
      exceptionVec(loadAccessFault) := isLr
      exceptionVec(storeAccessFault) := !isLr
      // Reg semantics: these asserts observe the values from before this
      // cycle's update, i.e. they check the bits were not already set
      assert(!exceptionVec(loadAccessFault))
      assert(!exceptionVec(storeAccessFault))
    }

    resp_data := resp_data_wire
    state := s_finish
    out_valid := true.B
  }

  io.out.valid := out_valid
  XSError((state === s_finish) =/= out_valid, "out_valid reg error\n")
  io.out.bits := DontCare
  io.out.bits.uop := in.uop
  io.out.bits.uop.cf.exceptionVec := exceptionVec
  io.out.bits.data := resp_data
  io.out.bits.redirectValid := false.B
  io.out.bits.debug.isMMIO := is_mmio
  io.out.bits.debug.paddr := paddr
  when (io.out.fire) {
    XSDebug("atomics writeback: pc %x data %x\n", io.out.bits.uop.cf.pc, io.dcache.resp.bits.data)
    state := s_invalid
    out_valid := false.B
  }

  when (state === s_finish) {
    data_valid := false.B
  }

  when (io.redirect.valid) {
    atom_override_xtval := false.B
  }

  // atomic trigger
  val csrCtrl = io.csrCtrl
  val tdata = Reg(Vec(6, new MatchTriggerIO))
  val tEnable = RegInit(VecInit(Seq.fill(6)(false.B)))
  val en = csrCtrl.trigger_enable
  tEnable := VecInit(en(2), en(3), en(7), en(4), en(5), en(9))
  when (csrCtrl.mem_trigger.t.valid) {
    tdata(csrCtrl.mem_trigger.t.bits.addr) := csrCtrl.mem_trigger.t.bits.tdata
  }
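
  // Trigger slot layout, as implied by the tEnable permutation above and the
  // mappings below: backend slots 0/1/4 are store address triggers and slots
  // 2/3/5 are load address triggers. When a pair is chained ((0,1) or (2,3)),
  // both compares must hit for either slot to report a hit.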

  val lTriggerMapping = Map(0 -> 2, 1 -> 3, 2 -> 5)
  val sTriggerMapping = Map(0 -> 0, 1 -> 1, 2 -> 4)

  val backendTriggerHitReg = Reg(Vec(6, Bool()))
  backendTriggerHitReg := VecInit(Seq.fill(6)(false.B))

  when (state === s_cache_req) {
    // store trigger
    val store_hit = Wire(Vec(3, Bool()))
    for (j <- 0 until 3) {
      store_hit(j) := !tdata(sTriggerMapping(j)).select && TriggerCmp(
        vaddr,
        tdata(sTriggerMapping(j)).tdata2,
        tdata(sTriggerMapping(j)).matchType,
        tEnable(sTriggerMapping(j))
      )
      backendTriggerHitReg(sTriggerMapping(j)) := store_hit(j)
    }

    when (tdata(0).chain) {
      backendTriggerHitReg(0) := store_hit(0) && store_hit(1)
      backendTriggerHitReg(1) := store_hit(0) && store_hit(1)
    }

    when (!in.uop.cf.trigger.backendEn(0)) {
      backendTriggerHitReg(4) := false.B
    }

    // load trigger
    val load_hit = Wire(Vec(3, Bool()))
    for (j <- 0 until 3) {
      val addrHit = TriggerCmp(
        vaddr,
        tdata(lTriggerMapping(j)).tdata2,
        tdata(lTriggerMapping(j)).matchType,
        tEnable(lTriggerMapping(j))
      )
      load_hit(j) := addrHit && !tdata(lTriggerMapping(j)).select
      backendTriggerHitReg(lTriggerMapping(j)) := load_hit(j)
    }
    when (tdata(2).chain) {
      backendTriggerHitReg(2) := load_hit(0) && load_hit(1)
      backendTriggerHitReg(3) := load_hit(0) && load_hit(1)
    }
    when (!in.uop.cf.trigger.backendEn(1)) {
      backendTriggerHitReg(5) := false.B
    }
  }

  // The address triggers are compared at s_cache_req, while the result is
  // only consumed at s_finish, so delaying it through a register is safe.
  io.out.bits.uop.cf.trigger.backendHit := VecInit(Seq.fill(6)(false.B))
  when (isLr) {
    // enable load triggers
    io.out.bits.uop.cf.trigger.backendHit(2) := backendTriggerHitReg(2)
    io.out.bits.uop.cf.trigger.backendHit(3) := backendTriggerHitReg(3)
    io.out.bits.uop.cf.trigger.backendHit(5) := backendTriggerHitReg(5)
  }.otherwise {
    // enable store triggers
    io.out.bits.uop.cf.trigger.backendHit(0) := backendTriggerHitReg(0)
    io.out.bits.uop.cf.trigger.backendHit(1) := backendTriggerHitReg(1)
    io.out.bits.uop.cf.trigger.backendHit(4) := backendTriggerHitReg(4)
  }

  if (env.EnableDifftest) {
    val difftest = Module(new DifftestAtomicEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.atomicResp := state === s_cache_resp_latch
    difftest.io.atomicAddr := paddr_reg
    difftest.io.atomicData := data_reg
    difftest.io.atomicMask := mask_reg
    difftest.io.atomicFuop := fuop_reg
    difftest.io.atomicOut := resp_data_wire
  }

  if (env.EnableDifftest || env.AlwaysBasicDiff) {
    val uop = io.out.bits.uop
    val difftest = Module(new DifftestLrScEvent)
    difftest.io.clock := clock
    difftest.io.coreid := io.hartId
    difftest.io.valid := io.out.fire &&
      (uop.ctrl.fuOpType === LSUOpType.sc_d || uop.ctrl.fuOpType === LSUOpType.sc_w)
    difftest.io.success := is_lrsc_valid
  }
}