/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

// See LICENSE.SiFive for license details.

package xiangshan.backend.fu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.internal.naming.chiselName
import chisel3.util._
import utility.MaskedRegMap.WritableMask
import xiangshan._
import xiangshan.backend.fu.util.HasCSRConst
import utils._
import utility._
import xiangshan.cache.mmu.{TlbCmd, TlbExceptionBundle}

trait PMPConst extends HasPMParameters {
  val PMPOffBits = 2 // minimum grain is 4 bytes
  val CoarserGrain: Boolean = PlatformGrain > PMPOffBits
}

abstract class PMPBundle(implicit val p: Parameters) extends Bundle with PMPConst
abstract class PMPModule(implicit val p: Parameters) extends Module with PMPConst
abstract class PMPXSModule(implicit p: Parameters) extends XSModule with PMPConst

@chiselName
class PMPConfig(implicit p: Parameters) extends PMPBundle {
  val l = Bool()
  val c = Bool() // res(1), unused in PMP, reused by PMA
  val atomic = Bool() // res(0), unused in PMP, reused by PMA
  val a = UInt(2.W)
  val x = Bool()
  val w = Bool()
  val r = Bool()

  def res: UInt = Cat(c, atomic) // unused in PMP
  def off = a === 0.U
  def tor = a === 1.U
  def na4 = { if (CoarserGrain) false.B else a === 2.U }
  def napot = { if (CoarserGrain) a(1).asBool else a === 3.U }
  def off_tor = !a(1)
  def na4_napot = a(1)

  def locked = l
  def addr_locked: Bool = locked
  // pmpaddr(i) is also locked when entry i+1 is a locked TOR entry
  def addr_locked(next: PMPConfig): Bool = locked || (next.locked && next.tor)
}

object PMPConfigUInt {
  def apply(
    l: Boolean = false,
    c: Boolean = false,
    atomic: Boolean = false,
    a: Int = 0,
    x: Boolean = false,
    w: Boolean = false,
    r: Boolean = false)(implicit p: Parameters): UInt = {
    var config = 0
    if (l) { config += (1 << 7) }
    if (c) { config += (1 << 6) }
    if (atomic) { config += (1 << 5) }
    if (a > 0) { config += (a << 3) }
    if (x) { config += (1 << 2) }
    if (w) { config += (1 << 1) }
    if (r) { config += (1 << 0) }
    config.U(8.W)
  }
}
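
// Worked example (illustrative): PMPConfigUInt(l = true, a = 3, x = true, r = true)
// packs L = 1 (bit 7), A = NAPOT = 0b11 (bits 4:3), X = 1 (bit 2) and R = 1 (bit 0)
// into 0b1001_1101 = 0x9d, the standard pmpcfg byte layout.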

trait PMPReadWriteMethodBare extends PMPConst {
  def match_mask(cfg: PMPConfig, paddr: UInt) = {
    val match_mask_c_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_c_addr & ~(match_mask_c_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  def write_cfg_vec(mask: Vec[UInt], addr: Vec[UInt], index: Int)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_m_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_m_tmp
      when (!cfg_w_m_tmp.l) {
        cfgVec(i).w := cfg_w_m_tmp.w && cfg_w_m_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_m_tmp.a(1), cfg_w_m_tmp.a.orR) }
        when (cfgVec(i).na4_napot) {
          mask(index + i) := match_mask(cfgVec(i), addr(index + i))
        }
      }
    }
    cfgVec.asUInt
  }

  def read_addr(cfg: PMPConfig)(addr: UInt): UInt = {
    val G = PlatformGrain - PMPOffBits
    require(G >= 0)
    if (G == 0) {
      addr
    } else if (G >= 2) {
      Mux(cfg.na4_napot, set_low_bits(addr, G-1), clear_low_bits(addr, G))
    } else { // G is 1
      Mux(cfg.off_tor, clear_low_bits(addr, G), addr)
    }
  }

  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt, cfg: PMPConfig, addr: UInt): UInt = {
    val locked = cfg.addr_locked(next)
    mask := Mux(!locked, match_mask(cfg, paddr), mask)
    Mux(!locked, paddr, addr)
  }

  /** set the data's low num bits (lsb) to ones */
  def set_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    data | ((1 << num)-1).U
  }

  /** clear the data's low num bits (lsb) to zeros */
  def clear_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    // use Cat instead of & with a mask to avoid signal-width inference problems
    if (num == 0) { data }
    else { Cat(data(data.getWidth-1, num), 0.U(num.W)) }
  }
}

trait PMPReadWriteMethod extends PMPReadWriteMethodBare { this: PMPBase =>
  def write_cfg_vec(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_tmp
      when (!cfg_w_tmp.l) {
        // W is only writable together with R: R=0/W=1 combinations are reserved
        cfgVec(i).w := cfg_w_tmp.w && cfg_w_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_tmp.a(1), cfg_w_tmp.a.orR) }
      }
    }
    cfgVec.asUInt
  }

  /** In general, the PMP grain is 2^(G+2) bytes. When G >= 1, NA4 is not selectable.
   * When G >= 2 and cfg.a(1) is set (NAPOT mode), the bits addr(G-2, 0) read as all ones.
   * When G >= 1 and cfg.a(1) is clear (OFF or TOR mode), the bits addr(G-1, 0) read as zeros.
   * The low PMPOffBits bits are not stored.
   */
  def read_addr(): UInt = {
    read_addr(cfg)(addr)
  }

  /** addr is the stored (internal) address, with the low PMPOffBits dropped.
   * compare_addr is the internal address expanded for comparison.
   * paddr is the external (physical) address.
   */
  def write_addr(next: PMPConfig)(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked(next), paddr, addr)
  }
  def write_addr(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked, paddr, addr)
  }
}

/** PMPBase for the CSR unit: read and write logic only */
@chiselName
class PMPBase(implicit p: Parameters) extends PMPBundle with PMPReadWriteMethod {
  val cfg = new PMPConfig
  val addr = UInt((PMPAddrBits - PMPOffBits).W)

  def gen(cfg: PMPConfig, addr: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
  }
}
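
// Worked NAPOT example (illustrative, with PlatformGrain = PMPOffBits = 2): a stored
// pmpaddr ending in 0b0111 has three trailing ones, so match_mask appends cfg.a(0) = 1,
// extracts the trailing-ones run, and re-appends the PMPOffBits offset ones, producing
// a mask whose low six bits are set: a 2^(3+3) = 64-byte naturally aligned region,
// as the privileged spec's NAPOT encoding prescribes. This mask feeds napotMatch below.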

trait PMPMatchMethod extends PMPConst { this: PMPEntry =>
  /** compare_addr is used to compare with the input paddr */
  def compare_addr: UInt = ((addr << PMPOffBits) & ~(((1 << PlatformGrain) - 1).U(PMPAddrBits.W))).asUInt

  /** lgSize and lgMaxSize are both log2 of the access size in bytes.
   * For the dtlb, the max size is bPMXLEN, i.e. 8 bytes.
   * For the itlb and ptw, the max size is log2(512)? We may only need 64 bytes,
   * so how do we prevent bugs there?
   * TODO: handle the special case that itlb & ptw & dcache access a wider size than PMXLEN
   */
  def is_match(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    Mux(cfg.na4_napot, napotMatch(paddr, lgSize, lgMaxSize),
      Mux(cfg.tor, torMatch(paddr, lgSize, lgMaxSize, last_pmp), false.B))
  }

  /** generate a match mask to help matching in NAPOT mode */
  def match_mask(paddr: UInt): UInt = {
    match_mask(cfg, paddr)
  }

  /** true if the whole access [paddr, paddr + 2^lgSize) lies below compare_addr */
  def boundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    if (lgMaxSize <= PlatformGrain) {
      (paddr < compare_addr)
    } else {
      val highLess = (paddr >> lgMaxSize) < (compare_addr >> lgMaxSize)
      val highEqual = (paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)
      val lowLess = (paddr(lgMaxSize-1, 0) | OneHot.UIntToOH1(lgSize, lgMaxSize)) < compare_addr(lgMaxSize-1, 0)
      highLess || (highEqual && lowLess)
    }
  }

  def lowerBoundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    !boundMatch(paddr, lgSize, lgMaxSize)
  }

  def higherBoundMatch(paddr: UInt, lgMaxSize: Int) = {
    boundMatch(paddr, 0.U, lgMaxSize)
  }

  /** TOR: the access must be at or above the previous entry's address and below this one's */
  def torMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    last_pmp.lowerBoundMatch(paddr, lgSize, lgMaxSize) && higherBoundMatch(paddr, lgMaxSize)
  }

  def unmaskEqual(a: UInt, b: UInt, m: UInt) = {
    (a & ~m) === (b & ~m)
  }

  def napotMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      unmaskEqual(paddr, compare_addr, mask)
    } else {
      val lowMask = mask | OneHot.UIntToOH1(lgSize, lgMaxSize)
      val highMatch = unmaskEqual(paddr >> lgMaxSize, compare_addr >> lgMaxSize, mask >> lgMaxSize)
      val lowMatch = unmaskEqual(paddr(lgMaxSize-1, 0), compare_addr(lgMaxSize-1, 0), lowMask(lgMaxSize-1, 0))
      highMatch && lowMatch
    }
  }

  /** true if the access either hits the entry entirely or misses it entirely */
  def aligned(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last: PMPEntry) = {
    if (lgMaxSize <= PlatformGrain) {
      true.B
    } else {
      val lowBitsMask = OneHot.UIntToOH1(lgSize, lgMaxSize)
      val lowerBound = ((paddr >> lgMaxSize) === (last.compare_addr >> lgMaxSize)) &&
        ((~paddr(lgMaxSize-1, 0) & last.compare_addr(lgMaxSize-1, 0)) =/= 0.U)
      val upperBound = ((paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)) &&
        ((compare_addr(lgMaxSize-1, 0) & (paddr(lgMaxSize-1, 0) | lowBitsMask)) =/= 0.U)
      val torAligned = !(lowerBound || upperBound)
      val napotAligned = (lowBitsMask & ~mask(lgMaxSize-1, 0)) === 0.U
      Mux(cfg.na4_napot, napotAligned, torAligned)
    }
  }
}

/** PMPEntry for PMP copies outside the CSR unit,
 * with one more element, mask, to speed up NAPOT matching.
 * TODO: make mask an element, not a method, for timing optimization
 */
@chiselName
class PMPEntry(implicit p: Parameters) extends PMPBase with PMPMatchMethod {
  val mask = UInt(PMPAddrBits.W) // helps match in NAPOT mode

  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked(next), match_mask(paddr), mask)
    Mux(!cfg.addr_locked(next), paddr, addr)
  }

  def write_addr(mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked, match_mask(paddr), mask)
    Mux(!cfg.addr_locked, paddr, addr)
  }

  def gen(cfg: PMPConfig, addr: UInt, mask: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
    this.mask := mask
  }
}
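
// Note on the register mapping below (assuming PMXLEN = 64): each pmpcfg CSR packs
// PMXLEN / 8 = 8 config bytes, and pmpCfgIndex yields 0, 2, 4, ..., so only the even
// pmpcfg addresses are mapped, as the RV64 privileged spec requires.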

trait PMPMethod extends PMPConst {
  def pmp_init(): (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    val cfg = WireInit(0.U.asTypeOf(Vec(NumPMP/8, UInt(PMXLEN.W))))
    val addr = Wire(Vec(NumPMP, UInt((PMPAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(NumPMP, UInt(PMPAddrBits.W)))
    addr := DontCare
    mask := DontCare
    (cfg, addr, mask)
  }

  def pmp_gen_mapping
  (
    init: () => (Vec[UInt], Vec[UInt], Vec[UInt]),
    num: Int = 16,
    cfgBase: Int,
    addrBase: Int,
    entries: Vec[PMPEntry]
  ) = {
    val pmpCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmpCfgIndex(i: Int) = (PMXLEN / 32) * (i / pmpCfgPerCSR)
    val init_value = init()
    /** To fit MaskedRegMap's write interface, declare the configs as merged CSRs and split them per PMP entry. */
    val cfgMerged = RegInit(init_value._1) // Vec(num / pmpCfgPerCSR, UInt(PMXLEN.W))
    val cfgs = WireInit(cfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    val addr = RegInit(init_value._2) // Vec(num, UInt((PMPAddrBits-PMPOffBits).W))
    val mask = RegInit(init_value._3) // Vec(num, UInt(PMPAddrBits.W))

    for (i <- entries.indices) {
      entries(i).gen(cfgs(i), addr(i), mask(i))
    }

    val cfg_mapping = (0 until num by pmpCfgPerCSR).map(i => {Map(
      MaskedRegMap(
        addr = cfgBase + pmpCfgIndex(i),
        reg = cfgMerged(i/pmpCfgPerCSR),
        wmask = WritableMask,
        wfn = new PMPBase().write_cfg_vec(mask, addr, i)
      ))
    }).fold(Map())((a, b) => a ++ b) // merge the per-CSR maps into one map

    val addr_mapping = (0 until num).map(i => {Map(
      MaskedRegMap(
        addr = addrBase + i,
        reg = addr(i),
        wmask = WritableMask,
        // the last entry has no successor, so its lock does not depend on a next config
        wfn = { if (i != num-1) entries(i).write_addr(entries(i+1).cfg, mask(i)) else entries(i).write_addr(mask(i)) },
        rmask = WritableMask,
        rfn = new PMPBase().read_addr(entries(i).cfg)
      ))
    }).fold(Map())((a, b) => a ++ b) // merge the per-address maps into one map

    cfg_mapping ++ addr_mapping
  }
}
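
// The PMP module below consumes CSR writes forwarded over DistributedCSRIO, so these
// PMP/PMA register copies can live outside the CSR module proper. MaskedRegMap.generate
// also produces a read-data wire, which its interface requires but which is left
// unconnected here.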

@chiselName
class PMP(implicit p: Parameters) extends PMPXSModule with HasXSParameter with PMPMethod with PMAMethod with HasCSRConst {
  val io = IO(new Bundle {
    val distribute_csr = Flipped(new DistributedCSRIO())
    val pmp = Output(Vec(NumPMP, new PMPEntry()))
    val pma = Output(Vec(NumPMA, new PMPEntry()))
  })

  val w = io.distribute_csr.w

  val pmp = Wire(Vec(NumPMP, new PMPEntry()))
  val pma = Wire(Vec(NumPMA, new PMPEntry()))

  val pmpMapping = pmp_gen_mapping(pmp_init, NumPMP, PmpcfgBase, PmpaddrBase, pmp)
  val pmaMapping = pmp_gen_mapping(pma_init, NumPMA, PmacfgBase, PmaaddrBase, pma)
  val mapping = pmpMapping ++ pmaMapping

  val rdata = Wire(UInt(PMXLEN.W))
  MaskedRegMap.generate(mapping, w.bits.addr, rdata, w.valid, w.bits.data)

  io.pmp := pmp
  io.pma := pma
}

class PMPReqBundle(lgMaxSize: Int = 3)(implicit p: Parameters) extends PMPBundle {
  val addr = Output(UInt(PMPAddrBits.W))
  val size = Output(UInt(log2Ceil(lgMaxSize+1).W))
  val cmd = Output(TlbCmd())

  def apply(addr: UInt, size: UInt, cmd: UInt): Unit = {
    this.addr := addr
    this.size := size
    this.cmd := cmd
  }

  def apply(addr: UInt): Unit = { // request minimal permission (read) at the max aligned size
    apply(addr, lgMaxSize.U, TlbCmd.read)
  }
}

class PMPRespBundle(implicit p: Parameters) extends PMPBundle {
  val ld = Output(Bool())
  val st = Output(Bool())
  val instr = Output(Bool())
  val mmio = Output(Bool())
  val atomic = Output(Bool())

  def |(resp: PMPRespBundle): PMPRespBundle = {
    val res = Wire(new PMPRespBundle())
    res.ld := this.ld || resp.ld
    res.st := this.st || resp.st
    res.instr := this.instr || resp.instr
    res.mmio := this.mmio || resp.mmio
    res.atomic := this.atomic || resp.atomic
    res
  }
}

trait PMPCheckMethod extends PMPConst {
  def pmp_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd)) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := false.B
    resp.atomic := false.B
    resp
  }

  def pmp_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmpEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmpEntries.size
    require(num == NumPMP)

    val passThrough = if (pmpEntries.isEmpty) true.B else (mode > 1.U)
    val pmpDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    pmpDefault.cfg.r := passThrough
    pmpDefault.cfg.w := passThrough
    pmpDefault.cfg.x := passThrough

    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    pmpEntries.zip(pmpDefault +: pmpEntries.take(num-1)).zipWithIndex.foreach{ case ((pmp, last_pmp), i) =>
      val is_match = pmp.is_match(addr, size, lgMaxSize, last_pmp)
      val ignore = passThrough && !pmp.cfg.l
      val aligned = pmp.aligned(addr, size, lgMaxSize, last_pmp)

      val cur = WireInit(pmp)
      cur.cfg.r := aligned && (pmp.cfg.r || ignore)
      cur.cfg.w := aligned && (pmp.cfg.w || ignore)
      cur.cfg.x := aligned && (pmp.cfg.x || ignore)

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // default value: the appended entry always matches
    match_vec(num) := true.B
    cfg_vec(num) := pmpDefault

    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
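
// Matching sketch: pmp_match_res checks each entry against its predecessor (the TOR
// lower bound) and priority-muxes the lowest-numbered hit. The appended default entry
// always matches and grants r/w/x only in machine mode (mode > 1), implementing the
// spec's "no PMP entry matches" fallback; locked entries still apply in M mode via cfg.l.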

class PMPCheckerEnv(implicit p: Parameters) extends PMPBundle {
  val mode = UInt(2.W)
  val pmp = Vec(NumPMP, new PMPEntry())
  val pma = Vec(NumPMA, new PMPEntry())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry]): Unit = {
    this.mode := mode
    this.pmp := pmp
    this.pma := pma
  }
}

class PMPCheckIO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the request's fire signal
  val resp = new PMPRespBundle()

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}

class PMPCheckv2IO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the request's fire signal
  val resp = Output(new PMPConfig())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}

@chiselName
class PMPChecker
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false,
  pmpUsed: Boolean = true
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckIO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp_pmp = pmp_check(req.cmd, res_pmp.cfg)
  val resp_pma = pma_check(req.cmd, res_pma.cfg)
  val resp = if (pmpUsed) (resp_pmp | resp_pma) else resp_pma

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }
}
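
// Usage sketch (hypothetical instantiation; signal names are illustrative):
//   val checker = Module(new PMPChecker(lgMaxSize = 3, sameCycle = true))
//   checker.io.apply(privMode, csrPmp, csrPma, reqValid, reqPaddr)
//   val loadAccessFault = checker.io.resp.ld
// With sameCycle = false (and leaveHitMux = false) the response is registered and
// arrives one cycle after the request fires.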

/** get config with check: like PMPChecker, but returns the matched PMPConfig
 * instead of exception flags
 */
@chiselName
class PMPCheckerv2
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckv2IO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp = and(res_pmp, res_pma)

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }

  /** the effective permission is the intersection of the PMP and PMA results;
   * the cacheable/atomic attributes come from the PMA side only
   */
  def and(pmp: PMPEntry, pma: PMPEntry): PMPConfig = {
    val tmp_res = Wire(new PMPConfig)
    tmp_res.l := DontCare
    tmp_res.a := DontCare
    tmp_res.r := pmp.cfg.r && pma.cfg.r
    tmp_res.w := pmp.cfg.w && pma.cfg.w
    tmp_res.x := pmp.cfg.x && pma.cfg.x
    tmp_res.c := pma.cfg.c
    tmp_res.atomic := pma.cfg.atomic
    tmp_res
  }
}