xref: /XiangShan/src/main/scala/xiangshan/backend/fu/PMP.scala (revision 935edac446654a1880ac0112b2380315b5368504)
/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

// See LICENSE.SiFive for license details.

package xiangshan.backend.fu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utility.MaskedRegMap.WritableMask
import xiangshan._
import xiangshan.backend.fu.util.HasCSRConst
import utils._
import utility._
import xiangshan.cache.mmu.{TlbCmd, TlbExceptionBundle}

trait PMPConst extends HasPMParameters {
  val PMPOffBits = 2 // a PMP region is at least 4 bytes
  val CoarserGrain: Boolean = PlatformGrain > PMPOffBits
}

abstract class PMPBundle(implicit val p: Parameters) extends Bundle with PMPConst
abstract class PMPModule(implicit val p: Parameters) extends Module with PMPConst
abstract class PMPXSModule(implicit p: Parameters) extends XSModule with PMPConst

class PMPConfig(implicit p: Parameters) extends PMPBundle {
  val l = Bool()
  val c = Bool() // res(1), unused in PMP
  val atomic = Bool() // res(0), unused in PMP
  val a = UInt(2.W)
  val x = Bool()
  val w = Bool()
  val r = Bool()

  def res: UInt = Cat(c, atomic) // reserved bits, unused in PMP
  def off = a === 0.U
  def tor = a === 1.U
  def na4 = { if (CoarserGrain) false.B else a === 2.U }
  def napot = { if (CoarserGrain) a(1).asBool else a === 3.U }
  def off_tor = !a(1)
  def na4_napot = a(1)

  def locked = l
  def addr_locked: Bool = locked
  def addr_locked(next: PMPConfig): Bool = locked || (next.locked && next.tor)
}
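
/* For reference, the pmpNcfg byte layout this bundle mirrors (RISC-V privileged
 * spec; the two reserved bits are named c/atomic here, and the first Bundle
 * field is the MSB when viewed asUInt):
 *
 *   bit 7    | bits 6:5             | bits 4:3 | bit 2 | bit 1 | bit 0
 *   L (lock) | reserved (c, atomic) | A (mode) | X     | W     | R
 *
 *   A: 0 = OFF, 1 = TOR, 2 = NA4, 3 = NAPOT
 */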

object PMPConfigUInt {
  def apply(
    l: Boolean = false,
    c: Boolean = false,
    atomic: Boolean = false,
    a: Int = 0,
    x: Boolean = false,
    w: Boolean = false,
    r: Boolean = false)(implicit p: Parameters): UInt = {
    var config = 0
    if (l) { config += (1 << 7) }
    if (c) { config += (1 << 6) }
    if (atomic) { config += (1 << 5) }
    if (a > 0) { config += (a << 3) }
    if (x) { config += (1 << 2) }
    if (w) { config += (1 << 1) }
    if (r) { config += (1 << 0) }
    config.U(8.W)
  }
}
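
/* Usage sketch: a NAPOT entry with full permissions encodes as
 *   PMPConfigUInt(a = 3, x = true, w = true, r = true)
 * which is (3 << 3) + 4 + 2 + 1 = 0x1f.
 */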

trait PMPReadWriteMethodBare extends PMPConst {
  def match_mask(cfg: PMPConfig, paddr: UInt) = {
    val match_mask_c_addr = Cat(paddr, cfg.a(0)) | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_c_addr & ~(match_mask_c_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }
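
  /* Worked example of the trailing-ones trick, assuming PlatformGrain = 4:
   * for a NAPOT entry with pmpaddr = 0b...0111 (three trailing ones, a(0) = 1),
   * Cat(paddr, a(0)) = 0b...01111; x & ~(x + 1) keeps exactly the run of
   * trailing ones, and appending PMPOffBits ones gives mask = 0b111111,
   * i.e. a naturally aligned 2^(3+3) = 64-byte region.
   */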

  def write_cfg_vec(mask: Vec[UInt], addr: Vec[UInt], index: Int)(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_m_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_m_tmp
      when (!cfg_w_m_tmp.l) {
        cfgVec(i).w := cfg_w_m_tmp.w && cfg_w_m_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_m_tmp.a(1), cfg_w_m_tmp.a.orR) }
        when (cfgVec(i).na4_napot) {
          mask(index + i) := match_mask(cfgVec(i), addr(index + i))
        }
      }
    }
    cfgVec.asUInt
  }

  def read_addr(cfg: PMPConfig)(addr: UInt): UInt = {
    val G = PlatformGrain - PMPOffBits
    require(G >= 0)
    if (G == 0) {
      addr
    } else if (G >= 2) {
      Mux(cfg.na4_napot, set_low_bits(addr, G-1), clear_low_bits(addr, G))
    } else { // G is 1
      Mux(cfg.off_tor, clear_low_bits(addr, G), addr)
    }
  }
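
  /* Readback sketch, assuming PlatformGrain = 12 (so G = 10): a NAPOT entry
   * reads back with its low G-1 bits forced to one (pmpaddr[G-2:0] read as
   * all ones per the spec), while an OFF/TOR entry reads back with its low
   * G bits forced to zero.
   */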

  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt, cfg: PMPConfig, addr: UInt): UInt = {
    val locked = cfg.addr_locked(next)
    mask := Mux(!locked, match_mask(cfg, paddr), mask)
    Mux(!locked, paddr, addr)
  }

  def set_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    data | ((1 << num)-1).U
  }

  /** clear the low `num` bits (lsb) of data */
  def clear_low_bits(data: UInt, num: Int): UInt = {
    require(num >= 0)
    // use Cat instead of AND-ing a mask to avoid signal-width problems
    if (num == 0) { data }
    else { Cat(data(data.getWidth-1, num), 0.U(num.W)) }
  }
}

trait PMPReadWriteMethod extends PMPReadWriteMethodBare { this: PMPBase =>
  def write_cfg_vec(cfgs: UInt): UInt = {
    val cfgVec = Wire(Vec(cfgs.getWidth/8, new PMPConfig))
    for (i <- cfgVec.indices) {
      val cfg_w_tmp = cfgs((i+1)*8-1, i*8).asUInt.asTypeOf(new PMPConfig)
      cfgVec(i) := cfg_w_tmp
      when (!cfg_w_tmp.l) {
        cfgVec(i).w := cfg_w_tmp.w && cfg_w_tmp.r
        if (CoarserGrain) { cfgVec(i).a := Cat(cfg_w_tmp.a(1), cfg_w_tmp.a.orR) }
      }
    }
    cfgVec.asUInt
  }
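
  /* Write-legalization sketch: for an unlocked entry, W without R is reserved,
   * so writing cfg = 0x02 (W only) stores W = 0. With a coarse grain
   * (CoarserGrain), the A field is remapped as Cat(a(1), a.orR), so an
   * attempted NA4 write (a = 2) is stored as NAPOT (a = 3) while OFF and TOR
   * are preserved.
   */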

  /** In general, the PMP grain is 2**(G+2) bytes. When G >= 1, NA4 is not selectable.
   * When G >= 2 and cfg.a(1) is set (NAPOT mode), bits addr(G-2, 0) read as all ones.
   * When G >= 1 and cfg.a(1) is clear (OFF or TOR mode), bits addr(G-1, 0) read as all zeros.
   * The low OffBits are dropped.
   */
  def read_addr(): UInt = {
    read_addr(cfg)(addr)
  }

  /** addr: the stored address, with the low OffBits dropped.
   * compare_addr: the stored address, expanded for comparison.
   * paddr: the external physical address.
   */
  def write_addr(next: PMPConfig)(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked(next), paddr, addr)
  }
  def write_addr(paddr: UInt): UInt = {
    Mux(!cfg.addr_locked, paddr, addr)
  }
}
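
/* Lock-semantics sketch: an entry's address register is write-protected when
 * its own L bit is set, or when the *next* entry is locked in TOR mode (the
 * next entry's lower bound depends on this address). E.g. if pmp1cfg has
 * L = 1 and A = TOR, writes to both pmpaddr1 and pmpaddr0 are ignored.
 */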

/** PMPBase for the CSR unit,
  * with only read and write logic
  */
class PMPBase(implicit p: Parameters) extends PMPBundle with PMPReadWriteMethod {
  val cfg = new PMPConfig
  val addr = UInt((PMPAddrBits - PMPOffBits).W)

  def gen(cfg: PMPConfig, addr: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
  }
}

trait PMPMatchMethod extends PMPConst { this: PMPEntry =>
  /** compare_addr is used to compare with the input addr */
  def compare_addr: UInt = ((addr << PMPOffBits) & ~(((1 << PlatformGrain) - 1).U(PMPAddrBits.W))).asUInt
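
  /* Sketch: compare_addr shifts the stored addr back to a byte address and
   * clears the low PlatformGrain bits. E.g. with PlatformGrain = 12,
   * addr = 0x803ff gives (0x803ff << 2) & ~0xfff = 0x200000.
   */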

  /** size and lgMaxSize are both log2 of the access size.
   * For the dtlb, the max size is PMXLEN in bytes, which is 8.
   * For the itlb and ptw, the max size is log2(512)?
   * But we may only need the 64 bytes? How to prevent the bugs?
   * TODO: handle the special case where itlb & ptw & dcache access a wider size than PMXLEN
   */
  def is_match(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    Mux(cfg.na4_napot, napotMatch(paddr, lgSize, lgMaxSize),
      Mux(cfg.tor, torMatch(paddr, lgSize, lgMaxSize, last_pmp), false.B))
  }

  /** generate a match mask to help matching in NAPOT mode */
  def match_mask(paddr: UInt): UInt = {
    match_mask(cfg, paddr)
  }

  def boundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    if (lgMaxSize <= PlatformGrain) {
      (paddr < compare_addr)
    } else {
      val highLess = (paddr >> lgMaxSize) < (compare_addr >> lgMaxSize)
      val highEqual = (paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)
      val lowLess = (paddr(lgMaxSize-1, 0) | OneHot.UIntToOH1(lgSize, lgMaxSize)) < compare_addr(lgMaxSize-1, 0)
      highLess || (highEqual && lowLess)
    }
  }

  def lowerBoundMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int): Bool = {
    !boundMatch(paddr, lgSize, lgMaxSize)
  }

  def higherBoundMatch(paddr: UInt, lgMaxSize: Int) = {
    boundMatch(paddr, 0.U, lgMaxSize)
  }

  def torMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last_pmp: PMPEntry): Bool = {
    last_pmp.lowerBoundMatch(paddr, lgSize, lgMaxSize) && higherBoundMatch(paddr, lgMaxSize)
  }
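
  /* TOR-match sketch: entry i matches when the access lies in
   *   [pmpaddr(i-1) << 2, pmpaddr(i) << 2).
   * E.g. with pmpaddr(i-1) = 0x10000 and pmpaddr(i) = 0x20000, the entry
   * covers [0x40000, 0x80000). The high/low split on lgMaxSize lets a wide
   * access (footprint via UIntToOH1) be checked against both bounds at once.
   */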

  def unmaskEqual(a: UInt, b: UInt, m: UInt) = {
    (a & ~m) === (b & ~m)
  }

  def napotMatch(paddr: UInt, lgSize: UInt, lgMaxSize: Int) = {
    if (lgMaxSize <= PlatformGrain) {
      unmaskEqual(paddr, compare_addr, mask)
    } else {
      val lowMask = mask | OneHot.UIntToOH1(lgSize, lgMaxSize)
      val highMatch = unmaskEqual(paddr >> lgMaxSize, compare_addr >> lgMaxSize, mask >> lgMaxSize)
      val lowMatch = unmaskEqual(paddr(lgMaxSize-1, 0), compare_addr(lgMaxSize-1, 0), lowMask(lgMaxSize-1, 0))
      highMatch && lowMatch
    }
  }
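
  /* NAPOT-match sketch: with the 64-byte region from the match_mask example
   * (mask = 0b111111), an access matches iff paddr and compare_addr agree on
   * every bit outside the mask; bits covered by the mask (and by the access
   * footprint, via UIntToOH1) are ignored.
   */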

  def aligned(paddr: UInt, lgSize: UInt, lgMaxSize: Int, last: PMPEntry) = {
    if (lgMaxSize <= PlatformGrain) {
      true.B
    } else {
      val lowBitsMask = OneHot.UIntToOH1(lgSize, lgMaxSize)
      val lowerBound = ((paddr >> lgMaxSize) === (last.compare_addr >> lgMaxSize)) &&
        ((~paddr(lgMaxSize-1, 0) & last.compare_addr(lgMaxSize-1, 0)) =/= 0.U)
      val upperBound = ((paddr >> lgMaxSize) === (compare_addr >> lgMaxSize)) &&
        ((compare_addr(lgMaxSize-1, 0) & (paddr(lgMaxSize-1, 0) | lowBitsMask)) =/= 0.U)
      val torAligned = !(lowerBound || upperBound)
      val napotAligned = (lowBitsMask & ~mask(lgMaxSize-1, 0)) === 0.U
      Mux(cfg.na4_napot, napotAligned, torAligned)
    }
  }
}

/** PMPEntry for the PMP copies outside the CSR unit,
  * with one more element, mask, to help NAPOT matching.
  * TODO: make mask an element, not a method, for timing optimization
  */
class PMPEntry(implicit p: Parameters) extends PMPBase with PMPMatchMethod {
  val mask = UInt(PMPAddrBits.W) // helps to match in NAPOT mode

  def write_addr(next: PMPConfig, mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked(next), match_mask(paddr), mask)
    Mux(!cfg.addr_locked(next), paddr, addr)
  }

  def write_addr(mask: UInt)(paddr: UInt) = {
    mask := Mux(!cfg.addr_locked, match_mask(paddr), mask)
    Mux(!cfg.addr_locked, paddr, addr)
  }

  def gen(cfg: PMPConfig, addr: UInt, mask: UInt) = {
    require(addr.getWidth == this.addr.getWidth)
    this.cfg := cfg
    this.addr := addr
    this.mask := mask
  }
}

trait PMPMethod extends PMPConst {
  def pmp_init(): (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    val cfg = WireInit(0.U.asTypeOf(Vec(NumPMP/8, UInt(PMXLEN.W))))
    val addr = Wire(Vec(NumPMP, UInt((PMPAddrBits-PMPOffBits).W)))
    val mask = Wire(Vec(NumPMP, UInt(PMPAddrBits.W)))
    addr := DontCare
    mask := DontCare
    (cfg, addr, mask)
  }

  def pmp_gen_mapping
  (
    init: () => (Vec[UInt], Vec[UInt], Vec[UInt]),
    num: Int = 16,
    cfgBase: Int,
    addrBase: Int,
    entries: Vec[PMPEntry]
  ) = {
    val pmpCfgPerCSR = PMXLEN / new PMPConfig().getWidth
    def pmpCfgIndex(i: Int) = (PMXLEN / 32) * (i / pmpCfgPerCSR)
    val init_value = init()
    /** to fit MaskedRegMap's write, declare the cfgs as merged CSRs and split them into each PMP entry */
    val cfgMerged = RegInit(init_value._1) // Vec(num / pmpCfgPerCSR, UInt(PMXLEN.W))
    val cfgs = WireInit(cfgMerged).asTypeOf(Vec(num, new PMPConfig()))
    val addr = RegInit(init_value._2) // Vec(num, UInt((PMPAddrBits-PMPOffBits).W))
    val mask = RegInit(init_value._3) // Vec(num, UInt(PMPAddrBits.W))

    for (i <- entries.indices) {
      entries(i).gen(cfgs(i), addr(i), mask(i))
    }

    val cfg_mapping = (0 until num by pmpCfgPerCSR).map(i => {Map(
      MaskedRegMap(
        addr = cfgBase + pmpCfgIndex(i),
        reg = cfgMerged(i/pmpCfgPerCSR),
        wmask = WritableMask,
        wfn = new PMPBase().write_cfg_vec(mask, addr, i)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code; improvements welcome

    val addr_mapping = (0 until num).map(i => {Map(
      MaskedRegMap(
        addr = addrBase + i,
        reg = addr(i),
        wmask = WritableMask,
        wfn = { if (i != num-1) entries(i).write_addr(entries(i+1).cfg, mask(i)) else entries(i).write_addr(mask(i)) },
        rmask = WritableMask,
        rfn = new PMPBase().read_addr(entries(i).cfg)
      ))
    }).fold(Map())((a, b) => a ++ b) // ugly code; improvements welcome

    cfg_mapping ++ addr_mapping
  }
}
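
/* Address-layout sketch, assuming PMXLEN = 64: pmpCfgPerCSR = 8 and
 * pmpCfgIndex(i) = 2 * (i / 8), so only the even pmpcfg CSRs (pmpcfg0,
 * pmpcfg2, ...) are mapped, matching RV64 where the odd pmpcfg registers
 * do not exist. Each pmpaddr CSR is mapped at addrBase + i.
 */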

class PMP(implicit p: Parameters) extends PMPXSModule with HasXSParameter with PMPMethod with PMAMethod with HasCSRConst {
  val io = IO(new Bundle {
    val distribute_csr = Flipped(new DistributedCSRIO())
    val pmp = Output(Vec(NumPMP, new PMPEntry()))
    val pma = Output(Vec(NumPMA, new PMPEntry()))
  })

  val w = io.distribute_csr.w

  val pmp = Wire(Vec(NumPMP, new PMPEntry()))
  val pma = Wire(Vec(NumPMA, new PMPEntry()))

  val pmpMapping = pmp_gen_mapping(pmp_init, NumPMP, PmpcfgBase, PmpaddrBase, pmp)
  val pmaMapping = pmp_gen_mapping(pma_init, NumPMA, PmacfgBase, PmaaddrBase, pma)
  val mapping = pmpMapping ++ pmaMapping

  val rdata = Wire(UInt(PMXLEN.W))
  MaskedRegMap.generate(mapping, w.bits.addr, rdata, w.valid, w.bits.data)

  io.pmp := pmp
  io.pma := pma
}

class PMPReqBundle(lgMaxSize: Int = 3)(implicit p: Parameters) extends PMPBundle {
  val addr = Output(UInt(PMPAddrBits.W))
  val size = Output(UInt(log2Ceil(lgMaxSize+1).W))
  val cmd = Output(TlbCmd())

  def apply(addr: UInt, size: UInt, cmd: UInt): Unit = {
    this.addr := addr
    this.size := size
    this.cmd := cmd
  }

  def apply(addr: UInt): Unit = { // request minimal permission with the aligned max size
    apply(addr, lgMaxSize.U, TlbCmd.read)
  }
}

class PMPRespBundle(implicit p: Parameters) extends PMPBundle {
  val ld = Output(Bool())
  val st = Output(Bool())
  val instr = Output(Bool())
  val mmio = Output(Bool())
  val atomic = Output(Bool())

  def |(resp: PMPRespBundle): PMPRespBundle = {
    val res = Wire(new PMPRespBundle())
    res.ld := this.ld || resp.ld
    res.st := this.st || resp.st
    res.instr := this.instr || resp.instr
    res.mmio := this.mmio || resp.mmio
    res.atomic := this.atomic || resp.atomic
    res
  }
}

trait PMPCheckMethod extends PMPConst {
  def pmp_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAmo(cmd) && !cfg.r
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAmo(cmd)) && !cfg.w
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    resp.mmio := false.B
    resp.atomic := false.B
    resp
  }

  def pmp_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmpEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmpEntries.size
    require(num == NumPMP)

    val passThrough = if (pmpEntries.isEmpty) true.B else (mode > 1.U)
    val pmpDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    pmpDefault.cfg.r := passThrough
    pmpDefault.cfg.w := passThrough
    pmpDefault.cfg.x := passThrough

    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    pmpEntries.zip(pmpDefault +: pmpEntries.take(num-1)).zipWithIndex.foreach{ case ((pmp, last_pmp), i) =>
      val is_match = pmp.is_match(addr, size, lgMaxSize, last_pmp)
      val ignore = passThrough && !pmp.cfg.l
      val aligned = pmp.aligned(addr, size, lgMaxSize, last_pmp)

      val cur = WireInit(pmp)
      cur.cfg.r := aligned && (pmp.cfg.r || ignore)
      cur.cfg.w := aligned && (pmp.cfg.w || ignore)
      cur.cfg.x := aligned && (pmp.cfg.x || ignore)

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // default value
    match_vec(num) := true.B
    cfg_vec(num) := pmpDefault

    if (leaveHitMux) {
      ParallelPriorityMux(match_vec.map(RegEnable(_, false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
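
/* Priority sketch: pmp_match_res is a priority mux over entries 0..num-1 plus
 * a default at index num, so the lowest-numbered matching entry wins, as the
 * spec requires. In M-mode (mode > 1), passThrough makes unlocked entries and
 * the default grant full permissions; only locked entries still constrain
 * M-mode accesses.
 */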

class PMPCheckerEnv(implicit p: Parameters) extends PMPBundle {
  val mode = UInt(2.W)
  val pmp = Vec(NumPMP, new PMPEntry())
  val pma = Vec(NumPMA, new PMPEntry())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry]): Unit = {
    this.mode := mode
    this.pmp := pmp
    this.pma := pma
  }
}

class PMPCheckIO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the fire signal
  val resp = new PMPRespBundle()

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}

class PMPCheckv2IO(lgMaxSize: Int)(implicit p: Parameters) extends PMPBundle {
  val check_env = Input(new PMPCheckerEnv())
  val req = Flipped(Valid(new PMPReqBundle(lgMaxSize))) // usage: drive valid with the fire signal
  val resp = Output(new PMPConfig())

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], req: Valid[PMPReqBundle]) = {
    check_env.apply(mode, pmp, pma)
    this.req := req
    resp
  }

  def req_apply(valid: Bool, addr: UInt): Unit = {
    this.req.valid := valid
    this.req.bits.apply(addr)
  }

  def apply(mode: UInt, pmp: Vec[PMPEntry], pma: Vec[PMPEntry], valid: Bool, addr: UInt) = {
    check_env.apply(mode, pmp, pma)
    req_apply(valid, addr)
    resp
  }
}

class PMPChecker
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false,
  pmpUsed: Boolean = true
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckIO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp_pmp = pmp_check(req.cmd, res_pmp.cfg)
  val resp_pma = pma_check(req.cmd, res_pma.cfg)
  val resp = if (pmpUsed) (resp_pmp | resp_pma) else resp_pma

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }
}
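
/* Wiring sketch (hypothetical client), assuming a PMP instance `pmp`, the
 * current privilege `mode`, and request signals `reqValid`/`reqAddr` are in
 * scope:
 *
 * {{{
 *   val checker = Module(new PMPChecker(lgMaxSize = 3, sameCycle = true))
 *   val resp = checker.io.apply(mode, pmp.io.pmp, pmp.io.pma, reqValid, reqAddr)
 *   // resp.ld / resp.st / resp.instr flag load/store/fetch access faults
 * }}}
 */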

/* get the resulting config after checking */
class PMPCheckerv2
(
  lgMaxSize: Int = 3,
  sameCycle: Boolean = false,
  leaveHitMux: Boolean = false
)(implicit p: Parameters) extends PMPModule
  with PMPCheckMethod
  with PMACheckMethod
{
  require(!(leaveHitMux && sameCycle))
  val io = IO(new PMPCheckv2IO(lgMaxSize))

  val req = io.req.bits

  val res_pmp = pmp_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pmp, io.check_env.mode, lgMaxSize)
  val res_pma = pma_match_res(leaveHitMux, io.req.valid)(req.addr, req.size, io.check_env.pma, io.check_env.mode, lgMaxSize)

  val resp = and(res_pmp, res_pma)

  if (sameCycle || leaveHitMux) {
    io.resp := resp
  } else {
    io.resp := RegEnable(resp, io.req.valid)
  }

  def and(pmp: PMPEntry, pma: PMPEntry): PMPConfig = {
    val tmp_res = Wire(new PMPConfig)
    tmp_res.l := DontCare
    tmp_res.a := DontCare
    tmp_res.r := pmp.cfg.r && pma.cfg.r
    tmp_res.w := pmp.cfg.w && pma.cfg.w
    tmp_res.x := pmp.cfg.x && pma.cfg.x
    tmp_res.c := pma.cfg.c
    tmp_res.atomic := pma.cfg.atomic
    tmp_res
  }
}