xref: /XiangShan/src/main/scala/xiangshan/backend/fu/PMA.scala (revision 066ac8a465b27b54ba22458ff1a67bcd28215d73)
1/***************************************************************************************
2 * Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
3 * Copyright (c) 2020-2021 Peng Cheng Laboratory
4 *
5 * XiangShan is licensed under Mulan PSL v2.
6 * You can use this software according to the terms and conditions of the Mulan PSL v2.
7 * You may obtain a copy of Mulan PSL v2 at:
8 *          http://license.coscl.org.cn/MulanPSL2
9 *
10 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
11 * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
12 * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
13 *
14 * See the Mulan PSL v2 for more details.
15 ***************************************************************************************/
16
17package xiangshan.backend.fu
18
19import chipsalliance.rocketchip.config.Parameters
20import chisel3._
21import chisel3.internal.naming.chiselName
22import chisel3.util._
23import utils.ParallelPriorityMux
24import xiangshan.{HasXSParameter, XSModule}
25import xiangshan.backend.fu.util.HasCSRConst
26import xiangshan.cache.mmu.TlbCmd
27
trait PMAMethod extends HasXSParameter with PMPConst { this: XSModule =>
  /**
  def SimpleMemMapList = List(
      //     Base address      Top address       Width  Description    Mode (RWXIDSAC)
      MemMap("h00_0000_0000", "h00_0FFF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_1000_0000", "h00_1FFF_FFFF",   "h0", "QSPI_Flash",  "RWX"),
      MemMap("h00_2000_0000", "h00_2FFF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_3000_0000", "h00_3000_FFFF",   "h0", "DMA",         "RW"),
      MemMap("h00_3001_0000", "h00_3004_FFFF",   "h0", "GPU",         "RWC"),
      MemMap("h00_3005_0000", "h00_3006_FFFF",   "h0", "USB/SDMMC",   "RW"),
      MemMap("h00_3007_0000", "h00_30FF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_3100_0000", "h00_3111_FFFF",   "h0", "MMIO",        "RW"),
      MemMap("h00_3112_0000", "h00_37FF_FFFF",   "h0", "Reserved",    "RW"),
      MemMap("h00_3800_0000", "h00_3800_FFFF",   "h0", "CLINT",       "RW"),
      MemMap("h00_3801_0000", "h00_3801_FFFF",   "h0", "BEU",         "RW"),
      MemMap("h00_3802_0000", "h00_3802_0FFF",   "h0", "DebugModule", "RWX"),
      MemMap("h00_3802_1000", "h00_3900_0FFF",   "h0", "Reserved",    ""),
      MemMap("h00_3900_1000", "h00_3900_101F",   "h0", "Core_reset",  "RW"),
      MemMap("h00_3900_1020", "h00_39FF_FFFF",   "h0", "Reserved",    ""),
      MemMap("h00_3A00_0000", "h00_3A00_0020",   "h0", "PLL0",        "RW"),
      MemMap("h00_3A00_0020", "h00_3BFF_FFFF",   "h0", "Reserved",    ""),
      MemMap("h00_3C00_0000", "h00_3FFF_FFFF",   "h0", "PLIC",        "RW"),
      MemMap("h00_4000_0000", "h00_7FFF_FFFF",   "h0", "PCIe",        "RW"),
      MemMap("h00_8000_0000", "h0F_FFFF_FFFF",   "h0", "DDR",         "RWXIDSA"),
    )
   */

  /** Builds the power-on (reset) values for the PMA registers.
    *
    * Entries are filled from index `num-1` downward, so lower-indexed entries
    * have higher match priority. According to the memory map above, roughly
    * a dozen entries are needed; the remaining low-index entries are left at
    * their zero-initialized (OFF) value.
    *
    * @return a triple of (merged cfg words, address registers, NAPOT masks):
    *         - cfg:  `num/8` words of `XLEN` bits, the packed PMPConfig fields
    *         - addr: `num` addresses, already right-shifted by `PMPOffBits`
    *         - mask: `num` decoded NAPOT match masks (only meaningful for
    *                 entries with `a === 3` (NAPOT); TOR entries ignore it)
    */
  def pma_init() : (Vec[UInt], Vec[UInt], Vec[UInt]) = {
    // the init value is zero
    // from 0 to num (default 16) - 1, lower index means lower priority is NOT
    // the rule here: entries are consumed from the top (num-1) downwards,
    // leaving the low-index entries unused (OFF).

    val num = NumPMA
    require(num >= 16)
    val cfg = WireInit(0.U.asTypeOf(Vec(num, new PMPConfig())))

    val addr = Wire(Vec(num, UInt((PAddrBits-PMPOffBits).W)))
    // NOTE(fix): mask must be sized by `num` (NumPMA), not NumPMP — addr/cfg
    // use `num`, and mask(idx) is indexed up to num-2 below; sizing it with
    // NumPMP would elaborate out-of-bounds whenever NumPMA > NumPMP.
    val mask = Wire(Vec(num, UInt(PAddrBits.W)))
    addr := DontCare
    mask := DontCare

    var idx = num-1

    // TODO: turn to napot to save entries
    // use tor instead of napot, for napot may be confusing and hard to understand
    // NOTE: all the addr space are default set to DDR, RWXCA
    idx = idx - 1
    addr(idx) := shift_addr(0xFFFFFFFFFL) // all the addr are default ddr, which means rwxca
    cfg(idx).a := 3.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B; cfg(idx).c := true.B; cfg(idx).atomic := true.B
    mask(idx) := match_mask(addr(idx), cfg(idx))
    idx = idx - 1

    // NOTE: (0x0_0000_0000L, 0x0_8000_0000L) are default set to MMIO, only RW
    addr(idx) := get_napot(0x00000000L, 0x80000000L)
    cfg(idx).a := 3.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    mask(idx) := match_mask(addr(idx), cfg(idx))
    idx = idx - 1

    // The entries below are TOR (a === 1): each entry covers the range from
    // the previous (lower) entry's address up to its own address.
    // PLIC: [0x3C00_0000, 0x4000_0000) handled by the TOR pair below.
    addr(idx) := shift_addr(0x3C000000)
    cfg(idx).a := 1.U
    idx = idx - 1

    // PLL0 top: [0x3A00_0000, 0x3A00_0040) is RW device space.
    addr(idx) := shift_addr(0x3A000040)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x3A000000)
    cfg(idx).a := 1.U
    idx = idx - 1

    // Core_reset: [0x3900_1000, 0x3900_1040) is RW.
    addr(idx) := shift_addr(0x39001040)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x39001000)
    cfg(idx).a := 1.U
    idx = idx - 1

    // DebugModule: [0x3802_0000, 0x3802_1000) is RWX (executable).
    addr(idx) := shift_addr(0x38021000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B
    idx = idx - 1

    addr(idx) := shift_addr(0x38020000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    // GPU: [0x3001_0000, 0x3005_0000) is RW + cacheable (c).
    addr(idx) := shift_addr( 0x30050000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).c := true.B
    idx = idx - 1

    addr(idx) := shift_addr( 0x30010000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    // QSPI_Flash: [0x1000_0000, 0x2000_0000) is RWX.
    addr(idx) := shift_addr( 0x20000000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B; cfg(idx).x := true.B
    idx = idx - 1

    addr(idx) := shift_addr( 0x10000000)
    cfg(idx).a := 1.U; cfg(idx).r := true.B; cfg(idx).w := true.B
    idx = idx - 1

    // Lowest TOR base: address 0, no permissions (cfg stays OFF/zero).
    addr(idx) := shift_addr(0)

    require(idx >= 0)

    // Pack the per-entry configs into num/8 XLEN-bit CSR words.
    val cfgInitMerge = cfg.asTypeOf(Vec(num/8, UInt(XLEN.W)))
    (cfgInitMerge, addr, mask)
  }

  /** Encodes a base/range pair as a NAPOT pmpaddr value.
    *
    * Both `base` and `range` must be aligned to the platform grain
    * (2^PlatformGrain bytes); the encoded value is (base + range/2 - 1) >> 2,
    * per the RISC-V privileged-spec NAPOT address encoding.
    */
  def get_napot(base: BigInt, range: BigInt) = {
    val PlatformGrainBytes = (1 << PlatformGrain)
    // NOTE(fix): the old two-argument println relied on deprecated
    // auto-tupling and printed "(base:%x,<n>)" instead of a hex value.
    if ((base % PlatformGrainBytes) != 0) {
      println(s"base: 0x${base.toString(16)}")
    }
    if ((range % PlatformGrainBytes) != 0) {
      println(s"range: 0x${range.toString(16)}")
    }
    require((base % PlatformGrainBytes) == 0)
    require((range % PlatformGrainBytes) == 0)

    ((base + (range/2 - 1)) >> PMPOffBits).U
  }

  /** Decodes the NAPOT match mask from an (already shifted) pmpaddr value.
    *
    * The trailing-ones run of `paddr` (extended by cfg.a(0) and the grain
    * floor) determines the region size; the low PMPOffBits are always masked.
    */
  def match_mask(paddr: UInt, cfg: PMPConfig) = {
    val match_mask_addr: UInt = Cat(paddr, cfg.a(0)).asUInt() | (((1 << PlatformGrain) - 1) >> PMPOffBits).U((paddr.getWidth + 1).W)
    Cat(match_mask_addr & ~(match_mask_addr + 1.U), ((1 << PMPOffBits) - 1).U(PMPOffBits.W))
  }

  /** Converts a byte address to pmpaddr form (drop the low 2 bits). */
  def shift_addr(addr: BigInt) = {
    (addr >> 2).U
  }
}
161
trait PMACheckMethod extends HasXSParameter with HasCSRConst { this: PMPChecker =>
  /** Translates a matched PMA entry's attribute bits into an access response.
    *
    * @param cmd TLB command (read / write / exec / atomic, per TlbCmd)
    * @param cfg the attribute config of the matched PMA entry
    * @return a PMPRespBundle with per-access-type fault bits and the mmio flag
    */
  def pma_check(cmd: UInt, cfg: PMPConfig) = {
    val resp = Wire(new PMPRespBundle)
    // Load fault: a plain (non-atomic) read to a region without read permission.
    resp.ld := TlbCmd.isRead(cmd) && !TlbCmd.isAtom(cmd) && !cfg.r
    // Store fault: a write, or an atomic op to an atomic-capable region, when
    // the region is not writable.
    // NOTE(review): `&&` binds tighter than `||`, so this reads
    // isWrite || (isAtom && cfg.atomic) — confirm that grouping is intended.
    resp.st := (TlbCmd.isWrite(cmd) || TlbCmd.isAtom(cmd) && cfg.atomic) && !cfg.w
    // Instruction fault: fetch from a region without execute permission.
    resp.instr := TlbCmd.isExec(cmd) && !cfg.x
    // Non-cacheable regions are treated as MMIO.
    resp.mmio := !cfg.c
    resp
  }

  /** Selects the PMA entry matching `addr` by priority.
    *
    * Unlike PMP, PMA checking can never be skipped: it carries attributes
    * (cacheable, atomic) rather than protection, so a default entry always
    * participates in the match.
    *
    * @param leaveHitMux when true, register the match/cfg vectors (gated by
    *                    `valid`) before the priority mux, adding one cycle
    * @param valid       enable for the registered variant
    * @param addr        physical address to check
    * @param size        access size
    * @param pmaEntries  the PMA entry vector; must have exactly NumPMA entries
    * @param mode        privilege mode (unused here but kept for a uniform
    *                    signature with the PMP path)
    * @param lgMaxSize   log2 of the maximum access size
    * @return the highest-priority matching PMPEntry, with r/w/x/atomic/c
    *         attributes additionally qualified by the alignment check
    */
  def pma_match_res(leaveHitMux: Boolean = false, valid: Bool = true.B)(
    addr: UInt,
    size: UInt,
    pmaEntries: Vec[PMPEntry],
    mode: UInt,
    lgMaxSize: Int
  ) = {
    val num = pmaEntries.size
    require(num == NumPMA)
    // pma should always be checked, could not be ignored
    // like amo and cached, it is the attribute not protection
    // so it must have initialization.
    require(!pmaEntries.isEmpty)

    // All-zero fallback entry: used both as the TOR "previous entry" for
    // index 0 and as the lowest-priority default result.
    val pmaDefault = WireInit(0.U.asTypeOf(new PMPEntry()))
    val match_vec = Wire(Vec(num+1, Bool()))
    val cfg_vec = Wire(Vec(num+1, new PMPEntry()))

    // Pair each entry with its predecessor (TOR ranges need the previous
    // entry's address as the lower bound; entry 0 pairs with the default).
    pmaEntries.zip(pmaDefault +: pmaEntries.take(num-1)).zipWithIndex.foreach{ case ((pma, last_pma), i) =>
      val is_match = pma.is_match(addr, size, lgMaxSize, last_pma)
      val aligned = pma.aligned(addr, size, lgMaxSize, last_pma)

      // Copy the entry, but strip every attribute when the access is not
      // properly aligned within the region.
      val cur = WireInit(pma)
      cur.cfg.r := aligned && pma.cfg.r
      cur.cfg.w := aligned && pma.cfg.w
      cur.cfg.x := aligned && pma.cfg.x
      cur.cfg.atomic := aligned && pma.cfg.atomic
      cur.cfg.c := aligned && pma.cfg.c

      match_vec(i) := is_match
      cfg_vec(i) := cur
    }

    // Slot num always matches, so the default entry wins when nothing else does.
    match_vec(num) := true.B
    cfg_vec(num) := pmaDefault
    if (leaveHitMux) {
      // Registered variant: latch match bits (reset to false) and configs
      // when `valid`, then mux — moves the priority mux after a pipe stage.
      ParallelPriorityMux(match_vec.map(RegEnable(_, init = false.B, valid)), RegEnable(cfg_vec, valid))
    } else {
      ParallelPriorityMux(match_vec, cfg_vec)
    }
  }
}
214