/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache.mmu

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import chisel3.internal.naming.chiselName
import xiangshan._
import xiangshan.cache.{HasDCacheParameters, MemoryOpConstants}
import utils._
import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._

/* The miss queue does not care about duplicate requests; deduplication is done by PtwFilter.
 * PtwMissQueue is just a Chisel Queue with flush support.
 */

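// One entry per outstanding miss. vpn and source identify the request; ppn is
// the base ppn used for the final memory access (taken from in.l3 at enqueue);
// wait_id links a duplicate request to the entry whose memory access it reuses.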
class L2TlbMQEntry(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val vpn = UInt(vpnLen.W)
  val source = UInt(bPtwWidth.W)
  val ppn = UInt(ppnLen.W)
  val wait_id = UInt(log2Up(MSHRSize).W)
}

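// Enqueue bundle. l3.valid means the base of the last-level page table is
// already known (carried in l3.bits), so the entry may access memory directly;
// otherwise it must first go back through the cache for the upper-level PTEs.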
class L2TlbMQInBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val vpn = Output(UInt(vpnLen.W))
  val source = Output(UInt(bPtwWidth.W))
  val l3 = Valid(Output(UInt(PAddrBits.W)))
}

class L2TlbMQCacheBundle(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val vpn = Output(UInt(vpnLen.W))
  val source = Output(UInt(bPtwWidth.W))
}

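// in: new misses; cache: retries sent back to the page table cache; out:
// finished requests returned to the requestor. mem issues the PTE fetches:
// req_mask marks ids with a memory access already in flight, and refill_vpn
// reports the vpn of the line being refilled.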
class L2TlbMQIO(implicit p: Parameters) extends XSBundle with HasPtwConst {
  val in = Flipped(Decoupled(new L2TlbMQInBundle()))
  val sfence = Input(new SfenceBundle)
  val cache = Decoupled(new L2TlbMQCacheBundle())
  val fsm_done = Input(Bool())
  val out = DecoupledIO(new Bundle {
    val source = Output(UInt(bPtwWidth.W))
    val id = Output(UInt(bMemID.W))
    val vpn = Output(UInt(vpnLen.W))
  })
  val mem = new Bundle {
    val req = DecoupledIO(new L2TlbMemReqBundle())
    val resp = Flipped(Valid(new Bundle {
      val id = Output(UInt(log2Up(MSHRSize).W))
    }))

    val refill_vpn = Output(UInt(vpnLen.W))
    val req_mask = Input(Vec(MSHRSize, Bool()))
  }
}

@chiselName
class L2TlbMissQueue(implicit p: Parameters) extends XSModule with HasPtwConst {
  val io = IO(new L2TlbMQIO())

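  // Per-entry state machine:
  //   state_idle:        entry is free
  //   state_cache_high:  wait to re-visit the cache for upper-level PTEs (gated on fsm_done)
  //   state_cache_low:   wait to re-visit the cache for the leaf PTE
  //   state_cache_next:  one-cycle relay into state_cache_low so the refill can land first
  //   state_mem_req:     wait to send the memory request
  //   state_mem_waiting: request sent, wait for the response
  //   state_mem_out:     response received, wait to hand the result back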
  val entries = Reg(Vec(MSHRSize, new L2TlbMQEntry()))
  val state_idle :: state_cache_high :: state_cache_low :: state_cache_next :: state_mem_req :: state_mem_waiting :: state_mem_out :: Nil = Enum(7)
  val state = RegInit(VecInit(Seq.fill(MSHRSize)(state_idle)))
  val is_emptys = state.map(_ === state_idle)
  val is_caches_high = state.map(_ === state_cache_high)
  val is_caches_low = state.map(_ === state_cache_low)
  val is_mems = state.map(_ === state_mem_req)
  val is_waiting = state.map(_ === state_mem_waiting)
  val is_having = state.map(_ === state_mem_out)
  val is_cache_next = state.map(_ === state_cache_next)

  val full = !ParallelOR(is_emptys).asBool()
  val enq_ptr = ParallelPriorityEncoder(is_emptys)
  val cache_high_ptr = ParallelPriorityEncoder(is_caches_high)
  val cache_low_ptr = ParallelPriorityEncoder(is_caches_low)

  val cache_arb = Module(new Arbiter(new L2TlbMQCacheBundle(), 2))
  cache_arb.io.in(0).valid := Cat(is_caches_high).orR && io.fsm_done // held while the fsm is busy: the required l1/l2 pte is not ready yet
  cache_arb.io.in(0).bits.vpn := entries(cache_high_ptr).vpn
  cache_arb.io.in(0).bits.source := entries(cache_high_ptr).source
  cache_arb.io.in(1).valid := Cat(is_caches_low).orR
  cache_arb.io.in(1).bits.vpn := entries(cache_low_ptr).vpn
  cache_arb.io.in(1).bits.source := entries(cache_low_ptr).source
  cache_arb.io.out.ready := io.cache.ready
  val cache_ptr = Mux(cache_arb.io.chosen === 0.U, cache_high_ptr, cache_low_ptr)

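  // Memory requests are round-robin arbitrated among entries in state_mem_req,
  // skipping ids that already have an access in flight (io.mem.req_mask).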
  val mem_ptr = ParallelPriorityEncoder(is_having)
  val mem_arb = Module(new RRArbiter(new L2TlbMQEntry(), MSHRSize))
  for (i <- 0 until MSHRSize) {
    mem_arb.io.in(i).bits := entries(i)
    mem_arb.io.in(i).valid := is_mems(i) && !io.mem.req_mask(i)
  }

  // duplicate requests to the same PTE line
  // to_wait: an earlier request is still accessing (or waiting on) memory; enqueue in state_mem_waiting, linked by wait_id
  // to_cache: the earlier response is back (or arriving right now); enqueue in state_cache_next and retry through the cache
  val dropLowVpn = entries.map(a => dropL3SectorBits(a.vpn))
  val dropLowVpnIn = dropL3SectorBits(io.in.bits.vpn)
  val dup_vec = state.indices.map(i =>
    io.in.bits.l3.valid && (dropLowVpnIn === dropLowVpn(i))
  )
  val dup_vec_mem = dup_vec.zip(is_mems).map{case (d, m) => d && m} // an earlier request is still in state_mem_req
  val dup_vec_wait = dup_vec.zip(is_waiting).map{case (d, w) => d && w} // an earlier request is in state_mem_waiting
  val dup_vec_wait_id = dup_vec_mem.zip(dup_vec_wait).map{case (a, b) => a || b} // take the wait_id from those requests
  val dup_vec_having = dup_vec.zipWithIndex.map{case (d, i) => d && (is_having(i) || is_caches_low(i) || is_cache_next(i))}
  val wait_id = ParallelMux(dup_vec_wait_id zip entries.map(_.wait_id))
  val dup_wait_resp = io.mem.resp.valid && VecInit(dup_vec_wait)(io.mem.resp.bits.id)
  val to_wait = Cat(dup_vec_mem).orR || (Cat(dup_vec_wait).orR && !dup_wait_resp)
  val to_cache = Cat(dup_vec_having).orR || dup_wait_resp

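  // state_cache_next is a one-cycle relay into state_cache_low, giving the
  // just-returned response time to be refilled into the cache before the retry.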
  for (i <- 0 until MSHRSize) {
    when (state(i) === state_cache_next) {
      state(i) := state_cache_low
    }
  }
  val enq_state = Mux(to_cache, state_cache_next, // relay one cycle to wait for refill
    Mux(to_wait, state_mem_waiting,
    Mux(io.in.bits.l3.valid, state_mem_req, state_cache_high)))
  when (io.in.fire()) {
    state(enq_ptr) := enq_state
    entries(enq_ptr).vpn := io.in.bits.vpn
    entries(enq_ptr).ppn := io.in.bits.l3.bits
    entries(enq_ptr).source := io.in.bits.source
    entries(enq_ptr).wait_id := Mux(to_wait, wait_id, enq_ptr)
  }
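  // The arbitration winner owns the memory access and records its own index,
  // so later duplicates can link to it through wait_id.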
  when (mem_arb.io.out.fire()) {
    state(mem_arb.io.chosen) := state_mem_waiting
    entries(mem_arb.io.chosen).wait_id := mem_arb.io.chosen
  }
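  // On a memory response, duplicates waiting on this id are woken to re-visit
  // the cache, while the owning entry itself moves on to state_mem_out.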
  when (io.mem.resp.fire()) {
    state.indices.foreach{i =>
      when (state(i) === state_mem_waiting &&
        io.mem.resp.bits.id === entries(i).wait_id &&
        i.U =/= entries(i).wait_id) {
        state(i) := state_cache_low
      }
    }
    state(io.mem.resp.bits.id(log2Up(MSHRSize)-1, 0)) := state_mem_out
  }
  when (io.out.fire()) {
    assert(state(mem_ptr) === state_mem_out)
    state(mem_ptr) := state_idle
  }
  when (io.cache.fire()) {
    state(cache_ptr) := state_idle
  }

  when (io.sfence.valid) {
    state.foreach(_ := state_idle)
  }

  io.in.ready := !full
  io.cache.valid := cache_arb.io.out.valid
  io.cache.bits.vpn := cache_arb.io.out.bits.vpn
  io.cache.bits.source := cache_arb.io.out.bits.source
  io.out.valid := ParallelOR(is_having).asBool()
  io.out.bits.source := entries(mem_ptr).source
  io.out.bits.vpn := entries(mem_ptr).vpn
  io.out.bits.id := mem_ptr
  io.mem.req.valid := mem_arb.io.out.valid
  io.mem.req.bits.addr := MakeAddr(mem_arb.io.out.bits.ppn, getVpnn(mem_arb.io.out.bits.vpn, 0))
  io.mem.req.bits.id := mem_arb.io.chosen
  mem_arb.io.out.ready := io.mem.req.ready
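  // The resp id is registered (RegNext), so refill_vpn names the entry whose
  // response arrived in the previous cycle, presumably when the refill happens.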
  io.mem.refill_vpn := entries(RegNext(io.mem.resp.bits.id(log2Up(MSHRSize)-1, 0))).vpn

  XSPerfAccumulate("mq_in_count", io.in.fire())
  XSPerfAccumulate("mq_in_block", io.in.valid && !io.in.ready)
  for (i <- 0 until 7) {
    XSPerfAccumulate(s"enq_state${i}", io.in.fire() && enq_state === i.U)
  }
  for (i <- 0 until (MSHRSize + 1)) {
    XSPerfAccumulate(s"util${i}", PopCount(is_emptys.map(!_)) === i.U)
    XSPerfAccumulate(s"cache_high_util${i}", PopCount(is_caches_high) === i.U)
    XSPerfAccumulate(s"cache_low_util${i}", PopCount(is_caches_low) === i.U)
    XSPerfAccumulate(s"mem_util${i}", PopCount(is_mems) === i.U)
    XSPerfAccumulate(s"waiting_util${i}", PopCount(is_waiting) === i.U)
  }
  XSPerfAccumulate("mem_count", io.mem.req.fire())
  XSPerfAccumulate("mem_cycle", PopCount(is_waiting) =/= 0.U)

  for (i <- 0 until MSHRSize) {
    TimeOutAssert(state(i) =/= state_idle, timeOutThreshold, s"missqueue entry ${i} timed out without going idle")
  }
  assert(!io.in.valid || io.in.ready, "io.in should always be ready when io.in.valid is high")
}