/***************************************************************************************
* Copyright (c) 2020-2021 Institute of Computing Technology, Chinese Academy of Sciences
* Copyright (c) 2020-2021 Peng Cheng Laboratory
*
* XiangShan is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*          http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*
* See the Mulan PSL v2 for more details.
***************************************************************************************/

package xiangshan.cache

import chipsalliance.rocketchip.config.Parameters
import chisel3._
import chisel3.util._
import utils.{HasTLDump, XSDebug, XSPerfAccumulate}
import freechips.rocketchip.tilelink.{TLArbiter, TLBundleC, TLBundleD, TLEdgeOut, TLPermissions}
import huancun.{DirtyField, DirtyKey}

class WritebackReq(implicit p: Parameters) extends DCacheBundle {
  val addr = UInt(PAddrBits.W)
  val param = UInt(TLPermissions.cWidth.W)
  val voluntary = Bool()
  val hasData = Bool()
  val dirty = Bool()
  val data = UInt((cfg.blockBytes * 8).W)

  def dump() = {
    XSDebug("WritebackReq addr: %x param: %d voluntary: %b hasData: %b data: %x\n",
      addr, param, voluntary, hasData, data)
  }
}

class WritebackEntry(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule with HasTLDump
{
  val io = IO(new Bundle {
    val id = Input(UInt())

    val req = Flipped(DecoupledIO(new WritebackReq))
    val mem_release = DecoupledIO(new TLBundleC(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))

    val block_addr = Output(Valid(UInt()))
  })

  val s_invalid :: s_release_req :: s_release_resp :: Nil = Enum(3)
  val state = RegInit(s_invalid)

  // internal regs
  // remaining beats
  val remain = RegInit(0.U(refillCycles.W))
  val remain_set = WireInit(0.U(refillCycles.W))
  val remain_clr = WireInit(0.U(refillCycles.W))
  remain := (remain | remain_set) & ~remain_clr

  val busy = remain.orR

  val req = Reg(new WritebackReq)

  // assign default values to output signals
  io.req.ready := false.B
  io.mem_release.valid := false.B
  io.mem_release.bits := DontCare
  io.mem_grant.ready := false.B
  io.block_addr.valid := state =/= s_invalid
  io.block_addr.bits := req.addr

  when (state =/= s_invalid) {
    XSDebug("WritebackEntry: %d state: %d block_addr: %x\n", io.id, state, io.block_addr.bits)
  }

  // --------------------------------------------------------------------------------
  // s_invalid: receive requests
  // new req entering
  io.req.ready := state === s_invalid
  when (io.req.fire()) {
    assert (remain === 0.U)
    remain_set := Mux(io.req.bits.hasData, ~0.U(refillCycles.W), 1.U(refillCycles.W))
    req := io.req.bits
    state := s_release_req
  }

  // --------------------------------------------------------------------------------
  // while there are beats remaining to be sent, we keep sending
  // which beat to send in this cycle?
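  // 'remain' keeps one bit per pending beat; PriorityEncoder picks the lowest
  // pending beat, and its bit is cleared once channel C accepts that beat.
  // A hypothetical trace, assuming refillCycles = 4 and a request with data:
  //   remain = b1111 -> send beat 0
  //   remain = b1110 -> send beat 1
  //   remain = b1100 -> send beat 2
  //   remain = b1000 -> send beat 3, then the entry goes idle
  //   (or waits in s_release_resp for the ReleaseAck if the request was voluntary)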
  val beat = PriorityEncoder(remain)

  val beat_data = Wire(Vec(refillCycles, UInt(beatBits.W)))
  for (i <- 0 until refillCycles) {
    beat_data(i) := req.data((i + 1) * beatBits - 1, i * beatBits)
  }

  val probeResponse = edge.ProbeAck(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = log2Ceil(cfg.blockBytes).U,
    reportPermissions = req.param
  )

  val probeResponseData = edge.ProbeAck(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = log2Ceil(cfg.blockBytes).U,
    reportPermissions = req.param,
    data = beat_data(beat)
  )

  val voluntaryRelease = edge.Release(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = log2Ceil(cfg.blockBytes).U,
    shrinkPermissions = req.param
  )._2

  val voluntaryReleaseData = edge.Release(
    fromSource = io.id,
    toAddress = req.addr,
    lgSize = log2Ceil(cfg.blockBytes).U,
    shrinkPermissions = req.param,
    data = beat_data(beat)
  )._2

  voluntaryReleaseData.echo.lift(DirtyKey).foreach(_ := req.dirty)
  when (busy) {
    assert(!req.voluntary || req.hasData)
  }

  io.mem_release.valid := busy
  io.mem_release.bits := Mux(req.voluntary,
    Mux(req.hasData, voluntaryReleaseData, voluntaryRelease),
    Mux(req.hasData, probeResponseData, probeResponse))

  when (io.mem_release.fire()) { remain_clr := PriorityEncoderOH(remain) }

  val (_, _, release_done, _) = edge.count(io.mem_release)

  when (state === s_release_req && release_done) {
    state := Mux(req.voluntary, s_release_resp, s_invalid)
  }

  // --------------------------------------------------------------------------------
  // receive ReleaseAck for Releases
  when (state === s_release_resp) {
    io.mem_grant.ready := true.B
    when (io.mem_grant.fire()) {
      state := s_invalid
    }
  }

  // performance counters
  XSPerfAccumulate("wb_req", io.req.fire())
  XSPerfAccumulate("wb_release", state === s_release_req && release_done && req.voluntary)
  XSPerfAccumulate("wb_probe_resp", state === s_release_req && release_done && !req.voluntary)
  XSPerfAccumulate("penalty_blocked_by_channel_C", io.mem_release.valid && !io.mem_release.ready)
  XSPerfAccumulate("penalty_waiting_for_channel_D", io.mem_grant.ready && !io.mem_grant.valid && state === s_release_resp)
}

class WritebackQueue(edge: TLEdgeOut)(implicit p: Parameters) extends DCacheModule with HasTLDump
{
  val io = IO(new Bundle {
    val req = Flipped(DecoupledIO(new WritebackReq))
    val mem_release = DecoupledIO(new TLBundleC(edge.bundle))
    val mem_grant = Flipped(DecoupledIO(new TLBundleD(edge.bundle)))

    val miss_req = Flipped(Valid(UInt()))
    val block_miss_req = Output(Bool())
  })

  // allocate a free entry for incoming request
  val primary_ready = Wire(Vec(cfg.nReleaseEntries, Bool()))
  val allocate = primary_ready.asUInt.orR
  val alloc_idx = PriorityEncoder(primary_ready)

  val req = io.req
  val block_conflict = Wire(Bool())
  req.ready := allocate && !block_conflict

  // assign default values to output signals
  io.mem_release.valid := false.B
  io.mem_release.bits := DontCare
  io.mem_grant.ready := false.B

  val entries = (0 until cfg.nReleaseEntries) map { i =>
    val entry = Module(new WritebackEntry(edge))

    entry.io.id := i.U

    // entry req
    entry.io.req.valid := (i.U === alloc_idx) && allocate && req.valid && !block_conflict
    primary_ready(i) := entry.io.req.ready
    entry.io.req.bits := req.bits
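
    // entry grant: route each D-channel beat (a ReleaseAck) to the entry whose
    // index matches the TileLink source ID of the response, and forward only
    // that entry's ready signal back to the channel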
    entry.io.mem_grant.valid := (i.U === io.mem_grant.bits.source) && io.mem_grant.valid
    entry.io.mem_grant.bits := io.mem_grant.bits
    when (i.U === io.mem_grant.bits.source) {
      io.mem_grant.ready := entry.io.mem_grant.ready
    }

    entry
  }

  block_conflict := VecInit(entries.map(e => e.io.block_addr.valid && e.io.block_addr.bits === io.req.bits.addr)).asUInt.orR
  val miss_req_conflict = VecInit(entries.map(e => e.io.block_addr.valid && e.io.block_addr.bits === io.miss_req.bits)).asUInt.orR
  io.block_miss_req := io.miss_req.valid && miss_req_conflict

  TLArbiter.robin(edge, io.mem_release, entries.map(_.io.mem_release):_*)

  // sanity check
  // print all input/output requests for debugging purposes
  // print req
  when (io.req.fire()) {
    io.req.bits.dump()
  }

  when (io.mem_release.fire()) {
    io.mem_release.bits.dump
  }

  when (io.mem_grant.fire()) {
    io.mem_grant.bits.dump
  }

  when (io.miss_req.valid) {
    XSDebug("miss_req: addr: %x\n", io.miss_req.bits)
  }

  when (io.block_miss_req) {
    XSDebug("block_miss_req\n")
  }

  // performance counters
  XSPerfAccumulate("wb_req", io.req.fire())
}
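
// A minimal wiring sketch for reference (illustrative only: `missQueue`, `bus`,
// and their port names are assumed here, not part of this file):
//
//   val wbq = Module(new WritebackQueue(edge))
//   wbq.io.req <> missQueue.io.wb_req             // hypothetical producer of WritebackReq
//   bus.c <> wbq.io.mem_release                   // TileLink C channel (Release / ProbeAck)
//   wbq.io.mem_grant <> bus.d                     // TileLink D channel (ReleaseAck)
//   wbq.io.miss_req.valid := missQueue.io.req.valid      // hypothetical miss lookup
//   wbq.io.miss_req.bits := missQueue.io.req.bits.addr
//   missQueue.io.block := wbq.io.block_miss_req   // stall a miss whose address is
//                                                 // still being written back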