// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// Copyright (C) 2020 Red Hat, Inc. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

//! A wrapper over an `ArcSwap<GuestMemory>` struct to support RCU-style mutability.
//!
//! With the `backend-atomic` feature enabled, simply replacing `GuestMemoryMmap`
//! with `GuestMemoryAtomic<GuestMemoryMmap>` will enable support for mutable memory maps.
//! To support mutable memory maps, devices will also need to use
//! `GuestAddressSpace::memory()` to gain temporary access to guest memory.

extern crate arc_swap;

use arc_swap::{ArcSwap, Guard};
use std::ops::Deref;
use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError};

use crate::{GuestAddressSpace, GuestMemory};

/// A fast implementation of a mutable collection of memory regions.
///
/// This implementation uses `ArcSwap` to provide RCU-like snapshotting of the memory map:
/// every update of the memory map creates a completely new `GuestMemory` object, and
/// readers will not be blocked because the copies they retrieved will be collected once
/// no one can access them anymore. Under the assumption that updates to the memory map
/// are rare, this allows a very efficient implementation of the `memory()` method.
#[derive(Clone, Debug)]
pub struct GuestMemoryAtomic<M: GuestMemory> {
    // GuestAddressSpace<M>, which we want to implement, is basically a drop-in
    // replacement for &M. Therefore, we need to pass to devices the `GuestMemoryAtomic`
    // rather than a reference to it. To obtain this effect we wrap the actual fields
    // of GuestMemoryAtomic with an Arc, and derive the Clone trait. See the
    // documentation for GuestAddressSpace for an example.
    inner: Arc<(ArcSwap<M>, Mutex<()>)>,
}

impl<M: GuestMemory> From<Arc<M>> for GuestMemoryAtomic<M> {
    /// Creates a new `GuestMemoryAtomic` object whose initial contents come from
    /// the `map` reference-counted `GuestMemory`.
    fn from(map: Arc<M>) -> Self {
        let inner = (ArcSwap::new(map), Mutex::new(()));
        GuestMemoryAtomic {
            inner: Arc::new(inner),
        }
    }
}

impl<M: GuestMemory> GuestMemoryAtomic<M> {
    /// Creates a new `GuestMemoryAtomic` object whose initial contents come from
    /// the `map` `GuestMemory`.
    pub fn new(map: M) -> Self {
        Arc::new(map).into()
    }

    fn load(&self) -> Guard<Arc<M>> {
        self.inner.0.load()
    }

    /// Acquires the update mutex for the `GuestMemoryAtomic`, blocking the current
    /// thread until it is able to do so. The returned RAII guard allows for
    /// scoped unlock of the mutex (that is, the mutex will be unlocked when
    /// the guard goes out of scope), and optionally also for replacing the
    /// contents of the `GuestMemoryAtomic` when the lock is dropped.
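    ///
    /// # Examples
    ///
    /// A minimal sketch of the update flow, assuming the `backend-mmap` and
    /// `backend-atomic` features are enabled (the `GuestMemoryMmap::from_ranges`
    /// constructor below is the one used by the tests in this module):
    ///
    /// ```ignore
    /// use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
    ///
    /// let initial = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
    /// let atomic = GuestMemoryAtomic::new(initial);
    ///
    /// // The exclusive guard serializes writers; readers are never blocked and
    /// // keep using the snapshots they already obtained via `memory()`.
    /// let guard = atomic.lock().unwrap();
    /// let updated = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x2000)]).unwrap();
    /// guard.replace(updated); // consumes the guard and releases the lock
    /// ```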
    pub fn lock(&self) -> LockResult<GuestMemoryExclusiveGuard<M>> {
        match self.inner.1.lock() {
            Ok(guard) => Ok(GuestMemoryExclusiveGuard {
                parent: self,
                _guard: guard,
            }),
            Err(err) => Err(PoisonError::new(GuestMemoryExclusiveGuard {
                parent: self,
                _guard: err.into_inner(),
            })),
        }
    }
}

impl<M: GuestMemory> GuestAddressSpace for GuestMemoryAtomic<M> {
    type T = GuestMemoryLoadGuard<M>;
    type M = M;

    fn memory(&self) -> Self::T {
        GuestMemoryLoadGuard { guard: self.load() }
    }
}

/// A guard that provides temporary access to a `GuestMemoryAtomic`. This
/// object is returned from the `memory()` method. It dereferences to
/// a snapshot of the `GuestMemory`, so it can be used transparently to
/// access memory.
#[derive(Debug)]
pub struct GuestMemoryLoadGuard<M: GuestMemory> {
    guard: Guard<Arc<M>>,
}

impl<M: GuestMemory> GuestMemoryLoadGuard<M> {
    /// Makes a clone of the held pointer and returns it. This is more
    /// expensive than just using the snapshot, but it allows the snapshot
    /// to be held outside the scope of the guard. It also allows
    /// writers to proceed, so it is recommended if the reference must
    /// be held for a long time (including for caching purposes).
    pub fn into_inner(self) -> Arc<M> {
        Guard::into_inner(self.guard)
    }
}

impl<M: GuestMemory> Clone for GuestMemoryLoadGuard<M> {
    fn clone(&self) -> Self {
        GuestMemoryLoadGuard {
            guard: Guard::from_inner(Arc::clone(&*self.guard)),
        }
    }
}

impl<M: GuestMemory> Deref for GuestMemoryLoadGuard<M> {
    type Target = M;

    fn deref(&self) -> &Self::Target {
        &self.guard
    }
}

/// An RAII implementation of a "scoped lock" for `GuestMemoryAtomic`. When
/// this structure is dropped (falls out of scope) the lock will be unlocked,
/// possibly after updating the memory map represented by the
/// `GuestMemoryAtomic` that created the guard.
pub struct GuestMemoryExclusiveGuard<'a, M: GuestMemory> {
    parent: &'a GuestMemoryAtomic<M>,
    _guard: MutexGuard<'a, ()>,
}

impl<M: GuestMemory> GuestMemoryExclusiveGuard<'_, M> {
    /// Replaces the memory map in the `GuestMemoryAtomic` that created the guard
    /// with the new memory map, `map`. The lock is then dropped since this
    /// method consumes the guard.
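    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming the `backend-mmap` and `backend-atomic` features:
    /// readers holding a `GuestMemoryLoadGuard` keep the old map, while calls to
    /// `memory()` made after the swap observe the new one.
    ///
    /// ```ignore
    /// use vm_memory::{
    ///     GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
    /// };
    ///
    /// let atomic = GuestMemoryAtomic::new(
    ///     GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap(),
    /// );
    /// let old_snapshot = atomic.memory(); // snapshot of the current map
    ///
    /// let new_map = GuestMemoryMmap::<()>::from_ranges(&[
    ///     (GuestAddress(0), 0x1000),
    ///     (GuestAddress(0x10_0000), 0x1000),
    /// ])
    /// .unwrap();
    /// atomic.lock().unwrap().replace(new_map);
    ///
    /// assert_eq!(old_snapshot.num_regions(), 1); // existing guards still see the old map
    /// assert_eq!(atomic.memory().num_regions(), 2); // new readers see the update
    /// ```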
    pub fn replace(self, map: M) {
        self.parent.inner.0.store(Arc::new(map))
    }
}

#[cfg(test)]
#[cfg(feature = "backend-mmap")]
mod tests {
    use super::*;
    use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MmapRegion};

    type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
    type GuestRegionMmap = crate::GuestRegionMmap<()>;
    type GuestMemoryMmapAtomic = GuestMemoryAtomic<GuestMemoryMmap>;

    #[test]
    fn test_atomic_memory() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let mut iterated_regions = Vec::new();
        let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
        let gm = GuestMemoryMmapAtomic::new(gmm);
        let mem = gm.memory();

        for region in mem.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }

        for region in mem.iter() {
            iterated_regions.push((region.start_addr(), region.len() as usize));
        }
        assert_eq!(regions, iterated_regions);
        assert_eq!(mem.num_regions(), 2);
        assert!(mem.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem.find_region(GuestAddress(0x10000)).is_none());

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().copied()));

        let mem2 = mem.into_inner();
        for region in mem2.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }
        assert_eq!(mem2.num_regions(), 2);
        assert!(mem2.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem2.find_region(GuestAddress(0x10000)).is_none());

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().copied()));

        let mem3 = mem2.memory();
        for region in mem3.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }
        assert_eq!(mem3.num_regions(), 2);
        assert!(mem3.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem3.find_region(GuestAddress(0x10000)).is_none());
    }

    #[test]
    fn test_clone_guard() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let gmm = GuestMemoryMmap::from_ranges(&regions).unwrap();
        let gm = GuestMemoryMmapAtomic::new(gmm);
        let mem = {
            let guard1 = gm.memory();
            Clone::clone(&guard1)
        };
        assert_eq!(mem.num_regions(), 2);
    }

    #[test]
    fn test_atomic_hotplug() {
        let region_size = 0x1000;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let mut gmm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
        let gm: GuestMemoryAtomic<_> = gmm.clone().into();
        let mem_orig = gm.memory();
        assert_eq!(mem_orig.num_regions(), 2);

        {
            let guard = gm.lock().unwrap();
            let new_gmm = Arc::make_mut(&mut gmm);
            let mmap = Arc::new(
                GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x8000))
                    .unwrap(),
            );
            let new_gmm = new_gmm.insert_region(mmap).unwrap();
            let mmap = Arc::new(
                GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0x4000))
                    .unwrap(),
            );
            let new_gmm = new_gmm.insert_region(mmap).unwrap();
            let mmap = Arc::new(
                GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000))
                    .unwrap(),
            );
            let new_gmm = new_gmm.insert_region(mmap).unwrap();
            let mmap = Arc::new(
                GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0xc000))
                    .unwrap(),
            );
            new_gmm.insert_region(mmap).unwrap_err();
            guard.replace(new_gmm);
        }

        assert_eq!(mem_orig.num_regions(), 2);
        let mem = gm.memory();
        assert_eq!(mem.num_regions(), 5);
    }
}